code (string, 31-1.05M chars) | apis (list) | extract_api (string, 97-1.91M chars)
---|---|---|
#! /usr/bin/env python3
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import glob
import numpy as np
from absl import app, flags, logging
from yacos.info import compy as R
from yacos.info.compy.extractors import LLVMDriver
def execute(argv):
    """Extract a graph representation."""
    del argv
    FLAGS = flags.FLAGS
    # Instantiate the LLVM driver.
    driver = LLVMDriver([])
    # Instantiate the builder.
    builder = R.LLVMIR2VecBuilder(driver)
    # Verify dataset directory.
    if not os.path.isdir(FLAGS.dataset_directory):
        logging.error('Dataset directory {} does not exist.'.format(
            FLAGS.dataset_directory)
        )
        sys.exit(1)
    folders = [
        os.path.join(FLAGS.dataset_directory, subdir)
        for subdir in os.listdir(FLAGS.dataset_directory)
        if os.path.isdir(os.path.join(FLAGS.dataset_directory, subdir))
    ]
    idx = FLAGS.dataset_directory.rfind('/')
    last_folder = FLAGS.dataset_directory[idx+1:]
    # Load data from all folders.
    for folder in folders:
        # Create the output directory.
        outdir = os.path.join(folder.replace(last_folder,
                                    '{}_ir2vec'.format(last_folder)))
        os.makedirs(outdir, exist_ok=True)
        # Extract "ir2vec" from each .ll file.
        sources = glob.glob('{}/*.ll'.format(folder))
        for source in sources:
            try:
                extractionInfo = builder.ir_to_info(source)
            except Exception:
                logging.error('Error {}.'.format(source))
                continue
            filename = source.replace(folder, outdir)
            filename = filename[:-3]
            np.savez_compressed(filename,
                                values=extractionInfo.moduleInfo.ir2vec)


# Execute
if __name__ == '__main__':
    # app
    flags.DEFINE_string('dataset_directory',
                        None,
                        'Dataset directory')
    flags.mark_flag_as_required('dataset_directory')
    app.run(execute)
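# Example invocation (illustrative; the script name and dataset path are
# placeholders -- the dataset directory must contain one sub-folder per class,
# each holding LLVM IR ".ll" files):
#
#   python3 extract_ir2vec.py --dataset_directory=/path/to/llvm_ir_dataset
#
# For every "<dataset>/<class>/<name>.ll" an embedding is written to the
# sibling tree "<dataset>_ir2vec/<class>/<name>.npz" and can be reloaded
# with np.load(path)['values'].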
|
[
"os.makedirs",
"os.path.isdir",
"absl.flags.mark_flag_as_required",
"yacos.info.compy.LLVMIR2VecBuilder",
"absl.flags.DEFINE_string",
"numpy.savez_compressed",
"absl.app.run",
"yacos.info.compy.extractors.LLVMDriver",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((1023, 1037), 'yacos.info.compy.extractors.LLVMDriver', 'LLVMDriver', (['[]'], {}), '([])\n', (1033, 1037), False, 'from yacos.info.compy.extractors import LLVMDriver\n'), ((1083, 1110), 'yacos.info.compy.LLVMIR2VecBuilder', 'R.LLVMIR2VecBuilder', (['driver'], {}), '(driver)\n', (1102, 1110), True, 'from yacos.info import compy as R\n'), ((2511, 2578), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset_directory"""', 'None', '"""Dataset directory"""'], {}), "('dataset_directory', None, 'Dataset directory')\n", (2530, 2578), False, 'from absl import app, flags, logging\n'), ((2631, 2679), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""dataset_directory"""'], {}), "('dataset_directory')\n", (2658, 2679), False, 'from absl import app, flags, logging\n'), ((2685, 2701), 'absl.app.run', 'app.run', (['execute'], {}), '(execute)\n', (2692, 2701), False, 'from absl import app, flags, logging\n'), ((1154, 1192), 'os.path.isdir', 'os.path.isdir', (['FLAGS.dataset_directory'], {}), '(FLAGS.dataset_directory)\n', (1167, 1192), False, 'import os\n'), ((1318, 1329), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1326, 1329), False, 'import sys\n'), ((1363, 1408), 'os.path.join', 'os.path.join', (['FLAGS.dataset_directory', 'subdir'], {}), '(FLAGS.dataset_directory, subdir)\n', (1375, 1408), False, 'import os\n'), ((1898, 1932), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (1909, 1932), False, 'import os\n'), ((1439, 1474), 'os.listdir', 'os.listdir', (['FLAGS.dataset_directory'], {}), '(FLAGS.dataset_directory)\n', (1449, 1474), False, 'import os\n'), ((2355, 2425), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'values': 'extractionInfo.moduleInfo.ir2vec'}), '(filename, values=extractionInfo.moduleInfo.ir2vec)\n', (2374, 2425), True, 'import numpy as np\n'), ((1508, 1553), 'os.path.join', 'os.path.join', (['FLAGS.dataset_directory', 'subdir'], {}), '(FLAGS.dataset_directory, subdir)\n', (1520, 1553), False, 'import os\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author <NAME>
# E-mail <EMAIL>
# License MIT
# Created 03/11/2016
# Updated 11/12/2016
# Version 1.0.0
#
"""
Description of classify.py
===========================
Save the train & test sets to files, read them back, then for each
classifier in a list: train and test it, gather precision/recall/f-measure
results, and print the best classifier along with its results.
:Example:
source activate py27
ipython
run classify.py
Only for 100 percent precision
run classify.py --train /media/sf_DATA/Datasets/Simbals/yann/train.csv --test /media/sf_DATA/Datasets/Simbals/yann/test.csv
notes
RandomForest complexity
https://www.quora.com/What-is-in-general-time-complexity-of-random-forest-What-are-the-important-parameters-that-affect-this-complexity
n instances and m attributes
computational cost of building a tree is O(mn log n).
RandomForest done in 135939ms (3mn) for 13 attributes and 192 instances
mn log n = 13*192*math.log(192) = 13122 ( 135939ms)
mn log n = 39*186*math.log(186) = 37907 (~ms)
To list the available parameters:
print(clf.get_params().keys())
..todo::
Add
AdaBoostClassifier
BaggingClassifier
BernoulliNB
CalibratedClassifierCV
DPGMM
http://scikit-learn.org/stable/modules/generated/sklearn.mixture.DPGMM.html
Deprecated since version 0.18: This class will be removed in 0.20.
Use sklearn.mixture.BayesianGaussianMixture with parameter
weight_concentration_prior_type='dirichlet_process' instead.
DecisionTreeClassifier
ExtraTreeClassifier
ExtraTreesClassifier
GMM
GaussianNB
GradientBoostingClassifier
KNeighborsClassifier
LDA
LabelPropagation
LabelSpreading
LinearDiscriminantAnalysis
LogisticRegression
LogisticRegressionCV
MultinomialNB
NuSVC
QDA
QuadraticDiscriminantAnalysis
RandomForestClassifier
SGDClassifier
SVC
VBGMM
_ConstantPredictor
"""
import os
import sys
import time
import json
import utils
import joblib
import argparse
import webbrowser
import multiprocessing
import numpy as np
import matplotlib.pyplot as plt
from statistics import stdev
from functools import partial
from sklearn import metrics
from sklearn.model_selection import KFold, cross_val_score, StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score
from sklearn.utils.testing import all_estimators
from sklearn import linear_model
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve, average_precision_score
def list_clf():
"""
..todo::
Do the same for:
class_weight
predict
predict_log_proba
"""
estimators = all_estimators()
for name, class_ in estimators:
if hasattr(class_, 'predict_proba'):
print(name)
def plot_clf(indir="res/"):
indir = utils.abs_path_dir(indir) + "/"
algos = []
measure = []
with open(indir + "global.csv", "r") as filep:
for line in filep:
line = line.split(",")
algos.append(line[0])
measure.append(tuple(map(float, line[1:4])))
n_groups = 3
fig, ax = plt.subplots(figsize=(10, 6))
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.4
error_config = {'ecolor': '0.3'}
color = utils.rand_color(len(algos))
rects = {}
offset = 0.15
for ind, algo in enumerate(algos):
print(ind)
print(tuple(measure[ind]))
rects[ind] = plt.bar(index + bar_width*ind + offset, tuple(measure[ind]), bar_width,
alpha=opacity,
color=color[ind],
label=algo)
plt.ylabel('Scores (in %)')
plt.xticks(index + bar_width*ind + offset, ('Precision', 'Recall', 'F-Measure'))
plt.legend()
plt.ylim(0, 1)
# spines & axis
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
art = []
lgd = ax.legend(loc=9, bbox_to_anchor=(1.1, 1.), frameon=False)
# lgd = pylab.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2)
art.append(lgd)
# ax.legend()
plt.tight_layout()
img_name = "global.png"
plt.savefig(img_name, dpi=200, additional_artists=art, bbox_inches="tight")
# webbrowser.open(img_name)
# plt.show()
def read_file(filename):
"""Description of read_file
train/test example line:
filename,feat1,feat2,...,featn,tag
"""
filename = utils.abs_path_file(filename)
groundtruths = []
features = []
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
groundtruths.append(line[-1][:-1])
features.append([float(i) for i in line[1:-1]])
return features, groundtruths
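# Illustrative example (hypothetical file contents, not part of the original script):
# a train/test CSV line "ISRC123,0.12,0.34,0.56,s\n" is parsed by read_file() into
#   features == [[0.12, 0.34, 0.56]]   (first column and trailing tag dropped)
#   groundtruths == ["s"]              (trailing newline stripped)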
def read_preds(filename):
"""Description of read_file
ex file:
ISRC,tag
"""
filename = utils.abs_path_file(filename)
isrcs = {}
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
# print(line)
isrcs[line[0]] = float(line[1])
# isrcs[line[0]] = 1.0-float(line[1])
return isrcs
def read_item_tag(filename):
"""Description of read_file
example line:
filename,tag
"""
filename = utils.abs_path_file(filename)
groundtruths = {}
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
groundtruths[line[0]] = line[1][:-1]
return groundtruths
def precision_100percent(train, test):
"""Description of precision_100percent
..todo::
1 Find best clf with default param
2 vary param of best clf and find best param
3 use best param and best clf to find recall for 100 percent precision
"""
utils.print_success("Find Recall for best Precision for each tag")
train = utils.abs_path_file(train)
test = utils.abs_path_file(test)
train_features, train_groundtruths = read_file(train)
test_features, test_groundtruths = read_file(test)
classifiers = {
# "RandomForest": RandomForestClassifier(),#n_estimators=5
"DecisionTree":DecisionTreeClassifier()#,#max_depth=10
# "SVM":SVC(kernel="linear", C=0.0205),
# "ExtraTreesClassifier":ExtraTreesClassifier(n_estimators=5, criterion="entropy", max_features="log2", max_depth=9),
# "LogisticRegression":LogisticRegression()
}
tags = list(set(test_groundtruths))
nb_tag = len(tags)
step = 0.01
# for index, tag in enumerate(["i"]):
for index, tag in enumerate(tags):
utils.print_success("Tag " + tag)
max_precision = 0
max_recall = 0
max_f_measure = 0
max_clf = ""
max_weight = 0
for key in classifiers:
clf = classifiers[key]
# for weight in np.arange(0., 0.01, 0.000001):
# for weight in np.arange(step, 1-step, step):
for weight in np.arange(0.0, 1.0, step):
print("Classifier " + key + " & Weight " + str(weight))
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
clf.set_params(class_weight={"i":weight, "s":1-weight})
clf.fit(train_features, train_groundtruths)
predictions = clf.predict(test_features)
precision = precision_score(test_groundtruths, predictions, average=None)[index]
if precision >= max_precision:
recall = recall_score(test_groundtruths, predictions, average=None)[index]
# if recall > max_recall:
max_precision = precision
max_recall = recall
max_f_measure = f1_score(test_groundtruths, predictions, average=None)[index]
max_weight = weight
max_clf = key
sys.stdout.write("\033[K")
utils.print_info("\tClassifier " + str(max_clf))
utils.print_info("\tPrecision " + str(max_precision))
utils.print_info("\tRecall " + str(max_recall))
utils.print_info("\tF-Measure " + str(max_f_measure))
utils.print_info("\tWeight " + str(max_weight))
def train_test(train, test, res_dir="res/", disp=True, outfilename=None):
"""Description of compare
compare multiple classifier and display the best one
"""
utils.print_success("Comparison of differents classifiers")
if train is not None and test is not None:
train_features = []
test_features = []
train_groundtruths = []
test_groundtruths = []
for elem in train:
train_groundtruths.append(elem)
train_features.append(train[elem])
for elem in test:
test_groundtruths.append(elem)
test_features.append(test[elem])
else:
utils.print_error("No valid data provided.")
res_dir = utils.create_dir(res_dir)
classifiers = {
# "RandomForest": RandomForestClassifier(n_estimators=5),
"KNeighbors":KNeighborsClassifier(1),
# "GaussianProcess":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
# "DecisionTree":DecisionTreeClassifier(max_depth=5),
# "MLP":MLPClassifier(),
# "AdaBoost":AdaBoostClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "SVM":SVC(kernel="linear", C=0.025),
# "GradientBoosting":GradientBoostingClassifier(),
# "ExtraTrees":ExtraTreesClassifier(),
# "LogisticRegression":LogisticRegression(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
for key in classifiers:
utils.print_success(key)
clf = classifiers[key]
utils.print_info("\tFit")
clf.fit(train_features, train_groundtruths)
utils.print_info("\tPredict")
predictions = clf.predict(test_features)
print("Precision weighted\t" + str(precision_score(test_groundtruths, predictions, average='weighted')))
print("Recall weighted\t" + str(recall_score(test_groundtruths, predictions, average='weighted')))
print("F1 weighted\t" + str(f1_score(test_groundtruths, predictions, average='weighted')))
# print("Precision weighted\t" + str(precision_score(test_groundtruths, predictions, average=None)))
# print("Recall weighted\t" + str(recall_score(test_groundtruths, predictions, average=None)))
# print("f1 weighted\t" + str(f1_score(test_groundtruths, predictions, average=None)))
def classify(train=None, test=None, data=None, res_dir="res/", disp=True, outfilename=None):
"""Description of compare
compare multiple classifier and display the best one
"""
utils.print_success("Comparison of differents classifiers")
if data is not None:
train_features = data["train_features"]
train_groundtruths = data["train_groundtruths"]
test_features = data["test_features"]
test_groundtruths = data["test_groundtruths"]
else:
train = utils.abs_path_file(train)
test = utils.abs_path_file(test)
train_features, train_groundtruths = read_file(train)
test_features, test_groundtruths = read_file(test)
if not utils.create_dir(res_dir):
res_dir = utils.abs_path_dir(res_dir)
classifiers = {
"RandomForest": RandomForestClassifier(n_jobs=-1)
# "RandomForest": RandomForestClassifier(n_estimators=5),
# "KNeighbors":KNeighborsClassifier(3),
# "GaussianProcess":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
# "DecisionTree":DecisionTreeClassifier(max_depth=5),
# "MLP":MLPClassifier(),
# "AdaBoost":AdaBoostClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "SVM":SVC(kernel="linear", C=0.025),
# "GradientBoosting":GradientBoostingClassifier(),
# "ExtraTrees":ExtraTreesClassifier(),
# "LogisticRegression":LogisticRegression(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
for key in classifiers:
utils.print_success(key)
clf = classifiers[key]
utils.print_info("\tFit")
clf.fit(train_features, train_groundtruths)
utils.print_info("\tPredict")
predictions = clf.predict(test_features)
if outfilename is not None:
with open(outfilename, "w") as filep:
for gt, pred in zip(test_groundtruths, predictions):
filep.write(gt + "," + pred + "\n")
# Global
data = [key]
data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))
data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))
data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))
data = ",".join(data)
if disp:
print(data)
else:
with open(res_dir + "global.csv", "a") as filep:
filep.write(data + ",\n")
# Local
for index, tag in enumerate(list(set(train_groundtruths))):
precision = precision_score(test_groundtruths, predictions, average=None)
recall = recall_score(test_groundtruths, predictions, average=None)
f1 = f1_score(test_groundtruths, predictions, average=None)
line = key + "," + str(precision[index]) + "," + str(recall[index]) + "," + str(f1[index])
if disp:
print(line)
else:
with open(res_dir + "tag_" + tag + ".csv", "a") as filep:
filep.write(line + ",\n")
return predictions
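# Example calls (paths and arrays are placeholders):
#   classify(train="data/train.csv", test="data/test.csv", res_dir="res/", disp=False)
#   classify(data={"train_features": Xtr, "train_groundtruths": ytr,
#                  "test_features": Xte, "test_groundtruths": yte})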
def read_train_files(indir, separator=" "):
"""Description of read_train_files
Gather local features and GT from every individual train song
"""
utils.print_success("Reading multiple train files")
indir = utils.abs_path_dir(indir) + "/"
groundtruths = []
features = []
included_extenstions = ["csv"]
filenames = [fn for fn in os.listdir(indir)
if any(fn.endswith(ext) for ext in included_extenstions)]
for index, filename in enumerate(filenames):
print(str(index + 1) + "/" + str(len(filenames)) + " " + filename)
sys.stdout.write("\033[F") # Cursor up one line
sys.stdout.write("\033[K") # Clear line
with open(indir + filename, "r") as filep:
for row in filep:
line = row.split(separator)
features.append([float(i) for i in line[:-1]])
groundtruths.append(line[-1][:-1])
sys.stdout.write("\033[K") # Clear line
return features, groundtruths
def read_train_file(filename):
"""
Read ONE train file
"""
groundtruths = []
features = []
filename = utils.abs_path_file(filename)
with open(filename, "r") as filep:
for line in filep:
line = line.split(",")
groundtruths.append(line[-1][:-1])
features.append(line[1:-1])
return features, groundtruths
def create_model(clf_name, features, groundtruths, outdir, classifiers):
begin = int(round(time.time() * 1000))
utils.print_success("Starting " + clf_name)
clf_dir = outdir + clf_name + "/"
utils.create_dir(clf_dir)
clf = classifiers[clf_name]
clf.fit(features, groundtruths)
joblib.dump(clf, clf_dir + clf_name + ".pkl")
utils.print_info(clf_name + " done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
def create_models(outdir, train_features=None, train_groundtruths=None, train_file=None, train_dir=None, separator=" ", classifiers=None):
"""Description of create_models
Generate models from the train data for different classifiers,
in order to test them later
..notes::
train_file must be formatted like:
item_name_1,feat1,feat2,...,featN,tag_or_class
item_name_2,feat1,feat2,...,featN,tag_or_class
...
item_name_N,feat1,feat2,...,featN,tag_or_class
..todo::
Manage the case where train features and groundtruths are provided instead of a train_file
Find out why the commented-out classifiers cannot be used
For train dir = /media/sf_github/yann/train/
20h04m49s Creating models
20h04m49s Reading multiple train files
20h05m04s Starting SVM
20h05m07s Starting RandomForest
20h05m11s Starting GradientBoosting
20h05m16s Starting DecisionTree
20h05m22s Starting ExtraTrees
20h05m27s Starting AdaBoost
20h05m34s Starting KNeighbors
20h05m50s KNeighbors done in 60836ms
20h06m18s ExtraTrees done in 89147ms
20h06m29s DecisionTree done in 100211ms
20h07m05s RandomForest done in 135939ms
20h08m56s AdaBoost done in 246550ms
20h13m40s GradientBoosting done in 530909ms
00h43m29s SVM done in 16719954ms
"""
utils.print_success("Creating models")
outdir = utils.abs_path_dir(outdir) + "/"
if train_file is not None:
features, groundtruths = read_train_file(train_file)
elif train_dir is not None:
features, groundtruths = read_train_files(train_dir, separator=separator)
else:
utils.print_warning("TODO Manage train feat and gts")
if classifiers is None:
classifiers = {
"RandomForest": RandomForestClassifier(),
"LogisticRegression":LogisticRegression(),
"KNeighbors":KNeighborsClassifier(),
"DecisionTree":DecisionTreeClassifier(),
"AdaBoost":AdaBoostClassifier(),
"GradientBoosting":GradientBoostingClassifier(),
"ExtraTrees":ExtraTreesClassifier(),
"SVM":SVC(kernel="linear", C=0.025, probability=True)
# "GaussianProcess":GaussianProcessClassifier(),
# "MLP":MLPClassifier(),
# "GaussianNB":GaussianNB(),
# "QDA":QuadraticDiscriminantAnalysis(),
# "LinearDiscriminantAnalysis":LinearDiscriminantAnalysis()
}
else:
if "RandomForest" in classifiers:
clf_name = "RandomForest"
begin = int(round(time.time() * 1000))
utils.print_success("Starting " + clf_name)
clf_dir = outdir + clf_name + "/"
utils.create_dir(clf_dir)
clf = RandomForestClassifier(n_jobs=-1)
# clf = RandomForestClassifier(verbose=100)
clf.fit(features, groundtruths)
joblib.dump(clf, clf_dir + clf_name + ".pkl")
utils.print_info(clf_name + " done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
# # Parallel computing
# clf = []
# for key in classifiers:
# clf.append(key)
# partial_create_model = partial(create_model, features=features, groundtruths=groundtruths, outdir=outdir, classifiers=classifiers)
# # pool = multiprocessing.Pool(4)
# pool = multiprocessing.Pool(len(classifiers))
# pool.map(partial_create_model, clf) #make our results with a map call
# pool.close() #we are not adding any more processes
# pool.join() #tell it to wait until all threads are done before going on
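# Example calls (paths are placeholders):
#   create_models(outdir="models/", train_file="train.csv")
#   create_models(outdir="models/", train_dir="train/", separator=" ")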
def read_test_file(filename):
"""
Read ONE test file with content like:
feat1 feat2 ... featN
feat1 feat2 ... featN
...
feat1 feat2 ... featN
"""
features = []
filename = utils.abs_path_file(filename)
with open(filename, "r") as filep:
for line in filep:
line = line.split(" ")
line[-1] = line[-1][:-1]
feat = []
for tmp_feat in line:
feat.append(float(tmp_feat))
features.append(feat)
return features
def column(matrix, i):
return [row[i] for row in matrix]
def test_models(models_dir, test_dir, out_dir):
models_dir = utils.abs_path_dir(models_dir) + "/"
test_dir = utils.abs_path_dir(test_dir) + "/"
utils.create_dir(out_dir)
test_files = os.listdir(test_dir)
models = os.listdir(models_dir)
for model in models:
utils.print_success(model)
pred_dir = out_dir + model + "/"
utils.create_dir(pred_dir)
clf = joblib.load(models_dir + model + "/" + model + ".pkl")
for index, test_file in enumerate(test_files):
print(str(index) + "\t" + test_file)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
test_features = read_test_file(test_dir + test_file)
predictions = clf.predict_proba(test_features)
with open(pred_dir + test_file, "w") as filep:
for pred in predictions:
filep.write(str(pred[0]) + "\n")
sys.stdout.write("\033[K")
def test_model(model, models_dir, test_dir, out_dir, test_files=None, test_file=None):
"""Description of test_model
Use one model previously fitted in order to predict_proba() or predict()
the tag for a bunch of test_files
..todo::
To reduce computation time: only process files that are in groundtruths;
if a file has already been computed, do not recompute it
"""
begin = int(round(time.time() * 1000))
utils.print_success("Testing " + model)
pred_dir = out_dir + model
clf = joblib.load(models_dir + model + "/" + model + ".pkl")
if test_files is not None:
pred_dir = pred_dir + "/"
utils.create_dir(pred_dir)
for index, test_file in enumerate(test_files):
# Check if isrc is in groundtruths to speed up computation time
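# NOTE: `groundtruths` is assumed here to be a module-level dict mapping
# ISRC -> tag loaded elsewhere; it is not defined inside this function.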
if test_file[:12] in groundtruths:
test_features = read_test_file(test_dir + test_file)
try:
predictions = clf.predict_proba(test_features)
except AttributeError:
utils.print_warning("predict_proba does not exists for " + model + "\nRegular predict function is used.")
predictions = clf.predict(test_features)
with open(pred_dir + test_file, "w") as filep:
for pred in predictions:
filep.write(str(pred[0]) + "\n")
elif test_file is not None:
pred_dir = pred_dir + "_"
test_features = []
filename = []
with open(test_file, "r") as filep:
for index, line in enumerate(filep):
line = line.split(",")
# print(str(index) + " " + line[0])
test_features.append(line[1:-1])
filename.append(line[0])
try:
predictions = clf.predict_proba(test_features)
with open(pred_dir + "predict_proba.csv", "a") as filep2:
for filen, pred in zip(filename, predictions):
filep2.write(filen + "," + str(pred[0]) + "\n")
except:
pass
predictions = clf.predict(test_features)
with open(pred_dir + "predict.csv", "a") as filep2:
for filen, pred in zip(filename, predictions):
filep2.write(filen + "," + str(pred[0]) + "\n")
else:
utils.print_error("Error in arg for test_model() function")
utils.print_info(model + " done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
def test_models_parallel(models_dir, out_dir, test_dir=None, test_file=None):
"""Description of test_models_parallel
17h16m12s DecisionTree done in 16135373ms
17h25m08s GradientBoosting done in 16671109ms
18h59m05s RandomForest done in 22307811ms
18h59m07s AdaBoost done in 22310633ms
19h18m12s ExtraTrees done in 23455779ms
"""
models_dir = utils.abs_path_dir(models_dir) + "/"
models = os.listdir(models_dir)
utils.create_dir(out_dir)
if test_dir is not None:
test_dir = utils.abs_path_dir(test_dir) + "/"
test_files = os.listdir(test_dir)
test_file = None
elif test_file is not None:
test_files = None
else:
utils.print_warning("TODO Error in arg for test_models_parallel() function")
partial_test_model = partial(test_model, models_dir=models_dir, test_dir=test_dir, out_dir=out_dir, test_files=test_files, test_file=test_file)
pool = multiprocessing.Pool(len(models))
pool.map(partial_test_model, models) #make our results with a map call
pool.close() #we are not adding any more processes
pool.join() #tell it to wait until all threads are done before going on
def cross_validation(train_filename, n_folds, outfilename):
filename = utils.abs_path_file(train_filename)
features = []
groundtruths = []
with open(filename, "r") as filep:
for line in filep:
line = line.split(",")
features.append([float(x) for x in line[1:-1]])
groundtruths.append(line[-1][:-1])
features = np.array(features)
groundtruths = np.array(groundtruths)
# Init
# if os.path.exists(outfilename):
try:
with open(outfilename, "r") as filep:
data = json.load(filep)
except:
data = {}
# else:
# data = {}
algo_name = "Method 1"
data[algo_name] = {}
data[algo_name]["uneven"] = {}
data[algo_name]["balanced"] = {}
for distribution in data[algo_name]:
data[algo_name][distribution]["precision"] = {}
data[algo_name][distribution]["recall"] = {}
data[algo_name][distribution]["f1"] = {}
for tmp in data[algo_name][distribution]:
data[algo_name][distribution][tmp]["instru"] = []
data[algo_name][distribution][tmp]["song"] = []
skf = StratifiedKFold(n_splits=n_folds)
for i in range(0, 10):
utils.print_warning("TODO for i in range")
song_precis = []
song_recall = []
song_fmeasu = []
inst_precis = []
inst_recall = []
inst_fmeasu = []
cur_fold = 0
for train, test in skf.split(features, groundtruths):
cur_fold += 1
utils.print_success("Iteration " + str(i) + "\tFold " + str(cur_fold))
dataset = {}
dataset["train_features"] = features[train]
dataset["train_groundtruths"] = groundtruths[train]
dataset["test_features"] = features[test]
dataset["test_groundtruths"] = groundtruths[test]
predictions = classify(data=dataset)
song_precis.append(precision_score(dataset["test_groundtruths"], predictions, average=None)[1])
song_recall.append(recall_score(dataset["test_groundtruths"], predictions, average=None)[1])
song_fmeasu.append(f1_score(dataset["test_groundtruths"], predictions, average=None)[1])
inst_precis.append(precision_score(dataset["test_groundtruths"], predictions, average=None)[0])
inst_recall.append(recall_score(dataset["test_groundtruths"], predictions, average=None)[0])
inst_fmeasu.append(f1_score(dataset["test_groundtruths"], predictions, average=None)[0])
song_precis = sum(song_precis) / float(len(song_precis))
song_recall = sum(song_recall) / float(len(song_recall))
song_fmeasu = sum(song_fmeasu) / float(len(song_fmeasu))
inst_precis = sum(inst_precis) / float(len(inst_precis))
inst_recall = sum(inst_recall) / float(len(inst_recall))
inst_fmeasu = sum(inst_fmeasu) / float(len(inst_fmeasu))
# Song
data[algo_name]["balanced"]["precision"]["song"].append(song_precis)
data[algo_name]["balanced"]["recall"]["song"].append(song_recall)
data[algo_name]["balanced"]["f1"]["song"].append(song_fmeasu)
# Instru
data[algo_name]["balanced"]["precision"]["instru"].append(inst_precis)
data[algo_name]["balanced"]["recall"]["instru"].append(inst_recall)
data[algo_name]["balanced"]["f1"]["instru"].append(inst_fmeasu)
with open(outfilename, "w") as outfile:
json.dump(data, outfile, indent=2)
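# Layout of the JSON written above (sketch of the structure built earlier in
# this function; only the "balanced" entries are filled by the loop):
#   {"Method 1": {"balanced": {"precision": {"instru": [...], "song": [...]},
#                              "recall":    {"instru": [...], "song": [...]},
#                              "f1":        {"instru": [...], "song": [...]}},
#                 "uneven":   {...}}}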
def split(features, groundtruths, n_split):
"""Description of split
1 temporary index gathering all items for each tag
2 sequential split of each tag's items into n_split chunks
..todo::
manage possible errors
randomize split selection
"""
if n_split == 1:
return features, groundtruths
tags = list(set(groundtruths))
new_index = {}
for tag in tags:
new_index[tag] = []
for index, gt in enumerate(groundtruths):
new_index[gt].append(index)
new_feats = []
new_gts = []
for i in range(0, n_split):
indexes = []
for tag in tags:
ref = len(new_index[tag]) // n_split  # integer division so the slice bounds below are ints
indexes.append(new_index[tag][ref*i:ref*(i+1)])
"""
..todo:: manage multiple tags!
"""
indexes = indexes[0] + indexes[1]
# print(features[:5])
# print(len(indexes))
# print(len(indexes[0]))
# print(len(indexes[1]))
# sys.exit()
indexes.sort()
new_gts.append([groundtruths[j] for j in indexes])
new_feats.append([features[j] for j in indexes])
return new_feats, new_gts
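# Illustrative behaviour (hypothetical call, assuming exactly two tags):
#   split(["a", "b", "c", "d"], ["s", "i", "s", "i"], 2)
#   -> each of the two chunks keeps one consecutive slice of every tag's items,
#      e.g. (["a", "b"], ["c", "d"]) with groundtruths (["s", "i"], ["s", "i"])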
def increasing_test(groundtruths_file, predictions_file, metric, tag):
gts = read_item_tag(groundtruths_file)
preds = read_item_tag(predictions_file)
test_groundtruths = []
predictions = []
for isrc in preds:
if isrc in gts:
test_groundtruths.append(gts[isrc])
predictions.append(preds[isrc])
res = []
if "accuracy" in metric:
res.append(accuracy_score(test_groundtruths, predictions))
elif "precision" in metric:
res.append(precision_score(test_groundtruths, predictions, average=None)[tag])
elif "recall" in metric:
res.append(recall_score(test_groundtruths, predictions, average=None)[tag])
elif "f1_score" in metric:
res.append(f1_score(test_groundtruths, predictions, average=None)[tag])
else:
utils.print_error("classify.py line 735 metric argument error")
# print("Accuracy : " + str(accuracy_score(test_groundtruths, predictions)))
# print("Precision: " + str(precision_score(test_groundtruths, predictions, average=None)))
# print("Recall : " + str(recall_score(test_groundtruths, predictions, average=None)))
# print("F-score : " + str(f1_score(test_groundtruths, predictions, average=None)))
n_splits = 10
# for n_split in range(2, n_splits+1):
for n_split in [2, 10, 100]:
print("\t" + str(n_split))
feats_array, gts_array = split(predictions, test_groundtruths, n_split)
tmp_acc = []
for feats, gts in zip(feats_array, gts_array):
if "accuracy" in metric:
cur_acc = accuracy_score(gts, feats)
elif "precision" in metric:
cur_acc = precision_score(gts, feats, average=None)[tag]
elif "recall" in metric:
cur_acc = recall_score(gts, feats, average=None)[tag]
elif "f1_score" in metric:
cur_acc = f1_score(gts, feats, average=None)[tag]
tmp_acc.append(cur_acc)
print("\t\t" + str(stdev(tmp_acc)))
accuracy = sum(tmp_acc) / float(len(tmp_acc))
res.append(accuracy)
return res
def growing_testset(train_filename, test_filename, clf, clf_name=None):
"""Description of growing_testset
1 Generate accuracy graph for global
2 Create precision / recall / f-measure figures for each tag
..todo::
intermediate file which stores predictions for each ISRC
param for number of steps
repet N times
division problem ! it does N N/2 ... N/10 but we want :
1*N/10 2*N/10 ... 10*N/10
"""
train_features, train_groundtruths = read_file(train_filename)
test_features, test_groundtruths = read_file(test_filename)
if clf_name is not None and "RANSAC" in clf_name:
train_groundtruths = [True if i =="s" else False for i in train_groundtruths]
test_groundtruths = [True if i =="s" else False for i in test_groundtruths]
clf.fit(train_features, train_groundtruths)
if clf_name is not None and "RANSAC" in clf_name:
preds_float = clf.predict(test_features)
predictions = [True if i > 0.5 else False for i in preds_float]
else:
predictions = clf.predict(test_features)
test_acc = []
# test_acc.append(accuracy_score(test_groundtruths, predictions))
test_acc.append(precision_score(test_groundtruths, predictions, average=None)[0])
print("Accuracy : " + str(test_acc))
print("Precision: " + str(precision_score(test_groundtruths, predictions, average=None)))
print("Recall : " + str(recall_score(test_groundtruths, predictions, average=None)))
print("F-score : " + str(f1_score(test_groundtruths, predictions, average=None)))
n_splits = 10
for n_split in range(2, n_splits+1):
print(n_split)
feats_array, gts_array = split(test_features, test_groundtruths, n_split)
tmp_acc = []
for feats, gts in zip(feats_array, gts_array):
if clf_name is not None and "RANSAC" in clf_name:
preds_float = clf.predict(feats)
predictions = [True if i > 0.5 else False for i in preds_float]
else:
predictions = clf.predict(feats)
# cur_acc = accuracy_score(gts, predictions)
cur_acc = precision_score(gts, predictions, average=None)[0]
tmp_acc.append(cur_acc)
print("\t" + str(cur_acc))
accuracy = sum(tmp_acc) / float(len(tmp_acc))
test_acc.append(accuracy)
return test_acc
def plot_roc(indir, gts_file, outdir):
groundtruths = read_item_tag(gts_file)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--', label="Random (0.5)")
indir = utils.abs_path_dir(indir)
for item in os.listdir(indir):
if ".csv" in item:
isrcs = read_preds(indir + "/" + item)
test_groundtruths = []
predictions = []
for isrc in isrcs:
if isrc in groundtruths:
test_groundtruths.append(groundtruths[isrc])
predictions.append(isrcs[isrc])
test_groundtruths = [tag=="s" for tag in test_groundtruths]
fpr_rf, tpr_rf, _ = roc_curve(test_groundtruths, predictions)
label = item[:-4] + " (" + str(round(roc_auc_score(test_groundtruths, predictions), 3)) + ")"
color = ""
if "VQMM" in item:
color = "ro"
elif "SVMBFF" in item:
color = "g-"
elif "GA" in item:
color = "b:"
plt.plot(fpr_rf, tpr_rf, color, label=label)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
# plt.title('ROC curve for Algo (AUC)')
plt.legend(loc='best')
outdir = utils.abs_path_dir(outdir)
roc_fn = outdir + "Figure_3_ROC.png"
plt.savefig(roc_fn, dpi=200, bbox_inches="tight")
plt.savefig(outdir + "Figure_3_ROC.eps")
# plt.show()
plt.close()
utils.print_success("ROC curve successfully created in " + roc_fn)
def plot_precision_recall(indir, gts_file, outdir):
groundtruths = read_item_tag(gts_file)
plt.figure(1)
indir = utils.abs_path_dir(indir)
for item in os.listdir(indir):
if ".csv" in item:
isrcs = read_preds(indir + "/" + item)
test_groundtruths = []
predictions = []
for isrc in isrcs:
if isrc in groundtruths:
test_groundtruths.append(groundtruths[isrc])
predictions.append(isrcs[isrc])
test_groundtruths = [tag=="s" for tag in test_groundtruths]
precision, recall, _ = precision_recall_curve(test_groundtruths, predictions)
plt.plot(recall, precision, label=item[:-4] + " (" + str(round(average_precision_score(test_groundtruths, predictions), 3)) + ")")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([-0.05, 1.05])
plt.title('Precision-Recall curve for Algo (AUC)')
plt.legend(loc='best')
plt.savefig(outdir + "precision_recall.png", dpi=200, bbox_inches="tight")
# plt.show()
plt.close()
utils.print_success("Precision-Recall curve created in " + outdir)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description="Compare classifiers")
PARSER.add_argument(
"--train",
help="path to train file",
type=str,
default="data/proba_hist_train.csv",
metavar="train")
PARSER.add_argument(
"--test",
help="path to test file",
type=str,
default="data/proba_hist_test.csv",
metavar="test")
PARSER.add_argument(
"-o",
"--outdir",
help="path to output directory",
type=str,
default="res/",
metavar="outdir")
plot_roc("roc_curve/")
# plot_precision_recall("/media/sf_github/classifiers/roc_curve/")
# # models_dir = "models_paral/"
# # utils.create_dir(models_dir)
# # train_file_1 = "/media/sf_DATA/Datasets/Simbals/yann/train.csv"
# # train_dir_1 = "/media/sf_github/yann/train/"
# # create_models(train_file=train_file_1)
# # create_models(outdir=models_dir, train_dir=train_dir_1)
# # test_models_parallel(models_dir, "/media/sf_DATA/Datasets/Simbals/yaafe/results/processed/", "/media/sf_DATA/Datasets/Simbals/yaafe/proba_preds/")
# # classify(PARSER.parse_args().train, PARSER.parse_args().test, PARSER.parse_args().outdir)
# # precision_100percent(PARSER.parse_args().train, PARSER.parse_args().test)
# # plot_clf()
# """
# Saturday 26 November 2016: final tests for my algorithm
# requested by <NAME> Matthias
# """
# train_file = "/media/sf_github/yann/2_local_predictions/method_3_trainset_normalized.txt"
# models_dir = "final_models/"
# utils.create_dir(models_dir)
# # create_models(outdir=models_dir, train_file=train_file)
# out_dir = "/media/sf_DATA/Datasets/Simbals/yann/algo_final/"
# utils.create_dir(out_dir)
# test_file="/media/sf_github/yann/2_local_predictions/method_3_testset_normalized_with_tag.txt"
# # test_models_parallel(
# # models_dir=models_dir,
# # test_file=test_file,
# # out_dir=out_dir)
# test_features = []
# isrc_order = []
# utils.print_info("Loading clf")
# clf = joblib.load("/media/sf_github/classifiers/final_modelsRandomForest/RandomForest.pkl")
# with open(test_file, "r") as filep:
# for index, line in enumerate(filep):
# line = line.split(",")
# utils.print_info(str(index) + "\t" + line[0])
# test_features.append(line[1:-1])
# isrc_order.append(line[0])
# utils.print_info("Predict_proba")
# predictions = clf.predict(test_features)
# # predictions = clf.predict_proba(test_features)
# utils.print_info("Writing results")
# with open("/media/sf_DATA/Datasets/Simbals/yann/algo_final/RF.txt" , "w") as filep2:
# for index, pred in enumerate(predictions):
# filep2.write(isrc_order[index] + "," + str(pred[0]) + "\n")
# utils.print_info("Done")
# test_groundtruths = {}
# with open("/media/sf_github/repro/groundtruths.csv", "r") as filep:
# for row in filep:
# line = row.split(",")
# test_groundtruths[line[0]] = line[1][:-1]
# for i in np.arange(0.1, 1.0, 0.1):
# outfile = open("results/Bayle2_"+str(i)+".csv", "w")
# utils.print_progress_start(str(i))
# with open("/media/sf_DATA/Datasets/Simbals/yann/algo_final/RFproba.txt", "r") as filep:
# for line in filep:
# line = line.split(",")
# if line[0] in test_groundtruths:
# if float(line[-1][:-1]) > i:
# prediction = "i"
# else:
# prediction = "s"
# outfile.write(line[0] + "," + prediction + "\n")
# utils.print_progress_end()
# outfile.close()
# # groundtruths = []
# # predictions = []
# outfile = open("results/Bayle.csv", "w")
# with open("/media/sf_DATA/Datasets/Simbals/yann/algo_final/RF.txt", "r") as filep:
# for line in filep:
# line = line.split(",")
# if line[0] in test_groundtruths:
# outfile.write(line[0] + "," + line[-1][:-1] + "\n")
# # groundtruths.append(test_groundtruths[line[0]])
# # predictions.append(line[-1][:-1])
# outfile.close()
# # utils.scores("bayle", predictions, groundtruths)
|
[
"sys.stdout.write",
"matplotlib.pyplot.title",
"utils.print_info",
"argparse.ArgumentParser",
"sklearn.metrics.accuracy_score",
"joblib.dump",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.figure",
"sklearn.metrics.f1_score",
"numpy.arange",
"matplotlib.pyplot.gca",
"sklearn.svm.SVC",
"matplotlib.pyplot.tight_layout",
"utils.create_dir",
"matplotlib.pyplot.close",
"utils.abs_path_dir",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.metrics.average_precision_score",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"sklearn.ensemble.RandomForestClassifier",
"functools.partial",
"json.dump",
"sklearn.ensemble.AdaBoostClassifier",
"matplotlib.pyplot.ylim",
"sklearn.utils.testing.all_estimators",
"utils.print_error",
"matplotlib.pyplot.legend",
"statistics.stdev",
"utils.print_success",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_recall_curve",
"sklearn.linear_model.LogisticRegression",
"utils.print_warning",
"utils.abs_path_file",
"matplotlib.pyplot.ylabel",
"os.listdir",
"matplotlib.pyplot.xlim",
"json.load",
"matplotlib.pyplot.plot",
"sklearn.metrics.roc_curve",
"time.time",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"sklearn.metrics.precision_score",
"joblib.load",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((3293, 3309), 'sklearn.utils.testing.all_estimators', 'all_estimators', ([], {}), '()\n', (3307, 3309), False, 'from sklearn.utils.testing import all_estimators\n'), ((3757, 3786), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3769, 3786), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3819), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (3809, 3819), True, 'import numpy as np\n'), ((4270, 4297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scores (in %)"""'], {}), "('Scores (in %)')\n", (4280, 4297), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4388), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + bar_width * ind + offset)', "('Precision', 'Recall', 'F-Measure')"], {}), "(index + bar_width * ind + offset, ('Precision', 'Recall',\n 'F-Measure'))\n", (4312, 4388), True, 'import matplotlib.pyplot as plt\n'), ((4387, 4399), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4397, 4399), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4418), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4412, 4418), True, 'import matplotlib.pyplot as plt\n'), ((4449, 4458), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4456, 4458), True, 'import matplotlib.pyplot as plt\n'), ((4814, 4832), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4830, 4832), True, 'import matplotlib.pyplot as plt\n'), ((4865, 4940), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_name'], {'dpi': '(200)', 'additional_artists': 'art', 'bbox_inches': '"""tight"""'}), "(img_name, dpi=200, additional_artists=art, bbox_inches='tight')\n", (4876, 4940), True, 'import matplotlib.pyplot as plt\n'), ((5140, 5169), 'utils.abs_path_file', 'utils.abs_path_file', (['filename'], {}), '(filename)\n', (5159, 5169), False, 'import utils\n'), ((5559, 5588), 'utils.abs_path_file', 'utils.abs_path_file', (['filename'], {}), '(filename)\n', (5578, 5588), False, 'import utils\n'), ((5962, 5991), 'utils.abs_path_file', 'utils.abs_path_file', (['filename'], {}), '(filename)\n', (5981, 5991), False, 'import utils\n'), ((6470, 6536), 'utils.print_success', 'utils.print_success', (['"""Find Recall for best Precision for each tag"""'], {}), "('Find Recall for best Precision for each tag')\n", (6489, 6536), False, 'import utils\n'), ((6549, 6575), 'utils.abs_path_file', 'utils.abs_path_file', (['train'], {}), '(train)\n', (6568, 6575), False, 'import utils\n'), ((6587, 6612), 'utils.abs_path_file', 'utils.abs_path_file', (['test'], {}), '(test)\n', (6606, 6612), False, 'import utils\n'), ((9069, 9128), 'utils.print_success', 'utils.print_success', (['"""Comparison of differents classifiers"""'], {}), "('Comparison of differents classifiers')\n", (9088, 9128), False, 'import utils\n'), ((9603, 9628), 'utils.create_dir', 'utils.create_dir', (['res_dir'], {}), '(res_dir)\n', (9619, 9628), False, 'import utils\n'), ((11438, 11497), 'utils.print_success', 'utils.print_success', (['"""Comparison of differents classifiers"""'], {}), "('Comparison of differents classifiers')\n", (11457, 11497), False, 'import utils\n'), ((14585, 14636), 'utils.print_success', 'utils.print_success', (['"""Reading multiple train files"""'], {}), "('Reading multiple train files')\n", (14604, 14636), False, 'import utils\n'), ((15352, 15378), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (15368, 15378), False, 'import sys\n'), ((15553, 15582), 'utils.abs_path_file', 
'utils.abs_path_file', (['filename'], {}), '(filename)\n', (15572, 15582), False, 'import utils\n'), ((15926, 15969), 'utils.print_success', 'utils.print_success', (["('Starting ' + clf_name)"], {}), "('Starting ' + clf_name)\n", (15945, 15969), False, 'import utils\n'), ((16012, 16037), 'utils.create_dir', 'utils.create_dir', (['clf_dir'], {}), '(clf_dir)\n', (16028, 16037), False, 'import utils\n'), ((16110, 16155), 'joblib.dump', 'joblib.dump', (['clf', "(clf_dir + clf_name + '.pkl')"], {}), "(clf, clf_dir + clf_name + '.pkl')\n", (16121, 16155), False, 'import joblib\n'), ((17583, 17621), 'utils.print_success', 'utils.print_success', (['"""Creating models"""'], {}), "('Creating models')\n", (17602, 17621), False, 'import utils\n'), ((20069, 20098), 'utils.abs_path_file', 'utils.abs_path_file', (['filename'], {}), '(filename)\n', (20088, 20098), False, 'import utils\n'), ((20611, 20636), 'utils.create_dir', 'utils.create_dir', (['out_dir'], {}), '(out_dir)\n', (20627, 20636), False, 'import utils\n'), ((20654, 20674), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (20664, 20674), False, 'import os\n'), ((20688, 20710), 'os.listdir', 'os.listdir', (['models_dir'], {}), '(models_dir)\n', (20698, 20710), False, 'import os\n'), ((21847, 21886), 'utils.print_success', 'utils.print_success', (["('Testing ' + model)"], {}), "('Testing ' + model)\n", (21866, 21886), False, 'import utils\n'), ((21928, 21982), 'joblib.load', 'joblib.load', (["(models_dir + model + '/' + model + '.pkl')"], {}), "(models_dir + model + '/' + model + '.pkl')\n", (21939, 21982), False, 'import joblib\n'), ((24336, 24358), 'os.listdir', 'os.listdir', (['models_dir'], {}), '(models_dir)\n', (24346, 24358), False, 'import os\n'), ((24363, 24388), 'utils.create_dir', 'utils.create_dir', (['out_dir'], {}), '(out_dir)\n', (24379, 24388), False, 'import utils\n'), ((24719, 24846), 'functools.partial', 'partial', (['test_model'], {'models_dir': 'models_dir', 'test_dir': 'test_dir', 'out_dir': 'out_dir', 'test_files': 'test_files', 'test_file': 'test_file'}), '(test_model, models_dir=models_dir, test_dir=test_dir, out_dir=\n out_dir, test_files=test_files, test_file=test_file)\n', (24726, 24846), False, 'from functools import partial\n'), ((25174, 25209), 'utils.abs_path_file', 'utils.abs_path_file', (['train_filename'], {}), '(train_filename)\n', (25193, 25209), False, 'import utils\n'), ((25473, 25491), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (25481, 25491), True, 'import numpy as np\n'), ((25511, 25533), 'numpy.array', 'np.array', (['groundtruths'], {}), '(groundtruths)\n', (25519, 25533), True, 'import numpy as np\n'), ((26250, 26283), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_folds'}), '(n_splits=n_folds)\n', (26265, 26283), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((34342, 34355), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (34352, 34355), True, 'import matplotlib.pyplot as plt\n'), ((34360, 34413), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'label': '"""Random (0.5)"""'}), "([0, 1], [0, 1], 'k--', label='Random (0.5)')\n", (34368, 34413), True, 'import matplotlib.pyplot as plt\n'), ((34431, 34456), 'utils.abs_path_dir', 'utils.abs_path_dir', (['indir'], {}), '(indir)\n', (34449, 34456), False, 'import utils\n'), ((34473, 34490), 'os.listdir', 'os.listdir', (['indir'], {}), '(indir)\n', (34483, 34490), False, 'import os\n'), ((35362, 35371), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35369, 35371), True, 'import matplotlib.pyplot as plt\n'), ((35538, 35571), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False positive rate"""'], {}), "('False positive rate')\n", (35548, 35571), True, 'import matplotlib.pyplot as plt\n'), ((35576, 35608), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True positive rate"""'], {}), "('True positive rate')\n", (35586, 35608), True, 'import matplotlib.pyplot as plt\n'), ((35657, 35679), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (35667, 35679), True, 'import matplotlib.pyplot as plt\n'), ((35693, 35719), 'utils.abs_path_dir', 'utils.abs_path_dir', (['outdir'], {}), '(outdir)\n', (35711, 35719), False, 'import utils\n'), ((35765, 35814), 'matplotlib.pyplot.savefig', 'plt.savefig', (['roc_fn'], {'dpi': '(200)', 'bbox_inches': '"""tight"""'}), "(roc_fn, dpi=200, bbox_inches='tight')\n", (35776, 35814), True, 'import matplotlib.pyplot as plt\n'), ((35819, 35859), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outdir + 'Figure_3_ROC.eps')"], {}), "(outdir + 'Figure_3_ROC.eps')\n", (35830, 35859), True, 'import matplotlib.pyplot as plt\n'), ((35881, 35892), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (35890, 35892), True, 'import matplotlib.pyplot as plt\n'), ((35897, 35963), 'utils.print_success', 'utils.print_success', (["('ROC curve successfully created in ' + roc_fn)"], {}), "('ROC curve successfully created in ' + roc_fn)\n", (35916, 35963), False, 'import utils\n'), ((36064, 36077), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (36074, 36077), True, 'import matplotlib.pyplot as plt\n'), ((36095, 36120), 'utils.abs_path_dir', 'utils.abs_path_dir', (['indir'], {}), '(indir)\n', (36113, 36120), False, 'import utils\n'), ((36137, 36154), 'os.listdir', 'os.listdir', (['indir'], {}), '(indir)\n', (36147, 36154), False, 'import os\n'), ((36797, 36817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (36807, 36817), True, 'import matplotlib.pyplot as plt\n'), ((36822, 36845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (36832, 36845), True, 'import matplotlib.pyplot as plt\n'), ((36850, 36871), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (36858, 36871), True, 'import matplotlib.pyplot as plt\n'), ((36876, 36899), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (36884, 36899), True, 'import matplotlib.pyplot as plt\n'), ((36904, 36954), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall curve for Algo (AUC)"""'], {}), "('Precision-Recall curve for Algo (AUC)')\n", (36913, 36954), True, 'import matplotlib.pyplot as plt\n'), ((36959, 36981), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (36969, 36981), True, 'import matplotlib.pyplot as plt\n'), ((36986, 37060), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outdir + 'precision_recall.png')"], {'dpi': '(200)', 'bbox_inches': '"""tight"""'}), "(outdir + 'precision_recall.png', dpi=200, bbox_inches='tight')\n", (36997, 37060), True, 'import matplotlib.pyplot as plt\n'), ((37082, 37093), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (37091, 37093), True, 'import matplotlib.pyplot as plt\n'), ((37098, 37164), 'utils.print_success', 'utils.print_success', (["('Precision-Recall curve created in ' + outdir)"], {}), "('Precision-Recall curve created in ' + 
outdir)\n", (37117, 37164), False, 'import utils\n'), ((37206, 37264), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compare classifiers"""'}), "(description='Compare classifiers')\n", (37229, 37264), False, 'import argparse\n'), ((3457, 3482), 'utils.abs_path_dir', 'utils.abs_path_dir', (['indir'], {}), '(indir)\n', (3475, 3482), False, 'import utils\n'), ((6836, 6860), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (6858, 6860), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((7276, 7309), 'utils.print_success', 'utils.print_success', (["('Tag ' + tag)"], {}), "('Tag ' + tag)\n", (7295, 7309), False, 'import utils\n'), ((8565, 8591), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (8581, 8591), False, 'import sys\n'), ((9544, 9588), 'utils.print_error', 'utils.print_error', (['"""No valid data provided."""'], {}), "('No valid data provided.')\n", (9561, 9588), False, 'import utils\n'), ((9736, 9759), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (['(1)'], {}), '(1)\n', (9756, 9759), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((10389, 10413), 'utils.print_success', 'utils.print_success', (['key'], {}), '(key)\n', (10408, 10413), False, 'import utils\n'), ((10453, 10478), 'utils.print_info', 'utils.print_info', (['"""\tFit"""'], {}), "('\\tFit')\n", (10469, 10478), False, 'import utils\n'), ((10539, 10568), 'utils.print_info', 'utils.print_info', (['"""\tPredict"""'], {}), "('\\tPredict')\n", (10555, 10568), False, 'import utils\n'), ((11753, 11779), 'utils.abs_path_file', 'utils.abs_path_file', (['train'], {}), '(train)\n', (11772, 11779), False, 'import utils\n'), ((11795, 11820), 'utils.abs_path_file', 'utils.abs_path_file', (['test'], {}), '(test)\n', (11814, 11820), False, 'import utils\n'), ((11953, 11978), 'utils.create_dir', 'utils.create_dir', (['res_dir'], {}), '(res_dir)\n', (11969, 11978), False, 'import utils\n'), ((11998, 12025), 'utils.abs_path_dir', 'utils.abs_path_dir', (['res_dir'], {}), '(res_dir)\n', (12016, 12025), False, 'import utils\n'), ((12070, 12103), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (12092, 12103), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n'), ((12846, 12870), 'utils.print_success', 'utils.print_success', (['key'], {}), '(key)\n', (12865, 12870), False, 'import utils\n'), ((12910, 12935), 'utils.print_info', 'utils.print_info', (['"""\tFit"""'], {}), "('\\tFit')\n", (12926, 12935), False, 'import utils\n'), ((12996, 13025), 'utils.print_info', 'utils.print_info', (['"""\tPredict"""'], {}), "('\\tPredict')\n", (13012, 13025), False, 'import utils\n'), ((14649, 14674), 'utils.abs_path_dir', 'utils.abs_path_dir', (['indir'], {}), '(indir)\n', (14667, 14674), False, 'import utils\n'), ((15006, 15032), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[F"""'], {}), "('\\x1b[F')\n", (15022, 15032), False, 'import sys\n'), ((15069, 15095), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (15085, 15095), False, 'import sys\n'), ((17636, 17662), 'utils.abs_path_dir', 'utils.abs_path_dir', (['outdir'], {}), '(outdir)\n', (17654, 17662), False, 'import utils\n'), ((20520, 20550), 'utils.abs_path_dir', 'utils.abs_path_dir', (['models_dir'], {}), '(models_dir)\n', (20538, 20550), False, 'import utils\n'), ((20572, 20600), 
'utils.abs_path_dir', 'utils.abs_path_dir', (['test_dir'], {}), '(test_dir)\n', (20590, 20600), False, 'import utils\n'), ((20744, 20770), 'utils.print_success', 'utils.print_success', (['model'], {}), '(model)\n', (20763, 20770), False, 'import utils\n'), ((20820, 20846), 'utils.create_dir', 'utils.create_dir', (['pred_dir'], {}), '(pred_dir)\n', (20836, 20846), False, 'import utils\n'), ((20861, 20915), 'joblib.load', 'joblib.load', (["(models_dir + model + '/' + model + '.pkl')"], {}), "(models_dir + model + '/' + model + '.pkl')\n", (20872, 20915), False, 'import joblib\n'), ((21383, 21409), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (21399, 21409), False, 'import sys\n'), ((22056, 22082), 'utils.create_dir', 'utils.create_dir', (['pred_dir'], {}), '(pred_dir)\n', (22072, 22082), False, 'import utils\n'), ((24286, 24316), 'utils.abs_path_dir', 'utils.abs_path_dir', (['models_dir'], {}), '(models_dir)\n', (24304, 24316), False, 'import utils\n'), ((24494, 24514), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (24504, 24514), False, 'import os\n'), ((26319, 26361), 'utils.print_warning', 'utils.print_warning', (['"""TODO for i in range"""'], {}), "('TODO for i in range')\n", (26338, 26361), False, 'import utils\n'), ((28568, 28602), 'json.dump', 'json.dump', (['data', 'outfile'], {'indent': '(2)'}), '(data, outfile, indent=2)\n', (28577, 28602), False, 'import json\n'), ((7640, 7665), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', 'step'], {}), '(0.0, 1.0, step)\n', (7649, 7665), True, 'import numpy as np\n'), ((13894, 13955), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (13909, 13955), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((13977, 14035), 'sklearn.metrics.recall_score', 'recall_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (13989, 14035), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((14053, 14107), 'sklearn.metrics.f1_score', 'f1_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (14061, 14107), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((14786, 14803), 'os.listdir', 'os.listdir', (['indir'], {}), '(indir)\n', (14796, 14803), False, 'import os\n'), ((17894, 17947), 'utils.print_warning', 'utils.print_warning', (['"""TODO Manage train feat and gts"""'], {}), "('TODO Manage train feat and gts')\n", (17913, 17947), False, 'import utils\n'), ((18029, 18053), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (18051, 18053), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n'), ((18088, 18108), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (18106, 18108), False, 'from sklearn.linear_model import LogisticRegression\n'), ((18135, 18157), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (18155, 18157), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((18186, 18210), 
'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (18208, 18210), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((18235, 18255), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {}), '()\n', (18253, 18255), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n'), ((18288, 18316), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (18314, 18316), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n'), ((18343, 18365), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {}), '()\n', (18363, 18365), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n'), ((18385, 18432), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'C': '(0.025)', 'probability': '(True)'}), "(kernel='linear', C=0.025, probability=True)\n", (18388, 18432), False, 'from sklearn.svm import SVC\n'), ((18861, 18904), 'utils.print_success', 'utils.print_success', (["('Starting ' + clf_name)"], {}), "('Starting ' + clf_name)\n", (18880, 18904), False, 'import utils\n'), ((18963, 18988), 'utils.create_dir', 'utils.create_dir', (['clf_dir'], {}), '(clf_dir)\n', (18979, 18988), False, 'import utils\n'), ((19007, 19040), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (19029, 19040), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n'), ((19153, 19198), 'joblib.dump', 'joblib.dump', (['clf', "(clf_dir + clf_name + '.pkl')"], {}), "(clf, clf_dir + clf_name + '.pkl')\n", (19164, 19198), False, 'import joblib\n'), ((21032, 21058), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[F"""'], {}), "('\\x1b[F')\n", (21048, 21058), False, 'import sys\n'), ((21071, 21097), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (21087, 21097), False, 'import sys\n'), ((23754, 23813), 'utils.print_error', 'utils.print_error', (['"""Error in arg for test_model() function"""'], {}), "('Error in arg for test_model() function')\n", (23771, 23813), False, 'import utils\n'), ((24438, 24466), 'utils.abs_path_dir', 'utils.abs_path_dir', (['test_dir'], {}), '(test_dir)\n', (24456, 24466), False, 'import utils\n'), ((24616, 24692), 'utils.print_warning', 'utils.print_warning', (['"""TODO Error in arg for test_models_parallel() function"""'], {}), "('TODO Error in arg for test_models_parallel() function')\n", (24635, 24692), False, 'import utils\n'), ((25665, 25681), 'json.load', 'json.load', (['filep'], {}), '(filep)\n', (25674, 25681), False, 'import json\n'), ((30150, 30196), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_groundtruths', 'predictions'], {}), '(test_groundtruths, predictions)\n', (30164, 30196), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((33064, 33125), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (33079, 33125), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((34927, 34968), 
'sklearn.metrics.roc_curve', 'roc_curve', (['test_groundtruths', 'predictions'], {}), '(test_groundtruths, predictions)\n', (34936, 34968), False, 'from sklearn.metrics import roc_curve\n'), ((35307, 35351), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr_rf', 'tpr_rf', 'color'], {'label': 'label'}), '(fpr_rf, tpr_rf, color, label=label)\n', (35315, 35351), True, 'import matplotlib.pyplot as plt\n'), ((36594, 36648), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['test_groundtruths', 'predictions'], {}), '(test_groundtruths, predictions)\n', (36616, 36648), False, 'from sklearn.metrics import precision_recall_curve, average_precision_score\n'), ((7755, 7781), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[F"""'], {}), "('\\x1b[F')\n", (7771, 7781), False, 'import sys\n'), ((7798, 7824), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (7814, 7824), False, 'import sys\n'), ((13350, 13417), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': '"""weighted"""'}), "(test_groundtruths, predictions, average='weighted')\n", (13365, 13417), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((13444, 13508), 'sklearn.metrics.recall_score', 'recall_score', (['test_groundtruths', 'predictions'], {'average': '"""weighted"""'}), "(test_groundtruths, predictions, average='weighted')\n", (13456, 13508), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((13535, 13595), 'sklearn.metrics.f1_score', 'f1_score', (['test_groundtruths', 'predictions'], {'average': '"""weighted"""'}), "(test_groundtruths, predictions, average='weighted')\n", (13543, 13595), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((15901, 15912), 'time.time', 'time.time', ([], {}), '()\n', (15910, 15912), False, 'import time\n'), ((21822, 21833), 'time.time', 'time.time', ([], {}), '()\n', (21831, 21833), False, 'import time\n'), ((31331, 31357), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['gts', 'feats'], {}), '(gts, feats)\n', (31345, 31357), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((33201, 33262), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (33216, 33262), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((33295, 33353), 'sklearn.metrics.recall_score', 'recall_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (33307, 33353), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((33386, 33440), 'sklearn.metrics.f1_score', 'f1_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (33394, 33440), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((34021, 34068), 'sklearn.metrics.precision_score', 
'precision_score', (['gts', 'predictions'], {'average': 'None'}), '(gts, predictions, average=None)\n', (34036, 34068), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((8042, 8103), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (8057, 8103), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((10662, 10729), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': '"""weighted"""'}), "(test_groundtruths, predictions, average='weighted')\n", (10677, 10729), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((10772, 10836), 'sklearn.metrics.recall_score', 'recall_score', (['test_groundtruths', 'predictions'], {'average': '"""weighted"""'}), "(test_groundtruths, predictions, average='weighted')\n", (10784, 10836), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((10875, 10935), 'sklearn.metrics.f1_score', 'f1_score', (['test_groundtruths', 'predictions'], {'average': '"""weighted"""'}), "(test_groundtruths, predictions, average='weighted')\n", (10883, 10935), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((27046, 27118), 'sklearn.metrics.precision_score', 'precision_score', (["dataset['test_groundtruths']", 'predictions'], {'average': 'None'}), "(dataset['test_groundtruths'], predictions, average=None)\n", (27061, 27118), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((27154, 27223), 'sklearn.metrics.recall_score', 'recall_score', (["dataset['test_groundtruths']", 'predictions'], {'average': 'None'}), "(dataset['test_groundtruths'], predictions, average=None)\n", (27166, 27223), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((27259, 27324), 'sklearn.metrics.f1_score', 'f1_score', (["dataset['test_groundtruths']", 'predictions'], {'average': 'None'}), "(dataset['test_groundtruths'], predictions, average=None)\n", (27267, 27324), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((27360, 27432), 'sklearn.metrics.precision_score', 'precision_score', (["dataset['test_groundtruths']", 'predictions'], {'average': 'None'}), "(dataset['test_groundtruths'], predictions, average=None)\n", (27375, 27432), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((27468, 27537), 'sklearn.metrics.recall_score', 'recall_score', (["dataset['test_groundtruths']", 'predictions'], {'average': 'None'}), "(dataset['test_groundtruths'], predictions, average=None)\n", (27480, 27537), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((27573, 27638), 'sklearn.metrics.f1_score', 'f1_score', 
(["dataset['test_groundtruths']", 'predictions'], {'average': 'None'}), "(dataset['test_groundtruths'], predictions, average=None)\n", (27581, 27638), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((30249, 30310), 'sklearn.metrics.precision_score', 'precision_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (30264, 30310), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((30559, 30622), 'utils.print_error', 'utils.print_error', (['"""classify.py line 735 metric argument error"""'], {}), "('classify.py line 735 metric argument error')\n", (30576, 30622), False, 'import utils\n'), ((31746, 31760), 'statistics.stdev', 'stdev', (['tmp_acc'], {}), '(tmp_acc)\n', (31751, 31760), False, 'from statistics import stdev\n'), ((8187, 8245), 'sklearn.metrics.recall_score', 'recall_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (8199, 8245), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((8421, 8475), 'sklearn.metrics.f1_score', 'f1_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (8429, 8475), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((18828, 18839), 'time.time', 'time.time', ([], {}), '()\n', (18837, 18839), False, 'import time\n'), ((22477, 22589), 'utils.print_warning', 'utils.print_warning', (['(\'predict_proba does not exists for \' + model +\n """\nRegular predict function is used.""")'], {}), '(\'predict_proba does not exists for \' + model +\n """\nRegular predict function is used.""")\n', (22496, 22589), False, 'import utils\n'), ((30365, 30423), 'sklearn.metrics.recall_score', 'recall_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (30377, 30423), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((31424, 31465), 'sklearn.metrics.precision_score', 'precision_score', (['gts', 'feats'], {'average': 'None'}), '(gts, feats, average=None)\n', (31439, 31465), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((30480, 30534), 'sklearn.metrics.f1_score', 'f1_score', (['test_groundtruths', 'predictions'], {'average': 'None'}), '(test_groundtruths, predictions, average=None)\n', (30488, 30534), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((31534, 31572), 'sklearn.metrics.recall_score', 'recall_score', (['gts', 'feats'], {'average': 'None'}), '(gts, feats, average=None)\n', (31546, 31572), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((35018, 35063), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['test_groundtruths', 'predictions'], {}), '(test_groundtruths, predictions)\n', (35031, 35063), False, 'from sklearn.metrics import roc_auc_score\n'), 
((31643, 31677), 'sklearn.metrics.f1_score', 'f1_score', (['gts', 'feats'], {'average': 'None'}), '(gts, feats, average=None)\n', (31651, 31677), False, 'from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score\n'), ((16216, 16227), 'time.time', 'time.time', ([], {}), '()\n', (16225, 16227), False, 'import time\n'), ((23871, 23882), 'time.time', 'time.time', ([], {}), '()\n', (23880, 23882), False, 'import time\n'), ((36724, 36779), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['test_groundtruths', 'predictions'], {}), '(test_groundtruths, predictions)\n', (36747, 36779), False, 'from sklearn.metrics import precision_recall_curve, average_precision_score\n'), ((19267, 19278), 'time.time', 'time.time', ([], {}), '()\n', (19276, 19278), False, 'import time\n')]
|
import numpy as np
import pandas as pd
from .nlp_utils.classifier import NaiveBayesClassifier
from .nlp_utils.tokenizer import NGramTokenizer
DATASET_PATH = 'spam_filter/data/spam.csv'
def preprocess_data():
dataset = pd.read_csv(DATASET_PATH, encoding='latin-1')
dataset.rename(columns={'v1': 'labels', 'v2': 'message'}, inplace=True)
dataset['label'] = dataset['labels'].map({'ham': 0, 'spam': 1})
dataset.drop(['labels'], axis=1, inplace=True)
train_indices, test_indices = [], []
for i in range(dataset.shape[0]):
if np.random.uniform(0, 1) < 0.75:
train_indices += [i]
else:
test_indices += [i]
train_dataset = dataset.loc[train_indices]
test_dataset = dataset.loc[test_indices]
train_dataset.reset_index(inplace=True)
train_dataset.drop(['index'], axis=1, inplace=True)
test_dataset.reset_index(inplace=True)
test_dataset.drop(['index'], axis=1, inplace=True)
return train_dataset, test_dataset
def metrics(labels, predictions):
true_pos, true_neg, false_pos, false_neg = 0, 0, 0, 0
for i in range(len(labels)):
true_pos += int(labels[i] == 1 and predictions[i] == 1)
true_neg += int(labels[i] == 0 and predictions[i] == 0)
false_pos += int(labels[i] == 0 and predictions[i] == 1)
false_neg += int(labels[i] == 1 and predictions[i] == 0)
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
f_score = 2 * precision * recall / (precision + recall)
accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
print("Precision: ", precision)
print("Recall: ", recall)
print("F-score: ", f_score)
print("Accuracy: ", accuracy)
if __name__ == '__main__':
train_dataset, test_dataset = preprocess_data()
classifier = NaiveBayesClassifier()
classifier.train(train_dataset)
prediction_list = classifier.predict(test_dataset['message'])
metrics(test_dataset['label'], prediction_list)
|
[
"pandas.read_csv",
"numpy.random.uniform"
] |
[((226, 271), 'pandas.read_csv', 'pd.read_csv', (['DATASET_PATH'], {'encoding': '"""latin-1"""'}), "(DATASET_PATH, encoding='latin-1')\n", (237, 271), True, 'import pandas as pd\n'), ((558, 581), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (575, 581), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
# Translation is the shifting of objects location. If you know the shift in
# (x,y) direction, let it be (t_x,t_y), you can create the transformation matrix
# M as follows:
#
# M = | 1 0 t_x |
# | 0 1 t_y |
#
# You'll need to make it into a Numpy array of type np.float32 and pass it into
# cv2.warpAffine() function.
img = cv2.imread('images/saturn.png', 0)
rows, cols = img.shape
translate_x = -150
translate_y = 50
M = np.float32([[1, 0, translate_x], [0, 1, translate_y]])
img_translated = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('Translated Image', img_translated)
cv2.waitKey(0)
cv2.destroyAllWindows()
# WARNING: Third argument of the cv2.warpAffine() function is the size of the
# output image, which should be in the form of (width, height).
# Remember width = number of columns, and height = number of rows.
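# --- Illustrative sketch (not part of the original script): since the third
# argument of cv2.warpAffine() is the output canvas size, enlarging it by the
# shift keeps the whole translated image visible instead of cropping it at the
# original (cols, rows) size. The shift values below are hypothetical.
shift_x, shift_y = 100, 50
M_demo = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
img_uncropped = cv2.warpAffine(img, M_demo, (cols + shift_x, rows + shift_y))
cv2.imshow('Translated without cropping', img_uncropped)
cv2.waitKey(0)
cv2.destroyAllWindows()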
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.float32",
"cv2.imread",
"cv2.warpAffine",
"cv2.imshow"
] |
[((377, 411), 'cv2.imread', 'cv2.imread', (['"""images/saturn.png"""', '(0)'], {}), "('images/saturn.png', 0)\n", (387, 411), False, 'import cv2\n'), ((475, 529), 'numpy.float32', 'np.float32', (['[[1, 0, translate_x], [0, 1, translate_y]]'], {}), '([[1, 0, translate_x], [0, 1, translate_y]])\n', (485, 529), True, 'import numpy as np\n'), ((548, 584), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, rows))\n', (562, 584), False, 'import cv2\n'), ((585, 631), 'cv2.imshow', 'cv2.imshow', (['"""Translated Image"""', 'img_translated'], {}), "('Translated Image', img_translated)\n", (595, 631), False, 'import cv2\n'), ((632, 646), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (643, 646), False, 'import cv2\n'), ((647, 670), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (668, 670), False, 'import cv2\n')]
|
import os
import pandas as pd
import pdb
import seaborn as sns
import matplotlib.pyplot as plt
#import pymrmr
from scipy.stats import kendalltau, pearsonr, spearmanr
from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2, f_classif, RFE
import numpy as np
# Feature Importance Sklearn
# https://machinelearningmastery.com/calculate-feature-importance-with-python/
class Feature_Selection(object):
"""
    Class with feature-selection preprocessing routines to apply to a dataframe
"""
def __init__(self):
super().__init__()
self.correlation_matrix = Correlation_Matrix()
def select_features(self,df,columns):
raise NotImplementedError()
def get_missing_values_df(self,df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,'percent_missing': percent_missing})
return missing_value_df
def get_correlation_matrix(self,df,method):
return self.correlation_matrix.get_correlation_matrix(df,method)
def plot_correlation_matrix(self,df_corr,plot=True):
return self.correlation_matrix.plot_correlation_matrix(df_corr,plot)
def get_correlation_with_target(self,df,target_column,method,num_feats=10):
return self.correlation_matrix.get_correlation_with_target(df,target_column,method,num_feats)
    def _split_df_in_xy(self,df,target_column):
        X = df.drop(columns=[target_column])
        y = df[target_column]
        return X,y
    def get_IG_feature_scores(self,df,target_column,n_features_to_select):
        """
        IG calculates the importance of each feature by measuring the increase in entropy when the feature is given vs. absent.
        """
        X,y = self._split_df_in_xy(df,target_column)
        bestfeatures = SelectKBest(score_func=mutual_info_classif, k=n_features_to_select) # k is the number of features to keep
        fit = bestfeatures.fit(X,y)
        dfscores = pd.DataFrame(fit.scores_)
        dfcolumns = pd.DataFrame(X.columns)
        featureScores = pd.concat([dfcolumns,dfscores],axis=1)
        featureScores.columns = ['Feature','Score']
        return featureScores
# def get_mRMR_feature_scores(self,df,n_features_to_select):
# # https://medium.com/subex-ai-labs/feature-selection-techniques-for-machine-learning-in-python-455dadcd3869
# """
# (Minimal Redundancy and Maximal Relevance)
# Intuition: It selects the features, based on their relevancy with the target variable, as well as their redundancy with the other features.
# """
# selected_features = pymrmr.mRMR(df, 'MIQ',n_features_to_select)
# return selected_features
def get_chisquare_feature_scores(self,df,target_column,n_features_to_select):
"""
It calculates the correlation between the feature and target and selects the best k features according to their chi square score calculated using following chi square test.
"""
        X,y = self._split_df_in_xy(df,target_column)
        bestfeatures = SelectKBest(score_func=chi2, k=n_features_to_select) # k is the number of features to keep
        fit = bestfeatures.fit(X,y)
        dfscores = pd.DataFrame(fit.scores_)
        dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Feature','Score']
return featureScores
    def get_anova_feature_scores(self,df,target_column,n_features_to_select):
        """
        We perform an ANOVA F-test between each feature and the target to check whether they belong to the same population.
        """
        X,y = self._split_df_in_xy(df,target_column)
        bestfeatures = SelectKBest(score_func=f_classif, k=n_features_to_select) # k is the number of features to keep
        fit = bestfeatures.fit(X,y)
        dfscores = pd.DataFrame(fit.scores_)
        dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Feature','Score']
return featureScores
    def get_features_by_RFE(self,df,target_column,model,n_features_to_select=20):
        """
        RFE is a greedy optimization algorithm which aims to find the best performing feature subset. It repeatedly creates models and sets aside the best or the worst performing feature at each iteration. It constructs the next model with the remaining features until all the features are exhausted. It then ranks the features based on the order of their elimination.
        """
        #model = LogisticRegression(max_iter=1000)
        X,y = self._split_df_in_xy(df,target_column)
        rfe_model = RFE(model, n_features_to_select=n_features_to_select)
        rfe_fit = rfe_model.fit(X, y)
        selected = X[X.columns[rfe_fit.get_support(indices=True)]]
        return selected
    def get_feature_selection_summary(self,feature_name,cor_support,chi_support,rfe_support,
                                      embeded_lr_support,embeded_rf_support,embeded_lgb_support,
                                      num_feats=100):
        # https://towardsdatascience.com/the-5-feature-selection-algorithms-every-data-scientist-need-to-know-3a6b566efd2
        # put the support masks of all selection methods together
        feature_selection_df = pd.DataFrame({'Feature':feature_name, 'Pearson':cor_support, 'Chi-2':chi_support, 'RFE':rfe_support, 'Logistics':embeded_lr_support,
                                    'Random Forest':embeded_rf_support, 'LightGBM':embeded_lgb_support})
        # count the selected times for each feature
        feature_selection_df['Total'] = np.sum(feature_selection_df, axis=1)
        # display the top num_feats features
        feature_selection_df = feature_selection_df.sort_values(['Total','Feature'] , ascending=False)
        feature_selection_df.index = range(1, len(feature_selection_df)+1)
        return feature_selection_df.head(num_feats)
class Correlation_Matrix(object):
def __init__(self):
super().__init__()
def get_correlation_with_target(self,df,target_column,method,num_feats):
corr_dict = self.get_correlation_matrix(df,method)
df_k,df_p = corr_dict['df_k'],corr_dict['df_p']
correlations_with_target = df_k[target_column]
correlations_with_target = correlations_with_target.fillna(0)
correlations_with_target = correlations_with_target[correlations_with_target.index.difference([target_column])]
        correlations_with_target = correlations_with_target.abs().sort_values(ascending = False)
correlations_with_target = correlations_with_target[:num_feats]
return correlations_with_target
def plot_correlation_matrix(self,df_corr,plot=True):
plt.figure(figsize=(16, 6))
heatmap = sns.heatmap(df_corr, vmin=-1, vmax=1, annot=True, cmap='coolwarm')
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':18}, pad=12)
if plot:
plt.show()
else:
return heatmap
def get_correlation_matrix(self,df,method):
        accepted_correlations = ['pearson','spearman','kendall']
        if method not in accepted_correlations:
            raise ValueError(f"The method must be one of {accepted_correlations}")
if method == 'pearson':
method_k = self._pearsonr_rval
method_p = self._pearsonr_pval
elif method == 'spearman':
method_k = self._spearmanr_rval
method_p = self._spearmanr_pval
elif method == 'kendall':
method_k = self._kendall_rval
method_p = self._kendall_pval
df_k = df.corr(method=method_k)
df_p = df.corr(method=method_p)
return {'df_k':df_k,'df_p':df_p}
def _kendall_rval(self,x,y):
return np.round(kendalltau(x,y)[0],6)
def _pearsonr_rval(self,x,y):
return np.round(pearsonr(x,y)[0],6)
def _spearmanr_rval(self,x,y):
return np.round(spearmanr(x,y)[0],6)
def _kendall_pval(self,x,y):
return np.round(kendalltau(x,y)[1],6)
def _pearsonr_pval(self,x,y):
return np.round(pearsonr(x,y)[1],6)
def _spearmanr_pval(self,x,y):
return np.round(spearmanr(x,y)[1],6)
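# --- Hypothetical usage sketch (the dataframe and column names below are
# illustrative, not part of the original module): rank the features of a
# labelled dataframe against a binary target with the helpers defined above.
if __name__ == '__main__':
    demo_df = pd.DataFrame(np.random.rand(100, 4), columns=['f1', 'f2', 'f3', 'f4'])
    demo_df['target'] = (demo_df['f1'] + demo_df['f2'] > 1).astype(int)
    selector = Feature_Selection()
    print(selector.get_missing_values_df(demo_df))
    print(selector.get_correlation_with_target(demo_df, 'target', method='kendall', num_feats=3))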
|
[
"pandas.DataFrame",
"numpy.sum",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"sklearn.feature_selection.RFE",
"scipy.stats.spearmanr",
"scipy.stats.pearsonr",
"matplotlib.pyplot.figure",
"pdb.set_trace",
"scipy.stats.kendalltau",
"sklearn.feature_selection.SelectKBest",
"pandas.concat"
] |
[((804, 881), 'pandas.DataFrame', 'pd.DataFrame', (["{'column_name': df.columns, 'percent_missing': percent_missing}"], {}), "({'column_name': df.columns, 'percent_missing': percent_missing})\n", (816, 881), True, 'import pandas as pd\n'), ((1598, 1665), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'mutual_info_classif', 'k': 'n_features_to_select'}), '(score_func=mutual_info_classif, k=n_features_to_select)\n', (1609, 1665), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2, f_classif, RFE\n'), ((1767, 1792), 'pandas.DataFrame', 'pd.DataFrame', (['fit.scores_'], {}), '(fit.scores_)\n', (1779, 1792), True, 'import pandas as pd\n'), ((1813, 1837), 'pandas.DataFrame', 'pd.DataFrame', (['xs.columns'], {}), '(xs.columns)\n', (1825, 1837), True, 'import pandas as pd\n'), ((1862, 1902), 'pandas.concat', 'pd.concat', (['[dfcolumns, dfscores]'], {'axis': '(1)'}), '([dfcolumns, dfscores], axis=1)\n', (1871, 1902), True, 'import pandas as pd\n'), ((2891, 2906), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2904, 2906), False, 'import pdb\n'), ((2930, 2982), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'chi2', 'k': 'n_features_to_select'}), '(score_func=chi2, k=n_features_to_select)\n', (2941, 2982), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2, f_classif, RFE\n'), ((3083, 3108), 'pandas.DataFrame', 'pd.DataFrame', (['fit.scores_'], {}), '(fit.scores_)\n', (3095, 3108), True, 'import pandas as pd\n'), ((3129, 3153), 'pandas.DataFrame', 'pd.DataFrame', (['xs.columns'], {}), '(xs.columns)\n', (3141, 3153), True, 'import pandas as pd\n'), ((3178, 3218), 'pandas.concat', 'pd.concat', (['[dfcolumns, dfscores]'], {'axis': '(1)'}), '([dfcolumns, dfscores], axis=1)\n', (3187, 3218), True, 'import pandas as pd\n'), ((3507, 3564), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_classif', 'k': 'n_features_to_select'}), '(score_func=f_classif, k=n_features_to_select)\n', (3518, 3564), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2, f_classif, RFE\n'), ((3666, 3691), 'pandas.DataFrame', 'pd.DataFrame', (['fit.scores_'], {}), '(fit.scores_)\n', (3678, 3691), True, 'import pandas as pd\n'), ((3712, 3736), 'pandas.DataFrame', 'pd.DataFrame', (['xs.columns'], {}), '(xs.columns)\n', (3724, 3736), True, 'import pandas as pd\n'), ((3761, 3801), 'pandas.concat', 'pd.concat', (['[dfcolumns, dfscores]'], {'axis': '(1)'}), '([dfcolumns, dfscores], axis=1)\n', (3770, 3801), True, 'import pandas as pd\n'), ((4386, 4400), 'sklearn.feature_selection.RFE', 'RFE', (['model', '(20)'], {}), '(model, 20)\n', (4389, 4400), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2, f_classif, RFE\n'), ((4771, 4987), 'pandas.DataFrame', 'pd.DataFrame', (["{'Feature': feature_name, 'Pearson': cor_support, 'Chi-2': chi_support,\n 'RFE': rfe_support, 'Logistics': embeded_lr_support, 'Random Forest':\n embeded_rf_support, 'LightGBM': embeded_lgb_support}"], {}), "({'Feature': feature_name, 'Pearson': cor_support, 'Chi-2':\n chi_support, 'RFE': rfe_support, 'Logistics': embeded_lr_support,\n 'Random Forest': embeded_rf_support, 'LightGBM': embeded_lgb_support})\n", (4783, 4987), True, 'import pandas as pd\n'), ((5109, 5145), 'numpy.sum', 'np.sum', (['feature_selection_df'], {'axis': '(1)'}), '(feature_selection_df, axis=1)\n', (5115, 5145), True, 'import numpy as np\n'), ((6226, 6253), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 6)'}), '(figsize=(16, 6))\n', (6236, 6253), True, 'import matplotlib.pyplot as plt\n'), ((6272, 6338), 'seaborn.heatmap', 'sns.heatmap', (['df_corr'], {'vmin': '(-1)', 'vmax': '(1)', 'annot': '(True)', 'cmap': '"""coolwarm"""'}), "(df_corr, vmin=-1, vmax=1, annot=True, cmap='coolwarm')\n", (6283, 6338), True, 'import seaborn as sns\n'), ((6451, 6461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6459, 6461), True, 'import matplotlib.pyplot as plt\n'), ((7307, 7323), 'scipy.stats.kendalltau', 'kendalltau', (['x', 'y'], {}), '(x, y)\n', (7317, 7323), False, 'from scipy.stats import kendalltau, pearsonr, spearmanr\n'), ((7388, 7402), 'scipy.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (7396, 7402), False, 'from scipy.stats import kendalltau, pearsonr, spearmanr\n'), ((7468, 7483), 'scipy.stats.spearmanr', 'spearmanr', (['x', 'y'], {}), '(x, y)\n', (7477, 7483), False, 'from scipy.stats import kendalltau, pearsonr, spearmanr\n'), ((7547, 7563), 'scipy.stats.kendalltau', 'kendalltau', (['x', 'y'], {}), '(x, y)\n', (7557, 7563), False, 'from scipy.stats import kendalltau, pearsonr, spearmanr\n'), ((7628, 7642), 'scipy.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (7636, 7642), False, 'from scipy.stats import kendalltau, pearsonr, spearmanr\n'), ((7708, 7723), 'scipy.stats.spearmanr', 'spearmanr', (['x', 'y'], {}), '(x, y)\n', (7717, 7723), False, 'from scipy.stats import kendalltau, pearsonr, spearmanr\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 07:05:07 2018
@author: massimo
"""
from brightway2 import *
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
projects
projects.set_current('bw2_import_ecoinvent_3.4')
databases
db = Database("ecoinvent 3.4 conseq")
ipcc = ('IPCC 2013', 'climate change', 'GTP 100a')
# Simple montecarlo on ecoinvent process as we know it.
mydemand = {db.random(): 1} # select a random process
lca = LCA(mydemand, ipcc)
lca.lci()
lca.lcia()
lca.score
mc = MonteCarloLCA(mydemand, ipcc)
mc_results = [next(mc) for x in range(500)]
plt.hist(mc_results, density=True)
plt.ylabel("Probability")
plt.xlabel(methods[ipcc]["unit"])
pd.DataFrame(mc_results).describe()
lca.score
np.exp(np.mean(np.log(mc_results))) # geometric mean
# Now comparative analysis
db.search('lorry transport euro5') # look at the names
activity_name = 'transport, freight, lorry >32 metric ton, EURO5'
for activity in Database("ecoinvent 3.4 conseq"):
if activity['name'] == activity_name:
truckE5 = Database("ecoinvent 3.4 conseq").get(activity['code'])
activity_name = 'transport, freight, lorry >32 metric ton, EURO6'
for activity in Database("ecoinvent 3.4 conseq"):
if activity['name'] == activity_name:
truckE6 = Database("ecoinvent 3.4 conseq").get(activity['code'])
truckE5.as_dict()
truckE6.as_dict()
# make a list with the alternatives
demands = [{truckE5: 1}, {truckE6: 1}]
mc = MonteCarloLCA(demands[0], ipcc)
next(mc)
# look at this first
mc.redo_lcia(demands[0])
mc.score
mc.redo_lcia(demands[1])
mc.score
mc.redo_lcia(demands[0])
mc.score
# Now for several iterations
iterations = 100
simulations = []
for _ in range(iterations):
print(_)
next(mc)
mcresults = []
for i in demands:
mc.redo_lcia(i)
mcresults.append(mc.score)
simulations.append(mcresults)
simulations
df = pd.DataFrame(simulations, columns = ['truckE5','truckE6'])
df.to_csv('ComparativeMCsimulation.csv') # to save it
#plot stuff (using the matplotlib package)
df.plot(kind = 'box')
#df.T.melt()
plt.plot(df.truckE5, df.truckE6, 'o')
plt.xlabel('truckE5 - kg CO2-eq')
plt.ylabel('truckE6 - kg CO2-eq')
# You can see how many times the difference is positive. This is what Simapro does
df['diffe'] = df.truckE5 - df.truckE6  # assign as a column, not an attribute
plt.hist(df.diffe.values)
len(df.diffe[df.diffe < 0])
len(df.diffe[df.diffe > 0])
len(df.diffe[df.diffe == 0])
# Statistical testing (using the stats package)
# I can use a paired t-test
t_value, p_value = stats.ttest_rel(df.truckE5,df.truckE6)
t_value
p_value
# But wait! did we check for normality?
plt.hist(df.truckE5.values)
plt.xlabel('truckE5 - kg CO2-eq')
SW_value, SW_p_value = stats.shapiro(df.truckE5)
SW_p_value # Not normally distributed...
plt.hist(df.truckE6.values)
SW_value, SW_p_value = stats.shapiro(df.truckE6)
SW_p_value # Normally distributed if alpha = 0.05... Not a strong result though if we had, say, 1000 samples
# Alright need a non-parametric test. Wilcox sign rank test
s_value, p_value = stats.wilcoxon(df.truckE5, df.truckE6)
s_value
p_value # Not bad, significant difference!
# What if we had done the MC on the processes independently.
mc1 = MonteCarloLCA({truckE5: 1}, ipcc)
mc1_results = [next(mc1) for x in range(100)]
mc2 = MonteCarloLCA({truckE5: 1}, ipcc) # it's still truckE5!
mc2_results = [next(mc2) for x in range(100)]
df_ind = pd.DataFrame({'mc1': mc1_results, 'mc2' : mc2_results})
# compare to this
demands = [{truckE5: 1}, {truckE5: 1}] # I am using the same process two times.
mc = MonteCarloLCA(demands[0], ipcc)
iterations = 100
simulations = []
for _ in range(iterations):
print(_)
next(mc)
mcresults = []
for i in demands:
mc.redo_lcia(i)
mcresults.append(mc.score)
simulations.append(mcresults)
simulations
df_dep = pd.DataFrame(simulations, columns = ['mc1','mc2'])
# Plot stuff
df_dep.plot(kind = 'box')
df_ind.plot(kind = 'box')
plt.plot(df_dep.mc1, df_dep.mc2, 'o')
plt.plot(df_ind.mc1, df_ind.mc2, 'o') # see?
# and of course:
t_value, p_value = stats.ttest_rel(df_dep.mc1, df_dep.mc2)
t_value
p_value # no difference AT ALL (as expected)
t_value, p_value = stats.ttest_rel(df_ind.mc1, df_ind.mc2)
t_value
p_value # no difference (as expected! But still some variance!)
s_value, p_value = stats.wilcoxon(df_ind.mc1, df_ind.mc2)
s_value
p_value
|
[
"pandas.DataFrame",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"scipy.stats.ttest_rel",
"scipy.stats.shapiro",
"matplotlib.pyplot.ylabel",
"scipy.stats.wilcoxon",
"matplotlib.pyplot.xlabel"
] |
[((646, 680), 'matplotlib.pyplot.hist', 'plt.hist', (['mc_results'], {'density': '(True)'}), '(mc_results, density=True)\n', (654, 680), True, 'from matplotlib import pyplot as plt\n'), ((681, 706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (691, 706), True, 'from matplotlib import pyplot as plt\n'), ((707, 740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["methods[ipcc]['unit']"], {}), "(methods[ipcc]['unit'])\n", (717, 740), True, 'from matplotlib import pyplot as plt\n'), ((1969, 2026), 'pandas.DataFrame', 'pd.DataFrame', (['simulations'], {'columns': "['truckE5', 'truckE6']"}), "(simulations, columns=['truckE5', 'truckE6'])\n", (1981, 2026), True, 'import pandas as pd\n'), ((2163, 2200), 'matplotlib.pyplot.plot', 'plt.plot', (['df.truckE5', 'df.truckE6', '"""o"""'], {}), "(df.truckE5, df.truckE6, 'o')\n", (2171, 2200), True, 'from matplotlib import pyplot as plt\n'), ((2201, 2234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""truckE5 - kg CO2-eq"""'], {}), "('truckE5 - kg CO2-eq')\n", (2211, 2234), True, 'from matplotlib import pyplot as plt\n'), ((2235, 2268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""truckE6 - kg CO2-eq"""'], {}), "('truckE6 - kg CO2-eq')\n", (2245, 2268), True, 'from matplotlib import pyplot as plt\n'), ((2389, 2414), 'matplotlib.pyplot.hist', 'plt.hist', (['df.diffe.values'], {}), '(df.diffe.values)\n', (2397, 2414), True, 'from matplotlib import pyplot as plt\n'), ((2597, 2636), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['df.truckE5', 'df.truckE6'], {}), '(df.truckE5, df.truckE6)\n', (2612, 2636), False, 'from scipy import stats\n'), ((2693, 2720), 'matplotlib.pyplot.hist', 'plt.hist', (['df.truckE5.values'], {}), '(df.truckE5.values)\n', (2701, 2720), True, 'from matplotlib import pyplot as plt\n'), ((2721, 2754), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""truckE5 - kg CO2-eq"""'], {}), "('truckE5 - kg CO2-eq')\n", (2731, 2754), True, 'from matplotlib import pyplot as plt\n'), ((2779, 2804), 'scipy.stats.shapiro', 'stats.shapiro', (['df.truckE5'], {}), '(df.truckE5)\n', (2792, 2804), False, 'from scipy import stats\n'), ((2847, 2874), 'matplotlib.pyplot.hist', 'plt.hist', (['df.truckE6.values'], {}), '(df.truckE6.values)\n', (2855, 2874), True, 'from matplotlib import pyplot as plt\n'), ((2898, 2923), 'scipy.stats.shapiro', 'stats.shapiro', (['df.truckE6'], {}), '(df.truckE6)\n', (2911, 2923), False, 'from scipy import stats\n'), ((3102, 3140), 'scipy.stats.wilcoxon', 'stats.wilcoxon', (['df.truckE5', 'df.truckE6'], {}), '(df.truckE5, df.truckE6)\n', (3116, 3140), False, 'from scipy import stats\n'), ((3462, 3516), 'pandas.DataFrame', 'pd.DataFrame', (["{'mc1': mc1_results, 'mc2': mc2_results}"], {}), "({'mc1': mc1_results, 'mc2': mc2_results})\n", (3474, 3516), True, 'import pandas as pd\n'), ((3913, 3962), 'pandas.DataFrame', 'pd.DataFrame', (['simulations'], {'columns': "['mc1', 'mc2']"}), "(simulations, columns=['mc1', 'mc2'])\n", (3925, 3962), True, 'import pandas as pd\n'), ((4031, 4068), 'matplotlib.pyplot.plot', 'plt.plot', (['df_dep.mc1', 'df_dep.mc2', '"""o"""'], {}), "(df_dep.mc1, df_dep.mc2, 'o')\n", (4039, 4068), True, 'from matplotlib import pyplot as plt\n'), ((4069, 4106), 'matplotlib.pyplot.plot', 'plt.plot', (['df_ind.mc1', 'df_ind.mc2', '"""o"""'], {}), "(df_ind.mc1, df_ind.mc2, 'o')\n", (4077, 4106), True, 'from matplotlib import pyplot as plt\n'), ((4151, 4190), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['df_dep.mc1', 'df_dep.mc2'], {}), '(df_dep.mc1, df_dep.mc2)\n', (4166, 
4190), False, 'from scipy import stats\n'), ((4265, 4304), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['df_ind.mc1', 'df_ind.mc2'], {}), '(df_ind.mc1, df_ind.mc2)\n', (4280, 4304), False, 'from scipy import stats\n'), ((4398, 4436), 'scipy.stats.wilcoxon', 'stats.wilcoxon', (['df_ind.mc1', 'df_ind.mc2'], {}), '(df_ind.mc1, df_ind.mc2)\n', (4412, 4436), False, 'from scipy import stats\n'), ((742, 766), 'pandas.DataFrame', 'pd.DataFrame', (['mc_results'], {}), '(mc_results)\n', (754, 766), True, 'import pandas as pd\n'), ((805, 823), 'numpy.log', 'np.log', (['mc_results'], {}), '(mc_results)\n', (811, 823), True, 'import numpy as np\n')]
|
from network import Regressor, Loss_gamma_0_6
import numpy as np
import skorch
from skorch import NeuralNetRegressor
from torch import optim
def load_model(load_cp, n_in=106,device='cuda'):
cp = skorch.callbacks.Checkpoint(dirname=load_cp)
net = NeuralNetRegressor(
Regressor(n_in=n_in),
criterion=Loss_gamma_0_6,
max_epochs=2000,
optimizer=optim.Adam,
optimizer__amsgrad=True,
optimizer__weight_decay=0.1,
lr=0.0003,
iterator_train__shuffle=True,
iterator_train__num_workers=32,
iterator_train__pin_memory=True,
device=device,
batch_size=50000,
iterator_train__batch_size=50000,
)
net.initialize()
net.load_params(checkpoint=cp)
return net
def save_model_para(model_cp):
    '''
    Convert trained model parameters (saved at checkpoint model_cp) to numpy
    format and save them to "model_paras.npy".
    :param model_cp: name of the skorch checkpoint directory to load
    :return: None
    '''
model = load_model(model_cp, n_in=106,device='cpu')
paras = []
for para in model.get_params()['module'].parameters():
paras.append(para.data.cpu().numpy())
np.save("model_paras.npy", paras)
save_model_para('cp_gamma_0_6')
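# --- Hypothetical follow-up sketch (not part of the original script): the file
# written above holds a list of per-layer arrays, so it has to be re-loaded
# with allow_pickle=True.
loaded_paras = np.load("model_paras.npy", allow_pickle=True)
print([p.shape for p in loaded_paras])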
|
[
"skorch.callbacks.Checkpoint",
"numpy.save",
"network.Regressor"
] |
[((201, 245), 'skorch.callbacks.Checkpoint', 'skorch.callbacks.Checkpoint', ([], {'dirname': 'load_cp'}), '(dirname=load_cp)\n', (228, 245), False, 'import skorch\n'), ((1112, 1145), 'numpy.save', 'np.save', (['"""model_paras.npy"""', 'paras'], {}), "('model_paras.npy', paras)\n", (1119, 1145), True, 'import numpy as np\n'), ((284, 304), 'network.Regressor', 'Regressor', ([], {'n_in': 'n_in'}), '(n_in=n_in)\n', (293, 304), False, 'from network import Regressor, Loss_gamma_0_6\n')]
|
import numpy as np
from gensim.models import Word2Vec
from src.utils import io
def run(
random_walk_files, output_file, dimensions=128, context_size=10, epochs=1, workers=1
):
"""Generates node vector embeddings from a list of files containing random
walks performed on different layers of a multilayer network.
Parameters
----------
random_walk_files: list
List of files containing random walks. Each file should correspond to random walks perform on a different layer
of the network of interest.
output_file: str
The file in which the node embeddings will be saved.
dimensions: int (default: 128)
Number of dimensions of the generated vector embeddings.
context_size: int (default: 10)
Context size in Word2Vec.
epochs: int (default: 1)
Number of epochs in stochastic gradient descent.
workers: int (default: 1)
Number of worker threads used to train the model.
"""
walks = np.concatenate([io.read_random_walks(file) for file in random_walk_files])
#print(walks.shape)
    walks_trim = [walk[walk != 0].astype(str).tolist() for walk in walks]  # drop zero-padding and cast node ids to strings
#print(walks_trim)
model = Word2Vec(
walks_trim,
size=dimensions,
window=context_size,
min_count=0,
sg=1, # use skip-gram
workers=workers,
iter=epochs,
)
model.wv.save_word2vec_format(output_file)
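# --- Hypothetical usage sketch (file names below are placeholders, not paths
# from the original project): embed a two-layer network from its per-layer
# random-walk files.
if __name__ == "__main__":
    run(["walks_layer0.txt", "walks_layer1.txt"], "node_embeddings.emb",
        dimensions=64, context_size=5, epochs=5, workers=4)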
|
[
"src.utils.io.read_random_walks",
"gensim.models.Word2Vec",
"numpy.split"
] |
[((1109, 1140), 'numpy.split', 'np.split', (['walks', 'walks.shape[0]'], {}), '(walks, walks.shape[0])\n', (1117, 1140), True, 'import numpy as np\n'), ((1248, 1360), 'gensim.models.Word2Vec', 'Word2Vec', (['walks_trim'], {'size': 'dimensions', 'window': 'context_size', 'min_count': '(0)', 'sg': '(1)', 'workers': 'workers', 'iter': 'epochs'}), '(walks_trim, size=dimensions, window=context_size, min_count=0, sg=\n 1, workers=workers, iter=epochs)\n', (1256, 1360), False, 'from gensim.models import Word2Vec\n'), ((1009, 1035), 'src.utils.io.read_random_walks', 'io.read_random_walks', (['file'], {}), '(file)\n', (1029, 1035), False, 'from src.utils import io\n')]
|
import numpy as np
import abc
class ProbabilityDistribution(abc.ABC):
"""
Class representing the interface for a probability distribution
"""
@abc.abstractmethod
def sample(self, size):
"""
This method must return an array with length "size", sampling the distribution
# Arguments:
size: Size of the sampling
"""
class NormalDistribution(ProbabilityDistribution):
"""
Implements Normal Distribution
# Arguments:
mean: Mean of the normal distribution.
std: Standard deviation of the normal distribution
"""
def __init__(self, mean, std):
self._mean = mean
self._std = std
def sample(self, size):
"""
This method provides a sample of the given size of a gaussian distributions
# Arguments:
size: size of the sample
# Returns:
Sample of a gaussian distribution of a given size
"""
return np.random.normal(self._mean, self._std, size)
class GaussianMixture(ProbabilityDistribution):
"""
Implements the combination of Normal Distributions
# Arguments:
params: Array of arrays with mean and std for every gaussian distribution.
weights: Array of weights for every distribution with sum 1.
# Example:
```python
# Parameters for two Gaussian
mu_M = 178
mu_F = 162
sigma_M = 7
sigma_F = 7
# Parameters
norm_params = np.array([[mu_M, sigma_M],
[mu_F, sigma_F]])
weights = np.ones(2) / 2.0
# Creating combination of gaussian
distribution = GaussianMixture(norm_params, weights)
```
"""
def __init__(self, params, weights):
self._gaussian_distributions = []
for param in params:
self._gaussian_distributions.append(NormalDistribution(param[0], param[1]))
self._weights = weights
def sample(self, size):
"""
This method provides a sample of the given size of a mixture of gaussian distributions
# Arguments:
size: size of the sample
# Returns:
Sample of a mixture of gaussian distributions of a given size
"""
mixture_idx = np.random.choice(len(self._weights), size=size, replace=True, p=self._weights)
values = []
for i in mixture_idx:
gaussian_distributions = self._gaussian_distributions[i]
            values.append(gaussian_distributions.sample(1)[0])  # take the scalar so np.fromiter receives plain floats
return np.fromiter(values, dtype=np.float64)
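# --- Usage sketch continuing the docstring example above (the height values
# are the same illustrative numbers; nothing here comes from a real dataset).
if __name__ == "__main__":
    norm_params = np.array([[178, 7], [162, 7]])
    weights = np.ones(2) / 2.0
    distribution = GaussianMixture(norm_params, weights)
    heights = distribution.sample(1000)
    print(heights.mean(), heights.std())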
|
[
"numpy.random.normal",
"numpy.fromiter"
] |
[((987, 1032), 'numpy.random.normal', 'np.random.normal', (['self._mean', 'self._std', 'size'], {}), '(self._mean, self._std, size)\n', (1003, 1032), True, 'import numpy as np\n'), ((2570, 2607), 'numpy.fromiter', 'np.fromiter', (['values'], {'dtype': 'np.float64'}), '(values, dtype=np.float64)\n', (2581, 2607), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import copy
from morphodynamics.landscapes.utils import get_meshgrid
from morphodynamics.landscapes.analysis.get_fields import *
from morphodynamics.landscapes.analysis.sde_forward import *
class Save_Ims():
"""
Save losses during training and the potential as an array
"""
def __init__(self, model, save_dir):
"""
- model: this is the physics-informed neural network (PINN)
- save_dir: where to save the plot and potential to
"""
self.model = model
self.save_dir = save_dir
x_test, y_test = get_meshgrid(model.xlims, model.ylims, model.dims, flatBool = True)
self.x_test , self.y_test = tf.convert_to_tensor(x_test), tf.convert_to_tensor(y_test)
self.fig = plt.figure(figsize = (30, 20))
self.gs = gridspec.GridSpec(nrows = 15, ncols = 17)
def __call__(self):
self._plot_losses()
self._plot_pdfs_getUandD()
self._plot_and_save_U()
#plt.savefig(self.save_dir + 'View_{}_{}.png'.format(self.model.save_append, self.model.idx_save))
#plt.close()
def _setup_ax(self, ax):
ax.set_aspect('equal', adjustable = 'box')
ax.set_xlim(self.model.xlims)
ax.set_ylim(self.model.ylims)
def _plot_losses(self):
"""
Plot how each of the loss terms changes in time
"""
ax = self.fig.add_subplot(self.gs[2:5, :7])
losses = [self.model.data_losses, self.model.BC_losses, self.model.pde_losses, self.model.total_losses, self.model.norm_losses]
labels = ['pdf', 'BC', 'pde', 'total', 'norm']
zipped = zip(losses, labels)
for loss_list, label in zipped:
ax.plot(np.log10(loss_list), label = label)
ax.legend()
def _plot_pdfs_getUandD(self):
"""
Run inference to get the pdf, potential (U) and diffusivity (D)
"""
p_max = 0
D_max = 0
for idx_t, test_time in enumerate(np.linspace(self.model.tlims[0], self.model.tlims[1], 7)): # test for a range of unseen times
t_test = np.tile(np.array([test_time]), (self.x_test.shape[0], 1))
t_test = tf.convert_to_tensor(t_test)
xyt_test = tf.concat((self.x_test, self.y_test, t_test), axis = 1)
p_out, D_out, U_out = self.model.predict(xyt_test)
D_out = D_out.numpy()
p_max = max(p_max, np.max(p_out))
D_max = max(D_max, np.max(D_out))
for idx_t, test_time in enumerate(np.linspace(self.model.tlims[0], self.model.tlims[1], 7)): # test for a range of unseen times
t_test = np.tile(np.array([test_time]), (self.x_test.shape[0], 1))
t_test = tf.convert_to_tensor(t_test)
xyt_test = tf.concat((self.x_test, self.y_test, t_test), axis = 1)
p_out, D_out, U_out = self.model.predict(xyt_test)
p_out = p_out.numpy()
D_out = D_out.numpy()
U_out = U_out.numpy()
ax_p = self.fig.add_subplot(self.gs[6, idx_t])
p_out[p_out<1e-7] = np.nan
ax_p.scatter(self.x_test, self.y_test, c = np.log10(p_out), vmin = -7, vmax = max(np.log10(p_max), -7))
self._setup_ax(ax_p)
ax_D = self.fig.add_subplot(self.gs[6, 8+idx_t])
ax_D.scatter(self.x_test, self.y_test, c = D_out, vmin = 0, vmax = D_max)
self._setup_ax(ax_D)
for idx_t, arr in enumerate(self.model.pdf_list):
ax = self.fig.add_subplot(self.gs[14, idx_t])
to_log = copy.deepcopy(arr)
to_log[to_log<1e-7] = np.nan
ax.imshow(np.log10(to_log.reshape((200, 200))[::-1, :]))
self.U_out = U_out
def _plot_and_save_U(self):
"""
Plot and save the potential as an array
"""
U = np.reshape(self.U_out, (self.model.dims, self.model.dims))
path = self.save_dir + 'potential.pickle'
dump_pickle(U, path)
ax = self.fig.add_subplot(self.gs[:4, 10:14])
gx, gy = np.gradient(U)
ax.imshow(np.log10(np.sqrt(gx**2 + gy**2))[::-1, :])
ax.set_aspect('equal', adjustable = 'box')
|
[
"copy.deepcopy",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"numpy.log10",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.array",
"morphodynamics.landscapes.utils.get_meshgrid",
"numpy.reshape",
"numpy.linspace",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"numpy.gradient",
"numpy.sqrt"
] |
[((61, 82), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (75, 82), False, 'import matplotlib\n'), ((725, 790), 'morphodynamics.landscapes.utils.get_meshgrid', 'get_meshgrid', (['model.xlims', 'model.ylims', 'model.dims'], {'flatBool': '(True)'}), '(model.xlims, model.ylims, model.dims, flatBool=True)\n', (737, 790), False, 'from morphodynamics.landscapes.utils import get_meshgrid\n'), ((908, 936), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 20)'}), '(figsize=(30, 20))\n', (918, 936), True, 'import matplotlib.pyplot as plt\n'), ((957, 994), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': '(15)', 'ncols': '(17)'}), '(nrows=15, ncols=17)\n', (974, 994), True, 'import matplotlib.gridspec as gridspec\n'), ((3971, 4029), 'numpy.reshape', 'np.reshape', (['self.U_out', '(self.model.dims, self.model.dims)'], {}), '(self.U_out, (self.model.dims, self.model.dims))\n', (3981, 4029), True, 'import numpy as np\n'), ((4182, 4196), 'numpy.gradient', 'np.gradient', (['U'], {}), '(U)\n', (4193, 4196), True, 'import numpy as np\n'), ((829, 857), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_test'], {}), '(x_test)\n', (849, 857), True, 'import tensorflow as tf\n'), ((859, 887), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y_test'], {}), '(y_test)\n', (879, 887), True, 'import tensorflow as tf\n'), ((2124, 2180), 'numpy.linspace', 'np.linspace', (['self.model.tlims[0]', 'self.model.tlims[1]', '(7)'], {}), '(self.model.tlims[0], self.model.tlims[1], 7)\n', (2135, 2180), True, 'import numpy as np\n'), ((2318, 2346), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['t_test'], {}), '(t_test)\n', (2338, 2346), True, 'import tensorflow as tf\n'), ((2370, 2423), 'tensorflow.concat', 'tf.concat', (['(self.x_test, self.y_test, t_test)'], {'axis': '(1)'}), '((self.x_test, self.y_test, t_test), axis=1)\n', (2379, 2423), True, 'import tensorflow as tf\n'), ((2659, 2715), 'numpy.linspace', 'np.linspace', (['self.model.tlims[0]', 'self.model.tlims[1]', '(7)'], {}), '(self.model.tlims[0], self.model.tlims[1], 7)\n', (2670, 2715), True, 'import numpy as np\n'), ((2854, 2882), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['t_test'], {}), '(t_test)\n', (2874, 2882), True, 'import tensorflow as tf\n'), ((2906, 2959), 'tensorflow.concat', 'tf.concat', (['(self.x_test, self.y_test, t_test)'], {'axis': '(1)'}), '((self.x_test, self.y_test, t_test), axis=1)\n', (2915, 2959), True, 'import tensorflow as tf\n'), ((3695, 3713), 'copy.deepcopy', 'copy.deepcopy', (['arr'], {}), '(arr)\n', (3708, 3713), False, 'import copy\n'), ((1857, 1876), 'numpy.log10', 'np.log10', (['loss_list'], {}), '(loss_list)\n', (1865, 1876), True, 'import numpy as np\n'), ((2247, 2268), 'numpy.array', 'np.array', (['[test_time]'], {}), '([test_time])\n', (2255, 2268), True, 'import numpy as np\n'), ((2554, 2567), 'numpy.max', 'np.max', (['p_out'], {}), '(p_out)\n', (2560, 2567), True, 'import numpy as np\n'), ((2600, 2613), 'numpy.max', 'np.max', (['D_out'], {}), '(D_out)\n', (2606, 2613), True, 'import numpy as np\n'), ((2783, 2804), 'numpy.array', 'np.array', (['[test_time]'], {}), '([test_time])\n', (2791, 2804), True, 'import numpy as np\n'), ((3282, 3297), 'numpy.log10', 'np.log10', (['p_out'], {}), '(p_out)\n', (3290, 3297), True, 'import numpy as np\n'), ((4224, 4250), 'numpy.sqrt', 'np.sqrt', (['(gx ** 2 + gy ** 2)'], {}), '(gx ** 2 + gy ** 2)\n', (4231, 4250), True, 'import numpy as np\n'), ((3321, 3336), 'numpy.log10', 'np.log10', 
(['p_max'], {}), '(p_max)\n', (3329, 3336), True, 'import numpy as np\n')]
|
import numpy as np
import inspect
from scipy.linalg import qr as qr_factorization
from copy import deepcopy
from pyapprox.utilities import cartesian_product, outer_product
from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D
from pyapprox.barycentric_interpolation import (
compute_barycentric_weights_1d,
multivariate_barycentric_lagrange_interpolation
)
from pyapprox.models.wrappers import (
evaluate_1darray_function_on_2d_array
)
from pyapprox.utilities import qr_solve
def kronecker_product_2d(matrix1, matrix2):
"""
    TODO: the Kronecker product can be stored as a sparse matrix (see scipy.sparse.kron)
"""
assert matrix1.shape == matrix2.shape
assert matrix1.ndim == 2
block_num_rows = matrix1.shape[0]
matrix_num_rows = block_num_rows**2
matrix = np.empty((matrix_num_rows, matrix_num_rows), float)
# loop through blocks
start_col = 0
for jj in range(block_num_rows):
start_row = 0
for ii in range(block_num_rows):
matrix[start_row:start_row+block_num_rows,
start_col:start_col+block_num_rows] = \
matrix2*matrix1[ii, jj]
start_row += block_num_rows
start_col += block_num_rows
return matrix
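# --- Cross-check sketch (not part of the original module): for equal-shaped
# square blocks the hand-rolled routine above should agree with numpy's
# built-in Kronecker product.
def _check_kronecker_product_2d(num_rows=3, seed=0):
    rng = np.random.RandomState(seed)
    matrix1 = rng.rand(num_rows, num_rows)
    matrix2 = rng.rand(num_rows, num_rows)
    assert np.allclose(kronecker_product_2d(matrix1, matrix2),
                       np.kron(matrix1, matrix2))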
def chebyshev_derivative_matrix(order):
if order == 0:
pts = np.array([1], float)
derivative_matrix = np.array([0], float)
else:
# this is reverse order used by matlab cheb function
pts = -np.cos(np.linspace(0., np.pi, order+1))
scalars = np.ones((order+1), float)
scalars[0] = 2.
scalars[order] = 2.
scalars[1:order+1:2] *= -1
derivative_matrix = np.empty((order+1, order+1), float)
for ii in range(order+1):
row_sum = 0.
for jj in range(order+1):
if (ii == jj):
denominator = 1.
else:
denominator = pts[ii]-pts[jj]
numerator = scalars[ii] / scalars[jj]
derivative_matrix[ii, jj] = numerator / denominator
row_sum += derivative_matrix[ii, jj]
derivative_matrix[ii, ii] -= row_sum
# I return points and calculate derivatives using reverse order of points
# compared to what is used by Matlab cheb function thus the
# derivative matrix I return will be the negative of the matlab version
return pts, derivative_matrix
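# --- Verification sketch (not part of the original module): the matrix
# returned above should differentiate a low-degree polynomial exactly at the
# returned Chebyshev points, whatever sign convention is used.
def _check_chebyshev_derivative_matrix(order=5):
    pts, deriv_mat = chebyshev_derivative_matrix(order)
    values = pts**3           # f(x)  = x**3
    exact = 3.0 * pts**2      # f'(x) = 3*x**2
    assert np.allclose(np.dot(deriv_mat, values), exact)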
class SteadyStateDiffusionEquation1D(object):
"""
    solve (a(x)*u_x)_x = f for x in [0,1] (or the interval set by xlim),
    subject to Dirichlet conditions u(0)=bndry_cond[0], u(1)=bndry_cond[1]
"""
def __init__(self):
self.diffusivity = None
self.forcing_function = None
self.bndry_cond = [0., 0.]
self.xlim = [0, 1]
self.adjoint_derivative_matrix = None
self.adjoint_mesh_pts = None
self.num_time_steps = 0
self.time_step_size = None
self.initial_sol = None
self.num_stored_timesteps = 1
self.time_step_method = 'crank-nicholson'
# default qoi functional is integral of solution over entire domain
self.qoi_functional = self.integrate
self.qoi_functional_deriv = lambda x: x*0.+1.
def scale_canonical_pts(self, pts):
return (self.xlim[1]-self.xlim[0])*(pts+1.)/2.+self.xlim[0]
def initialize(self, order, bndry_cond=None, xlim=None):
self.order = order
if xlim is not None:
self.xlim = xlim
if bndry_cond is not None:
self.bndry_cond = bndry_cond
mesh_pts, self.derivative_matrix = chebyshev_derivative_matrix(order)
# scale mesh points to from [-1,1] to [a,b]
self.mesh_pts_1d = self.scale_canonical_pts(mesh_pts)
self.mesh_pts = self.mesh_pts_1d
# scale derivative matrix from [-1,1] to [a,b]
self.derivative_matrix *= 2./(self.xlim[1]-self.xlim[0])
def set_diffusivity(self, func):
assert callable(func)
        assert len(inspect.getfullargspec(func)[0]) == 2
self.diffusivity = func
def set_forcing(self, func):
assert callable(func)
        assert len(inspect.getfullargspec(func)[0]) == 2
self.forcing_function = func
def form_collocation_matrix(self, derivative_matrix, diagonal):
scaled_matrix = np.empty(derivative_matrix.shape)
for i in range(scaled_matrix.shape[0]):
scaled_matrix[i, :] = derivative_matrix[i, :] * diagonal[i]
matrix = np.dot(derivative_matrix, scaled_matrix)
return matrix
def apply_boundary_conditions_to_matrix(self, matrix):
matrix[0, :] = 0
matrix[-1, :] = 0
matrix[0, 0] = 1
matrix[-1, -1] = 1
return matrix
def apply_boundary_conditions_to_rhs(self, rhs):
rhs[0] = self.bndry_cond[0]
rhs[-1] = self.bndry_cond[1]
return rhs
def apply_boundary_conditions(self, matrix, forcing):
assert len(self.bndry_cond) == 2
matrix = self.apply_boundary_conditions_to_matrix(matrix)
forcing = self.apply_boundary_conditions_to_rhs(forcing)
return matrix, forcing
def explicit_runge_kutta(self, rhs, sol, time, time_step_size):
assert callable(rhs)
dt2 = time_step_size/2.
k1 = rhs(time, sol)
k2 = rhs(time+dt2, sol+dt2*k1)
k3 = rhs(time+dt2, sol+dt2*k2)
k4 = rhs(time+time_step_size, sol+time_step_size*k3)
new_sol = sol+time_step_size/6.*(k1+2.*k2+2.*k3+k4)
new_sol[0] = self.bndry_cond[0]
new_sol[-1] = self.bndry_cond[1]
return new_sol
def form_adams_moulton_3rd_order_system(self, matrix, current_sol,
current_forcing, future_forcing,
prev_forcing, prev_sol,
time_step_size):
""" 3rd order Adams-Moultobn method
WARNING: seems to be unstable (at least my implementation)
y_{n+2} = y_{n+1}+h(c_0y_{n+2}+c_1y_{n+1}+c_3y_{n})
c = (5/12,2/3,-1./12)
"""
dt12 = time_step_size/12.
dt12matrix = dt12*matrix
identity = np.eye(matrix.shape[0])
matrix = identity-5.*dt12matrix
forcing = np.dot(identity+8.*dt12matrix, current_sol)
forcing += dt12*(5.*future_forcing+8.*current_forcing-prev_forcing)
forcing -= np.dot(dt12matrix, prev_sol)
# currently I do not support time varying boundary conditions
return self.apply_boundary_conditions(matrix, forcing)
def get_implicit_time_step_rhs(self, current_sol, time, sample):
future_forcing = self.forcing_function(
self.mesh_pts, time+self.time_step_size, sample)
if (self.time_step_method == "backward-euler"):
forcing = current_sol + self.time_step_size*future_forcing
elif (self.time_step_method == "crank-nicholson"):
identity = np.eye(self.collocation_matrix.shape[0])
forcing = np.dot(
identity+0.5*self.time_step_size*self.collocation_matrix, current_sol)
current_forcing = self.forcing_function(
self.mesh_pts, time, sample)
forcing += 0.5*self.time_step_size*(current_forcing+future_forcing)
else:
raise Exception('incorrect timestepping method specified')
# apply boundary conditions
forcing[0] = self.bndry_cond[0]
forcing[-1] = self.bndry_cond[1]
return forcing
def get_implicit_timestep_matrix_inverse_factors(self, matrix):
identity = np.eye(matrix.shape[0])
if (self.time_step_method == "backward-euler"):
matrix = identity-self.time_step_size*matrix
elif (self.time_step_method == "crank-nicholson"):
matrix = identity-self.time_step_size/2.*matrix
else:
raise Exception('incorrect timestepping method specified')
self.apply_boundary_conditions_to_matrix(matrix)
return qr_factorization(matrix)
def time_step(self, current_sol, time, sample):
if self.time_step_method == 'RK4':
def rhs_func(t, u): return np.dot(
self.collocation_matrix, u) +\
self.forcing_function(self.mesh_pts, t, sample)
current_sol = self.explicit_runge_kutta(
rhs_func, current_sol, time, self.time_step_size)
else:
rhs = self.get_implicit_time_step_rhs(current_sol, time, sample)
current_sol = qr_solve(
self.implicit_matrix_factors[0], self.implicit_matrix_factors[1],
rhs[:, None])[:, 0]
#current_sol = np.linalg.solve( matrix, rhs )
return current_sol
def transient_solve(self, sample):
# in future consider supporting time varying diffusivity. This would
# require updating collocation matrix at each time-step
# for now make diffusivity time-independent
# assert self.diffusivity_function.__code__.co_argcount == 3
diffusivity = self.diffusivity_function(self.mesh_pts, sample)
self.collocation_matrix = self.form_collocation_matrix(
self.derivative_matrix, diffusivity)
# consider replacing time = 0 with time = self.initial_time
time = 0.
assert self.forcing_function.__code__.co_argcount == 3
current_forcing = self.forcing_function(self.mesh_pts, time, sample)
if self.num_time_steps > 0:
assert self.initial_sol is not None
assert self.time_step_size is not None
current_sol = self.initial_sol.copy()
assert self.num_stored_timesteps <= self.num_time_steps
# num_time_steps is number of steps taken after initial time
self.times = np.empty((self.num_stored_timesteps), float)
sols = np.empty((self.initial_sol.shape[0],
self.num_stored_timesteps), float)
sol_cntr = 0
sol_storage_stride = self.num_time_steps/self.num_stored_timesteps
if self.time_step_method != 'RK4':
self.implicit_matrix_factors = \
self.get_implicit_timestep_matrix_inverse_factors(
self.collocation_matrix)
for i in range(1, self.num_time_steps+1):
# Construct linear system
current_sol = self.time_step(current_sol, time, sample)
time += self.time_step_size
# Store history if requested
if i % sol_storage_stride == 0:
sols[:, sol_cntr] = current_sol
self.times[sol_cntr] = time
sol_cntr += 1
assert sol_cntr == self.num_stored_timesteps
return sols
else:
current_forcing = self.forcing_function(
self.mesh_pts, time, sample)
matrix, rhs = self.apply_boundary_conditions(
self.collocation_matrix.copy(), current_forcing)
return np.linalg.solve(matrix, rhs)
def solve(self, diffusivity, forcing):
assert diffusivity.ndim == 1
assert forcing.ndim == 1
        # forcing will be overwritten with boundary values so we must take a
        # deep copy
forcing = forcing.copy()
# we need another copy so that forcing can be used when solving adjoint
self.forcing_vals = forcing.copy()
assert not np.any(diffusivity <= 0.)
self.collocation_matrix = self.form_collocation_matrix(
self.derivative_matrix, diffusivity)
matrix, forcing = self.apply_boundary_conditions(
self.collocation_matrix.copy(), forcing)
solution = np.linalg.solve(matrix, forcing)
# store solution for use with adjoints
self.fwd_solution = solution.copy()
return solution
def run(self, sample):
assert sample.ndim == 1
diffusivity = self.diffusivity_function(self.mesh_pts, sample)
forcing = self.forcing_function(self.mesh_pts, sample)
solution = self.solve(diffusivity, forcing)
return solution
def solve_adjoint(self, sample, order):
"""
Typically with FEM we solve Ax=b and the discrete adjoint equation
is A'y=z. But with collocation this does not work. Instead of
taking the adjoint of the discrete system as the aforemntioned
approach does. We discretize continuous adjoint equation. Which for
the ellipic diffusion equation is just Ay=z. That is the adjoint
of A is A.
"""
if order == self.order:
# used when computing gradient from adjoint solution
matrix = self.collocation_matrix.copy()
else:
# used when computing error estimate from adjoint solution
if self.adjoint_derivative_matrix is None:
adjoint_mesh_pts, self.adjoint_derivative_matrix = \
chebyshev_derivative_matrix(order)
self.adjoint_mesh_pts = self.scale_canonical_pts(
adjoint_mesh_pts)
# scale derivative matrix from [-1,1] to [a,b]
self.adjoint_derivative_matrix *= 2. / \
(self.xlim[1]-self.xlim[0])
diffusivity = self.diffusivity_function(
self.adjoint_mesh_pts, sample)
matrix = self.form_collocation_matrix(
self.adjoint_derivative_matrix, diffusivity)
self.adjoint_collocation_matrix = matrix.copy()
# regardless of whether computing error estimate or
# computing gradient, rhs is always derivative (with respect to the
# solution) of the qoi_functional
qoi_deriv = self.qoi_functional_deriv(self.fwd_solution)
matrix = self.apply_boundary_conditions_to_matrix(matrix)
qoi_deriv = self.apply_adjoint_boundary_conditions_to_rhs(qoi_deriv)
adj_solution = np.linalg.solve(matrix, qoi_deriv)
return adj_solution
def apply_adjoint_boundary_conditions_to_rhs(self, qoi_deriv):
# adjoint always has zero Dirichlet BC
qoi_deriv[0] = 0
qoi_deriv[-1] = 0
return qoi_deriv
def compute_residual(self, matrix, solution, forcing):
matrix, forcing = self.apply_boundary_conditions(matrix, forcing)
return forcing - np.dot(matrix, solution)
def compute_residual_derivative(self, solution, diagonal,
forcing_deriv):
matrix = self.form_collocation_matrix(self.derivative_matrix,
diagonal)
        # Todo: check if boundary conditions need to be applied to both
        # matrix and forcing_derivs or just matrix. If the former,
        # what boundary conditions do I impose on the forcing deriv?
matrix = self.apply_boundary_conditions_to_matrix(
matrix)
# the values here are the derivative of the boundary conditions
# with respect to the random parameters. I assume that
# this is always zero
forcing_deriv[0] = 0
forcing_deriv[-1] = 0
return forcing_deriv.squeeze() - np.dot(matrix, solution)
def compute_error_estimate(self, sample):
raise NotImplementedError("Not passing tests")
# must solve adjoint with a higher order grid
adj_solution = self.solve_adjoint(sample, self.order*2)
# interpolate forward solution onto higher-order grid
interp_fwd_solution = self.interpolate(
self.fwd_solution, self.adjoint_mesh_pts)
# compute residual of forward solution using higher-order grid
forcing_vals = self.forcing_function(self.adjoint_mesh_pts,
sample)
# compute residual
residual = self.compute_residual(self.adjoint_collocation_matrix,
interp_fwd_solution, forcing_vals)
# self.plot(interp_fwd_solution+adj_solution,
# plot_mesh_coords=self.adjoint_mesh_pts )
# self.plot(residual, plot_mesh_coords=self.adjoint_mesh_pts,
# color='r')
# pylab.show()
# print self.integrate((adj_solution+interp_fwd_solution )**2)
# print(np.dot(residual, adj_solution )/self.integrate(
# residual * adj_solution)
print('cond', np.linalg.cond(self.adjoint_collocation_matrix))
error_estimate = self.integrate(residual * adj_solution, self.order*2)
return error_estimate
def evaluate_gradient(self, sample):
assert sample.ndim == 1
num_stoch_dims = sample.shape[0]
# qoi_deriv = self.qoi_functional_deriv(self.mesh_pts)
adj_solution = self.solve_adjoint(sample, self.order)
gradient = np.empty((num_stoch_dims), float)
for i in range(num_stoch_dims):
diffusivity_deriv_vals_i = self.diffusivity_derivs_function(
self.mesh_pts.squeeze(), sample, i)
forcing_deriv_vals_i = self.forcing_derivs_function(
self.mesh_pts.squeeze(), sample, i)
residual_deriv = self.compute_residual_derivative(
self.fwd_solution, diffusivity_deriv_vals_i,
forcing_deriv_vals_i)
gradient[i] = self.integrate(residual_deriv * adj_solution)
return gradient
def value(self, sample):
assert sample.ndim == 1
solution = self.run(sample)
qoi = self.qoi_functional(solution)
if np.isscalar(qoi) or qoi.ndim == 0:
qoi = np.array([qoi])
return qoi
def integrate(self, mesh_values, order=None):
if order is None:
order = self.order
# Get Gauss-Legendre rule
gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(order, 0, 0)
# Scale points from [-1,1] to to physical domain
x_range = self.xlim[1]-self.xlim[0]
gl_pts = x_range*(gl_pts+1.)/2.+self.xlim[0]
# Remove factor of 0.5 from weights
gl_wts *= x_range
# Interpolate mesh values onto quadrature nodes
gl_vals = self.interpolate(mesh_values, gl_pts)
# Compute and return integral
return np.dot(gl_vals[:, 0], gl_wts)
def interpolate(self, mesh_values, eval_samples):
if eval_samples.ndim == 1:
eval_samples = eval_samples[None, :]
if mesh_values.ndim == 1:
mesh_values = mesh_values[:, None]
assert mesh_values.ndim == 2
num_dims = eval_samples.shape[0]
abscissa_1d = [self.mesh_pts_1d]*num_dims
weights_1d = [compute_barycentric_weights_1d(xx) for xx in abscissa_1d]
interp_vals = multivariate_barycentric_lagrange_interpolation(
eval_samples,
abscissa_1d,
weights_1d,
mesh_values,
np.arange(num_dims))
return interp_vals
def plot(self, mesh_values, num_plot_pts_1d=None, plot_mesh_coords=None,
color='k'):
import pylab
if num_plot_pts_1d is not None:
# interpolate values onto plot points
plot_mesh = np.linspace(
self.xlim[0], self.xlim[1], num_plot_pts_1d)
interp_vals = self.interpolate(mesh_values, plot_mesh)
pylab.plot(plot_mesh, interp_vals, color+'-')
elif plot_mesh_coords is not None:
assert mesh_values.shape[0] == plot_mesh_coords.squeeze().shape[0]
pylab.plot(plot_mesh_coords, mesh_values, 'o-'+color)
else:
# just plot values on mesh points
pylab.plot(self.mesh_pts, mesh_values, color)
def get_collocation_points(self):
return np.atleast_2d(self.mesh_pts)
def get_derivative_matrix(self):
return self.derivative_matrix
def __call__(self, samples):
return evaluate_1darray_function_on_2d_array(
self.value, samples, None)
class SteadyStateDiffusionEquation2D(SteadyStateDiffusionEquation1D):
"""
solve (a(x)*u_x)_x = f; x in [0,1]x[0,1];
subject to u(0,:)=a(x); u(:,0)=b(x), u(1,:)=c(x), u(:,1)=d(x)
"""
def __init__(self):
self.diffusivity = None
self.forcing_function = None
self.bndry_cond = [0., 0., 0., 0.]
self.xlim = [0, 1]
self.ylim = [0, 1]
self.left_bc, self.right_bc = None, None
self.top_bc, self.bottom_bc = None, None
# default qoi functional is integral of solution over entire domain
self.qoi_functional = self.integrate
self.qoi_functional_deriv = lambda x: x*0.+1.
def determine_boundary_indices(self):
# boundary edges are stored with the following order,
# left, right, bottom, top
self.boundary_edges = [[], [], [], []]
self.boundary_indices = np.empty((4*self.order), int)
        # To avoid double counting, the bottom and top boundaries
        # will not include the corner (edge) indices
cntr = 0
for i in range(self.mesh_pts.shape[1]):
if (self.mesh_pts[0, i] == self.xlim[0]):
self.boundary_indices[cntr] = i
self.boundary_edges[0].append(cntr)
cntr += 1
elif (self.mesh_pts[0, i] == self.xlim[1]):
self.boundary_indices[cntr] = i
self.boundary_edges[1].append(cntr)
cntr += 1
elif (self.mesh_pts[1, i] == self.ylim[0]):
self.boundary_indices[cntr] = i
self.boundary_edges[2].append(cntr)
cntr += 1
elif (self.mesh_pts[1, i] == self.ylim[1]):
self.boundary_indices[cntr] = i
self.boundary_edges[3].append(cntr)
cntr += 1
def initialize(self, order, bndry_cond=None, lims=None):
        # the 1d model transforms the 1d mesh pts from [-1,1] to [a,b]
        # I will assume that the second physical dimension is also [a,b]
super(SteadyStateDiffusionEquation2D, self).initialize(order,
bndry_cond[:2],
lims[:2])
self.ylim = lims[2:]
self.bndry_cond = bndry_cond
self.order = order
self.mesh_pts_1d = self.mesh_pts
self.mesh_pts = cartesian_product([self.mesh_pts_1d]*2, 1)
# note scaling of self.derivative_matrix to [a,b] happens at base class
self.determine_boundary_indices()
# form derivative (in x1-direction) matrix of a 2d polynomial
# this assumes that 2d-mesh_pts varies in x1 faster than x2,
# e.g. points are
# [[x11,x21],[x12,x21],[x13,x12],[x11,x22],[x12,x22],...]
Ident = np.eye(self.order+1)
derivative_matrix_1d = self.get_derivative_matrix()
self.derivative_matrix_1 = np.kron(Ident, derivative_matrix_1d)
# form derivative (in x2-direction) matrix of a 2d polynomial
self.derivative_matrix_2 = np.kron(derivative_matrix_1d, Ident)
def form_collocation_matrix(self, derivative_matrix, diagonal):
scaled_matrix_1 = np.empty(self.derivative_matrix_1.shape)
scaled_matrix_2 = np.empty(self.derivative_matrix_2.shape)
for i in range(scaled_matrix_1.shape[0]):
scaled_matrix_1[i, :] = self.derivative_matrix_1[i, :]*diagonal[i]
scaled_matrix_2[i, :] = self.derivative_matrix_2[i, :]*diagonal[i]
matrix_1 = np.dot(self.derivative_matrix_1, scaled_matrix_1)
matrix_2 = np.dot(self.derivative_matrix_2, scaled_matrix_2)
return matrix_1 + matrix_2
def apply_boundary_conditions_to_matrix(self, matrix):
        # apply default homogeneous zero-value Dirichlet conditions if
        # necessary
if self.left_bc is None:
self.left_bc = lambda x: 0.
if self.right_bc is None:
self.right_bc = lambda x: 0.
if self.bottom_bc is None:
self.bottom_bc = lambda x: 0.
if self.top_bc is None:
self.top_bc = lambda x: 0.
# adjust collocation matrix
matrix[self.boundary_indices, :] = 0.
for i in range(self.boundary_indices.shape[0]):
index = self.boundary_indices[i]
matrix[index, index] = 1.
return matrix
def apply_boundary_conditions_to_rhs(self, forcing):
# apply left boundary condition
indices = self.boundary_indices[self.boundary_edges[0]]
forcing[indices] = self.left_bc(self.mesh_pts[0, indices])
# apply right boundary condition
indices = self.boundary_indices[self.boundary_edges[1]]
forcing[indices] = self.right_bc(self.mesh_pts[0, indices])
# apply bottom boundary condition
indices = self.boundary_indices[self.boundary_edges[2]]
forcing[indices] = self.bottom_bc(self.mesh_pts[1, indices])
# apply top boundary condition
indices = self.boundary_indices[self.boundary_edges[3]]
forcing[indices] = self.top_bc(self.mesh_pts[1, indices])
return forcing
def plot(self, mesh_values, num_plot_pts_1d=100):
if num_plot_pts_1d is not None:
# interpolate values onto plot points
def func(x): return self.interpolate(mesh_values, x)
from utilities.visualisation import plot_surface_from_function
plot_surface_from_function(func, [self.xlim[0], self.xlim[1],
self.ylim[0], self.ylim[1]],
num_plot_pts_1d, False)
def apply_adjoint_boundary_conditions_to_rhs(self, qoi_deriv):
# adjoint always has zero Dirichlet BC
# apply left boundary condition
for ii in range(4):
indices = self.boundary_indices[self.boundary_edges[ii]]
qoi_deriv[indices] = 0
return qoi_deriv
def integrate(self, mesh_values, order=None):
if order is None:
order = self.order
# Get Gauss-Legendre rule
gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(order, 0, 0)
pts_1d, wts_1d = [], []
lims = self.xlim+self.ylim
for ii in range(2):
# Scale points from [-1,1] to to physical domain
x_range = lims[2*ii+1]-lims[2*ii]
# Remove factor of 0.5 from weights and shift to [a,b]
wts_1d.append(gl_wts*x_range)
pts_1d.append(x_range*(gl_pts+1.)/2.+lims[2*ii])
# Interpolate mesh values onto quadrature nodes
pts = cartesian_product(pts_1d)
wts = outer_product(wts_1d)
gl_vals = self.interpolate(mesh_values, pts)
# Compute and return integral
return np.dot(gl_vals[:, 0], wts)
|
[
"numpy.empty",
"pyapprox.utilities.qr_solve",
"numpy.ones",
"numpy.linalg.cond",
"pyapprox.barycentric_interpolation.compute_barycentric_weights_1d",
"numpy.arange",
"pyapprox.utilities.cartesian_product",
"numpy.linalg.solve",
"numpy.atleast_2d",
"pylab.plot",
"numpy.kron",
"numpy.linspace",
"utilities.visualisation.plot_surface_from_function",
"scipy.linalg.qr",
"numpy.dot",
"pyapprox.univariate_polynomials.quadrature.gauss_jacobi_pts_wts_1D",
"pyapprox.models.wrappers.evaluate_1darray_function_on_2d_array",
"numpy.isscalar",
"numpy.any",
"numpy.array",
"inspect.getargspec",
"numpy.eye",
"pyapprox.utilities.outer_product"
] |
[((807, 858), 'numpy.empty', 'np.empty', (['(matrix_num_rows, matrix_num_rows)', 'float'], {}), '((matrix_num_rows, matrix_num_rows), float)\n', (815, 858), True, 'import numpy as np\n'), ((1327, 1347), 'numpy.array', 'np.array', (['[1]', 'float'], {}), '([1], float)\n', (1335, 1347), True, 'import numpy as np\n'), ((1376, 1396), 'numpy.array', 'np.array', (['[0]', 'float'], {}), '([0], float)\n', (1384, 1396), True, 'import numpy as np\n'), ((1541, 1566), 'numpy.ones', 'np.ones', (['(order + 1)', 'float'], {}), '(order + 1, float)\n', (1548, 1566), True, 'import numpy as np\n'), ((1682, 1721), 'numpy.empty', 'np.empty', (['(order + 1, order + 1)', 'float'], {}), '((order + 1, order + 1), float)\n', (1690, 1721), True, 'import numpy as np\n'), ((4251, 4284), 'numpy.empty', 'np.empty', (['derivative_matrix.shape'], {}), '(derivative_matrix.shape)\n', (4259, 4284), True, 'import numpy as np\n'), ((4422, 4462), 'numpy.dot', 'np.dot', (['derivative_matrix', 'scaled_matrix'], {}), '(derivative_matrix, scaled_matrix)\n', (4428, 4462), True, 'import numpy as np\n'), ((6117, 6140), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (6123, 6140), True, 'import numpy as np\n'), ((6199, 6247), 'numpy.dot', 'np.dot', (['(identity + 8.0 * dt12matrix)', 'current_sol'], {}), '(identity + 8.0 * dt12matrix, current_sol)\n', (6205, 6247), True, 'import numpy as np\n'), ((6338, 6366), 'numpy.dot', 'np.dot', (['dt12matrix', 'prev_sol'], {}), '(dt12matrix, prev_sol)\n', (6344, 6366), True, 'import numpy as np\n'), ((7538, 7561), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (7544, 7561), True, 'import numpy as np\n'), ((7951, 7975), 'scipy.linalg.qr', 'qr_factorization', (['matrix'], {}), '(matrix)\n', (7967, 7975), True, 'from scipy.linalg import qr as qr_factorization\n'), ((11685, 11717), 'numpy.linalg.solve', 'np.linalg.solve', (['matrix', 'forcing'], {}), '(matrix, forcing)\n', (11700, 11717), True, 'import numpy as np\n'), ((13926, 13960), 'numpy.linalg.solve', 'np.linalg.solve', (['matrix', 'qoi_deriv'], {}), '(matrix, qoi_deriv)\n', (13941, 13960), True, 'import numpy as np\n'), ((16789, 16820), 'numpy.empty', 'np.empty', (['num_stoch_dims', 'float'], {}), '(num_stoch_dims, float)\n', (16797, 16820), True, 'import numpy as np\n'), ((17771, 17807), 'pyapprox.univariate_polynomials.quadrature.gauss_jacobi_pts_wts_1D', 'gauss_jacobi_pts_wts_1D', (['order', '(0)', '(0)'], {}), '(order, 0, 0)\n', (17794, 17807), False, 'from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D\n'), ((18197, 18226), 'numpy.dot', 'np.dot', (['gl_vals[:, 0]', 'gl_wts'], {}), '(gl_vals[:, 0], gl_wts)\n', (18203, 18226), True, 'import numpy as np\n'), ((19684, 19712), 'numpy.atleast_2d', 'np.atleast_2d', (['self.mesh_pts'], {}), '(self.mesh_pts)\n', (19697, 19712), True, 'import numpy as np\n'), ((19838, 19902), 'pyapprox.models.wrappers.evaluate_1darray_function_on_2d_array', 'evaluate_1darray_function_on_2d_array', (['self.value', 'samples', 'None'], {}), '(self.value, samples, None)\n', (19875, 19902), False, 'from pyapprox.models.wrappers import evaluate_1darray_function_on_2d_array\n'), ((20802, 20831), 'numpy.empty', 'np.empty', (['(4 * self.order)', 'int'], {}), '(4 * self.order, int)\n', (20810, 20831), True, 'import numpy as np\n'), ((22316, 22360), 'pyapprox.utilities.cartesian_product', 'cartesian_product', (['([self.mesh_pts_1d] * 2)', '(1)'], {}), '([self.mesh_pts_1d] * 2, 1)\n', (22333, 22360), False, 'from pyapprox.utilities import 
cartesian_product, outer_product\n'), ((22729, 22751), 'numpy.eye', 'np.eye', (['(self.order + 1)'], {}), '(self.order + 1)\n', (22735, 22751), True, 'import numpy as np\n'), ((22845, 22881), 'numpy.kron', 'np.kron', (['Ident', 'derivative_matrix_1d'], {}), '(Ident, derivative_matrix_1d)\n', (22852, 22881), True, 'import numpy as np\n'), ((22987, 23023), 'numpy.kron', 'np.kron', (['derivative_matrix_1d', 'Ident'], {}), '(derivative_matrix_1d, Ident)\n', (22994, 23023), True, 'import numpy as np\n'), ((23119, 23159), 'numpy.empty', 'np.empty', (['self.derivative_matrix_1.shape'], {}), '(self.derivative_matrix_1.shape)\n', (23127, 23159), True, 'import numpy as np\n'), ((23186, 23226), 'numpy.empty', 'np.empty', (['self.derivative_matrix_2.shape'], {}), '(self.derivative_matrix_2.shape)\n', (23194, 23226), True, 'import numpy as np\n'), ((23454, 23503), 'numpy.dot', 'np.dot', (['self.derivative_matrix_1', 'scaled_matrix_1'], {}), '(self.derivative_matrix_1, scaled_matrix_1)\n', (23460, 23503), True, 'import numpy as np\n'), ((23523, 23572), 'numpy.dot', 'np.dot', (['self.derivative_matrix_2', 'scaled_matrix_2'], {}), '(self.derivative_matrix_2, scaled_matrix_2)\n', (23529, 23572), True, 'import numpy as np\n'), ((26044, 26080), 'pyapprox.univariate_polynomials.quadrature.gauss_jacobi_pts_wts_1D', 'gauss_jacobi_pts_wts_1D', (['order', '(0)', '(0)'], {}), '(order, 0, 0)\n', (26067, 26080), False, 'from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D\n'), ((26523, 26548), 'pyapprox.utilities.cartesian_product', 'cartesian_product', (['pts_1d'], {}), '(pts_1d)\n', (26540, 26548), False, 'from pyapprox.utilities import cartesian_product, outer_product\n'), ((26563, 26584), 'pyapprox.utilities.outer_product', 'outer_product', (['wts_1d'], {}), '(wts_1d)\n', (26576, 26584), False, 'from pyapprox.utilities import cartesian_product, outer_product\n'), ((26691, 26717), 'numpy.dot', 'np.dot', (['gl_vals[:, 0]', 'wts'], {}), '(gl_vals[:, 0], wts)\n', (26697, 26717), True, 'import numpy as np\n'), ((9745, 9787), 'numpy.empty', 'np.empty', (['self.num_stored_timesteps', 'float'], {}), '(self.num_stored_timesteps, float)\n', (9753, 9787), True, 'import numpy as np\n'), ((9809, 9880), 'numpy.empty', 'np.empty', (['(self.initial_sol.shape[0], self.num_stored_timesteps)', 'float'], {}), '((self.initial_sol.shape[0], self.num_stored_timesteps), float)\n', (9817, 9880), True, 'import numpy as np\n'), ((11005, 11033), 'numpy.linalg.solve', 'np.linalg.solve', (['matrix', 'rhs'], {}), '(matrix, rhs)\n', (11020, 11033), True, 'import numpy as np\n'), ((11416, 11442), 'numpy.any', 'np.any', (['(diffusivity <= 0.0)'], {}), '(diffusivity <= 0.0)\n', (11422, 11442), True, 'import numpy as np\n'), ((14339, 14363), 'numpy.dot', 'np.dot', (['matrix', 'solution'], {}), '(matrix, solution)\n', (14345, 14363), True, 'import numpy as np\n'), ((15154, 15178), 'numpy.dot', 'np.dot', (['matrix', 'solution'], {}), '(matrix, solution)\n', (15160, 15178), True, 'import numpy as np\n'), ((16371, 16418), 'numpy.linalg.cond', 'np.linalg.cond', (['self.adjoint_collocation_matrix'], {}), '(self.adjoint_collocation_matrix)\n', (16385, 16418), True, 'import numpy as np\n'), ((17516, 17532), 'numpy.isscalar', 'np.isscalar', (['qoi'], {}), '(qoi)\n', (17527, 17532), True, 'import numpy as np\n'), ((17569, 17584), 'numpy.array', 'np.array', (['[qoi]'], {}), '([qoi])\n', (17577, 17584), True, 'import numpy as np\n'), ((18597, 18631), 'pyapprox.barycentric_interpolation.compute_barycentric_weights_1d', 
'compute_barycentric_weights_1d', (['xx'], {}), '(xx)\n', (18627, 18631), False, 'from pyapprox.barycentric_interpolation import compute_barycentric_weights_1d, multivariate_barycentric_lagrange_interpolation\n'), ((18838, 18857), 'numpy.arange', 'np.arange', (['num_dims'], {}), '(num_dims)\n', (18847, 18857), True, 'import numpy as np\n'), ((19124, 19180), 'numpy.linspace', 'np.linspace', (['self.xlim[0]', 'self.xlim[1]', 'num_plot_pts_1d'], {}), '(self.xlim[0], self.xlim[1], num_plot_pts_1d)\n', (19135, 19180), True, 'import numpy as np\n'), ((19277, 19324), 'pylab.plot', 'pylab.plot', (['plot_mesh', 'interp_vals', "(color + '-')"], {}), "(plot_mesh, interp_vals, color + '-')\n", (19287, 19324), False, 'import pylab\n'), ((25365, 25483), 'utilities.visualisation.plot_surface_from_function', 'plot_surface_from_function', (['func', '[self.xlim[0], self.xlim[1], self.ylim[0], self.ylim[1]]', 'num_plot_pts_1d', '(False)'], {}), '(func, [self.xlim[0], self.xlim[1], self.ylim[0],\n self.ylim[1]], num_plot_pts_1d, False)\n', (25391, 25483), False, 'from utilities.visualisation import plot_surface_from_function\n'), ((1490, 1524), 'numpy.linspace', 'np.linspace', (['(0.0)', 'np.pi', '(order + 1)'], {}), '(0.0, np.pi, order + 1)\n', (1501, 1524), True, 'import numpy as np\n'), ((6888, 6928), 'numpy.eye', 'np.eye', (['self.collocation_matrix.shape[0]'], {}), '(self.collocation_matrix.shape[0])\n', (6894, 6928), True, 'import numpy as np\n'), ((6951, 7038), 'numpy.dot', 'np.dot', (['(identity + 0.5 * self.time_step_size * self.collocation_matrix)', 'current_sol'], {}), '(identity + 0.5 * self.time_step_size * self.collocation_matrix,\n current_sol)\n', (6957, 7038), True, 'import numpy as np\n'), ((8466, 8558), 'pyapprox.utilities.qr_solve', 'qr_solve', (['self.implicit_matrix_factors[0]', 'self.implicit_matrix_factors[1]', 'rhs[:, None]'], {}), '(self.implicit_matrix_factors[0], self.implicit_matrix_factors[1],\n rhs[:, None])\n', (8474, 8558), False, 'from pyapprox.utilities import qr_solve\n'), ((19458, 19513), 'pylab.plot', 'pylab.plot', (['plot_mesh_coords', 'mesh_values', "('o-' + color)"], {}), "(plot_mesh_coords, mesh_values, 'o-' + color)\n", (19468, 19513), False, 'import pylab\n'), ((19584, 19629), 'pylab.plot', 'pylab.plot', (['self.mesh_pts', 'mesh_values', 'color'], {}), '(self.mesh_pts, mesh_values, color)\n', (19594, 19629), False, 'import pylab\n'), ((3938, 3962), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (3956, 3962), False, 'import inspect\n'), ((4087, 4111), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (4105, 4111), False, 'import inspect\n'), ((8111, 8145), 'numpy.dot', 'np.dot', (['self.collocation_matrix', 'u'], {}), '(self.collocation_matrix, u)\n', (8117, 8145), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
from sklearn.decomposition import TruncatedSVD
"""
打个比方说一张女人图片,我们如何判定这个女人是不是美女呢。我们会看比较关键的一些特征,比如说脸好不好看,胸好不好看,屁股怎么样,腿怎么样,至于衣服上是某个花纹还是手臂上有一个小痔还是,这些特征我们都是不关心的,就可以过滤掉。我们关心的是主成分,也就是对结果贡献系数较大的特征。SVD算法的作用就是来告诉你哪些特征是重要的,有多重要,哪些特征是不重要的,是可以忽略的。
接下来我们使用sklearn提供的TruncatedSVD模块来对美女图片进行压缩。
首先我们使用matplotlib显示一张美女png图片,png图片的格式非常简单,每一个像素有三个维度的颜色值RGB,整个图片就是一个「height x width x 3」维的矩阵。
"""
# load the png data matrix
img_array = img.imread('test2.png')
shape = img_array.shape
print(shape)
# height, width, number of RGB channels = 3
height, width, channels = shape[0], shape[1], shape[2]
# convert to a numpy array
img_matrix = np.array(img_array)
# store the transformed data for the three RGB channels
planes = []
# process the three RGB channels separately
for idx in range(channels):
    # extract one channel
plane = img_matrix[:, :, idx]
    # reshape into a 2D matrix
plane = np.reshape(plane, (height, width))
    # keep 10 principal components
svd = TruncatedSVD(n_components=10)
    # fit the data: factorize the matrix, build the feature space and drop the unimportant components
svd.fit(plane)
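    # Added aside (not in the original script): the fitted TruncatedSVD exposes
    # explained_variance_ratio_, which quantifies how much of this channel's
    # variance the 10 retained components capture, i.e. "how important" they are.
    print('channel', idx, 'variance kept:', svd.explained_variance_ratio_.sum())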
    # project the input data into the feature space
new_plane = svd.transform(plane)
    # then map the feature-space data back to the original data space
plane = svd.inverse_transform(new_plane)
    # keep the reconstructed plane
planes.append(plane)
# stack the three channel planes back together
img_matrix = np.dstack(planes)
# display the processed image
plt.imshow(img_matrix)
plt.show()
|
[
"numpy.dstack",
"matplotlib.image.imread",
"matplotlib.pyplot.show",
"sklearn.decomposition.TruncatedSVD",
"matplotlib.pyplot.imshow",
"numpy.array",
"numpy.reshape"
] |
[((511, 534), 'matplotlib.image.imread', 'img.imread', (['"""test2.png"""'], {}), "('test2.png')\n", (521, 534), True, 'import matplotlib.image as img\n'), ((676, 695), 'numpy.array', 'np.array', (['img_array'], {}), '(img_array)\n', (684, 695), True, 'import numpy as np\n'), ((1170, 1187), 'numpy.dstack', 'np.dstack', (['planes'], {}), '(planes)\n', (1179, 1187), True, 'import numpy as np\n'), ((1199, 1221), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_matrix'], {}), '(img_matrix)\n', (1209, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1231, 1233), True, 'import matplotlib.pyplot as plt\n'), ((840, 874), 'numpy.reshape', 'np.reshape', (['plane', '(height, width)'], {}), '(plane, (height, width))\n', (850, 874), True, 'import numpy as np\n'), ((900, 929), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(10)'}), '(n_components=10)\n', (912, 929), False, 'from sklearn.decomposition import TruncatedSVD\n')]
|
from scipy.stats import uniform
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.stats import exponweib
def calculate_parameters(interarrivals):
sample = np.array(interarrivals)
x = np.linspace(0, 1 - 1 / sample.shape[0], sample.shape[0])
x = x[sample > 0]
sample = sample[sample > 0]
sample = sample[x > 0]
x = x[x > 0]
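    # The line below does a least-squares fit, in log space, of the sample values
    # against log(-log(1 - x)) (a Weibull-style linearisation); m is the fitted
    # slope and c the intercept.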
m, c = minimize(lambda t: np.mean((np.log(sample) - (t[0] * np.log(-np.log(1 - x)) + t[1])) ** 2), [1, 0]).x
return 1 / m, np.exp(-c - m)
def main():
step = 4
    interarrivals = exponweib.rvs(1, 1.5, size=10000)  # shape parameters chosen arbitrarily here so the sampling actually runs
print(calculate_parameters(interarrivals))
hours = []
hour = []
params = []
time = 0
last_time = 0
for arrival in interarrivals:
if time + arrival > last_time + 1000 * 60 * 60 * step:
params.append(calculate_parameters(hour))
hours.append(hour)
hour = []
last_time = time = last_time + 1000 * 60 * 60 * step
time = time + arrival
hour.append(arrival)
fig, ax1 = plt.subplots()
ax2 = plt.twinx()
ax1.plot([p[0] for p in params])
ax2.plot([p[1] for p in params], color='orange')
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.twinx",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"scipy.stats.exponweib",
"matplotlib.pyplot.subplots"
] |
[((211, 234), 'numpy.array', 'np.array', (['interarrivals'], {}), '(interarrivals)\n', (219, 234), True, 'import numpy as np\n'), ((243, 299), 'numpy.linspace', 'np.linspace', (['(0)', '(1 - 1 / sample.shape[0])', 'sample.shape[0]'], {}), '(0, 1 - 1 / sample.shape[0], sample.shape[0])\n', (254, 299), True, 'import numpy as np\n'), ((591, 612), 'scipy.stats.exponweib', 'exponweib', ([], {'size': '(10000)'}), '(size=10000)\n', (600, 612), False, 'from scipy.stats import exponweib\n'), ((1084, 1098), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1096, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1121), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (1119, 1121), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1227, 1229), True, 'import matplotlib.pyplot as plt\n'), ((529, 543), 'numpy.exp', 'np.exp', (['(-c - m)'], {}), '(-c - m)\n', (535, 543), True, 'import numpy as np\n'), ((437, 451), 'numpy.log', 'np.log', (['sample'], {}), '(sample)\n', (443, 451), True, 'import numpy as np\n'), ((470, 483), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (476, 483), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import time
# In[2]:
def createGraph(depotNodes ,requiredEdges, numNodes, show=True):
G = nx.Graph()
edges = []
pos = {}
reqPos = {}
s = [1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 7]
t = [2, 3, 4, 6, 4, 5, 5, 7, 6, 8, 7, 8]
weights = [2.3, 2, 3, 1.5, 3.2, 2.2, 3.8, 2.6, 2.2, 2.8, 1.8, 0.8]
xData = [-2, -0.5, -1, 0, 1, 1.5, 2, 2.5];
yData = [ 0, -2, 2.5, 0, 3, -2, 0.3, 1.5];
for i in range(len(s)):
edges.append((s[i], t[i], weights[i]))
for i in range(1, numNodes+1):
G.add_node(i)
pos[i] =(xData[i-1], yData[i-1])
node_color = ['y']*int(G.number_of_nodes())
depot_node_color = node_color
for i in range(1, len(node_color)+1):
if i in depotNodes:
depot_node_color[i-1] = 'g'
G.add_weighted_edges_from(edges)
labels = nx.get_edge_attributes(G,'weight')
nx.draw_networkx(G,pos, node_color = node_color)
nx.draw_networkx(G,pos, node_color = depot_node_color)
nx.draw_networkx_edges(G, pos, edgelist=requiredEdges, width=3, alpha=0.5,
edge_color="r")
nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
if show:
plt.figure(1)
plt.show()
return G,pos, depot_node_color
# In[3]:
# Allocating task based on distance between base station and desired edge and UAV availability
def taskAllocation(G, depotNodes, requiredNodes, numrequiredEdges, uavsInDepotNodes):
depotNodesCost = np.zeros((len(depotNodes), numrequiredEdges))
depotPath = []
bestPathTillDesiredEdge = []
bestCostTillDesiredEdge = []
for j in range(numrequiredEdges):
for i in range(len(depotNodes)):
c1 = nx.dijkstra_path_length(G, source=depotNodes[i], target=requiredNodes[j][0])
c2 = nx.dijkstra_path_length(G, source=depotNodes[i], target=requiredNodes[j][1])
l = []
if c1 <= c2:
l = nx.dijkstra_path(G, source=depotNodes[i], target=requiredNodes[j][0])
l.append(requiredNodes[j][1])
else:
l = nx.dijkstra_path(G, source=depotNodes[i], target=requiredNodes[j][1])
l.append(requiredNodes[j][0])
depotNodesCost[i,j] = min(c1,c2)
depotNodesCost[i,j] += G.get_edge_data(requiredNodes[j][0], requiredNodes[j][1])['weight']
depotPath.append(l)
if uavsInDepotNodes[np.argmin(depotNodesCost[:,j])] > 0:
uavsInDepotNodes[np.argmin(depotNodesCost[:,j])] -= 1
else:
depotNodesCost[np.argmin(depotNodesCost[:,j]),j] = np.inf
depotPath = np.transpose(np.array(depotPath, dtype=object).reshape((len(depotNodes), numrequiredEdges)))
taskAllocatedtoBaseStations = []
print("Task Allocation Algorithm Output: ")
for i in range(numrequiredEdges):
taskAllocatedtoBaseStations.append(np.argmin(depotNodesCost[:,i]))
bestCostTillDesiredEdge.append(depotNodesCost[taskAllocatedtoBaseStations[i],i])
bestPathTillDesiredEdge.append(depotPath[taskAllocatedtoBaseStations[i],i])
print('Allocating arc ' + str(requiredNodes[i][0]) + ' - ' + str(requiredNodes[i][1]) + ' to base station - node ' + str(depotNodes[taskAllocatedtoBaseStations[i]]))
return bestPathTillDesiredEdge, bestCostTillDesiredEdge
# In[4]:
def pathScanningAlgorithm(G, numrequiredEdges,depotNodes, bestPathTillDesiredEdge, bestCostTillDesiredEdge, vehicleCapacity):
bestRoute = []
bestRouteCost = []
minCost = np.inf
for j in range(numrequiredEdges):
minCost = np.inf
l = []
for i in range(len(depotNodes)):
c1 = nx.dijkstra_path_length(G, source=bestPathTillDesiredEdge[j][-1], target=depotNodes[i])
if c1 <= minCost:
l = nx.dijkstra_path(G, source=bestPathTillDesiredEdge[j][-1], target=depotNodes[i])[1:]
minCost = c1
bestRoute.append(bestPathTillDesiredEdge[j] + l)
bestRouteCost.append(bestCostTillDesiredEdge[j] + minCost)
if bestRouteCost[j] > vehicleCapacity:
bestRoute[j] = None
bestRouteCost[j] = np.inf
print("Path Scanning Algorithm Output: ")
return bestRoute, bestRouteCost
# In[5]:
def visualizePath(depotNodes, requiredNodes, numNodes, path, pathType="solution"):
plt.figure(1)
for j in range(len(path)):
if path[j] != None:
# plt.figure(j+1)
G, pos, depot_node_color = createGraph(depotNodes, requiredNodes , numNodes, show=False)
G1 = nx.DiGraph()
pos1 = {}
node_color = []
edges = []
for i in range(len(path[j])-1):
edges.append((path[j][i], path[j][i+1], G.get_edge_data(path[j][i], path[j][i+1])['weight']))
pos1[path[j][i]] = pos[path[j][i]]
if i == len(path[j])-2:
pos1[path[j][i+1]] = pos[path[j][i+1]]
for key in pos1.keys():
node_color.append(depot_node_color[key-1])
G1.add_weighted_edges_from(edges)
nx.draw_networkx(G1,pos1, arrows=True, node_color = node_color, edge_color='b', arrowsize=12, width=1, arrowstyle='simple')
if pathType == "solution":
plt.legend(["Solution Path"], loc ="upper left")
else:
plt.legend(["Path"], loc ="upper left")
plt.show()
# In[6]:
def main():
# Initializing Parameters
vehicleCapacity = 14
numNodes = 8
requiredNodes = [[2, 4], [6, 7]];
uavsInDepotNodes = [0, 2];
totalUavs = sum(uavsInDepotNodes);
numrequiredEdges = 2;
depotNodes = [1, 5];
taskAllocatedtoBaseStations = [];
start = time.time()
G,pos, depot_node_color = createGraph(depotNodes, requiredNodes, numNodes, show=False)
bestPathTillDesiredEdge, bestCostTillDesiredEdge = taskAllocation(G, depotNodes, requiredNodes, numrequiredEdges, uavsInDepotNodes)
visualizePath(depotNodes, requiredNodes, numNodes, bestPathTillDesiredEdge, pathType="normal")
bestRoute, bestRouteCost = pathScanningAlgorithm(G, numrequiredEdges, depotNodes, bestPathTillDesiredEdge, bestCostTillDesiredEdge, vehicleCapacity)
visualizePath(depotNodes, requiredNodes, numNodes, bestRoute)
end = time.time()
print("Execution took "+ str(end-start) + " seconds.")
if __name__ == "__main__":
# execute only if run as a script
main()
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"matplotlib.pyplot.show",
"networkx.draw_networkx_edges",
"networkx.dijkstra_path_length",
"matplotlib.pyplot.legend",
"networkx.get_edge_attributes",
"networkx.draw_networkx",
"time.time",
"numpy.argmin",
"networkx.dijkstra_path",
"matplotlib.pyplot.figure",
"networkx.Graph",
"numpy.array",
"networkx.draw_networkx_edge_labels",
"networkx.DiGraph"
] |
[((221, 231), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (229, 231), True, 'import networkx as nx\n'), ((979, 1014), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['G', '"""weight"""'], {}), "(G, 'weight')\n", (1001, 1014), True, 'import networkx as nx\n'), ((1018, 1065), 'networkx.draw_networkx', 'nx.draw_networkx', (['G', 'pos'], {'node_color': 'node_color'}), '(G, pos, node_color=node_color)\n', (1034, 1065), True, 'import networkx as nx\n'), ((1071, 1124), 'networkx.draw_networkx', 'nx.draw_networkx', (['G', 'pos'], {'node_color': 'depot_node_color'}), '(G, pos, node_color=depot_node_color)\n', (1087, 1124), True, 'import networkx as nx\n'), ((1130, 1224), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edgelist': 'requiredEdges', 'width': '(3)', 'alpha': '(0.5)', 'edge_color': '"""r"""'}), "(G, pos, edgelist=requiredEdges, width=3, alpha=0.5,\n edge_color='r')\n", (1152, 1224), True, 'import networkx as nx\n'), ((1265, 1321), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['G', 'pos'], {'edge_labels': 'labels'}), '(G, pos, edge_labels=labels)\n', (1293, 1321), True, 'import networkx as nx\n'), ((4508, 4521), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4518, 4521), True, 'import matplotlib.pyplot as plt\n'), ((5905, 5916), 'time.time', 'time.time', ([], {}), '()\n', (5914, 5916), False, 'import time\n'), ((6472, 6483), 'time.time', 'time.time', ([], {}), '()\n', (6481, 6483), False, 'import time\n'), ((1343, 1356), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1353, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1375), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1373, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1930), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['G'], {'source': 'depotNodes[i]', 'target': 'requiredNodes[j][0]'}), '(G, source=depotNodes[i], target=requiredNodes[j][0])\n', (1877, 1930), True, 'import networkx as nx\n'), ((1948, 2024), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['G'], {'source': 'depotNodes[i]', 'target': 'requiredNodes[j][1]'}), '(G, source=depotNodes[i], target=requiredNodes[j][1])\n', (1971, 2024), True, 'import networkx as nx\n'), ((3051, 3082), 'numpy.argmin', 'np.argmin', (['depotNodesCost[:, i]'], {}), '(depotNodesCost[:, i])\n', (3060, 3082), True, 'import numpy as np\n'), ((3833, 3925), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['G'], {'source': 'bestPathTillDesiredEdge[j][-1]', 'target': 'depotNodes[i]'}), '(G, source=bestPathTillDesiredEdge[j][-1], target=\n depotNodes[i])\n', (3856, 3925), True, 'import networkx as nx\n'), ((4729, 4741), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4739, 4741), True, 'import networkx as nx\n'), ((5274, 5401), 'networkx.draw_networkx', 'nx.draw_networkx', (['G1', 'pos1'], {'arrows': '(True)', 'node_color': 'node_color', 'edge_color': '"""b"""', 'arrowsize': '(12)', 'width': '(1)', 'arrowstyle': '"""simple"""'}), "(G1, pos1, arrows=True, node_color=node_color, edge_color=\n 'b', arrowsize=12, width=1, arrowstyle='simple')\n", (5290, 5401), True, 'import networkx as nx\n'), ((5588, 5598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5596, 5598), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2158), 'networkx.dijkstra_path', 'nx.dijkstra_path', (['G'], {'source': 'depotNodes[i]', 'target': 'requiredNodes[j][0]'}), '(G, source=depotNodes[i], target=requiredNodes[j][0])\n', (2105, 2158), 
True, 'import networkx as nx\n'), ((2243, 2312), 'networkx.dijkstra_path', 'nx.dijkstra_path', (['G'], {'source': 'depotNodes[i]', 'target': 'requiredNodes[j][1]'}), '(G, source=depotNodes[i], target=requiredNodes[j][1])\n', (2259, 2312), True, 'import networkx as nx\n'), ((2580, 2611), 'numpy.argmin', 'np.argmin', (['depotNodesCost[:, j]'], {}), '(depotNodesCost[:, j])\n', (2589, 2611), True, 'import numpy as np\n'), ((2650, 2681), 'numpy.argmin', 'np.argmin', (['depotNodesCost[:, j]'], {}), '(depotNodesCost[:, j])\n', (2659, 2681), True, 'import numpy as np\n'), ((2805, 2838), 'numpy.array', 'np.array', (['depotPath'], {'dtype': 'object'}), '(depotPath, dtype=object)\n', (2813, 2838), True, 'import numpy as np\n'), ((5453, 5500), 'matplotlib.pyplot.legend', 'plt.legend', (["['Solution Path']"], {'loc': '"""upper left"""'}), "(['Solution Path'], loc='upper left')\n", (5463, 5500), True, 'import matplotlib.pyplot as plt\n'), ((5536, 5574), 'matplotlib.pyplot.legend', 'plt.legend', (["['Path']"], {'loc': '"""upper left"""'}), "(['Path'], loc='upper left')\n", (5546, 5574), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2759), 'numpy.argmin', 'np.argmin', (['depotNodesCost[:, j]'], {}), '(depotNodesCost[:, j])\n', (2737, 2759), True, 'import numpy as np\n'), ((3971, 4056), 'networkx.dijkstra_path', 'nx.dijkstra_path', (['G'], {'source': 'bestPathTillDesiredEdge[j][-1]', 'target': 'depotNodes[i]'}), '(G, source=bestPathTillDesiredEdge[j][-1], target=depotNodes[i]\n )\n', (3987, 4056), True, 'import networkx as nx\n')]
|
import numpy as np
import pandas as pd
import xarray as xr
from shape import BBox
from .mot import Mot
class BboxMot(Mot):
def __init__(self, **kwargs):
"""
Ground truth stored in xarray.Dataset with frame and id coordinates (frames are 0-indexed).
Example:
<xarray.Dataset>
Dimensions: (frame: 5928, id: 5)
Coordinates:
* frame (frame) int64 0 1 2 3 4 5 6 ... 5922 5923 5924 5925 5926 5927
* id (id) int64 1 2 3 4 5
Data variables:
x (frame, id) float64 434.5 277.7 179.2 180.0 ... nan nan nan nan
y (frame, id) float64 279.0 293.6 407.9 430.0 ... nan nan nan nan
width (frame, id) float64 nan nan nan nan nan ... nan nan nan nan nan
height (frame, id) float64 nan nan nan nan nan ... nan nan nan nan nan
confidence (frame, id) float64 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0
"""
super(Mot, self).__init__(**kwargs)
def init_blank(self, frames, ids):
"""
Initialize blank ground truth.
:param frames: list of frames
:param ids: list of identities
"""
self.ds = xr.Dataset(
data_vars={
"x": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"y": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"width": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"height": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"confidence": (
["frame", "id"],
np.nan * np.ones((len(frames), len(ids))),
),
},
coords={"frame": frames, "id": ids},
)
def load(self, filename):
"""
        Load a Multiple Object Tracking Challenge trajectories file.
        Format described in https://arxiv.org/abs/1603.00831, section 3.3 Data Format.
        Loads trajectories into a DataFrame; in the file, the frame and id columns start at 1 (MATLAB indexing).
        :param filename: mot filename or buffer
"""
df = pd.read_csv(
filename,
index_col=["frame", "id"],
names=["frame", "id", "x", "y", "width", "height", "confidence"],
converters={
"frame": lambda x: int(x) - 1,
"id": lambda x: int(x) - 1,
},
)
df[df == -1] = np.nan
ds = df.to_xarray()
# ensure that all frames are in the Dataset
self.init_blank(list(range(ds.frame.min(), ds.frame.max())), ds.id)
self.ds = ds.merge(self.ds)
def save(self, filename, make_backup=False):
import datetime
import os
if make_backup and os.path.exists(filename):
dt = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.rename(filename, filename[:-4] + "_" + dt + ".txt")
df = self.ds.to_dataframe().reset_index()
df[df.isna()] = -1
df["frame"] += 1
df["id"] += 1
df.to_csv(filename, index=False, header=False)
def get_bboxes(self, frame):
"""
Get GT bounding boxes in a frame.
The returned BBoxes include obj_id attribute.
:param frame: frame number
:return: list of bounding boxes (BBox)
"""
bboxes = []
for obj_id, obj in self.get_positions_dataframe(frame).iterrows():
if not (
np.isnan(obj.x)
or np.isnan(obj.y)
or np.isnan(obj.width)
or np.isnan(obj.height)
):
bbox = BBox.from_xywh(obj.x, obj.y, obj.width, obj.height, frame)
bbox.obj_id = obj_id
bboxes.append(bbox)
return bboxes
def get_object_distance(self, frame, obj_id, other):
"""
TODO bbox iou
:param frame:
:param obj_id:
:param other:
:return:
"""
assert False, "not implemented"
def draw_frame(self, img, frame, mapping=None):
"""
Draw objects on an image.
:param img: ndarray
:param frame: frame
:param mapping: mapping of ids, dict
:return: image
"""
if frame in self.ds.frame:
if self.colors is None:
self._init_draw()
if mapping is None:
mapping = dict(list(zip(self.ds.id.data, self.ds.id.data)))
for bbox in self.get_bboxes(frame):
bbox.draw_to_image(img, color=self.colors[mapping[bbox.obj_id]])
return img
|
[
"shape.BBox.from_xywh",
"os.rename",
"os.path.exists",
"numpy.isnan",
"datetime.datetime.now"
] |
[((2848, 2872), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2862, 2872), False, 'import os\n'), ((2957, 3011), 'os.rename', 'os.rename', (['filename', "(filename[:-4] + '_' + dt + '.txt')"], {}), "(filename, filename[:-4] + '_' + dt + '.txt')\n", (2966, 3011), False, 'import os\n'), ((3730, 3788), 'shape.BBox.from_xywh', 'BBox.from_xywh', (['obj.x', 'obj.y', 'obj.width', 'obj.height', 'frame'], {}), '(obj.x, obj.y, obj.width, obj.height, frame)\n', (3744, 3788), False, 'from shape import BBox\n'), ((2891, 2914), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2912, 2914), False, 'import datetime\n'), ((3562, 3577), 'numpy.isnan', 'np.isnan', (['obj.x'], {}), '(obj.x)\n', (3570, 3577), True, 'import numpy as np\n'), ((3597, 3612), 'numpy.isnan', 'np.isnan', (['obj.y'], {}), '(obj.y)\n', (3605, 3612), True, 'import numpy as np\n'), ((3632, 3651), 'numpy.isnan', 'np.isnan', (['obj.width'], {}), '(obj.width)\n', (3640, 3651), True, 'import numpy as np\n'), ((3671, 3691), 'numpy.isnan', 'np.isnan', (['obj.height'], {}), '(obj.height)\n', (3679, 3691), True, 'import numpy as np\n')]
|
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import os
import numpy as num
from anuga.file.netcdf import NetCDFFile
import pylab as P
import anuga
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.shallow_water.boundaries import Reflective_boundary
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.shallow_water.forcing import *
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.file.sww import Write_sww
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
def sts2sww_mesh(basename_in, basename_out=None,
spatial_thinning=1, verbose=False):
from anuga.mesh_engine.mesh_engine import NoTrianglesError
from anuga.pmesh.mesh import Mesh
if verbose:
print("Starting sts2sww_mesh")
mean_stage=0.
zscale=1.
    if basename_in[-4:] == '.sts':
stsname = basename_in
else:
stsname = basename_in + '.sts'
if verbose: print("Reading sts NetCDF file: %s" %stsname)
infile = NetCDFFile(stsname, netcdf_mode_r)
cellsize = infile.cellsize
ncols = infile.ncols
nrows = infile.nrows
no_data = infile.no_data
refzone = infile.zone
x_origin = infile.xllcorner
y_origin = infile.yllcorner
origin = num.array([x_origin, y_origin])
x = infile.variables['x'][:]
y = infile.variables['y'][:]
times = infile.variables['time'][:]
wind_speed_full = infile.variables['wind_speed'][:]
wind_angle_full = infile.variables['wind_angle'][:]
pressure_full = infile.variables['barometric_pressure'][:]
infile.close()
number_of_points = nrows*ncols
points_utm = num.zeros((number_of_points,2),num.float)
points_utm[:,0]=x+x_origin
points_utm[:,1]=y+y_origin
thinned_indices=[]
for i in range(number_of_points):
if (old_div(i,ncols)==0 or old_div(i,ncols)==ncols-1 or (old_div(i,ncols))%(spatial_thinning)==0):
if ( i%(spatial_thinning)==0 or i%nrows==0 or i%nrows==nrows-1 ):
thinned_indices.append(i)
#Spatial thinning
points_utm=points_utm[thinned_indices]
number_of_points = points_utm.shape[0]
number_of_timesteps = wind_speed_full.shape[0]
wind_speed = num.empty((number_of_timesteps,number_of_points),dtype=float)
wind_angle = num.empty((number_of_timesteps,number_of_points),dtype=float)
barometric_pressure = num.empty((number_of_timesteps,number_of_points),dtype=float)
if verbose:
print("Total number of points: ", nrows*ncols)
print("Number of thinned points: ", number_of_points)
for i in range(number_of_timesteps):
wind_speed[i] = wind_speed_full[i,thinned_indices]
wind_angle[i] = wind_angle_full[i,thinned_indices]
barometric_pressure[i] = pressure_full[i,thinned_indices]
#P.plot(points_utm[:,0],points_utm[:,1],'ro')
#P.show()
if verbose:
print("Generating sww triangulation of gems data")
mesh = Mesh()
mesh.add_vertices(points_utm)
mesh.auto_segment(smooth_indents=True, expand_pinch=True)
mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
try:
mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
except NoTrianglesError:
# This is a bit of a hack, going in and changing the data structure.
mesh.holes = []
mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
mesh_dic = mesh.Mesh2MeshList()
points_utm=ensure_numeric(points_utm)
assert num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])
== ensure_numeric(points_utm))
volumes = mesh_dic['generatedtrianglelist']
# Write sww intro and grid stuff.
    if basename_out is not None and basename_out[-4:] == '.sww':
swwname = basename_out
else:
swwname = basename_in + '.sww'
    if verbose: print('Output to %s' % swwname)
if verbose:
print("Writing sww wind and pressure field file")
outfile = NetCDFFile(swwname, netcdf_mode_w)
sww = Write_sww([], ['wind_speed','wind_angle','barometric_pressure'])
sww.store_header(outfile, times, len(volumes), len(points_utm),
verbose=verbose, sww_precision='d')
outfile.mean_stage = mean_stage
outfile.zscale = zscale
sww.store_triangulation(outfile, points_utm, volumes,
refzone,
new_origin=origin, #check effect of this line
verbose=verbose)
if verbose:
print('Converting quantities')
# Read in a time slice from the sts file and write it to the SWW file
#print wind_angle[0,:10]
for i in range(len(times)):
sww.store_quantities(outfile,
slice_index=i,
verbose=verbose,
wind_speed=wind_speed[i,:],
wind_angle=wind_angle[i,:],
barometric_pressure=barometric_pressure[i,:],
sww_precision=num.float)
if verbose:
sww.verbose_quantities(outfile)
outfile.close()
|
[
"builtins.range",
"anuga.utilities.numerical_tools.ensure_numeric",
"past.utils.old_div",
"numpy.empty",
"anuga.file.sww.Write_sww",
"numpy.zeros",
"numpy.array",
"anuga.pmesh.mesh.Mesh",
"anuga.file.netcdf.NetCDFFile"
] |
[((1240, 1274), 'anuga.file.netcdf.NetCDFFile', 'NetCDFFile', (['stsname', 'netcdf_mode_r'], {}), '(stsname, netcdf_mode_r)\n', (1250, 1274), False, 'from anuga.file.netcdf import NetCDFFile\n'), ((1488, 1519), 'numpy.array', 'num.array', (['[x_origin, y_origin]'], {}), '([x_origin, y_origin])\n', (1497, 1519), True, 'import numpy as num\n'), ((1877, 1920), 'numpy.zeros', 'num.zeros', (['(number_of_points, 2)', 'num.float'], {}), '((number_of_points, 2), num.float)\n', (1886, 1920), True, 'import numpy as num\n'), ((2018, 2041), 'builtins.range', 'range', (['number_of_points'], {}), '(number_of_points)\n', (2023, 2041), False, 'from builtins import range\n'), ((2449, 2512), 'numpy.empty', 'num.empty', (['(number_of_timesteps, number_of_points)'], {'dtype': 'float'}), '((number_of_timesteps, number_of_points), dtype=float)\n', (2458, 2512), True, 'import numpy as num\n'), ((2528, 2591), 'numpy.empty', 'num.empty', (['(number_of_timesteps, number_of_points)'], {'dtype': 'float'}), '((number_of_timesteps, number_of_points), dtype=float)\n', (2537, 2591), True, 'import numpy as num\n'), ((2618, 2681), 'numpy.empty', 'num.empty', (['(number_of_timesteps, number_of_points)'], {'dtype': 'float'}), '((number_of_timesteps, number_of_points), dtype=float)\n', (2627, 2681), True, 'import numpy as num\n'), ((2826, 2852), 'builtins.range', 'range', (['number_of_timesteps'], {}), '(number_of_timesteps)\n', (2831, 2852), False, 'from builtins import range\n'), ((3193, 3199), 'anuga.pmesh.mesh.Mesh', 'Mesh', ([], {}), '()\n', (3197, 3199), False, 'from anuga.pmesh.mesh import Mesh\n'), ((3680, 3706), 'anuga.utilities.numerical_tools.ensure_numeric', 'ensure_numeric', (['points_utm'], {}), '(points_utm)\n', (3694, 3706), False, 'from anuga.utilities.numerical_tools import ensure_numeric\n'), ((4197, 4231), 'anuga.file.netcdf.NetCDFFile', 'NetCDFFile', (['swwname', 'netcdf_mode_w'], {}), '(swwname, netcdf_mode_w)\n', (4207, 4231), False, 'from anuga.file.netcdf import NetCDFFile\n'), ((4242, 4308), 'anuga.file.sww.Write_sww', 'Write_sww', (['[]', "['wind_speed', 'wind_angle', 'barometric_pressure']"], {}), "([], ['wind_speed', 'wind_angle', 'barometric_pressure'])\n", (4251, 4308), False, 'from anuga.file.sww import Write_sww\n'), ((3730, 3776), 'anuga.utilities.numerical_tools.ensure_numeric', 'ensure_numeric', (["mesh_dic['generatedpointlist']"], {}), "(mesh_dic['generatedpointlist'])\n", (3744, 3776), False, 'from anuga.utilities.numerical_tools import ensure_numeric\n'), ((3803, 3829), 'anuga.utilities.numerical_tools.ensure_numeric', 'ensure_numeric', (['points_utm'], {}), '(points_utm)\n', (3817, 3829), False, 'from anuga.utilities.numerical_tools import ensure_numeric\n'), ((2055, 2072), 'past.utils.old_div', 'old_div', (['i', 'ncols'], {}), '(i, ncols)\n', (2062, 2072), False, 'from past.utils import old_div\n'), ((2078, 2095), 'past.utils.old_div', 'old_div', (['i', 'ncols'], {}), '(i, ncols)\n', (2085, 2095), False, 'from past.utils import old_div\n'), ((2108, 2125), 'past.utils.old_div', 'old_div', (['i', 'ncols'], {}), '(i, ncols)\n', (2115, 2125), False, 'from past.utils import old_div\n')]
|
import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.io as pio
from pprint import pprint
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
pio.templates.default = "plotly_white"
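# Collect the fine-tuning results of the different compression methods (PYQALM/PALM
# sparse factorization, Tucker, tensor-train, deepfried, magnitude pruning), plot
# accuracy as a function of the number of remaining parameters, and dump the same
# numbers as a LaTeX table.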
dataset = {
"Cifar10": "--cifar10",
"Cifar100": "--cifar100",
# "SVHN": "--svhn",
"MNIST": "--mnist"
}
models_data = {
"Cifar10": ["--cifar10-vgg19"],
# "Cifar100": ["--cifar100-resnet20", "--cifar100-resnet50"],
"Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
"SVHN": ["--svhn-vgg19"],
"MNIST":["--mnist-lenet"],
}
color_bars_sparsity = {
2: "g",
3: "c",
4: "b",
5: "y"
}
tasks = {
"nb-param-compressed-total",
"finetuned-score",
"param-compression-rate-total"
}
ylabel_task = {
"nb-param-compressed-total": "log(# non-zero value)",
"finetuned-score": "Accuracy",
"param-compression-rate-total": "Compression Rate"
}
scale_tasks = {
"nb-param-compressed-total": "log",
"finetuned-score": "linear",
"param-compression-rate-total": "linear"
}
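# Each get_*_results() helper below loads the results.csv of a previous experiment run,
# fills missing values and adds/filters the columns needed to merge all methods into a
# single comparable dataframe.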
def get_palm_results():
results_path = "2020/03/9_10_finetune_palminized_no_useless"
results_path_2 = "2020/04/9_10_finetune_palminized_no_useless"
src_results_path = root_source_dir / results_path / "results.csv"
src_results_path_2 = root_source_dir / results_path_2 / "results.csv"
df = pd.read_csv(src_results_path, header=0)
df_2 = pd.read_csv(src_results_path_2, header=0)
df = pd.concat([df, df_2])
df = df.fillna("None")
df = df.drop(columns=["Unnamed: 0", "idx-expe"]).drop_duplicates()
df = df[df["keep-last-layer"] == 0]
df = df[df["use-clr"] == 1]
df = df.assign(**{"only-dense": False, "keep-first-layer": False})
return df
def get_faust_results():
results_path = "2020/05/3_4_finetune_faust_no_hierarchical_only_cifar_mnist"
src_results_path = root_source_dir / results_path / "results.csv"
df = pd.read_csv(src_results_path, header=0)
df = df.fillna("None")
df = df[df["hierarchical"] == False]
df = df.drop(columns=["Unnamed: 0", "idx-expe"]).drop_duplicates()
df = df[df["keep-last-layer"] == 0]
df = df.assign(**{"only-dense": False, "keep-first-layer": False})
return df
def get_tucker_results():
results_path_tucker = "2020/04/0_1_compression_tucker_tensortrain"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
df_tucker_tt = df_tucker_tt.assign(**{"only-dense": False, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "tucker"]
return df_tucker_tt
def get_tensortrain_results():
results_path_tucker = "2020/05/2_3_compression_tensortrain"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt[df_tucker_tt["use-pretrained"] == True]
# df_tucker_tt = df_tucker_tt[df_tucker_tt["only-dense"] == False]
return df_tucker_tt
def get_tucker_tensortrain_only_denseresults():
results_path_tucker = "2020/05/2_3_compression_tucker_tensortrain_only_dense"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt[df_tucker_tt["use-pretrained"] == True]
# df_tucker_tt = df_tucker_tt[df_tucker_tt["only-dense"] == True]
return df_tucker_tt
def get_palm_results_only_dense_keep_first():
results_path = "2020/05/5_6_finetune_sparse_facto_no_hierarchical_keep_first_layer_only_dense"
src_results_path = root_source_dir / results_path / "results.csv"
df = pd.read_csv(src_results_path, header=0)
df = df.fillna("None")
df = df.drop(columns=["Unnamed: 0", "idx-expe"]).drop_duplicates()
# df = df[df["only-dense"] == False]
return df
def get_deepfried_results():
results_path_tucker = "2020/05/5_6_compression_baselines"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt.assign(**{"only-dense": True, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "deepfried"]
return df_tucker_tt
def get_magnitude_results():
results_path_tucker = "2020/05/5_6_compression_baselines"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt.assign(**{"only-dense": True, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "magnitude"]
return df_tucker_tt
if __name__ == "__main__":
root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/processed")
SHOW_FAUST = False
SHOW_KEEP_FIRST_ONLY = False
SHOW_PRETRAINED_ONLY = True
results_path = "2020/05/5_6_finetune_sparse_facto_perf_vs_param"
df_tucker = get_tucker_results()
df_tt = get_tensortrain_results()
df_deepfried = get_deepfried_results()
df_tucker_tt_only_dense = get_tucker_tensortrain_only_denseresults()
df_magnitude = get_magnitude_results()
df_tucker_tt_deepfried = pd.concat([df_tucker, df_tt, df_tucker_tt_only_dense, df_deepfried, df_magnitude])
df_palm = get_palm_results()
df_palm_bis = get_palm_results_only_dense_keep_first()
df_palm = pd.concat([df_palm, df_palm_bis])
# ONLY_DENSE = False
# df_tucker_tt = df_tucker_tt[df_tucker_tt["only-dense"] == ONLY_DENSE]
# df_palm = df_palm[df_palm["only-dense"] == ONLY_DENSE]
root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
output_dir = root_output_dir / results_path / "histogrammes"
output_dir.mkdir(parents=True, exist_ok=True)
# sparsity_factors = sorted(set(df_palminized["--sparsity-factor"]))
# nb_factors = set(df_palm["nb-factor"].values)
hue_by_sparsity= {
2: 10,
3: 60,
4: 110,
5: 180
}
saturation_by_perm = {
1: 50,
0: 75
}
saturation_by_hier = {
1: 50,
0: 75
}
lum_by_clr = {
1: 20,
0: 30
}
lum_by_keep = {
1: 40,
0: 50
}
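    # Marker symbols (dct_symbol) and colors (dct_colors) used to tell the different
    # methods and configurations apart in the scatter plots below.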
dct_symbol = {
"FAUST Q=2": "square",
"FAUST Q=3": "diamond",
"FAUST Q=None": "square-x",
"FAUST Q=None H": "star-square",
"PYQALM Q=2": "square-open",
"PYQALM Q=3": "diamond-open",
"PYQALM Q=None": "hash-open",
"PYQALM Q=None H": "star-square-open",
"PYQALM Q=2 -1": "square-open-dot",
"PYQALM Q=3 -1": "diamond-open-dot",
"PYQALM Q=None -1": "hash-open-dot",
"PYQALM Q=None H -1": "star-square-open-dot",
"PYQALM Q=2 -1 M": "square",
"PYQALM Q=3 -1 M": "diamond",
"PYQALM Q=None -1 M": "hash",
"PYQALM Q=None H -1 M": "star-square",
"Base": "x",
"Tucker": "circle",
"Tucker -1": "circle-dot",
"TT": "triangle-up",
"TT -1": "triangle-up-dot",
"TT -1 pretrained": "triangle-up-open-dot",
"Deepfried": "hexagram",
"Magnitude ": "square",
"Magnitude -1": "square",
}
dct_colors = {
"PALM K=2": "dodgerblue",
"PALM K=3": "darkorchid",
"PALM K=4": "green",
"PALM K=6": "aqua",
"PALM K=8": "cadetblue",
"TT R=2": "orange",
"TT R=6": "gold",
"TT R=10": "red",
"TT R=12": "darkred",
"TT R=14": "indianred",
"Base": "grey",
"Tucker": "pink",
"Tucker + Low Rank 10%": "orange",
"Tucker + Low Rank 20%": "gold",
"Tucker + Low Rank 30%": "red",
"Deepfried": "blueviolet",
"Magnitude 50%": "red",
"Magnitude 70%": "red",
"Magnitude 90%": "red",
}
SIZE_MARKERS = 15
WIDTH_MARKER_LINES = 2
datasets = set(df_palm["dataset"].values)
dct_table = dict()
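    # One scatter plot per (dataset, model, layer subset); the same points are also
    # collected in dct_table for the LaTeX table written at the end of the script.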
for dataname in datasets:
dct_table[dataname] = dict()
df_data_palm = df_palm[df_palm["dataset"] == dataname]
df_tucker_tt_data = df_tucker_tt_deepfried[df_tucker_tt_deepfried["dataset"] == dataname]
df_model_values = set(df_data_palm["model"].values)
for modelname in df_model_values:
dct_table[dataname][modelname] = dict()
df_model_palm = df_data_palm[df_data_palm["model"] == modelname]
df_tucker_tt_model = df_tucker_tt_data[df_tucker_tt_data["model"] == modelname]
for ONLY_DENSE in [True, False]:
df_tucker_tt_model_dense = df_tucker_tt_model[df_tucker_tt_model["only-dense"] == ONLY_DENSE]
df_model_palm_dense = df_model_palm[df_model_palm["only-dense"] == ONLY_DENSE]
if ONLY_DENSE:
str_nb_param_compressed = "nb-param-compressed-dense"
str_nb_param_base = "nb-param-base-dense"
str_only_dense = " only dense"
else:
str_nb_param_compressed = "nb-param-compressed-total"
str_nb_param_base = "nb-param-base-total"
str_only_dense = ""
dct_entry_only_dense = "Dense" if ONLY_DENSE else "Conv+Dense"
dct_table[dataname][modelname][dct_entry_only_dense] = list()
fig = go.Figure()
base_score = None
base_nb_param = None
palm_algo = "PYQALM"
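                # One point per PYQALM configuration: number of factors Q, sparsity K,
                # hierarchical or not, first layer kept or not, mask only or not.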
for idx_row, row in df_model_palm_dense.iterrows():
hierarchical_value = row["hierarchical"]
str_hierarchical = ' H' if hierarchical_value is True else ''
try:
nb_factor = int(row["nb-factor"])
except:
nb_factor = None
sparsity_factor = int(row["sparsity-factor"])
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
only_mask = row["only-mask"]
str_only_mask = " M" if only_mask is True else ""
name_trace = f"{palm_algo} Q={nb_factor} K={sparsity_factor}{str_hierarchical}{str_keep_first}{str_only_mask}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
base_score_tmp = row["base-model-score"]
assert base_score == base_score_tmp or base_score is None
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None
base_score = base_score_tmp
base_nb_param = base_nb_param_tmp
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"{palm_algo} K={sparsity_factor}{str_only_mask}",
marker=dict(
color=dct_colors[f"PALM K={sparsity_factor}"],
symbol=dct_symbol[f"{palm_algo} Q={nb_factor}{str_hierarchical}{str_keep_first}{str_only_mask}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
dct_row = dict()
dct_row["method"] = "Base"
dct_row["perf"] = base_score
dct_row["nb_param"] = base_nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
#############
# base data #
#############
fig.add_trace(
go.Scatter(
x=[base_nb_param],
y=[base_score],
mode='markers',
name="Base",
hovertext="Base",
legendgroup=f"Base",
marker=dict(
color=dct_colors[f"Base"],
symbol=dct_symbol[f"Base"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES,
)
)
))
###############
# tucker data #
###############
df_tucker = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "tucker"]
for idx_row, row in df_tucker.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
try:
rank_percentage = int(float(row["rank-percentage-dense"]) * 100)
except:
try:
rank_percentage = int(float(row["rank-percentage"]) * 100)
except:
rank_percentage = None
str_percentage = f' + Low Rank {rank_percentage}%' if rank_percentage is not None else ''
name_trace = f"Tucker{str_keep_first}{str_percentage}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
base_score_tmp = row["base-model-score"]
assert base_score == base_score_tmp or base_score is None
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None or base_nb_param_tmp == 0, f"{base_nb_param}!={base_nb_param_tmp}"
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"Tucker{str_percentage}",
marker=dict(
color=dct_colors[f"Tucker{str_percentage}"],
symbol=dct_symbol[f"Tucker{str_keep_first}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
###############
# magnitude data #
###############
df_magnitude = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "magnitude"]
for idx_row, row in df_magnitude.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
# try:
sparsity_percentage = int(float(row["final-sparsity"]) * 100)
# except:
# try:
# rank_percentage = int(float(row["rank-percentage"]) * 100)
# except:
# rank_percentage = None
str_percentage = f' {sparsity_percentage}%' #if sparsity_percentage is not None else ''
name_trace = f"Magnitude {str_keep_first}{str_percentage}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
print(finetuned_score)
base_score_tmp = row["base-model-score"]
                    assert base_score is None or np.isclose(base_score, base_score_tmp), f"{base_score}!={base_score_tmp}"
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None or base_nb_param_tmp == 0, f"{base_nb_param}!={base_nb_param_tmp}"
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"Magnitude",
marker=dict(
                                color=dct_colors[f"Magnitude{str_percentage}"],
                                symbol=dct_symbol["Magnitude -1" if keep_first else "Magnitude "],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
###############
# deepfried data #
###############
df_deepfried = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "deepfried"]
for idx_row, row in df_deepfried.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
# try:
# sparsity_percentage = int(float(row["final-sparsity"]) * 100)
# except:
# try:
# rank_percentage = int(float(row["rank-percentage"]) * 100)
# except:
# rank_percentage = None
# str_percentage = f' {sparsity_percentage}%' #if sparsity_percentage is not None else ''
name_trace = f"Deepfried {str_keep_first}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
if nb_param == 0:
conv_nb_weights = row["nb-param-base-total"] - base_nb_param
nb_param = row["nb-param-compressed-total"] - conv_nb_weights
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
print(finetuned_score)
base_score_tmp = row["base-model-score"]
                    assert base_score is None or np.isclose(base_score, base_score_tmp), f"{base_score}!={base_score_tmp}"
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None or base_nb_param_tmp == 0, f"{base_nb_param}!={base_nb_param_tmp}"
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"Deepfried",
marker=dict(
color=dct_colors[f"Deepfried"],
symbol=dct_symbol[f"Deepfried"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
####################
# tensortrain data #
####################
df_tt = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "tensortrain"]
for idx_row, row in df_tt.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
order = int(row["order"])
rank_value = int(row["rank-value"])
if not np.isnan(row["use-pretrained"]):
use_petrained = bool(row["use-pretrained"])
str_pretrained = " pretrained" if use_petrained else ""
else:
use_petrained = False
str_pretrained = ""
if SHOW_PRETRAINED_ONLY and not use_petrained and not ONLY_DENSE:
continue
name_trace = f"Tensortrain{str_keep_first} K={order} R={rank_value}{str_pretrained}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
base_score_tmp = row["base-model-score"]
assert base_score == base_score_tmp or base_score is None
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"TT R={rank_value}",
marker=dict(
color=dct_colors[f"TT R={rank_value}"],
symbol=dct_symbol[f"TT{str_keep_first}{str_pretrained}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
title = "Performance = f(# Param); " + dataname + " " + modelname + str_only_dense
fig.update_layout(title=title,
xaxis_title="# Parameter in Dense and Conv Layers",
yaxis_title="Accuracy (%)",
xaxis_type="log",
)
fig.show()
fig.write_image(str((output_dir / title).absolute()) + ".png")
pprint(dct_table)
# string_table = """
# \begin{tabular}{lcccccccccccccccccccccc}
# \toprule
#
# {} & \multicolumn{2}{c}{ \thead{ Ensemble } } & \multicolumn{2}{c}{ \thead{ Kmeans } } & \multicolumn{2}{c}{ \thead{ NN-OMP\\w/o weights } } & \multicolumn{2}{c}{ \thead{ NN-OMP } } & \multicolumn{2}{c}{ \thead{ OMP\\w/o weights } } & \multicolumn{2}{c}{ \thead{ OMP } } & \multicolumn{2}{c}{ \thead{ Random } } & \multicolumn{2}{c}{ \thead{ Zhang\\Predictions } } & \multicolumn{2}{c}{ \thead{ Zhang\\Similarities } }\\
# \midrule
# Diam. & 3.032E+05 & 86 & \underline{3.024E+05} & \underline{143} & \textbf{3.024E+05} & \textbf{86} & 3.033E+05 & 86 & 3.025E+05 & 143 & \textit{3.087E+05} & \textit{29} & 3.025E+05 & 114 & 3.047E+05 & 143 & 3.032E+05 & 143\\
# Diab. & 3.431E+03 & 32 & \underline{3.281E+03} & \underline{36} & 3.317E+03 & 36 & 3.549E+03 & 36 & 3.324E+03 & 36 & \textit{3.607E+03} & \textit{25} & 3.303E+03 & 32 & 3.282E+03 & 36 & \textbf{3.241E+03} & \textbf{32}\\
# Kin. & 1.892E-02 & 200 & \textit{2.024E-02} & \textit{33} & 1.921E-02 & 133 & \underline{1.809E-02} & \underline{133} & 1.931E-02 & 67 & \textbf{1.776E-02} & \textbf{333} & 2.002E-02 & 333 & 2.089E-02 & 333 & 2.017E-02 & 333\\
# <NAME>. & \underline{2.187E-01} & \underline{267} & \textit{2.449E-01} & \textit{33} & 2.239E-01 & 100 & \textbf{2.180E-01} & \textbf{133} & \textit{2.267E-01} & \textit{33} & 2.197E-01 & 133 & 2.390E-01 & 333 & 2.536E-01 & 333 & 2.452E-01 & 333\\
# Bos. & 1.267E+01 & 30 & \textit{1.278E+01} & \textit{13} & \textbf{1.214E+01} & \textbf{33} & 1.253E+01 & 33 & \underline{1.247E+01} & \underline{27} & \textit{1.293E+01} & \textit{13} & 1.253E+01 & 33 & 1.430E+01 & 33 & 1.283E+01 & 33\\
# \midrule
# Sp. B. & 94.27\% & 133 & 95.52\% & 167 & \textit{95.57\%} & \textit{100} & \underline{\textit{95.59\%}} & \underline{\textit{100}} & 95.56\% & 167 & 95.39\% & 133 & \textbf{95.59\%} & \textbf{167} & 95.45\% & 333 & 95.46\% & 167\\
# St. P. & 98.69\% & 233 & 99.05\% & 267 & \underline{\textit{99.95\%}} & \underline{\textit{67}} & \textbf{99.95\%} & \textbf{100} & \textit{99.64\%} & \textit{67} & 99.90\% & 333 & \textit{99.41\%} & \textit{67} & 99.43\% & 167 & 98.92\% & 300\\
# KR-KP & \textit{98.22\%} & \textit{33} & 99.00\% & 333 & \underline{99.42\%} & \underline{100} & 99.39\% & 100 & 99.22\% & 100 & \textbf{99.48\%} & \textbf{100} & 99.14\% & 267 & 99.14\% & 133 & 98.94\% & 333\\
# B. C. & 95.09\% & 100 & \textbf{\textit{96.58\%}} & \textbf{\textit{33}} & \underline{96.49\%} & \underline{67} & \textbf{96.58\%} & \textbf{67} & 95.79\% & 133 & 95.35\% & 67 & 95.88\% & 300 & \textit{95.70\%} & \textit{33} & 95.61\% & 333\\
# LFW P. & \textit{56.00\%} & \textit{67} & 65.25\% & 333 & \textbf{66.02\%} & \textbf{333} & 65.73\% & 233 & 65.32\% & 133 & 65.55\% & 167 & \underline{65.98\%} & \underline{267} & 65.43\% & 333 & 65.27\% & 333\\
# Gam. & \textit{80.78\%} & \textit{3} & 87.68\% & 33 & \underline{87.75\%} & \underline{33} & \underline{87.75\%} & \underline{33} & \underline{87.75\%} & \underline{33} & \underline{87.75\%} & \underline{33} & \textbf{87.76\%} & \textbf{33} & 87.72\% & 33 & 87.68\% & 33\\
#
# \bottomrule
# \end{tabular}
# """
tab_headers = [
"Dataset",
"Architecture",
"Compressed layers",
"Method",
"Performance",
"# Parameters"
]
str_table = """\\begin{{tabular}}{{cccccc}}
\\toprule
{}
\\bottomrule
    \\end{{tabular}}
"""
lst_lines_tabular = ["&".join(tab_headers)]
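    # Flatten dct_table into LaTeX rows, keeping only the Conv+Dense entries and
    # skipping the PYQALM Q=None runs.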
for dataname in dct_table:
for model in dct_table[dataname]:
for layers in dct_table[dataname][model]:
if layers != "Conv+Dense":
continue
for lin in dct_table[dataname][model][layers]:
if "PYQALM Q=None" in str(lin["method"]):
continue
lst_line = [dataname, model, layers]
lst_line.append(str(lin["method"]))
lst_line.append("{:.2f}".format(lin["perf"]))
lst_line.append(str(int(lin["nb_param"])))
                    str_line = "&".join(lst_line).replace("%", "\\%").replace("#", "\\#")
lst_lines_tabular.append(str_line)
final_string = str_table.format("\\\\ \n".join(lst_lines_tabular) + "\\\\")
with open(str((output_dir / "table.tex").absolute()), 'w') as wf:
wf.write(final_string)
print(final_string)
|
[
"pandas.read_csv",
"plotly.graph_objects.Figure",
"numpy.isnan",
"pathlib.Path",
"numpy.isclose",
"pprint.pprint",
"pandas.concat",
"logging.getLogger"
] |
[((285, 316), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (302, 316), False, 'import logging\n'), ((1573, 1612), 'pandas.read_csv', 'pd.read_csv', (['src_results_path'], {'header': '(0)'}), '(src_results_path, header=0)\n', (1584, 1612), True, 'import pandas as pd\n'), ((1624, 1665), 'pandas.read_csv', 'pd.read_csv', (['src_results_path_2'], {'header': '(0)'}), '(src_results_path_2, header=0)\n', (1635, 1665), True, 'import pandas as pd\n'), ((1675, 1696), 'pandas.concat', 'pd.concat', (['[df, df_2]'], {}), '([df, df_2])\n', (1684, 1696), True, 'import pandas as pd\n'), ((2143, 2182), 'pandas.read_csv', 'pd.read_csv', (['src_results_path'], {'header': '(0)'}), '(src_results_path, header=0)\n', (2154, 2182), True, 'import pandas as pd\n'), ((2650, 2696), 'pandas.read_csv', 'pd.read_csv', (['src_results_path_tucker'], {'header': '(0)'}), '(src_results_path_tucker, header=0)\n', (2661, 2696), True, 'import pandas as pd\n'), ((3132, 3178), 'pandas.read_csv', 'pd.read_csv', (['src_results_path_tucker'], {'header': '(0)'}), '(src_results_path_tucker, header=0)\n', (3143, 3178), True, 'import pandas as pd\n'), ((3632, 3678), 'pandas.read_csv', 'pd.read_csv', (['src_results_path_tucker'], {'header': '(0)'}), '(src_results_path_tucker, header=0)\n', (3643, 3678), True, 'import pandas as pd\n'), ((4123, 4162), 'pandas.read_csv', 'pd.read_csv', (['src_results_path'], {'header': '(0)'}), '(src_results_path, header=0)\n', (4134, 4162), True, 'import pandas as pd\n'), ((4514, 4560), 'pandas.read_csv', 'pd.read_csv', (['src_results_path_tucker'], {'header': '(0)'}), '(src_results_path_tucker, header=0)\n', (4525, 4560), True, 'import pandas as pd\n'), ((4996, 5042), 'pandas.read_csv', 'pd.read_csv', (['src_results_path_tucker'], {'header': '(0)'}), '(src_results_path_tucker, header=0)\n', (5007, 5042), True, 'import pandas as pd\n'), ((5332, 5399), 'pathlib.Path', 'pathlib.Path', (['"""/home/luc/PycharmProjects/palmnet/results/processed"""'], {}), "('/home/luc/PycharmProjects/palmnet/results/processed')\n", (5344, 5399), False, 'import pathlib\n'), ((5824, 5910), 'pandas.concat', 'pd.concat', (['[df_tucker, df_tt, df_tucker_tt_only_dense, df_deepfried, df_magnitude]'], {}), '([df_tucker, df_tt, df_tucker_tt_only_dense, df_deepfried,\n df_magnitude])\n', (5833, 5910), True, 'import pandas as pd\n'), ((6014, 6047), 'pandas.concat', 'pd.concat', (['[df_palm, df_palm_bis]'], {}), '([df_palm, df_palm_bis])\n', (6023, 6047), True, 'import pandas as pd\n'), ((6234, 6300), 'pathlib.Path', 'pathlib.Path', (['"""/home/luc/PycharmProjects/palmnet/reports/figures/"""'], {}), "('/home/luc/PycharmProjects/palmnet/reports/figures/')\n", (6246, 6300), False, 'import pathlib\n'), ((25628, 25645), 'pprint.pprint', 'pprint', (['dct_table'], {}), '(dct_table)\n', (25634, 25645), True, 'from pprint import pprint as pprint\n'), ((9998, 10009), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (10007, 10009), True, 'import plotly.graph_objects as go\n'), ((18241, 18279), 'numpy.isclose', 'np.isclose', (['base_score', 'base_score_tmp'], {}), '(base_score, base_score_tmp)\n', (18251, 18279), True, 'import numpy as np\n'), ((21201, 21239), 'numpy.isclose', 'np.isclose', (['base_score', 'base_score_tmp'], {}), '(base_score, base_score_tmp)\n', (21211, 21239), True, 'import numpy as np\n'), ((22987, 23018), 'numpy.isnan', 'np.isnan', (["row['use-pretrained']"], {}), "(row['use-pretrained'])\n", (22995, 23018), True, 'import numpy as np\n')]
|
#****************************************************#
# This file is part of OPTALG. #
# #
# Copyright (c) 2019, <NAME>. #
# #
# OPTALG is released under the BSD 2-clause license. #
#****************************************************#
from __future__ import print_function
import os
import numpy as np
import tempfile
import subprocess
from . import utils
from .opt_solver_error import *
from .opt_solver import OptSolver
from .problem import OptProblem
from multiprocessing import cpu_count
class OptSolverCplexCMD(OptSolver):
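    # Default options: 'mipgap' and 'feasibility' map to the corresponding CPLEX
    # tolerance settings, 'debug' keeps the temporary files for inspection.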
parameters = {'quiet' : False,
'mipgap': None,
'feasibility': None,
'debug': False}
def __init__(self):
"""
CPLEX solver interface (via command-line interface).
"""
# Check
if not utils.cmd_exists('cplex'):
raise ImportError('cplex cmd not available')
OptSolver.__init__(self)
self.parameters = OptSolverCplexCMD.parameters.copy()
def supports_properties(self, properties):
for p in properties:
if p not in [OptProblem.PROP_CURV_LINEAR,
OptProblem.PROP_VAR_CONTINUOUS,
OptProblem.PROP_VAR_INTEGER,
OptProblem.PROP_TYPE_FEASIBILITY,
OptProblem.PROP_TYPE_OPTIMIZATION]:
return False
return True
def read_solution(self, filename, problem):
import xml.etree.ElementTree as ET
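        # Parse the XML solution file written by the CPLEX interactive optimizer.
        # Variable and constraint names are assumed to follow the '<prefix>_<index>'
        # pattern used when the problem was written out, so the column/row index is
        # recovered from the name; reduced costs fill mu/pi and constraint duals fill lam.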
x = np.zeros(problem.c.size)
lam = np.zeros(problem.A.shape[0])
nu = np.zeros(0)
mu = np.zeros(x.size)
pi = np.zeros(x.size)
tree = ET.parse(filename)
root = tree.getroot()
header = root.find('header')
status = header.get('solutionStatusString')
for var in root.find('variables'):
name = var.get('name')
value = float(var.get('value'))
index = int(name.split('_')[1])
x[index] = value
rcost = var.get('reducedCost')
if rcost is not None:
if float(rcost) > 0.:
pi[index] = float(rcost)
else:
mu[index] = -float(rcost)
for c in root.find('linearConstraints'):
name = c.get('name')
index = int(name.split('_')[1])
dual = c.get('dual')
if dual is not None:
lam[index] = float(dual)
return status, x, lam, nu, mu, pi
def solve(self, problem):
# Local vars
params = self.parameters
# Parameters
quiet = params['quiet']
mipgap = params['mipgap']
feasibility = params['feasibility']
debug = params['debug']
# Problem
try:
self.problem = problem.to_mixintlin()
except:
raise OptSolverError_BadProblemType(self)
# Solve
status = ''
try:
base_name = next(tempfile._get_candidate_names())
input_filename = base_name+'.lp'
output_filename = base_name+'.sol'
self.problem.write_to_lp_file(input_filename)
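            # Build the interactive optimizer command line: read the LP file, optionally
            # set MIP-gap / feasibility tolerances, optimize, and write the solution to an
            # XML file that read_solution() parses afterwards.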
cmd = ['cplex']
cmd += ['-c', 'read', input_filename]
if mipgap is not None:
cmd += ['set mip tolerances mipgap %.2e' %mipgap]
if feasibility is not None:
cmd += ['set simplex tolerances feasibility %.2e' %feasibility]
cmd += ['optimize']
cmd += ['write', output_filename]
cmd += ['quit']
if not quiet:
code = subprocess.call(cmd)
else:
code = subprocess.call(cmd,
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
assert(code == 0)
status, self.x, self.lam, self.nu, self.mu, self.pi = self.read_solution(output_filename, self.problem)
except Exception as e:
raise OptSolverError_CplexCMDCall(self)
finally:
if os.path.isfile(input_filename) and not debug:
os.remove(input_filename)
if os.path.isfile(output_filename) and not debug:
os.remove(output_filename)
if os.path.isfile('cplex.log') and not debug:
os.remove('cplex.log')
for i in range(cpu_count()):
if os.path.isfile('clone%d.log' %i) and not debug:
os.remove('clone%d.log' %i)
if 'optimal' in status.lower():
self.set_status(self.STATUS_SOLVED)
self.set_error_msg('')
else:
raise OptSolverError_CplexCMD(self)
|
[
"xml.etree.ElementTree.parse",
"tempfile._get_candidate_names",
"os.remove",
"numpy.zeros",
"os.path.isfile",
"subprocess.call",
"multiprocessing.cpu_count"
] |
[((1648, 1672), 'numpy.zeros', 'np.zeros', (['problem.c.size'], {}), '(problem.c.size)\n', (1656, 1672), True, 'import numpy as np\n'), ((1687, 1715), 'numpy.zeros', 'np.zeros', (['problem.A.shape[0]'], {}), '(problem.A.shape[0])\n', (1695, 1715), True, 'import numpy as np\n'), ((1729, 1740), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (1737, 1740), True, 'import numpy as np\n'), ((1754, 1770), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (1762, 1770), True, 'import numpy as np\n'), ((1784, 1800), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (1792, 1800), True, 'import numpy as np\n'), ((1817, 1835), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (1825, 1835), True, 'import xml.etree.ElementTree as ET\n'), ((3138, 3169), 'tempfile._get_candidate_names', 'tempfile._get_candidate_names', ([], {}), '()\n', (3167, 3169), False, 'import tempfile\n'), ((3775, 3795), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (3790, 3795), False, 'import subprocess\n'), ((4253, 4283), 'os.path.isfile', 'os.path.isfile', (['input_filename'], {}), '(input_filename)\n', (4267, 4283), False, 'import os\n'), ((4315, 4340), 'os.remove', 'os.remove', (['input_filename'], {}), '(input_filename)\n', (4324, 4340), False, 'import os\n'), ((4356, 4387), 'os.path.isfile', 'os.path.isfile', (['output_filename'], {}), '(output_filename)\n', (4370, 4387), False, 'import os\n'), ((4419, 4445), 'os.remove', 'os.remove', (['output_filename'], {}), '(output_filename)\n', (4428, 4445), False, 'import os\n'), ((4461, 4488), 'os.path.isfile', 'os.path.isfile', (['"""cplex.log"""'], {}), "('cplex.log')\n", (4475, 4488), False, 'import os\n'), ((4520, 4542), 'os.remove', 'os.remove', (['"""cplex.log"""'], {}), "('cplex.log')\n", (4529, 4542), False, 'import os\n'), ((4570, 4581), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4579, 4581), False, 'from multiprocessing import cpu_count\n'), ((4603, 4636), 'os.path.isfile', 'os.path.isfile', (["('clone%d.log' % i)"], {}), "('clone%d.log' % i)\n", (4617, 4636), False, 'import os\n'), ((4671, 4699), 'os.remove', 'os.remove', (["('clone%d.log' % i)"], {}), "('clone%d.log' % i)\n", (4680, 4699), False, 'import os\n')]
|
import copy
import os
from functools import reduce
from pathlib import Path
import chainer
import chainer.functions as F
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import six
from chainer import configuration, cuda, function
from chainer import reporter as reporter_module
from chainer.dataset import convert
from chainer.training.extensions import Evaluator
from chainermn import CommunicatorBase
from sklearn import metrics
from tqdm import tqdm
def _to_list(a):
"""convert value `a` to list
Args:
        a: value to be converted to `list`
Returns (list):
"""
if isinstance(a, (int, float)):
return [a, ]
else:
# expected to be list or some iterable class
return a
def plot_roc(y_true, y_score, out_name):
fpr, tpr, thresholds = metrics.roc_curve(y_true=y_true, y_score=y_score)
auc = metrics.auc(fpr, tpr)
plt.clf()
plt.plot(fpr, tpr, label='ROC curve (area = %.3f)' % auc)
plt.legend()
plt.title('ROC curve', fontsize=16)
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.grid(True)
plt.savefig(out_name)
class Classification_Evaluator(Evaluator):
"""Evaluator which calculates auc and correlation
    Note that this Evaluator is only applicable to binary classification tasks.
Args:
iterator: Dataset iterator for the dataset to calculate pearson.
It can also be a dictionary of iterators. If this is just an
iterator, the iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays and true label.
:func:`~chainer.dataset.concat_examples` is used by default.
It is expected to return input arrays of the form
`[x_0, ..., x_n, t]`, where `x_0, ..., x_n` are the inputs to
the evaluation function and `t` is the true label.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
name (str): name of this extension. When `name` is None,
`default_name='validation'` which is defined in super class
`Evaluator` is used as extension name. This name affects to the
reported key name.
pos_labels (int or list): labels of the positive class, other classes
are considered as negative.
        ignore_labels (int or list or None): labels to be ignored.
            `None` means that no labels are ignored.
Attributes:
converter: Converter function.
device: Device to which the training data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
pos_labels (list): labels of the positive class
ignore_labels (list): labels to be ignored.
"""
def __init__(self, iterator, target, comm, label_name, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, name=None,
pos_labels=1, ignore_labels=None, path_data=None):
super(Classification_Evaluator, self).__init__(
iterator, target, converter=converter, device=device,
eval_hook=eval_hook, eval_func=eval_func)
self.rank = comm.rank
self.name = name
self.pos_labels = _to_list(pos_labels)
self.ignore_labels = _to_list(ignore_labels)
self.comm = comm
self.label_name = label_name
self.path_data = path_data
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name, target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate_roc_corr(trainer=trainer)
reporter_module.report(result)
return result
def evaluate_roc_corr(self, trainer):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
y_total = np.array([]).reshape([0, len(self.label_name)])
t_total = np.array([]).reshape([0, len(self.label_name)])
protein_id_total = np.array([]).reshape([0, len(self.label_name)])
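        # Run the model over the whole validation iterator and accumulate raw outputs,
        # labels and protein ids on the CPU (one column per target label).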
for batch in it:
in_arrays = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
y = eval_func(*in_arrays[:-2])
t = in_arrays[-2]
protein_id = in_arrays[-1]
# y = F.sigmoid(y)
y_data = cuda.to_cpu(y.data)
t_data = cuda.to_cpu(t)
protein_id = cuda.to_cpu(protein_id)
y_total = np.vstack([y_total, y_data])
t_total = np.vstack([t_total, t_data])
protein_id_total = np.vstack([protein_id_total, protein_id])
updater = trainer.updater
epoch = str(updater.epoch)
out_dir = Path(trainer.out)
observation = {}
for label_index, label in enumerate(self.label_name):
y = y_total[:, label_index]
t = t_total[:, label_index]
protein_id = protein_id_total[:, label_index]
index = np.where(t != -1)[0]
y = y[index]
t = t[index]
protein_id = protein_id[index]
gather_data = self.comm.gather(np.vstack([t, y, protein_id]))
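            # Gather the per-model predictions from all workers; only rank 0 computes the
            # ROC/AUC and the per-target Pearson/Spearman correlations between the mean
            # predicted score of each model and its gdtts label.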
if self.rank == 0:
gather_data = np.concatenate(gather_data, axis=1)
gather_t = np.array(gather_data[0], dtype=np.int)
gather_y = np.array(gather_data[1], dtype=np.float32)
gather_protein_id = np.array(gather_data[2], dtype=np.int)
global_score = []
global_label = []
target_name = []
model_path = []
for row, item in self.path_data.iterrows():
model_index = np.where(gather_protein_id==row)[0]
if len(model_index) > 0:
global_score.append(np.mean(F.sigmoid(gather_y[model_index]).data))
global_label.append(item['gdtts'])
target_name.append(item['dir_name'])
model_path.append(item['path'])
df = pd.DataFrame({'global_score':global_score, 'global_label':global_label, 'target_name':target_name, 'model_path': model_path})
pearson = df.groupby('target_name').corr(method='pearson')['global_score'].mean(level=1)['global_label']
spearman = df.groupby('target_name').corr(method='spearman')['global_score'].mean(level=1)['global_label']
csv_out_name = out_dir/(epoch+label+'_df.csv')
df.to_csv(csv_out_name)
roc_out_name = out_dir/(epoch+'iteration_'+label+'_roc.png')
y_score = F.sigmoid(gather_y).data
plot_roc(y_true=gather_t, y_score=y_score, out_name=roc_out_name)
roc_auc = metrics.roc_auc_score(gather_t, y_score)
np.savez((out_dir/epoch).with_suffix('.npz'), local_label=gather_t, local_score=y_score, protein_id=gather_protein_id)
with reporter.report_scope(observation):
reporter.report({'roc_auc_'+label: roc_auc}, self._targets['main'])
reporter.report({'loss': F.sigmoid_cross_entropy(gather_y, gather_t).data},
self._targets['main'])
reporter.report({'accuracy': F.binary_accuracy(gather_y, gather_t).data}, self._targets['main'])
reporter.report({'pearson': pearson}, self._targets['main'])
reporter.report({'spearman': spearman}, self._targets['main'])
return observation
class MultiClassification_Evaluator(Evaluator):
"""Evaluator which calculates auc and correlation
    Note that this Evaluator is only applicable to multi-class classification tasks.
Args:
iterator: Dataset iterator for the dataset to calculate pearson.
It can also be a dictionary of iterators. If this is just an
iterator, the iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays and true label.
:func:`~chainer.dataset.concat_examples` is used by default.
It is expected to return input arrays of the form
`[x_0, ..., x_n, t]`, where `x_0, ..., x_n` are the inputs to
the evaluation function and `t` is the true label.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
name (str): name of this extension. When `name` is None,
`default_name='validation'` which is defined in super class
`Evaluator` is used as extension name. This name affects to the
reported key name.
pos_labels (int or list): labels of the positive class, other classes
are considered as negative.
        ignore_labels (int or list or None): labels to be ignored.
            `None` means that no labels are ignored.
Attributes:
converter: Converter function.
device: Device to which the training data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
pos_labels (list): labels of the positive class
ignore_labels (list): labels to be ignored.
"""
def __init__(self, iterator, target, comm, label_name, class_num,
converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, name=None,
pos_labels=1, ignore_labels=None, path_data=None):
super(MultiClassification_Evaluator, self).__init__(
iterator, target, converter=converter, device=device,
eval_hook=eval_hook, eval_func=eval_func)
self.rank = comm.rank
self.class_num = class_num
self.name = name
self.pos_labels = _to_list(pos_labels)
self.ignore_labels = _to_list(ignore_labels)
self.comm = comm
self.label_name = label_name
self.path_data = path_data
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name, target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate_corr(trainer=trainer)
reporter_module.report(result)
return result
def evaluate_corr(self, trainer):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
y_total = np.array([]).reshape([0, self.class_num])
t_total = np.array([], dtype=np.int)
protein_id_total = np.array([], dtype=np.int)
for batch in it:
in_arrays = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
y = eval_func(*in_arrays[:-2])
t = in_arrays[-2]
protein_id = in_arrays[-1]
# y = F.sigmoid(y)
y_data = cuda.to_cpu(y.data)
t_data = cuda.to_cpu(t)
protein_id = cuda.to_cpu(protein_id)
y_total = np.vstack([y_total, y_data])
t_total = np.concatenate([t_total, t_data])
protein_id_total = np.concatenate([protein_id_total, protein_id])
updater = trainer.updater
epoch = str(updater.epoch)
out_dir = Path(trainer.out)
observation = {}
gather_data = self.comm.gather(np.hstack([t_total.reshape(-1,1), y_total, protein_id_total.reshape(-1,1)]))
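        # Gather (label, per-class scores, protein id) rows from all workers; rank 0
        # converts the argmax class into a scalar score and correlates the per-model
        # mean with the gdtts label.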
if self.rank == 0:
gather_data = np.concatenate(gather_data)
gather_t = gather_data[:, 0].astype(np.int)
gather_y = gather_data[:, 1:-1].astype(np.float32)
gather_protein_id = gather_data[:, -1].astype(np.int)
global_score = []
global_label = []
target_name = []
model_path = []
for row, item in self.path_data.iterrows():
model_index = np.where(gather_protein_id==row)[0]
if len(model_index) > 0:
local_score = np.argmax(gather_y[model_index], axis=1)/self.class_num
global_score.append(np.mean(local_score))
global_label.append(item['gdtts'])
target_name.append(item['dir_name'])
model_path.append(item['path'])
df = pd.DataFrame({'global_score':global_score, 'global_label':global_label, 'target_name':target_name, 'model_path': model_path})
pearson = df.groupby('target_name').corr(method='pearson')['global_score'].mean(level=1)['global_label']
spearman = df.groupby('target_name').corr(method='spearman')['global_score'].mean(level=1)['global_label']
csv_out_name = out_dir/(epoch+'_df.csv')
df.to_csv(csv_out_name)
            # local_score: gather_y (the raw per-class scores) is assumed to be the intended array here
            np.savez((out_dir/epoch).with_suffix('.npz'), local_label=gather_t, local_score=gather_y, protein_id=gather_protein_id)
with reporter.report_scope(observation):
reporter.report({'loss': F.softmax_cross_entropy(gather_y, gather_t).data},
self._targets['main'])
reporter.report({'accuracy': F.accuracy(gather_y, gather_t).data}, self._targets['main'])
reporter.report({'pearson': pearson}, self._targets['main'])
reporter.report({'spearman': spearman}, self._targets['main'])
return observation
class Regression_Evaluator(Evaluator):
"""Evaluator which calculates correlation
Args:
iterator: Dataset iterator for the dataset to calculate pearson.
It can also be a dictionary of iterators. If this is just an
iterator, the iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays and true label.
:func:`~chainer.dataset.concat_examples` is used by default.
It is expected to return input arrays of the form
`[x_0, ..., x_n, t]`, where `x_0, ..., x_n` are the inputs to
the evaluation function and `t` is the true label.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
name (str): name of this extension. When `name` is None,
`default_name='validation'` which is defined in super class
`Evaluator` is used as extension name. This name affects to the
reported key name.
pos_labels (int or list): labels of the positive class, other classes
are considered as negative.
        ignore_labels (int or list or None): labels to be ignored.
            `None` means that no labels are ignored.
Attributes:
converter: Converter function.
device: Device to which the training data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
pos_labels (list): labels of the positive class
ignore_labels (list): labels to be ignored.
"""
def __init__(self, iterator, target, comm, label_name, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, name=None,
pos_labels=1, ignore_labels=None, path_data=None):
super(Regression_Evaluator, self).__init__(
iterator, target, converter=converter, device=device,
eval_hook=eval_hook, eval_func=eval_func)
self.rank = comm.rank
self.name = name
self.pos_labels = _to_list(pos_labels)
self.ignore_labels = _to_list(ignore_labels)
self.comm = comm
self.label_name = label_name
self.path_data = path_data
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name, target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate_corr(trainer=trainer)
reporter_module.report(result)
return result
def evaluate_corr(self, trainer):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
y_total = np.array([]).reshape([0, len(self.label_name)])
t_total = np.array([]).reshape([0, len(self.label_name)])
protein_id_total = np.array([]).reshape([0, len(self.label_name)])
for batch in it:
in_arrays = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
y = eval_func(*in_arrays[:-2])
t = in_arrays[-2]
protein_id = in_arrays[-1]
# y = F.sigmoid(y)
y_data = cuda.to_cpu(y.data)
t_data = cuda.to_cpu(t)
protein_id = cuda.to_cpu(protein_id)
y_total = np.vstack([y_total, y_data])
t_total = np.vstack([t_total, t_data])
protein_id_total = np.vstack([protein_id_total, protein_id])
updater = trainer.updater
epoch = str(updater.epoch)
out_dir = Path(trainer.out)
observation = {}
for label_index, label in enumerate(self.label_name):
y = y_total[:, label_index]
t = t_total[:, label_index]
protein_id = protein_id_total[:, label_index]
index = np.where(t != -1)[0]
y = y[index]
t = t[index]
protein_id = protein_id[index]
gather_data = self.comm.gather(np.vstack([t, y, protein_id]))
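            # Same gather/correlation scheme as the classification evaluator, but the raw
            # regression outputs are used directly as per-model scores.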
if self.rank == 0:
gather_data = np.concatenate(gather_data, axis=1)
gather_t = np.array(gather_data[0], dtype=np.float32)
gather_y = np.array(gather_data[1], dtype=np.float32)
gather_protein_id = np.array(gather_data[2], dtype=np.int)
global_score = []
global_label = []
target_name = []
model_path = []
for row, item in self.path_data.iterrows():
model_index = np.where(gather_protein_id==row)[0]
if len(model_index) > 0:
global_score.append(np.mean(gather_y[model_index]))
global_label.append(item['gdtts'])
target_name.append(item['dir_name'])
model_path.append(item['path'])
df = pd.DataFrame({'global_score':global_score, 'global_label':global_label, 'target_name':target_name, 'model_path': model_path})
pearson = df.groupby('target_name').corr(method='pearson')['global_score'].mean(level=1)['global_label']
spearman = df.groupby('target_name').corr(method='spearman')['global_score'].mean(level=1)['global_label']
csv_out_name = out_dir/(epoch+label+'_df.csv')
df.to_csv(csv_out_name)
                # local_score: gather_y (the raw regression outputs) is assumed to be the intended array here
                np.savez((out_dir/epoch).with_suffix('.npz'), local_label=gather_t, local_score=gather_y, protein_id=gather_protein_id)
with reporter.report_scope(observation):
reporter.report({'loss': F.mean_squared_error(gather_y, gather_t).data},
self._targets['main'])
reporter.report({'accuracy': F.r2_score(gather_y, gather_t).data}, self._targets['main'])
reporter.report({'pearson': pearson}, self._targets['main'])
reporter.report({'spearman': spearman}, self._targets['main'])
return observation
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"numpy.argmax",
"chainer.reporter.Reporter",
"pathlib.Path",
"chainer.no_backprop_mode",
"numpy.mean",
"six.iteritems",
"pandas.DataFrame",
"chainer.functions.softmax_cross_entropy",
"chainer.functions.sigmoid_cross_entropy",
"chainer.cuda.to_cpu",
"chainer.functions.mean_squared_error",
"chainer.functions.r2_score",
"matplotlib.pyplot.legend",
"sklearn.metrics.roc_auc_score",
"matplotlib.use",
"chainer.functions.sigmoid",
"chainer.configuration.using_config",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"chainer.functions.accuracy",
"numpy.vstack",
"numpy.concatenate",
"chainer.functions.binary_accuracy",
"matplotlib.pyplot.plot",
"sklearn.metrics.roc_curve",
"chainer.reporter.report",
"copy.copy",
"sklearn.metrics.auc",
"numpy.where",
"numpy.array",
"chainer.using_config",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((140, 161), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (154, 161), False, 'import matplotlib\n'), ((855, 904), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', ([], {'y_true': 'y_true', 'y_score': 'y_score'}), '(y_true=y_true, y_score=y_score)\n', (872, 904), False, 'from sklearn import metrics\n'), ((915, 936), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (926, 936), False, 'from sklearn import metrics\n'), ((941, 950), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (948, 950), True, 'import matplotlib.pyplot as plt\n'), ((955, 1012), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'label': "('ROC curve (area = %.3f)' % auc)"}), "(fpr, tpr, label='ROC curve (area = %.3f)' % auc)\n", (963, 1012), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1029), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1027, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1069), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curve"""'], {'fontsize': '(16)'}), "('ROC curve', fontsize=16)\n", (1043, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1120), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(16)'}), "('False Positive Rate', fontsize=16)\n", (1084, 1120), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1170), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(16)'}), "('True Positive Rate', fontsize=16)\n", (1135, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1189), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1183, 1189), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_name'], {}), '(out_name)\n', (1205, 1215), True, 'import matplotlib.pyplot as plt\n'), ((4944, 4970), 'chainer.reporter.Reporter', 'reporter_module.Reporter', ([], {}), '()\n', (4968, 4970), True, 'from chainer import reporter as reporter_module\n'), ((5108, 5136), 'six.iteritems', 'six.iteritems', (['self._targets'], {}), '(self._targets)\n', (5121, 5136), False, 'import six\n'), ((5438, 5468), 'chainer.reporter.report', 'reporter_module.report', (['result'], {}), '(result)\n', (5460, 5468), True, 'from chainer import reporter as reporter_module\n'), ((6780, 6797), 'pathlib.Path', 'Path', (['trainer.out'], {}), '(trainer.out)\n', (6784, 6797), False, 'from pathlib import Path\n'), ((13436, 13462), 'chainer.reporter.Reporter', 'reporter_module.Reporter', ([], {}), '()\n', (13460, 13462), True, 'from chainer import reporter as reporter_module\n'), ((13600, 13628), 'six.iteritems', 'six.iteritems', (['self._targets'], {}), '(self._targets)\n', (13613, 13628), False, 'import six\n'), ((13926, 13956), 'chainer.reporter.report', 'reporter_module.report', (['result'], {}), '(result)\n', (13948, 13956), True, 'from chainer import reporter as reporter_module\n'), ((14415, 14441), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (14423, 14441), True, 'import numpy as np\n'), ((14469, 14495), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (14477, 14495), True, 'import numpy as np\n'), ((15226, 15243), 'pathlib.Path', 'Path', (['trainer.out'], {}), '(trainer.out)\n', (15230, 15243), False, 'from pathlib import Path\n'), ((20979, 21005), 'chainer.reporter.Reporter', 'reporter_module.Reporter', ([], {}), '()\n', (21003, 21005), True, 'from chainer import reporter as reporter_module\n'), 
((21143, 21171), 'six.iteritems', 'six.iteritems', (['self._targets'], {}), '(self._targets)\n', (21156, 21171), False, 'import six\n'), ((21469, 21499), 'chainer.reporter.report', 'reporter_module.report', (['result'], {}), '(result)\n', (21491, 21499), True, 'from chainer import reporter as reporter_module\n'), ((22807, 22824), 'pathlib.Path', 'Path', (['trainer.out'], {}), '(trainer.out)\n', (22811, 22824), False, 'from pathlib import Path\n'), ((5832, 5851), 'copy.copy', 'copy.copy', (['iterator'], {}), '(iterator)\n', (5841, 5851), False, 'import copy\n'), ((6404, 6423), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['y.data'], {}), '(y.data)\n', (6415, 6423), False, 'from chainer import configuration, cuda, function\n'), ((6445, 6459), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['t'], {}), '(t)\n', (6456, 6459), False, 'from chainer import configuration, cuda, function\n'), ((6485, 6508), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['protein_id'], {}), '(protein_id)\n', (6496, 6508), False, 'from chainer import configuration, cuda, function\n'), ((6531, 6559), 'numpy.vstack', 'np.vstack', (['[y_total, y_data]'], {}), '([y_total, y_data])\n', (6540, 6559), True, 'import numpy as np\n'), ((6582, 6610), 'numpy.vstack', 'np.vstack', (['[t_total, t_data]'], {}), '([t_total, t_data])\n', (6591, 6610), True, 'import numpy as np\n'), ((6642, 6683), 'numpy.vstack', 'np.vstack', (['[protein_id_total, protein_id]'], {}), '([protein_id_total, protein_id])\n', (6651, 6683), True, 'import numpy as np\n'), ((14316, 14335), 'copy.copy', 'copy.copy', (['iterator'], {}), '(iterator)\n', (14325, 14335), False, 'import copy\n'), ((14840, 14859), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['y.data'], {}), '(y.data)\n', (14851, 14859), False, 'from chainer import configuration, cuda, function\n'), ((14881, 14895), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['t'], {}), '(t)\n', (14892, 14895), False, 'from chainer import configuration, cuda, function\n'), ((14921, 14944), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['protein_id'], {}), '(protein_id)\n', (14932, 14944), False, 'from chainer import configuration, cuda, function\n'), ((14967, 14995), 'numpy.vstack', 'np.vstack', (['[y_total, y_data]'], {}), '([y_total, y_data])\n', (14976, 14995), True, 'import numpy as np\n'), ((15018, 15051), 'numpy.concatenate', 'np.concatenate', (['[t_total, t_data]'], {}), '([t_total, t_data])\n', (15032, 15051), True, 'import numpy as np\n'), ((15083, 15129), 'numpy.concatenate', 'np.concatenate', (['[protein_id_total, protein_id]'], {}), '([protein_id_total, protein_id])\n', (15097, 15129), True, 'import numpy as np\n'), ((15439, 15466), 'numpy.concatenate', 'np.concatenate', (['gather_data'], {}), '(gather_data)\n', (15453, 15466), True, 'import numpy as np\n'), ((16266, 16398), 'pandas.DataFrame', 'pd.DataFrame', (["{'global_score': global_score, 'global_label': global_label, 'target_name':\n target_name, 'model_path': model_path}"], {}), "({'global_score': global_score, 'global_label': global_label,\n 'target_name': target_name, 'model_path': model_path})\n", (16278, 16398), True, 'import pandas as pd\n'), ((21859, 21878), 'copy.copy', 'copy.copy', (['iterator'], {}), '(iterator)\n', (21868, 21878), False, 'import copy\n'), ((22431, 22450), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['y.data'], {}), '(y.data)\n', (22442, 22450), False, 'from chainer import configuration, cuda, function\n'), ((22472, 22486), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['t'], {}), '(t)\n', (22483, 22486), False, 'from chainer import configuration, cuda, function\n'), 
((22512, 22535), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['protein_id'], {}), '(protein_id)\n', (22523, 22535), False, 'from chainer import configuration, cuda, function\n'), ((22558, 22586), 'numpy.vstack', 'np.vstack', (['[y_total, y_data]'], {}), '([y_total, y_data])\n', (22567, 22586), True, 'import numpy as np\n'), ((22609, 22637), 'numpy.vstack', 'np.vstack', (['[t_total, t_data]'], {}), '([t_total, t_data])\n', (22618, 22637), True, 'import numpy as np\n'), ((22669, 22710), 'numpy.vstack', 'np.vstack', (['[protein_id_total, protein_id]'], {}), '([protein_id_total, protein_id])\n', (22678, 22710), True, 'import numpy as np\n'), ((5320, 5362), 'chainer.configuration.using_config', 'configuration.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (5346, 5362), False, 'from chainer import configuration, cuda, function\n'), ((5871, 5883), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5879, 5883), True, 'import numpy as np\n'), ((5937, 5949), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5945, 5949), True, 'import numpy as np\n'), ((6012, 6024), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6020, 6024), True, 'import numpy as np\n'), ((6162, 6188), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (6186, 6188), False, 'import chainer\n'), ((6190, 6226), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (6210, 6226), False, 'import chainer\n'), ((7044, 7061), 'numpy.where', 'np.where', (['(t != -1)'], {}), '(t != -1)\n', (7052, 7061), True, 'import numpy as np\n'), ((7201, 7230), 'numpy.vstack', 'np.vstack', (['[t, y, protein_id]'], {}), '([t, y, protein_id])\n', (7210, 7230), True, 'import numpy as np\n'), ((7293, 7328), 'numpy.concatenate', 'np.concatenate', (['gather_data'], {'axis': '(1)'}), '(gather_data, axis=1)\n', (7307, 7328), True, 'import numpy as np\n'), ((7356, 7394), 'numpy.array', 'np.array', (['gather_data[0]'], {'dtype': 'np.int'}), '(gather_data[0], dtype=np.int)\n', (7364, 7394), True, 'import numpy as np\n'), ((7422, 7464), 'numpy.array', 'np.array', (['gather_data[1]'], {'dtype': 'np.float32'}), '(gather_data[1], dtype=np.float32)\n', (7430, 7464), True, 'import numpy as np\n'), ((7501, 7539), 'numpy.array', 'np.array', (['gather_data[2]'], {'dtype': 'np.int'}), '(gather_data[2], dtype=np.int)\n', (7509, 7539), True, 'import numpy as np\n'), ((8138, 8270), 'pandas.DataFrame', 'pd.DataFrame', (["{'global_score': global_score, 'global_label': global_label, 'target_name':\n target_name, 'model_path': model_path}"], {}), "({'global_score': global_score, 'global_label': global_label,\n 'target_name': target_name, 'model_path': model_path})\n", (8150, 8270), True, 'import pandas as pd\n'), ((8848, 8888), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['gather_t', 'y_score'], {}), '(gather_t, y_score)\n', (8869, 8888), False, 'from sklearn import metrics\n'), ((13812, 13854), 'chainer.configuration.using_config', 'configuration.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (13838, 13854), False, 'from chainer import configuration, cuda, function\n'), ((14355, 14367), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14363, 14367), True, 'import numpy as np\n'), ((14598, 14624), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (14622, 14624), False, 'import chainer\n'), ((14626, 14662), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (14646, 
14662), False, 'import chainer\n'), ((21355, 21397), 'chainer.configuration.using_config', 'configuration.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (21381, 21397), False, 'from chainer import configuration, cuda, function\n'), ((21898, 21910), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (21906, 21910), True, 'import numpy as np\n'), ((21964, 21976), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (21972, 21976), True, 'import numpy as np\n'), ((22039, 22051), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22047, 22051), True, 'import numpy as np\n'), ((22189, 22215), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (22213, 22215), False, 'import chainer\n'), ((22217, 22253), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (22237, 22253), False, 'import chainer\n'), ((23071, 23088), 'numpy.where', 'np.where', (['(t != -1)'], {}), '(t != -1)\n', (23079, 23088), True, 'import numpy as np\n'), ((23228, 23257), 'numpy.vstack', 'np.vstack', (['[t, y, protein_id]'], {}), '([t, y, protein_id])\n', (23237, 23257), True, 'import numpy as np\n'), ((23320, 23355), 'numpy.concatenate', 'np.concatenate', (['gather_data'], {'axis': '(1)'}), '(gather_data, axis=1)\n', (23334, 23355), True, 'import numpy as np\n'), ((23383, 23425), 'numpy.array', 'np.array', (['gather_data[0]'], {'dtype': 'np.float32'}), '(gather_data[0], dtype=np.float32)\n', (23391, 23425), True, 'import numpy as np\n'), ((23453, 23495), 'numpy.array', 'np.array', (['gather_data[1]'], {'dtype': 'np.float32'}), '(gather_data[1], dtype=np.float32)\n', (23461, 23495), True, 'import numpy as np\n'), ((23532, 23570), 'numpy.array', 'np.array', (['gather_data[2]'], {'dtype': 'np.int'}), '(gather_data[2], dtype=np.int)\n', (23540, 23570), True, 'import numpy as np\n'), ((24153, 24285), 'pandas.DataFrame', 'pd.DataFrame', (["{'global_score': global_score, 'global_label': global_label, 'target_name':\n target_name, 'model_path': model_path}"], {}), "({'global_score': global_score, 'global_label': global_label,\n 'target_name': target_name, 'model_path': model_path})\n", (24165, 24285), True, 'import pandas as pd\n'), ((8715, 8734), 'chainer.functions.sigmoid', 'F.sigmoid', (['gather_y'], {}), '(gather_y)\n', (8724, 8734), True, 'import chainer.functions as F\n'), ((15856, 15890), 'numpy.where', 'np.where', (['(gather_protein_id == row)'], {}), '(gather_protein_id == row)\n', (15864, 15890), True, 'import numpy as np\n'), ((7768, 7802), 'numpy.where', 'np.where', (['(gather_protein_id == row)'], {}), '(gather_protein_id == row)\n', (7776, 7802), True, 'import numpy as np\n'), ((15967, 16007), 'numpy.argmax', 'np.argmax', (['gather_y[model_index]'], {'axis': '(1)'}), '(gather_y[model_index], axis=1)\n', (15976, 16007), True, 'import numpy as np\n'), ((16063, 16083), 'numpy.mean', 'np.mean', (['local_score'], {}), '(local_score)\n', (16070, 16083), True, 'import numpy as np\n'), ((23799, 23833), 'numpy.where', 'np.where', (['(gather_protein_id == row)'], {}), '(gather_protein_id == row)\n', (23807, 23833), True, 'import numpy as np\n'), ((16942, 16985), 'chainer.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', (['gather_y', 'gather_t'], {}), '(gather_y, gather_t)\n', (16965, 16985), True, 'import chainer.functions as F\n'), ((17097, 17127), 'chainer.functions.accuracy', 'F.accuracy', (['gather_y', 'gather_t'], {}), '(gather_y, gather_t)\n', (17107, 17127), True, 'import chainer.functions as F\n'), ((23924, 23954), 
'numpy.mean', 'np.mean', (['gather_y[model_index]'], {}), '(gather_y[model_index])\n', (23931, 23954), True, 'import numpy as np\n'), ((9214, 9257), 'chainer.functions.sigmoid_cross_entropy', 'F.sigmoid_cross_entropy', (['gather_y', 'gather_t'], {}), '(gather_y, gather_t)\n', (9237, 9257), True, 'import chainer.functions as F\n'), ((9373, 9410), 'chainer.functions.binary_accuracy', 'F.binary_accuracy', (['gather_y', 'gather_t'], {}), '(gather_y, gather_t)\n', (9390, 9410), True, 'import chainer.functions as F\n'), ((24863, 24903), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['gather_y', 'gather_t'], {}), '(gather_y, gather_t)\n', (24883, 24903), True, 'import chainer.functions as F\n'), ((25019, 25049), 'chainer.functions.r2_score', 'F.r2_score', (['gather_y', 'gather_t'], {}), '(gather_y, gather_t)\n', (25029, 25049), True, 'import chainer.functions as F\n'), ((7901, 7933), 'chainer.functions.sigmoid', 'F.sigmoid', (['gather_y[model_index]'], {}), '(gather_y[model_index])\n', (7910, 7933), True, 'import chainer.functions as F\n')]
|
from __future__ import print_function
import numpy
OPENOPT = SCIPY = True
try:
from openopt import NLP
except ImportError:
OPENOPT = False
try:
from scipy.optimize import minimize
except ImportError:
SCIPY = False
SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP']
OPENOPT_LOCAL_SOLVERS = ['bobyqa', 'ptn', 'slmvm2', 'ralg', 'mma', 'auglag', 'sqlcp']
def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None,
totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5,
tabustrategy='farthest', fmin=-numpy.inf, disp=None):
"""
Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for
Global Optimization) algorithm.
:param `objfun`: Function to be optimized, in the form ``f(x, *args)``.
:type `objfun`: callable
:param `args`: Additional arguments passed to `objfun`.
:type `args`: tuple
:param `local`: The local minimization method (e.g. ``"L-BFGS-B"``). It can be one of the available
`scipy` local solvers or `OpenOpt` solvers.
:type `local`: string
:param `bounds`: A list of tuples specifying the lower and upper bound for each independent variable
[(`xl0`, `xu0`), (`xl1`, `xu1`), ...]
:type `bounds`: list
:param `maxfunevals`: The maximum number of function evaluations allowed.
:type `maxfunevals`: integer
:param `totaliter`: The maximum number of global iterations allowed.
:type `totaliter`: integer
:param `maxiter`: The maximum number of `Tabu Tunnelling` iterations allowed during each global iteration.
:type `maxiter`: integer
:param `glbtol`: The optimization will stop if the absolute difference between the current minimum objective
function value and the provided global optimum (`fmin`) is less than `glbtol`.
:type `glbtol`: float
:param `eps1`: A constant used to define an aspiration value for the objective function during the Tunnelling phase.
:type `eps1`: float
:param `eps2`: Perturbation factor used to move away from the latest local minimum at the start of a Tunnelling phase.
:type `eps2`: float
:param `tabulistsize`: The size of the tabu search list (a circular list).
:type `tabulistsize`: integer
:param `tabustrategy`: The strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be
'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from
the last local minimum found.
:type `tabustrategy`: string
:param `fmin`: If known, the objective function global optimum value.
:type `fmin`: float
:param `disp`: If zero or defaulted, then no output is printed on screen. If a positive number, then status
messages are printed.
:type `disp`: integer
:returns: A tuple of 5 elements, in the following order:
1. **best_x** (`array_like`): the estimated position of the global minimum.
2. **best_f** (`float`): the value of `objfun` at the minimum.
3. **evaluations** (`integer`): the number of function evaluations.
    4. **msg** (`string`): a message describing the cause of the termination.
5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunnelling phases performed and the
successful ones.
:rtype: `tuple`
The detailed implementation of AMPGO is described in the paper
"Adaptive Memory Programming for Constrained Global Optimization" located here:
http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf
Copyright 2014 <NAME>
"""
if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS:
raise Exception('Invalid local solver selected: %s'%local)
if local in SCIPY_LOCAL_SOLVERS and not SCIPY:
raise Exception('The selected solver %s is not available as there is no scipy installation'%local)
if local in OPENOPT_LOCAL_SOLVERS and not OPENOPT:
raise Exception('The selected solver %s is not available as there is no OpenOpt installation'%local)
x0 = numpy.atleast_1d(x0)
n = len(x0)
if bounds is None:
bounds = [(None, None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
low = [0]*n
up = [0]*n
for i in range(n):
if bounds[i] is None:
l, u = -numpy.inf, numpy.inf
else:
l, u = bounds[i]
if l is None:
low[i] = -numpy.inf
else:
low[i] = l
if u is None:
up[i] = numpy.inf
else:
up[i] = u
if maxfunevals is None:
maxfunevals = max(100, 10*len(x0))
if tabulistsize < 1:
raise Exception('Invalid tabulistsize specified: %s. It should be an integer greater than zero.'%tabulistsize)
if tabustrategy not in ['oldest', 'farthest']:
raise Exception('Invalid tabustrategy specified: %s. It must be one of "oldest" or "farthest"'%tabustrategy)
iprint = 50
if disp is None or disp <= 0:
disp = 0
iprint = -1
low = numpy.asarray(low)
up = numpy.asarray(up)
tabulist = []
best_f = numpy.inf
best_x = x0
global_iter = 0
all_tunnel = success_tunnel = 0
evaluations = 0
if glbtol < 1e-8:
local_tol = glbtol
else:
local_tol = 1e-8
while 1:
if disp > 0:
print('\n')
print('='*72)
print('Starting MINIMIZATION Phase %-3d'%(global_iter+1))
print('='*72)
if local in OPENOPT_LOCAL_SOLVERS:
problem = NLP(objfun, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint)
problem.args = args
results = problem.solve(local)
xf, yf, num_fun = results.xf, results.ff, results.evals['f']
else:
options = {'maxiter': max(1, maxfunevals), 'disp': disp}
if local_opts is not None:
options.update(local_opts)
res = minimize(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol, options=options)
xf, yf, num_fun = res['x'], res['fun'], res['nfev']
maxfunevals -= num_fun
evaluations += num_fun
if yf < best_f:
best_f = yf
best_x = xf
if disp > 0:
print('\n\n ==> Reached local minimum: %s\n'%yf)
if best_f < fmin + glbtol:
if disp > 0:
print('='*72)
return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)
if maxfunevals <= 0:
if disp > 0:
print('='*72)
return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel)
tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
tabulist.append(xf)
i = improve = 0
while i < maxiter and improve == 0:
if disp > 0:
print('-'*72)
print('Starting TUNNELLING Phase (%3d-%3d)'%(global_iter+1, i+1))
print('-'*72)
all_tunnel += 1
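            # Perturb the start point away from the latest local minimum: take a random
            # direction r, scale it by eps2 and the ratio ||xf||/||r||, then clip the
            # result back into the box bounds before the tunnelling search.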
r = numpy.random.uniform(-1.0, 1.0, size=(n, ))
beta = eps2*numpy.linalg.norm(xf)/numpy.linalg.norm(r)
if numpy.abs(beta) < 1e-8:
beta = eps2
x0 = xf + beta*r
x0 = numpy.where(x0 < low, low, x0)
x0 = numpy.where(x0 > up , up , x0)
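            # Aspiration level for the tunnelling objective: slightly below the best
            # objective value found so far, by a margin proportional to eps1.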
aspiration = best_f - eps1*(1.0 + numpy.abs(best_f))
tunnel_args = tuple([objfun, aspiration, tabulist] + list(args))
if local in OPENOPT_LOCAL_SOLVERS:
problem = NLP(tunnel, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint)
problem.args = tunnel_args
results = problem.solve(local)
xf, yf, num_fun = results.xf, results.ff, results.evals['f']
else:
options = {'maxiter': max(1, maxfunevals), 'disp': disp}
if local_opts is not None:
options.update(local_opts)
res = minimize(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=local_tol, options=options)
xf, yf, num_fun = res['x'], res['fun'], res['nfev']
maxfunevals -= num_fun
evaluations += num_fun
yf = inverse_tunnel(xf, yf, aspiration, tabulist)
if yf <= best_f + glbtol:
oldf = best_f
best_f = yf
best_x = xf
improve = 1
success_tunnel += 1
if disp > 0:
print('\n\n ==> Successful tunnelling phase. Reached local minimum: %s < %s\n'%(yf, oldf))
if best_f < fmin + glbtol:
return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)
i += 1
if maxfunevals <= 0:
return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel)
tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
tabulist.append(xf)
if disp > 0:
print('='*72)
global_iter += 1
x0 = xf.copy()
if global_iter >= totaliter:
return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel)
if best_f < fmin + glbtol:
return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)
def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy):
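    # Keep the tabu list at most `tabulistsize` entries long by discarding either the
    # oldest entry or the entry farthest from the new local minimum xf.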
if len(tabulist) < tabulistsize:
return tabulist
if tabustrategy == 'oldest':
tabulist.pop(0)
else:
distance = numpy.sqrt(numpy.sum((tabulist-xf)**2, axis=1))
index = numpy.argmax(distance)
tabulist.pop(index)
return tabulist
def tunnel(x0, *args):
objfun, aspiration, tabulist = args[0:3]
fun_args = ()
if len(args) > 3:
fun_args = tuple(args[3:])
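    # Tabu Tunnelling function: T(x) = (f(x) - aspiration)**2 / prod_j ||x - tabu_j||.
    # It is small when f(x) approaches the aspiration value and blows up near tabu points.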
numerator = (objfun(x0, *fun_args) - aspiration)**2
denominator = 1.0
for tabu in tabulist:
denominator = denominator*numpy.sqrt(numpy.sum((x0 - tabu)**2))
ytf = numerator/denominator
return ytf
def inverse_tunnel(xtf, ytf, aspiration, tabulist):
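    # Recover the raw objective value f(x) from the tunnelling value ytf by inverting
    # T(x) = (f(x) - aspiration)**2 / prod_j ||x - tabu_j||.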
denominator = 1.0
for tabu in tabulist:
denominator = denominator*numpy.sqrt(numpy.sum((xtf - tabu)**2))
numerator = ytf*denominator
yf = aspiration + numpy.sqrt(ytf*denominator)
return yf
if __name__ == '__main__':
import os
import go_benchmark
os.system('cls')
for tests in ['Bird']:
klass = getattr(go_benchmark, tests)()
x0 = klass.generator()
fmin = klass.fglob
bounds = klass.bounds
tolfun = 1e-6
xf, yf, fun_evals, msg, tt = AMPGO(klass.evaluator, x0, args=(), local='L-BFGS-B', bounds=bounds,
maxfunevals=20000, totaliter=2000, maxiter=5, eps1=0.02, eps2=0.1,
tabulistsize=5, tabustrategy='farthest', fmin=fmin, disp=1, glbtol=tolfun)
xb = numpy.asarray(klass.global_optimum)
if xb.ndim == 2:
xb = xb[0, :]
print('\n\n')
print('F_glob :', klass.evaluator(xb))
print('F_best :', yf)
print('X_best :', xf)
print('F_evals:', fun_evals)
print('Message:', msg)
print('Tunnels:', tt)
|
[
"numpy.random.uniform",
"scipy.optimize.minimize",
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"numpy.asarray",
"os.system",
"numpy.where",
"numpy.linalg.norm",
"numpy.atleast_1d",
"numpy.sqrt"
] |
[((4282, 4302), 'numpy.atleast_1d', 'numpy.atleast_1d', (['x0'], {}), '(x0)\n', (4298, 4302), False, 'import numpy\n'), ((5366, 5384), 'numpy.asarray', 'numpy.asarray', (['low'], {}), '(low)\n', (5379, 5384), False, 'import numpy\n'), ((5395, 5412), 'numpy.asarray', 'numpy.asarray', (['up'], {}), '(up)\n', (5408, 5412), False, 'import numpy\n'), ((11382, 11398), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (11391, 11398), False, 'import os\n'), ((10537, 10559), 'numpy.argmax', 'numpy.argmax', (['distance'], {}), '(distance)\n', (10549, 10559), False, 'import numpy\n'), ((11258, 11287), 'numpy.sqrt', 'numpy.sqrt', (['(ytf * denominator)'], {}), '(ytf * denominator)\n', (11268, 11287), False, 'import numpy\n'), ((11950, 11985), 'numpy.asarray', 'numpy.asarray', (['klass.global_optimum'], {}), '(klass.global_optimum)\n', (11963, 11985), False, 'import numpy\n'), ((6357, 6453), 'scipy.optimize.minimize', 'minimize', (['objfun', 'x0'], {'args': 'args', 'method': 'local', 'bounds': 'bounds', 'tol': 'local_tol', 'options': 'options'}), '(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol,\n options=options)\n', (6365, 6453), False, 'from scipy.optimize import minimize\n'), ((7610, 7652), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1.0)', '(1.0)'], {'size': '(n,)'}), '(-1.0, 1.0, size=(n,))\n', (7630, 7652), False, 'import numpy\n'), ((7874, 7904), 'numpy.where', 'numpy.where', (['(x0 < low)', 'low', 'x0'], {}), '(x0 < low, low, x0)\n', (7885, 7904), False, 'import numpy\n'), ((7923, 7951), 'numpy.where', 'numpy.where', (['(x0 > up)', 'up', 'x0'], {}), '(x0 > up, up, x0)\n', (7934, 7951), False, 'import numpy\n'), ((10483, 10522), 'numpy.sum', 'numpy.sum', (['((tabulist - xf) ** 2)'], {'axis': '(1)'}), '((tabulist - xf) ** 2, axis=1)\n', (10492, 10522), False, 'import numpy\n'), ((7701, 7721), 'numpy.linalg.norm', 'numpy.linalg.norm', (['r'], {}), '(r)\n', (7718, 7721), False, 'import numpy\n'), ((7752, 7767), 'numpy.abs', 'numpy.abs', (['beta'], {}), '(beta)\n', (7761, 7767), False, 'import numpy\n'), ((8672, 8776), 'scipy.optimize.minimize', 'minimize', (['tunnel', 'x0'], {'args': 'tunnel_args', 'method': 'local', 'bounds': 'bounds', 'tol': 'local_tol', 'options': 'options'}), '(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=\n local_tol, options=options)\n', (8680, 8776), False, 'from scipy.optimize import minimize\n'), ((10929, 10956), 'numpy.sum', 'numpy.sum', (['((x0 - tabu) ** 2)'], {}), '((x0 - tabu) ** 2)\n', (10938, 10956), False, 'import numpy\n'), ((11166, 11194), 'numpy.sum', 'numpy.sum', (['((xtf - tabu) ** 2)'], {}), '((xtf - tabu) ** 2)\n', (11175, 11194), False, 'import numpy\n'), ((7679, 7700), 'numpy.linalg.norm', 'numpy.linalg.norm', (['xf'], {}), '(xf)\n', (7696, 7700), False, 'import numpy\n'), ((8003, 8020), 'numpy.abs', 'numpy.abs', (['best_f'], {}), '(best_f)\n', (8012, 8020), False, 'import numpy\n')]
|
# @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import sys
import time
import numpy as np
from planar import Polygon
from Controller.AreaMap.MapPoint import MapPoint
from Controller.MoveController.CarModel import CarModel
from .NearestNeighbor import NearestNeighbor
from .SupportPointChain import SupportPointChain
class TrajectoryPlanning:
def __init__(self, areaMap, emergencyStopQueue):
self.areaMap = areaMap
self.emergencyStopQueue = emergencyStopQueue
self.carModel = CarModel()
self.nearestNeighbor = NearestNeighbor()
self.supportPointChain = SupportPointChain()
self.reset()
self.maxDriveableSlope = 3
self.normalMode = True
self.minImageCount = 6
self.imageCount = 0
def reset(self):
self.newestSupportChain = []
self.callculatedNextMove = None
def nextMove(self):
self.imageCount +=1
if self.minImageCount > self.imageCount:
self.callculatedNextMove = {'x': 0, 'y': 0, 'm': 0}
return self.callculatedNextMove
nextMove = self.handleNextMove()
if not self.emergencyStopQueue.empty():
            print('emergency mode')
self.emergencyStopQueue.get()
self.normalMode = False
elif self.normalMode is False and nextMove is not None:
self.normalMode = True
print('reset Mode')
return {'command': 'resetSavety'}
if self.normalMode:
if nextMove is not None:
self.callculatedNextMove = nextMove
return nextMove
self.callculatedNextMove = {'x': 0, 'y': 0, 'm': 0}
return {'x': 0, 'y': 0, 'm': 0}
else:
self.callculatedNextMove = {'x': 0, 'y': 0, 'm': 0}
self.areaMap.reset()
self.imageCount=0
return {'x': 0, 'y': 0, 'm': 0}
def handleNextMove(self):
if not self.areaMap.isBorderAvailable():
# print('no border available')
return None
supportPoints = self.nearestNeighbor.getNearestNeighbor(self.areaMap.left, self.areaMap.right)
supportPointChain = self.supportPointChain.getSupportPointChain(supportPoints, self.areaMap.robotPosition)
self.newestSupportChain = supportPointChain
if len(supportPointChain)<=1:
print('no possible target in drive direction')
return None
nextMove = self.callculateNextTarget(self.areaMap.robotPosition, supportPointChain)
return nextMove
def callculateNextTarget(self,robotPosition, supportPointChain):
nextPoint = supportPointChain[1]
offsetNextPoint = robotPosition.getRelativeOffsetsToPoint(nextPoint[0],nextPoint[1])
if len(supportPointChain) >= 3:
secondPoint = supportPointChain[2]
offsetSecondPoint = robotPosition.getRelativeOffsetsToPoint(secondPoint[0],secondPoint[1])
slope = self.slope(offsetNextPoint, offsetSecondPoint)
if offsetNextPoint[1] < offsetSecondPoint[1]:
slope = -slope
else:
slope = 0
return {'x': offsetNextPoint[1], 'y': -offsetNextPoint[0], 'm': slope/2}
def slope(self, point1, point2):
m = (point2[0]-point1[0])/(point2[1]-point1[1])
m= np.clip(m, -self.maxDriveableSlope,self.maxDriveableSlope)
return m
|
[
"Controller.MoveController.CarModel.CarModel",
"numpy.clip"
] |
[((1120, 1130), 'Controller.MoveController.CarModel.CarModel', 'CarModel', ([], {}), '()\n', (1128, 1130), False, 'from Controller.MoveController.CarModel import CarModel\n'), ((3953, 4012), 'numpy.clip', 'np.clip', (['m', '(-self.maxDriveableSlope)', 'self.maxDriveableSlope'], {}), '(m, -self.maxDriveableSlope, self.maxDriveableSlope)\n', (3960, 4012), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 27 00:52:19 2018
@author: xavier.qiu
"""
from common.load import *
from common.pd_util import *
from common.preprocess import *
from common.util import *
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import gc
import pickle
from tqdm import tqdm
class DataSet:
def __init__(self, embedding='glove', voc_len=105000, max_ques_len=72, cache=True):
"""
:param embedding:
"""
self.config = load_config()
self.embedding_type = embedding
self.voc_len = voc_len
self.max_ques_len = max_ques_len
if cache and os.path.exists(os.path.join(self.config["data_dir"], "y_train.pickle")):
with open(os.path.join(self.config["data_dir"], "x_train.pickle"), 'rb') as handle:
self.x_train = pickle.load(handle)
with open(os.path.join(self.config["data_dir"], "x_test.pickle"), 'rb') as handle:
self.x_test = pickle.load(handle)
with open(os.path.join(self.config["data_dir"], "y_train.pickle"), 'rb') as handle:
self.y_train = pickle.load(handle)
with open(os.path.join(self.config["data_dir"], "embedding_matrix.pickle"), 'rb') as handle:
self.embedding_matrix = pickle.load(handle)
return
print("Loading Train df")
self.train_df = pd.read_csv(os.path.join(self.config["data_dir"], "train.csv"))
print("Loading Test df")
self.test_df = pd.read_csv(os.path.join(self.config["data_dir"], "test.csv"))
self.preprocess("train")
self.preprocess("test")
self.word_index = None
# convert question_text to question_ids_list
self.word2indices()
print("Loading Embedding - {}".format(embedding))
self.embedding_index = load_embedding(self.embedding_type, word_index=self.word_index, voc_len = self.voc_len)
if self.embedding_type != "mix":
self.embedding_matrix = self.make_embed_matrix(self.embedding_index, self.word_index, self.voc_len)
else:
self.embedding_matrix = self.embedding_index
del self.word_index
del self.embedding_index
send_msg("Load Done")
gc.collect()
with open(os.path.join(self.config["data_dir"], "x_train.pickle"), 'wb') as handle:
pickle.dump(self.x_train, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.config["data_dir"], "x_test.pickle"), 'wb') as handle:
pickle.dump(self.x_test, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.config["data_dir"], "y_train.pickle"), 'wb') as handle:
pickle.dump(self.y_train, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.config["data_dir"], "embedding_matrix.pickle"), 'wb') as handle:
pickle.dump(self.embedding_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
def make_embed_matrix(self, embeddings_index, word_index, len_voc):
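        # Initialise the matrix with random vectors drawn from a normal distribution whose
        # mean/std match the pretrained embeddings, so out-of-vocabulary words get plausible
        # values; words present in the embedding index are then overwritten below.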
all_embs = np.stack(embeddings_index.values())
emb_mean, emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = word_index
embedding_matrix = np.random.normal(emb_mean, emb_std, (len_voc, embed_size))
for word, i in tqdm(word_index.items()):
if i >= len_voc:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
def word2indices(self):
t = Tokenizer(num_words=self.voc_len, filters='')
x_train = self.train_df["treated_question"].fillna("_na_").values
x_test = self.test_df["treated_question"].fillna("_na_").values
t.fit_on_texts(list(x_train))
self.word_index = t.word_index
# Tokenize the sentences
x_train = t.texts_to_sequences(x_train)
x_test = t.texts_to_sequences(x_test)
# Pad the sentences
x_train = pad_sequences(x_train, maxlen=self.max_ques_len)
x_test = pad_sequences(x_test, maxlen=self.max_ques_len)
# Get the target values
y_train = self.train_df['target'].values
self.x_train = x_train
self.x_test = x_test
self.y_train = y_train
def preprocess(self, data_set, filters=["punct", "contraction", "special characters", "misspell"]):
"""
:param filters:
:param data_set:
:return:
"""
if data_set == "train":
df = self.train_df
else:
df = self.test_df
print("Pre-processing {}".format(data_set))
df["treated_question"] = df["question_text"]
if "numbers" in filters:
print("Clean number ing ... ")
df["treated_question"] = df["treated_question"].apply(lambda x: deal_with_numbers(x))
if "punct" in filters:
print("Clean punct ing ... ")
df['treated_question'] = df['treated_question'].apply(lambda x: deal_with_punct(x))
if "lower" in filters:
print("Lowering ... ")
df['treated_question'] = df['treated_question'].apply(lambda x: x.lower())
if "special characters" in filters:
print("Clean special chars ing ... ")
df['treated_question'] = df['treated_question'].apply(lambda x: deal_with_special_characters(x))
if "misspell" in filters:
print("Clean misspell ing ...")
df['treated_question'] = df['treated_question'].apply(lambda x: deal_with_misspell(x))
|
[
"pickle.dump",
"keras.preprocessing.sequence.pad_sequences",
"gc.collect",
"keras.preprocessing.text.Tokenizer",
"pickle.load",
"numpy.random.normal"
] |
[((2340, 2352), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2350, 2352), False, 'import gc\n'), ((3345, 3403), 'numpy.random.normal', 'np.random.normal', (['emb_mean', 'emb_std', '(len_voc, embed_size)'], {}), '(emb_mean, emb_std, (len_voc, embed_size))\n', (3361, 3403), True, 'import numpy as np\n'), ((3741, 3786), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'self.voc_len', 'filters': '""""""'}), "(num_words=self.voc_len, filters='')\n", (3750, 3786), False, 'from keras.preprocessing.text import Tokenizer\n'), ((4188, 4236), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train'], {'maxlen': 'self.max_ques_len'}), '(x_train, maxlen=self.max_ques_len)\n', (4201, 4236), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4254, 4301), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test'], {'maxlen': 'self.max_ques_len'}), '(x_test, maxlen=self.max_ques_len)\n', (4267, 4301), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2459, 2526), 'pickle.dump', 'pickle.dump', (['self.x_train', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.x_train, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (2470, 2526), False, 'import pickle\n'), ((2630, 2696), 'pickle.dump', 'pickle.dump', (['self.x_test', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.x_test, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (2641, 2696), False, 'import pickle\n'), ((2801, 2868), 'pickle.dump', 'pickle.dump', (['self.y_train', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.y_train, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (2812, 2868), False, 'import pickle\n'), ((2982, 3058), 'pickle.dump', 'pickle.dump', (['self.embedding_matrix', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.embedding_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (2993, 3058), False, 'import pickle\n'), ((920, 939), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (931, 939), False, 'import pickle\n'), ((1065, 1084), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1076, 1084), False, 'import pickle\n'), ((1212, 1231), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1223, 1231), False, 'import pickle\n'), ((1377, 1396), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1388, 1396), False, 'import pickle\n')]
|
import numpy as np
# Sizes relevant to default camera frame
ASPECT_RATIO = 16.0 / 9.0
FRAME_HEIGHT = 8.0
FRAME_WIDTH = FRAME_HEIGHT * ASPECT_RATIO
FRAME_Y_RADIUS = FRAME_HEIGHT / 2
FRAME_X_RADIUS = FRAME_WIDTH / 2
DEFAULT_PIXEL_HEIGHT = 1080
DEFAULT_PIXEL_WIDTH = 1920
DEFAULT_FRAME_RATE = 30
SMALL_BUFF = 0.1
MED_SMALL_BUFF = 0.25
MED_LARGE_BUFF = 0.5
LARGE_BUFF = 1
DEFAULT_MOBJECT_TO_EDGE_BUFFER = MED_LARGE_BUFF
DEFAULT_MOBJECT_TO_MOBJECT_BUFFER = MED_SMALL_BUFF
# All in seconds
DEFAULT_POINTWISE_FUNCTION_RUN_TIME = 3.0
DEFAULT_WAIT_TIME = 1.0
ORIGIN = np.array((0., 0., 0.))
UP = np.array((0., 1., 0.))
DOWN = np.array((0., -1., 0.))
RIGHT = np.array((1., 0., 0.))
LEFT = np.array((-1., 0., 0.))
IN = np.array((0., 0., -1.))
OUT = np.array((0., 0., 1.))
X_AXIS = np.array((1., 0., 0.))
Y_AXIS = np.array((0., 1., 0.))
Z_AXIS = np.array((0., 0., 1.))
# Useful abbreviations for diagonals
UL = UP + LEFT
UR = UP + RIGHT
DL = DOWN + LEFT
DR = DOWN + RIGHT
TOP = FRAME_Y_RADIUS * UP
BOTTOM = FRAME_Y_RADIUS * DOWN
LEFT_SIDE = FRAME_X_RADIUS * LEFT
RIGHT_SIDE = FRAME_X_RADIUS * RIGHT
PI = np.pi
TAU = 2 * PI
DEGREES = TAU / 360
FFMPEG_BIN = "ffmpeg"
JOINT_TYPE_MAP = {
"auto": 0,
"round": 1,
"bevel": 2,
"miter": 3,
}
# Related to Text
START_X = 30
START_Y = 20
NORMAL = "NORMAL"
ITALIC = "ITALIC"
OBLIQUE = "OBLIQUE"
BOLD = "BOLD"
DEFAULT_STROKE_WIDTH = 4
# Colors
COLOR_MAP = {
"BLUE_E": "#1C758A",
"BLUE_D": "#29ABCA",
"BLUE_C": "#58C4DD",
"BLUE_B": "#9CDCEB",
"BLUE_A": "#C7E9F1",
"TEAL_E": "#49A88F",
"TEAL_D": "#55C1A7",
"TEAL_C": "#5CD0B3",
"TEAL_B": "#76DDC0",
"TEAL_A": "#ACEAD7",
"GREEN_E": "#699C52",
"GREEN_D": "#77B05D",
"GREEN_C": "#83C167",
"GREEN_B": "#A6CF8C",
"GREEN_A": "#C9E2AE",
"YELLOW_E": "#E8C11C",
"YELLOW_D": "#F4D345",
"YELLOW_C": "#FFFF00",
"YELLOW_B": "#FFEA94",
"YELLOW_A": "#FFF1B6",
"GOLD_E": "#C78D46",
"GOLD_D": "#E1A158",
"GOLD_C": "#F0AC5F",
"GOLD_B": "#F9B775",
"GOLD_A": "#F7C797",
"RED_E": "#CF5044",
"RED_D": "#E65A4C",
"RED_C": "#FC6255",
"RED_B": "#FF8080",
"RED_A": "#F7A1A3",
"MAROON_E": "#94424F",
"MAROON_D": "#A24D61",
"MAROON_C": "#C55F73",
"MAROON_B": "#EC92AB",
"MAROON_A": "#ECABC1",
"PURPLE_E": "#644172",
"PURPLE_D": "#715582",
"PURPLE_C": "#9A72AC",
"PURPLE_B": "#B189C6",
"PURPLE_A": "#CAA3E8",
"GREY_E": "#222222",
"GREY_D": "#444444",
"GREY_C": "#888888",
"GREY_B": "#BBBBBB",
"GREY_A": "#DDDDDD",
"WHITE": "#FFFFFF",
"BLACK": "#000000",
"GREY_BROWN": "#736357",
"DARK_BROWN": "#8B4513",
"LIGHT_BROWN": "#CD853F",
"PINK": "#D147BD",
"LIGHT_PINK": "#DC75CD",
"GREEN_SCREEN": "#00FF00",
"ORANGE": "#FF862F",
}
PALETTE = list(COLOR_MAP.values())
locals().update(COLOR_MAP)
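# Expose each "_C" shade under its base name as well, e.g. BLUE becomes an alias for BLUE_C.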
for name in [s for s in list(COLOR_MAP.keys()) if s.endswith("_C")]:
locals()[name.replace("_C", "")] = locals()[name]
|
[
"numpy.array"
] |
[((567, 592), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (575, 592), True, 'import numpy as np\n'), ((595, 620), 'numpy.array', 'np.array', (['(0.0, 1.0, 0.0)'], {}), '((0.0, 1.0, 0.0))\n', (603, 620), True, 'import numpy as np\n'), ((625, 651), 'numpy.array', 'np.array', (['(0.0, -1.0, 0.0)'], {}), '((0.0, -1.0, 0.0))\n', (633, 651), True, 'import numpy as np\n'), ((657, 682), 'numpy.array', 'np.array', (['(1.0, 0.0, 0.0)'], {}), '((1.0, 0.0, 0.0))\n', (665, 682), True, 'import numpy as np\n'), ((687, 713), 'numpy.array', 'np.array', (['(-1.0, 0.0, 0.0)'], {}), '((-1.0, 0.0, 0.0))\n', (695, 713), True, 'import numpy as np\n'), ((716, 742), 'numpy.array', 'np.array', (['(0.0, 0.0, -1.0)'], {}), '((0.0, 0.0, -1.0))\n', (724, 742), True, 'import numpy as np\n'), ((746, 771), 'numpy.array', 'np.array', (['(0.0, 0.0, 1.0)'], {}), '((0.0, 0.0, 1.0))\n', (754, 771), True, 'import numpy as np\n'), ((778, 803), 'numpy.array', 'np.array', (['(1.0, 0.0, 0.0)'], {}), '((1.0, 0.0, 0.0))\n', (786, 803), True, 'import numpy as np\n'), ((810, 835), 'numpy.array', 'np.array', (['(0.0, 1.0, 0.0)'], {}), '((0.0, 1.0, 0.0))\n', (818, 835), True, 'import numpy as np\n'), ((842, 867), 'numpy.array', 'np.array', (['(0.0, 0.0, 1.0)'], {}), '((0.0, 0.0, 1.0))\n', (850, 867), True, 'import numpy as np\n')]
|
import numpy as np
from net import Net
from functional import *
from os import remove
temp_path = "./model/param"
def save_model(net: Net, name: str):
    '''
    Save the network's information to a file.
    parameters
    ----------
    net : the neural network instance
    name : file name; the model is saved under this name inside the model folder
    return
    ------
    1 : indicates the save succeeded
    '''
path = "./model/{}".format(name)
args = net.args
layer_info = "layer info:\n"
for layer in args:
layer_info += "{} {}\n".format(*layer)
criterion = "criterion : {}\n".format("ce" if net.criterion ==
ce_loss else "mse")
regualarize = "regularize : " + ("{} with alpha={}\n".format(
net.regularize, net.alpha) if net.regularize else "None\n")
with open(path, "w") as f:
f.write(layer_info)
f.write(criterion)
f.write(regualarize)
for param in net.parameters():
np.savetxt(temp_path, param)
with open(temp_path, "r") as fa:
f.write(fa.read())
remove(temp_path)
return 1
def load_model(name: str):
    '''
    Given a file name, read the file and build the neural network model it describes.
    return
    ------
    net : the network described by the model file
    '''
path = "./model/{}".format(name)
parameters = []
with open(path, "r") as f:
        f.readline()  # skip the first (header) line
layer_info = []
while True:
s = f.readline()[:-1]
if "criterion" in s:
break
n, act = s.split()
layer_info.append((eval(n), act))
criterion = s.split(" : ")[-1]
s = f.readline()
if "alpha" in s: # 有正则化设置
regualarize = s[:2]
alpha = eval(s.split("=")[-1])
else:
regualarize = None
alpha = 0.01
net = Net(
*layer_info,
criterion=criterion,
regularize=regualarize,
alpha=alpha,
)
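        # Read the parameters back layer by layer: each weight matrix W was saved by
        # np.savetxt as `i` lines of `o` values, followed by a single line for the bias b.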
for l in range(len(layer_info) - 1):
i, o = layer_info[l][0], layer_info[l + 1][0]
str_W = "".join([f.readline() for l in range(i)])
str_b = f.readline()
with open(temp_path, "w") as fw:
fw.writelines(str_W)
W = np.loadtxt(temp_path).reshape(i, o)
with open(temp_path, "w") as fb:
fb.writelines(str_b)
b = np.loadtxt(temp_path).reshape(1, o)
parameters.extend((W, b))
net.reset_net(parameters)
remove(temp_path)
return net
def random_init(net: Net, path="./data/random.npy"):
    '''Initialize the parameters with the specified array.'''
n_layer = net.ct_layer
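    # Slice one flattened weight matrix per layer pair out of a pre-generated random
    # array; biases are initialised to zero.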
n_weight_list = [
n_layer[i] * n_layer[i + 1] for i in range(len(n_layer) - 1)
]
parameters = []
x = np.load(path)[:sum(n_weight_list)]
ptr = 0
for i in range(len(n_layer) - 1):
W = x[ptr:ptr + n_weight_list[i]].reshape((n_layer[i], n_layer[i + 1]))
b = np.zeros((1, n_layer[i + 1]))
parameters.extend((W, b))
ptr += n_weight_list[i]
net.reset_net(parameters, net.xavier, net.he)
return net
|
[
"os.remove",
"numpy.load",
"numpy.savetxt",
"numpy.zeros",
"net.Net",
"numpy.loadtxt"
] |
[((1013, 1030), 'os.remove', 'remove', (['temp_path'], {}), '(temp_path)\n', (1019, 1030), False, 'from os import remove\n'), ((2430, 2447), 'os.remove', 'remove', (['temp_path'], {}), '(temp_path)\n', (2436, 2447), False, 'from os import remove\n'), ((1754, 1828), 'net.Net', 'Net', (['*layer_info'], {'criterion': 'criterion', 'regularize': 'regualarize', 'alpha': 'alpha'}), '(*layer_info, criterion=criterion, regularize=regualarize, alpha=alpha)\n', (1757, 1828), False, 'from net import Net\n'), ((2692, 2705), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (2699, 2705), True, 'import numpy as np\n'), ((2869, 2898), 'numpy.zeros', 'np.zeros', (['(1, n_layer[i + 1])'], {}), '((1, n_layer[i + 1]))\n', (2877, 2898), True, 'import numpy as np\n'), ((900, 928), 'numpy.savetxt', 'np.savetxt', (['temp_path', 'param'], {}), '(temp_path, param)\n', (910, 928), True, 'import numpy as np\n'), ((2186, 2207), 'numpy.loadtxt', 'np.loadtxt', (['temp_path'], {}), '(temp_path)\n', (2196, 2207), True, 'import numpy as np\n'), ((2320, 2341), 'numpy.loadtxt', 'np.loadtxt', (['temp_path'], {}), '(temp_path)\n', (2330, 2341), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
class MinusOpTest(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self):
self.type = "minus"
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'Y': np.random.random((32, 84)).astype("float32")
}
self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}
class MinusGradTest(GradientChecker):
def test_left(self):
op = create_op("minus")
inputs = {
"X": np.random.random((10, 10)).astype("float32"),
"Y": np.random.random((10, 10)).astype("float32")
}
self.check_grad(op, inputs, ["X", 'Y'], "Out")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.random.random",
"gradient_checker.create_op"
] |
[((816, 831), 'unittest.main', 'unittest.main', ([], {}), '()\n', (829, 831), False, 'import unittest\n'), ((555, 573), 'gradient_checker.create_op', 'create_op', (['"""minus"""'], {}), "('minus')\n", (564, 573), False, 'from gradient_checker import GradientChecker, create_op\n'), ((289, 315), 'numpy.random.random', 'np.random.random', (['(32, 84)'], {}), '((32, 84))\n', (305, 315), True, 'import numpy as np\n'), ((352, 378), 'numpy.random.random', 'np.random.random', (['(32, 84)'], {}), '((32, 84))\n', (368, 378), True, 'import numpy as np\n'), ((610, 636), 'numpy.random.random', 'np.random.random', (['(10, 10)'], {}), '((10, 10))\n', (626, 636), True, 'import numpy as np\n'), ((673, 699), 'numpy.random.random', 'np.random.random', (['(10, 10)'], {}), '((10, 10))\n', (689, 699), True, 'import numpy as np\n')]
|
"""Seek behaviour in Pygame"""
import pygame
import numpy as np
import math
WIDTH,HEIGHT = 700,400
screen = pygame.display.set_mode((WIDTH,HEIGHT))
class Seeker():
def __init__(self,x,y):
super().__init__()
self.pos=np.array([x,y])
self.vel=np.array([0,0])
self.acc=np.array([0,0])
self.max_speed=0.1
def Draw(self):
#pygame.draw.polygon(screen, (0,255,255), ((self.pos),(self.pos+(8,-20)),(self.pos+(18,0))))
pygame.draw.circle(screen, (0,255,255), self.pos, 10)
def Update(self):
self.vel = np.add(self.vel, self.acc)
self.pos = np.subtract(self.pos, self.vel)
self.acc = np.multiply(self.acc,[0,0])
def Apply(self,force):
self.acc = np.add(self.acc,force)
def Seek(self,target):
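        # Reynolds-style steering: the desired velocity is normalised, scaled to max_speed,
        # and the steering force is (desired - current velocity). Here desired points from
        # the target towards the agent, but Update() subtracts vel from pos, so the two
        # sign flips cancel and the agent still moves towards the target.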
desired_vel = self.pos - target
desired_vel = desired_vel/math.sqrt(desired_vel[0]*desired_vel[0]+desired_vel[1]*desired_vel[1])
desired_vel = desired_vel * self.max_speed
steering_vel = desired_vel - self.vel
self.Apply(steering_vel)
def Snitch(pos):
pygame.draw.circle(screen, (255,215,0), pos,10)
pygame.init()
agents=[]
for i in range(20):
agents.append(Seeker(i*100,i*100))
running = True
while running:
screen.fill((0,0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
#Get target position
target_pos= np.array(pygame.mouse.get_pos())
Snitch(target_pos)
for agent in agents:
agent.Seek(target_pos)
agent.Update()
agent.Draw()
pygame.display.update()
#pygame.time.Clock().tick(30)
|
[
"numpy.multiply",
"pygame.draw.circle",
"numpy.subtract",
"math.sqrt",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.mouse.get_pos",
"pygame.display.update",
"numpy.array",
"numpy.add"
] |
[((117, 157), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (140, 157), False, 'import pygame\n'), ((1196, 1209), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1207, 1209), False, 'import pygame\n'), ((1145, 1195), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(255, 215, 0)', 'pos', '(10)'], {}), '(screen, (255, 215, 0), pos, 10)\n', (1163, 1195), False, 'import pygame\n'), ((1362, 1380), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1378, 1380), False, 'import pygame\n'), ((1673, 1696), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1694, 1696), False, 'import pygame\n'), ((251, 267), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (259, 267), True, 'import numpy as np\n'), ((285, 301), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (293, 301), True, 'import numpy as np\n'), ((319, 335), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (327, 335), True, 'import numpy as np\n'), ((497, 552), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 255, 255)', 'self.pos', '(10)'], {}), '(screen, (0, 255, 255), self.pos, 10)\n', (515, 552), False, 'import pygame\n'), ((600, 626), 'numpy.add', 'np.add', (['self.vel', 'self.acc'], {}), '(self.vel, self.acc)\n', (606, 626), True, 'import numpy as np\n'), ((647, 678), 'numpy.subtract', 'np.subtract', (['self.pos', 'self.vel'], {}), '(self.pos, self.vel)\n', (658, 678), True, 'import numpy as np\n'), ((699, 728), 'numpy.multiply', 'np.multiply', (['self.acc', '[0, 0]'], {}), '(self.acc, [0, 0])\n', (710, 728), True, 'import numpy as np\n'), ((777, 800), 'numpy.add', 'np.add', (['self.acc', 'force'], {}), '(self.acc, force)\n', (783, 800), True, 'import numpy as np\n'), ((1504, 1526), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1524, 1526), False, 'import pygame\n'), ((906, 982), 'math.sqrt', 'math.sqrt', (['(desired_vel[0] * desired_vel[0] + desired_vel[1] * desired_vel[1])'], {}), '(desired_vel[0] * desired_vel[0] + desired_vel[1] * desired_vel[1])\n', (915, 982), False, 'import math\n')]
|
import os
import collections
import pdb
import gym
import gym.envs.mujoco
import time
import csv
import json
import shutil
import numpy as np
import random
from . import ant_env
from . import proprioceptive_humanoid_env
from . import maze_ant
from . import maze_humanoid
# Wrapper that records everything we might care about in our environment
# All rewards (clipped and raw), states, actions, time and steps
# Copied originally from https://github.com/openai/baselines/blob/master/baselines/bench/monitor.py
class SmartMonitor(gym.Wrapper):
def __init__(self, env, log_dir, rank, opt, verbose=True, allow_early_resets=False):
super(SmartMonitor, self).__init__(env)
self.tstart = time.time()
self.episode_count = -1
# Get the rewards we want to log
# Got to be a better way to get the names of the subpart rewards, but it seems to be hardcoded in the mujoco envs
self.reward_list = ['reward_env']
if opt['model']['mode'] in ['baseline', 'baseline_reverse', 'baselinewtheta', 'baseline_lowlevel']:
self.baseline = True
elif opt['model']['mode'] in ['phasesimple', 'phasewstate', 'phasewtheta', 'phase_lowlevel']:
self.baseline = False
self.reward_list.append('reward_exp')
if opt['model']['mode'] != 'phase_lowlevel':
self.reward_list.append('reward_move')
if opt['env']['state_cycle_weight'] > 0 or opt['env']['action_cycle_weight'] > 0:
self.reward_list.append('reward_cycle')
self.reward_list.append('reward_cycle_s')
self.reward_list.append('reward_cycle_a')
elif opt['model']['mode'] == 'interpolate':
self.baseline = False
self.reward_list.append('reward_interpolate')
elif opt['model']['mode'] == 'cyclic':
self.baseline = False
self.reward_list.append('reward_cycle')
self.reward_list.append('reward_thresh')
elif opt['model']['mode'] in ['hierarchical', 'hierarchical_many']:
self.baseline = True
self.reward_list.append('reward_velocity')
self.reward_list.append('reward_goal')
elif opt['model']['mode'] in [ 'maze_baseline', 'maze_baseline_wphase']:
self.baseline = True
self.reward_list.append('reward_velocity')
self.reward_list.append('reward_goal')
else:
raise NotImplementedError
# This is currently hardcoded to Mujoco envs
if isinstance(env.unwrapped, ant_env.BaseAntEnv) or isinstance(env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv):
self.reward_list += ['reward_forward', 'reward_ctrl', 'reward_contact', 'reward_survive']
elif isinstance(env.unwrapped, gym.envs.mujoco.AntEnv):
self.reward_list += ['reward_forward', 'reward_ctrl', 'reward_contact', 'reward_survive']
else:
raise NotImplementedError
# Data structure that holds all the values we want to log
self.episode_struct = collections.OrderedDict()
all_keys = self.reward_list + ['obs', 'action', 'env_count', 'episode_count']
if isinstance(env.unwrapped, ant_env.BaseAntEnv) or isinstance(env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv) or isinstance(env.unwrapped, gym.envs.mujoco.MujocoEnv):
all_keys += ['state']
# Log the distances
if opt['model']['mode'] in ['hierarchical', 'hierarchical_many', 'maze_baseline', 'maze_baseline_wphase']:
if isinstance(env.unwrapped, maze_humanoid.ProprioceptiveHumanoidMazeEnv) or isinstance(env.unwrapped, maze_ant.AntMazeEnv):
all_keys += ['goal_distance', 'goal_distance_radius']
for key in all_keys:
self.episode_struct[key] = []
# Create and initialize our csv files
# File to store entire episode information (rather than every single step)
# Prints total reward (for all rewards), overall obs and state displacements, episode length, and episode time
episode_filename = os.path.join(log_dir, str(rank) + '.Episode.Monitor.csv')
self.ep_f = open(episode_filename, "wt")
self.ep_f.write('# Episode Logging %s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id, 'mode': opt['model']['mode'], 'name': opt['logs']['exp_name']}))
ep_fields = self.reward_list + ['delta_obs', 'mean_action', 'episode_len', 'episode_dt', 'episode_count']
if isinstance(env.unwrapped, ant_env.BaseAntEnv) or isinstance(env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv) or isinstance(env.unwrapped, gym.envs.mujoco.MujocoEnv):
ep_fields += ['delta_state']
if opt['model']['mode'] in ['hierarchical', 'hierarchical_many', 'maze_baseline', 'maze_baseline_wphase']:
if isinstance(env.unwrapped, maze_humanoid.ProprioceptiveHumanoidMazeEnv) or isinstance(env.unwrapped, maze_ant.AntMazeEnv):
ep_fields += ['goal_distance', 'goal_distance_radius']
self.ep_logger = csv.DictWriter(self.ep_f, fieldnames=ep_fields)
self.ep_logger.writeheader()
self.ep_f.flush()
# If in super verbose mode
if verbose:
# File to store every step
# Prints everything in episode_struct plus episode count
step_filename = os.path.join(log_dir, str(rank) + '.Step.Monitor.csv')
self.st_f = open(step_filename, "wt")
self.st_f.write('# Episode Logging %s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id, 'mode': opt['model']['mode'], 'name': opt['logs']['exp_name']}))
st_fields = list(self.episode_struct.keys())
self.st_logger = csv.DictWriter(self.st_f, fieldnames=st_fields)
self.st_logger.writeheader()
self.st_f.flush()
else:
self.st_f = None
self.verbose = verbose
self.rank = rank
self.opt = opt
self.log_dir = log_dir
# Other bookkeeping
self.allow_early_resets = allow_early_resets
self.needs_reset = True
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
# Reset environment, record initial values
def reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
# Reset all the values in self.episode_struct
for key in self.episode_struct:
self.episode_struct[key] = []
# Update episode count
self.episode_count += 1
# Update values and return
obs = self.env.reset(**kwargs)
self.record_info(obs, 0)
self.needs_reset = False
return obs
# Take a step, update all the values
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
# Do step
obs, rew, done, info = self.env.step(action)
# Record new info
self.record_info(obs, rew, action, info)
# If done with episode, get summary info for episode and dump values to episode and step files
if done:
self.needs_reset = True
# For rewards, get sums
epinfo = {}
for key in self.reward_list:
reward_val = sum(self.episode_struct[key])
epinfo[key] = reward_val
# For obs and state, get delta change
epinfo['delta_obs'] = self.episode_struct['obs'][-1] - self.episode_struct['obs'][0]
if 'state' in self.episode_struct:
epinfo['delta_state'] = self.episode_struct['state'][-1] - self.episode_struct['state'][0]
# For action, get average value
epinfo['mean_action'] = np.mean(self.episode_struct['action'], axis=0)
# Update episode_len, episode_dt and episode_count
epinfo['episode_len'] = len(self.episode_struct['env_count'])
epinfo['episode_dt'] = round(time.time() - self.tstart, 6)
epinfo['episode_count'] = self.episode_count
# Update goal distances
if 'goal_distance' in self.episode_struct:
epinfo['goal_distance'] = self.episode_struct['goal_distance'][-1]
epinfo['goal_distance_radius'] = self.episode_struct['goal_distance_radius'][-1]
elif 'key_distance' in self.episode_struct:
epinfo['key_distance'] = self.episode_struct['key_distance'][-1]
epinfo['key_distance_radius'] = self.episode_struct['key_distance_radius'][-1]
epinfo['lock_distance'] = self.episode_struct['lock_distance'][-1]
epinfo['lock_distance_radius'] = self.episode_struct['lock_distance_radius'][-1]
# Do string conversion
for k in epinfo:
epinfo[k] = str(epinfo[k]).replace('\n', '')
# Update episode file
if self.ep_logger:
self.ep_logger.writerow(epinfo)
self.ep_f.flush()
# If in super verbose mode
if self.verbose:
# Make and update a temp step file with just the last episode (and only rank 0, and only every 100)
if self.rank == 0: #and self.episode_count % 100 == 0:
# Setup temp file
tmp_step_filename = os.path.join(self.log_dir, 'Tmp.Last.Step.Monitor.csv')
tmp_f = open(tmp_step_filename, "wt")
tmp_f.write('# Episode Logging %s\n'%json.dumps({"t_start": self.tstart, 'env_id' : self.env.spec and self.env.spec.id, 'mode': self.opt['model']['mode'], 'name': self.opt['logs']['exp_name']}))
st_fields = list(self.episode_struct.keys())
tmp_logger = csv.DictWriter(tmp_f, fieldnames=st_fields)
tmp_logger.writeheader()
tmp_f.flush()
else:
tmp_f = None
# Update step file
assert(self.episode_struct['env_count'][-1]+1 == len(self.episode_struct['env_count']))
for step in range(len(self.episode_struct['env_count'])):
stepinfo = {}
for key in self.episode_struct:
stepinfo[key] = self.episode_struct[key][step]
# Do string conversion
for k in stepinfo:
stepinfo[k] = str(stepinfo[k]).replace('\n', '')
# Update loggers
self.st_logger.writerow(stepinfo)
if tmp_f is not None:
tmp_logger.writerow(stepinfo)
self.st_f.flush()
# Write tmp file and close, copy tmp to last
if tmp_f is not None:
tmp_f.flush()
tmp_f.close()
# Copy tmp to last
last_step_filename = os.path.join(self.log_dir, 'Last.Step.Monitor.csv')
shutil.copyfile(tmp_step_filename, last_step_filename)
# Update info
info['episode'] = epinfo
self.total_steps += 1
return (obs, rew, done, info)
# Record step info
def record_info(self, obs, rew, action=None, info=None):
# Update all of our values
# Reward values
for key in self.reward_list:
# If reset, all 0
if info is None:
self.episode_struct[key].append(0)
else:
# For baseline, reward_env is reward
if key == 'reward_env' and self.baseline:
self.episode_struct[key].append(rew)
else:
self.episode_struct[key].append(info[key])
# Observation values
self.episode_struct['obs'].append(obs)
# State values, right now just Mujoco
if isinstance(self.env.unwrapped, ant_env.BaseAntEnv) or isinstance(self.env.unwrapped, ant_env.BaseAntLowGearEnv) or isinstance(self.env.unwrapped, proprioceptive_humanoid_env.BaseProprioceptiveHumanoidEnv) or isinstance(self.env.unwrapped, gym.envs.mujoco.MujocoEnv):
state = self.env.unwrapped.state_vector()
self.episode_struct['state'].append(state)
# Update actions
if action is None:
action = np.zeros(self.env.action_space.shape)
self.episode_struct['action'].append(action)
# Update step and episode counts
env_count = self.env._elapsed_steps
self.episode_struct['env_count'].append(env_count)
self.episode_struct['episode_count'].append(self.episode_count)
# Update distances
if 'goal_distance' in self.episode_struct:
if info is None:
self.episode_struct['goal_distance'].append(0)
self.episode_struct['goal_distance_radius'].append(0)
else:
self.episode_struct['goal_distance'].append(info['goal_distance'])
self.episode_struct['goal_distance_radius'].append(info['goal_distance_radius'])
# Close file handles
def close(self):
if self.ep_f is not None:
self.ep_f.close()
if self.st_f is not None:
self.st_f.close()
# Get total number of steps
def get_total_steps(self):
return self.total_steps
|
[
"numpy.zeros",
"json.dumps",
"time.time",
"numpy.mean",
"collections.OrderedDict",
"shutil.copyfile",
"os.path.join",
"csv.DictWriter"
] |
[((702, 713), 'time.time', 'time.time', ([], {}), '()\n', (711, 713), False, 'import time\n'), ((3162, 3187), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (3185, 3187), False, 'import collections\n'), ((5319, 5366), 'csv.DictWriter', 'csv.DictWriter', (['self.ep_f'], {'fieldnames': 'ep_fields'}), '(self.ep_f, fieldnames=ep_fields)\n', (5333, 5366), False, 'import csv\n'), ((6016, 6063), 'csv.DictWriter', 'csv.DictWriter', (['self.st_f'], {'fieldnames': 'st_fields'}), '(self.st_f, fieldnames=st_fields)\n', (6030, 6063), False, 'import csv\n'), ((8452, 8498), 'numpy.mean', 'np.mean', (["self.episode_struct['action']"], {'axis': '(0)'}), "(self.episode_struct['action'], axis=0)\n", (8459, 8498), True, 'import numpy as np\n'), ((13095, 13132), 'numpy.zeros', 'np.zeros', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (13103, 13132), True, 'import numpy as np\n'), ((4419, 4558), 'json.dumps', 'json.dumps', (["{'t_start': self.tstart, 'env_id': env.spec and env.spec.id, 'mode': opt[\n 'model']['mode'], 'name': opt['logs']['exp_name']}"], {}), "({'t_start': self.tstart, 'env_id': env.spec and env.spec.id,\n 'mode': opt['model']['mode'], 'name': opt['logs']['exp_name']})\n", (4429, 4558), False, 'import json\n'), ((5792, 5931), 'json.dumps', 'json.dumps', (["{'t_start': self.tstart, 'env_id': env.spec and env.spec.id, 'mode': opt[\n 'model']['mode'], 'name': opt['logs']['exp_name']}"], {}), "({'t_start': self.tstart, 'env_id': env.spec and env.spec.id,\n 'mode': opt['model']['mode'], 'name': opt['logs']['exp_name']})\n", (5802, 5931), False, 'import json\n'), ((8678, 8689), 'time.time', 'time.time', ([], {}), '()\n', (8687, 8689), False, 'import time\n'), ((10058, 10113), 'os.path.join', 'os.path.join', (['self.log_dir', '"""Tmp.Last.Step.Monitor.csv"""'], {}), "(self.log_dir, 'Tmp.Last.Step.Monitor.csv')\n", (10070, 10113), False, 'import os\n'), ((10485, 10528), 'csv.DictWriter', 'csv.DictWriter', (['tmp_f'], {'fieldnames': 'st_fields'}), '(tmp_f, fieldnames=st_fields)\n', (10499, 10528), False, 'import csv\n'), ((11677, 11728), 'os.path.join', 'os.path.join', (['self.log_dir', '"""Last.Step.Monitor.csv"""'], {}), "(self.log_dir, 'Last.Step.Monitor.csv')\n", (11689, 11728), False, 'import os\n'), ((11749, 11803), 'shutil.copyfile', 'shutil.copyfile', (['tmp_step_filename', 'last_step_filename'], {}), '(tmp_step_filename, last_step_filename)\n', (11764, 11803), False, 'import shutil\n'), ((10229, 10394), 'json.dumps', 'json.dumps', (["{'t_start': self.tstart, 'env_id': self.env.spec and self.env.spec.id,\n 'mode': self.opt['model']['mode'], 'name': self.opt['logs']['exp_name']}"], {}), "({'t_start': self.tstart, 'env_id': self.env.spec and self.env.\n spec.id, 'mode': self.opt['model']['mode'], 'name': self.opt['logs'][\n 'exp_name']})\n", (10239, 10394), False, 'import json\n')]
|
import json
from sklearn.metrics import mean_squared_error, mean_absolute_error
import numpy as np
from model import Dfembeding
from sklearn.kernel_ridge import KernelRidge
import torch
from PIL import Image
from utils import *
import csv
import torch.utils.data as data
import pandas as pd
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
# class Dataset(data.Dataset):
# def __init__(self, file, transfrom):
# self.Pic_Names = os.listdir(file)
# self.file = file
# self.transfrom = transfrom
#
# def __len__(self):
# return len(self.Pic_Names)
#
# def __getitem__(self, idx):
# img_name = self.Pic_Names[idx]
# Pic = Image.open(os.path.join(self.file, self.Pic_Names[idx]))
# Pic = self.transfrom(Pic)
# try:
# ret = re.match(r"\d+?_([FMfm])_(\d+?)_(\d+?)_(\d+).+", img_name)
# BMI = (int(ret.group(4)) / 100000) / (int(ret.group(3)) / 100000) ** 2
# Pic_name = os.path.join(self.file, self.Pic_Names[idx])
# return (Pic, Pic_name), BMI
# except:
# return (Pic, ''), 10000
class Dataset(data.Dataset):
def __init__(self, file, transfrom):
self.Pic_Names = os.listdir(file)
self.file = file
self.transfrom = transfrom
def __len__(self):
return len(self.Pic_Names)
def __getitem__(self, idx):
img_name = self.Pic_Names[idx]
Pic = Image.open(os.path.join(self.file, self.Pic_Names[idx]))
Pic = self.transfrom(Pic)
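        # file name encodes the labels: <id>_<sex F/M>_<age>_<height*1e5>_<weight*1e5>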
ret = re.match(r"\d+?_([FMfm])_(\d+?)_(\d+?)_(\d+).+", img_name)
sex = 0 if (ret.group(1) == 'F' or ret.group(1) == 'f') else 1
age = int(ret.group(2))
height = int(ret.group(3)) / 100000
weight = int(ret.group(4)) / 100000
BMI = weight / (height ** 2)
# BMI = (int(ret.group(4))/100000) / (int(ret.group(3))/100000)**2
Pic_name = os.path.join(self.file, self.Pic_Names[idx])
return (Pic, Pic_name, img_name, sex, age, height, weight), BMI
def CombineDFBF(model, BodyFeatures, df, loader_test, loader_train):
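    # For each image, write one CSV row combining the hand-crafted body features
    # (WSR, WTR, WHpR, WHdR, HpHdR, Area, H2W) with deep features captured from the
    # model's fc layer, plus sex/age/height/weight; also prints the CNN's MAE per split.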
# test(model, DEVICE, loader_test)
loaders = [ loader_test, loader_train,]
files = [ 'test', 'train',]
for loader, file in zip(loaders, files):
with open('/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_{}.csv'.format(file), 'w',
newline='') as fp:
writer = csv.writer(fp)
model.eval()
pred = []
targ = []
for (img, name, img_name, sex, age, height, weight), target in loader:
values = []
img, target = img.to(DEVICE), target.to(DEVICE)
img_name = img_name[0]
# print('Processing IMage :', img_name)
values.append(img_name)
values.append(target.cpu().numpy()[0])
values.append(sex.numpy()[0])
values.append(BodyFeatures[img_name]['WSR'])
values.append(BodyFeatures[img_name]['WTR'])
values.append(BodyFeatures[img_name]['WHpR'])
values.append(BodyFeatures[img_name]['WHdR'])
values.append(BodyFeatures[img_name]['HpHdR'])
values.append(BodyFeatures[img_name]['Area'])
values.append(BodyFeatures[img_name]['H2W'])
conv_out = LayerActivations(model.fc, 1)
out = model(img)
pred.append(out.item())
targ.append(target.item())
conv_out.remove()
xs = torch.squeeze(conv_out.features.detach()).numpy()
# print(xs.shape)
for x in xs:
values.append(float(x))
values.append(age.numpy()[0])
values.append(height.numpy()[0])
values.append(weight.numpy()[0])
writer.writerow(values)
MAE = mean_absolute_error(targ, pred)
print(file,' ',MAE)
def Pre(raw_data, name):
if (name != 'vgg16'):
raw_data = raw_data.iloc[:, 1:]
raw_data = raw_data.replace([np.inf, -np.inf], np.nan)
# raw_data = raw_data.fillna(raw_data.mean())
raw_data = raw_data.replace(np.nan, 0)
raw_data = raw_data.values.astype(np.float64)
return raw_data
def Feature(data, df, name):
if (name == 'author'):
x_5f = data[:, 0:5]
y = data[:, 9]
return x_5f, y
elif (name == 'vgg16'):
x_df = data[:, 2:]
y = data[:, 0]
return x_df, y
elif (name == 'ours'):
x_5f = data[:, 3:8]
x_7f = data[:, 2:9]
x_df = data[:, 9:9 + df]
y = data[:, 0]
return x_5f, x_7f, x_df, y
def Stdm(x):
Mean = np.mean(x, axis=0)
Std = np.std(x, axis=0)
return Mean, Std
def Regression(df=20, file='test'):
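    # Load the train/test feature CSVs, standardize the 7 body features with
    # training-set statistics, concatenate them with the deep features, and fit a
    # kernel ridge regressor; MAE and MAPE are reported on the test split.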
# raw_data_train = pd.read_csv('/home/benkesheng/BMI_DETECT/ReDone_CSV/Ours/Image_train.csv')
# raw_data_test = pd.read_csv('/home/benkesheng/BMI_DETECT/ReDone_CSV/Ours/Image_test.csv')
raw_data_train = pd.read_csv('/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_train.csv')
raw_data_test = pd.read_csv('/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_test.csv')
raw_data_name = raw_data_test.values
raw_data_train = Pre(raw_data_train, 'ours')
raw_data_test = Pre(raw_data_test, 'ours')
x_5f_train, x_7f_train, x_df_train, y_train = Feature(raw_data_train, df, 'ours')
x_5f_test, x_7f_test, x_df_test, y_test = Feature(raw_data_test, df, 'ours')
x_body_train = x_7f_train
Mean, Std = Stdm(x_body_train)
x_body_train = (x_body_train - Mean) / Std
x_train = np.append(x_body_train, x_df_train, axis=1)
y_train = y_train
x_body_test = x_7f_test
x_body_test = (x_body_test - Mean) / Std
x_test = np.append(x_body_test, x_df_test, axis=1)
y_test = y_test
print(x_test.shape)
print(x_train.shape)
krr = KernelRidge()
krr.fit(x_train, y_train)
y_krr = krr.predict(x_test)
print('KRR: MAE: ', mean_absolute_error(y_test, y_krr), ' MAPE: ', mean_absolute_percentage_error(y_test, y_krr))
if file == 'demo':
for i, data in enumerate(x_test):
y_pred = krr.predict(data[None,:])
print('Name: ', raw_data_name[i][0], ' y_pred:', y_pred[0], ' y_ture:', y_test[i])
if __name__ == '__main__':
IMG_SIZE = 224
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
transform = transforms.Compose([
Resize(IMG_SIZE),
transforms.Pad(IMG_SIZE),
transforms.CenterCrop(IMG_SIZE),
transforms.ToTensor(),
transforms.Normalize(IMG_MEAN, IMG_STD)
])
DEVICE = torch.device("cuda:0")
dataset_train = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_train', transform)
dataset_test = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_test', transform)
loader_train = torch.utils.data.DataLoader(dataset_train, batch_size=1, shuffle=True)
loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1, shuffle=True)
df = 20
model = Dfembeding()
# model.load_state_dict(torch.load('/home/benkesheng/BMI_DETECT/ReDone_CSV/model/Ours.pkl'.format(df)))
model.load_state_dict(torch.load('/home/benkesheng/BMI_DETECT/MODEL/9-1reexperiment/MIN_RESNET101_BMI_20-1fc.pkl'))
model.to(DEVICE)
Path = '/home/benkesheng/BMI_DETECT/Deep_Learning_Method/datasets_bodyfeature/BodyFeature.json'
with open(Path, 'r') as f:
BodyFeatures = json.load(f)
# CombineDFBF(model, BodyFeatures, df, loader_test, loader_train)
Regression(df)
|
[
"json.load",
"numpy.abs",
"csv.writer",
"torch.utils.data.DataLoader",
"numpy.std",
"sklearn.kernel_ridge.KernelRidge",
"pandas.read_csv",
"torch.load",
"model.Dfembeding",
"sklearn.metrics.mean_absolute_error",
"numpy.append",
"numpy.mean",
"numpy.array",
"torch.device"
] |
[((5043, 5061), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5050, 5061), True, 'import numpy as np\n'), ((5073, 5090), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5079, 5090), True, 'import numpy as np\n'), ((5374, 5472), 'pandas.read_csv', 'pd.read_csv', (['"""/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_train.csv"""'], {}), "(\n '/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_train.csv'\n )\n", (5385, 5472), True, 'import pandas as pd\n'), ((5484, 5576), 'pandas.read_csv', 'pd.read_csv', (['"""/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_test.csv"""'], {}), "(\n '/home/benkesheng/BMI_DETECT/Deep_Learning_Method/DF_BF_csv/20-1_test.csv')\n", (5495, 5576), True, 'import pandas as pd\n'), ((6015, 6058), 'numpy.append', 'np.append', (['x_body_train', 'x_df_train'], {'axis': '(1)'}), '(x_body_train, x_df_train, axis=1)\n', (6024, 6058), True, 'import numpy as np\n'), ((6173, 6214), 'numpy.append', 'np.append', (['x_body_test', 'x_df_test'], {'axis': '(1)'}), '(x_body_test, x_df_test, axis=1)\n', (6182, 6214), True, 'import numpy as np\n'), ((6302, 6315), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {}), '()\n', (6313, 6315), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((7083, 7105), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (7095, 7105), False, 'import torch\n'), ((7308, 7378), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_train'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset_train, batch_size=1, shuffle=True)\n', (7335, 7378), False, 'import torch\n'), ((7398, 7467), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_test'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset_test, batch_size=1, shuffle=True)\n', (7425, 7467), False, 'import torch\n'), ((7496, 7508), 'model.Dfembeding', 'Dfembeding', ([], {}), '()\n', (7506, 7508), False, 'from model import Dfembeding\n'), ((380, 396), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (388, 396), True, 'import numpy as np\n'), ((398, 414), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (406, 414), True, 'import numpy as np\n'), ((6405, 6439), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'y_krr'], {}), '(y_test, y_krr)\n', (6424, 6439), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((7647, 7749), 'torch.load', 'torch.load', (['"""/home/benkesheng/BMI_DETECT/MODEL/9-1reexperiment/MIN_RESNET101_BMI_20-1fc.pkl"""'], {}), "(\n '/home/benkesheng/BMI_DETECT/MODEL/9-1reexperiment/MIN_RESNET101_BMI_20-1fc.pkl'\n )\n", (7657, 7749), False, 'import torch\n'), ((7924, 7936), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7933, 7936), False, 'import json\n'), ((435, 469), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true)'], {}), '((y_true - y_pred) / y_true)\n', (441, 469), True, 'import numpy as np\n'), ((2645, 2659), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (2655, 2659), False, 'import csv\n'), ((4197, 4228), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['targ', 'pred'], {}), '(targ, pred)\n', (4216, 4228), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n')]
|
import numpy as np
from scipy import sparse
def fit_glove_bias(A, emb):
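    # Estimate GloVe-style row/column bias vectors a, b with ADAM updates: the
    # embedding contribution is subtracted from the row/column sums of A first,
    # and iteration stops once the relative change drops below 1e-2.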
N = A.shape[0]
row_sum = np.array(A.sum(axis=1)).reshape(-1).astype(float)
col_sum = np.array(A.sum(axis=0)).reshape(-1).astype(float)
emb_sum = np.array(emb @ np.array(np.sum(emb, axis=0)).reshape((-1, 1))).reshape(-1)
row_sum -= emb_sum
col_sum -= emb_sum
a = np.zeros(N)
b = np.zeros(N)
adam_a = ADAM()
adam_b = ADAM()
for it in range(1000):
grad_a = row_sum - np.sum(b) * a
grad_b = col_sum - np.sum(a) * b
anew = adam_a.update(a, grad_a, 0)
bnew = adam_b.update(b, grad_b, 0)
if it % 20 == 0:
dif = np.mean(np.abs(a - anew) + np.abs(b - bnew)) / 2
dif /= np.maximum(np.mean(np.abs(a) + np.abs(b)) / 2, 1e-8)
if dif < 1e-2:
break
a = anew.copy()
b = bnew.copy()
return a, b
class ADAM:
def __init__(self):
self.beta1 = 0.9
self.beta2 = 0.999
self.eta = 0.001
self.t = 0
self.mt = None
self.vt = None
self.eps = 1e-8
def update(self, theta, grad, lasso_penalty, positiveConstraint=False):
"""Ascending."""
if self.mt is None:
self.mt = np.zeros(grad.shape)
self.vt = np.zeros(grad.shape)
self.t = self.t + 1
self.mt = self.beta1 * self.mt + (1 - self.beta1) * grad
self.vt = self.beta2 * self.vt + (1 - self.beta2) * np.multiply(grad, grad)
mthat = self.mt / (1 - np.power(self.beta1, self.t))
vthat = self.vt / (1 - np.power(self.beta2, self.t))
new_grad = mthat / (np.sqrt(vthat) + self.eps)
return self._prox(
theta + self.eta * new_grad, lasso_penalty * self.eta, positiveConstraint
)
def _prox(self, x, lam, positiveConstraint):
"""Soft thresholding operator.
Parameters
----------
x : float
Variable.
lam : float
Lasso penalty.
Returns
-------
y : float
Thresholded value of x.
"""
if positiveConstraint:
b = ((lam) > 0).astype(int)
return np.multiply(b, np.maximum(x - lam, np.zeros(x.shape))) + np.multiply(
1 - b,
np.multiply(np.sign(x), np.maximum(np.abs(x) - lam, np.zeros(x.shape))),
)
else:
return np.multiply(
np.sign(x), np.maximum(np.abs(x) - lam, np.zeros(x.shape))
)
|
[
"numpy.multiply",
"numpy.sum",
"numpy.abs",
"numpy.power",
"numpy.zeros",
"numpy.sign",
"numpy.sqrt"
] |
[((366, 377), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (374, 377), True, 'import numpy as np\n'), ((386, 397), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (394, 397), True, 'import numpy as np\n'), ((1272, 1292), 'numpy.zeros', 'np.zeros', (['grad.shape'], {}), '(grad.shape)\n', (1280, 1292), True, 'import numpy as np\n'), ((1315, 1335), 'numpy.zeros', 'np.zeros', (['grad.shape'], {}), '(grad.shape)\n', (1323, 1335), True, 'import numpy as np\n'), ((494, 503), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (500, 503), True, 'import numpy as np\n'), ((535, 544), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (541, 544), True, 'import numpy as np\n'), ((1491, 1514), 'numpy.multiply', 'np.multiply', (['grad', 'grad'], {}), '(grad, grad)\n', (1502, 1514), True, 'import numpy as np\n'), ((1547, 1575), 'numpy.power', 'np.power', (['self.beta1', 'self.t'], {}), '(self.beta1, self.t)\n', (1555, 1575), True, 'import numpy as np\n'), ((1608, 1636), 'numpy.power', 'np.power', (['self.beta2', 'self.t'], {}), '(self.beta2, self.t)\n', (1616, 1636), True, 'import numpy as np\n'), ((1667, 1681), 'numpy.sqrt', 'np.sqrt', (['vthat'], {}), '(vthat)\n', (1674, 1681), True, 'import numpy as np\n'), ((2480, 2490), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (2487, 2490), True, 'import numpy as np\n'), ((2520, 2537), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (2528, 2537), True, 'import numpy as np\n'), ((688, 704), 'numpy.abs', 'np.abs', (['(a - anew)'], {}), '(a - anew)\n', (694, 704), True, 'import numpy as np\n'), ((707, 723), 'numpy.abs', 'np.abs', (['(b - bnew)'], {}), '(b - bnew)\n', (713, 723), True, 'import numpy as np\n'), ((2257, 2274), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (2265, 2274), True, 'import numpy as np\n'), ((2343, 2353), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (2350, 2353), True, 'import numpy as np\n'), ((2503, 2512), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2509, 2512), True, 'import numpy as np\n'), ((767, 776), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (773, 776), True, 'import numpy as np\n'), ((779, 788), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (785, 788), True, 'import numpy as np\n'), ((2383, 2400), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (2391, 2400), True, 'import numpy as np\n'), ((260, 279), 'numpy.sum', 'np.sum', (['emb'], {'axis': '(0)'}), '(emb, axis=0)\n', (266, 279), True, 'import numpy as np\n'), ((2366, 2375), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2372, 2375), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.linalg import cho_solve, inv
from scipy.stats import norm
from scipy.interpolate import InterpolatedUnivariateSpline
from sklearn.mixture import GaussianMixture as GMM
from .utils import custom_KDE
import time
class Acq(object):
'''
The base acq class.
'''
def __init__(self, inputs):
self.inputs = inputs
def compute_value(self, x):
raise NotImplementedError
def update_prior_search(self, model):
raise NotImplementedError
class AcqLW(Acq):
''' Select the next sample for estimating extreme event statistics.
This acquisition can be used in both single and multi-fidelity contexts.
parameters:
---------
inputs: instance of Input class
Input of the problem including pdf information and a sampling method.
ll_type: string
the type of the weights, must be one of
(1) rare: w(x)=p(x)/p(y(x))
(2) extreme: w(x)=p(x)|y(x)-z|^n
(3) plain: no weights
(4) input: w(x)=p(x)
load_pts: bool
whether load the input samples from a txt file
ll_kwargs: key words for extreme ll_type
attributes:
----------
model: instance of gpr.GaussianProcessRegressor
The surrogate model based on current dataset
DX: array
The inputs of current samples
gmm: instance of sklearn.GMM
The gmm to approximate likelihood, including gmm.means_,
gmm.covariances_, and gmm.scores_.
'''
def __init__(self, inputs, ll_type='rare', load_pts=False, **ll_kwargs):
self.inputs = inputs
self.ll_type = ll_type
self.load_pts = load_pts
self.ll_kwargs = ll_kwargs
if load_pts:
smpl = np.loadtxt('map_samples.txt')
self.pts = smpl[:,0:-1] # mc points
self.fx = smpl[:,-1] # pdf of mc points
def compute_value_tf_cost(self, pos, fidelity, cost):
''' Compute the benefit per cost of adding a sample (pos, fidelity)
'''
x = np.append(pos, fidelity)
value, gradient = self.compute_value(x)
return value/cost, gradient/cost
def compute_value(self, x):
''' Compute the benefit of adding a sample x
For single fidelity, x = pos, while for multi-fidelity,
x = {pos, fidelity}.
'''
x = np.atleast_2d(x)
integral, integral_derivative = self.compute_integral(x)
cov, cov_deriv = self.model.post_cov(x)
value = (integral / cov).item()
gradient = 1/cov**2 * (cov*integral_derivative - integral*cov_deriv)
gradient = gradient.reshape(-1)
return -value, -gradient
def compute_integral(self, x):
''' \int cov^2(f_i(pos), f_h(x'))*w(x')dx', x = {pos, i=fidelity}
Eq.(15) in paper.
and
d \int cov^2(f_i(pos), f_h(x'))*w(x')dx' d pos,
x = {pos, i=fidelity} Eq.(49) in paper.
'''
# compute value
kernel = self.model.kernel_
integral = self.compute_mixed_kappa(x,x)
alpha = cho_solve((self.model.L_, True), kernel(self.X, x))
integral += alpha.T.dot(np.dot(self.kappaXX, alpha)
- 2*self.compute_mixed_kappa(self.X, x))
# compute derivative
term1 = 2*self.compute_mixed_dkappa_dx(x,x)
dalpha_dx = cho_solve((self.model.L_, True),
kernel.gradient_x(x, self.X))
term2 = 2 * alpha.T.dot(np.dot(self.kappaXX, dalpha_dx))
term3 = 2 * alpha.T.dot(self.compute_mixed_dkappa_dx(x,self.X))
term3 += 2 * self.compute_mixed_kappa(x, self.X).dot(dalpha_dx)
return integral, term1 + term2 - term3
def update_prior_search(self, model):
''' Update the model(gpr), data(X), compute the gmm of weights and
kappa(X,X).
'''
self.model = model
self.X = self.model.X_train_
# generate GMM approximation of the likelihood
self._prepare_likelihood(self.ll_type, **self.ll_kwargs)
# constant for all hypothetical point
self.kappaXX = self.compute_mixed_kappa(self.X, self.X)
def compute_mixed_kappa(self, X1, X2):
''' compute averaged kappa w.r.t gmm components.
Eq. (18) in paper. The 'G' function relies on kernel properties.
'''
kernel = self.model.kernel_
mixed_kappa = 0
for i in range(self.gmm.n_components): # the number of gmm component
mixed_kappa += self.gmm.weights_[i] * kernel.intKKNorm(X1, X2,
self.gmm.means_[i],
self.gmm.covariances_[i])
return mixed_kappa
def compute_mixed_dkappa_dx(self, x, X):
''' Compute the averaged kappa derivatives.
Eq.(53) in paper.
'''
kernel = self.model.kernel_
mixed_kappa = 0
for i in range(self.gmm.n_components):
mixed_kappa += self.gmm.weights_[i] * kernel.dintKKNorm_dx(x, X,
self.gmm.means_[i],
self.gmm.covariances_[i])
return mixed_kappa
def _prepare_likelihood(self, ll_type, n_components=2, power=6,
center=0, depressed_side=None):
'''Compute gmm components of w(x').
'''
if self.load_pts:
pts = self.pts
fx = self.fx
n_samples = pts.shape[0]
else:
if self.inputs.dim <= 2:
n_samples = int(1e5)
else:
n_samples = int(1e6)
pts = self.inputs.sampling(n_samples) # input-samples
fx = self.inputs.pdf(pts) # weights
if ll_type =='input':
w_raw = fx
elif ll_type == 'plain':
w_raw = 1
else:
# compute the mean prediction for input-samples
if self.X.shape[1] != self.inputs.dim:
aug_pts = np.concatenate((pts, [[1]] * n_samples), axis = 1)
else:
aug_pts = pts
if ll_type == 'rare':
if n_samples > 4*1e5:
aug_pts_list = np.array_split(aug_pts, 10)
mu = np.empty(0)
for iii in range(10):
mu = np.concatenate((mu,
self.model.predict(aug_pts_list[iii]).flatten()))
else:
mu = self.model.predict(aug_pts).flatten()
x, y = custom_KDE(mu, weights=fx).evaluate()
self.fy_interp = InterpolatedUnivariateSpline(x, y, k=1)
w_raw = fx/self.fy_interp(mu)
elif ll_type == 'extreme':
mu = self.model.predict(aug_pts).flatten()
if center == 'mean':
                center = np.average(mu, weights=fx)  # weighted mean; np.average's 2nd positional arg is axis, so pass weights by keyword
if depressed_side == 'negative':
w_raw = fx*abs(mu - center) ** (power*np.sign(mu - center))
elif depressed_side == 'positive':
w_raw = fx*abs(mu - center) ** (-power*np.sign(mu - center))
else:
w_raw = fx*abs(mu - center)**power
elif ll_type == 'failure':
# P(X)(1-P(X)) * p(X) / var(X)
mu, std = self.model.predict(aug_pts, return_std=True)
# failure probability as a Bernoulli RV
p = norm.cdf(mu.flatten()/std.flatten())
vb = p*(1-p) # var of the Bernoulli
vf = std**2 # var of the predictions
w_raw = vb * fx / vf
self.gmm = self._fit_gmm(pts, w_raw, n_components)
return self
@staticmethod
def _fit_gmm(pts, w_raw, n_components):
'''Fit gmm with weighted samples
'''
sca = np.sum(w_raw)
rng = np.random.default_rng()
aa = rng.choice(pts, size=50000, p=w_raw/sca)
gmm = GMM(n_components=n_components, covariance_type="full")
gmm = gmm.fit(X=aa)
return gmm
|
[
"numpy.sum",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.average",
"numpy.empty",
"sklearn.mixture.GaussianMixture",
"numpy.random.default_rng",
"numpy.append",
"numpy.loadtxt",
"numpy.sign",
"numpy.array_split",
"numpy.dot",
"numpy.concatenate",
"numpy.atleast_2d"
] |
[((2119, 2143), 'numpy.append', 'np.append', (['pos', 'fidelity'], {}), '(pos, fidelity)\n', (2128, 2143), True, 'import numpy as np\n'), ((2464, 2480), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (2477, 2480), True, 'import numpy as np\n'), ((8341, 8354), 'numpy.sum', 'np.sum', (['w_raw'], {}), '(w_raw)\n', (8347, 8354), True, 'import numpy as np\n'), ((8370, 8393), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (8391, 8393), True, 'import numpy as np\n'), ((8464, 8518), 'sklearn.mixture.GaussianMixture', 'GMM', ([], {'n_components': 'n_components', 'covariance_type': '"""full"""'}), "(n_components=n_components, covariance_type='full')\n", (8467, 8518), True, 'from sklearn.mixture import GaussianMixture as GMM\n'), ((1818, 1847), 'numpy.loadtxt', 'np.loadtxt', (['"""map_samples.txt"""'], {}), "('map_samples.txt')\n", (1828, 1847), True, 'import numpy as np\n'), ((3321, 3348), 'numpy.dot', 'np.dot', (['self.kappaXX', 'alpha'], {}), '(self.kappaXX, alpha)\n', (3327, 3348), True, 'import numpy as np\n'), ((3657, 3688), 'numpy.dot', 'np.dot', (['self.kappaXX', 'dalpha_dx'], {}), '(self.kappaXX, dalpha_dx)\n', (3663, 3688), True, 'import numpy as np\n'), ((6399, 6447), 'numpy.concatenate', 'np.concatenate', (['(pts, [[1]] * n_samples)'], {'axis': '(1)'}), '((pts, [[1]] * n_samples), axis=1)\n', (6413, 6447), True, 'import numpy as np\n'), ((7060, 7099), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['x', 'y'], {'k': '(1)'}), '(x, y, k=1)\n', (7088, 7099), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((6613, 6640), 'numpy.array_split', 'np.array_split', (['aug_pts', '(10)'], {}), '(aug_pts, 10)\n', (6627, 6640), True, 'import numpy as np\n'), ((6667, 6678), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (6675, 6678), True, 'import numpy as np\n'), ((7318, 7336), 'numpy.average', 'np.average', (['mu', 'fx'], {}), '(mu, fx)\n', (7328, 7336), True, 'import numpy as np\n'), ((7446, 7466), 'numpy.sign', 'np.sign', (['(mu - center)'], {}), '(mu - center)\n', (7453, 7466), True, 'import numpy as np\n'), ((7580, 7600), 'numpy.sign', 'np.sign', (['(mu - center)'], {}), '(mu - center)\n', (7587, 7600), True, 'import numpy as np\n')]
|
import numpy as np
# class for 3D points in an image frame
class Point(object):
# class constructor
def __init__(self, img_map, location, color):
self.point = location
self.frames = []
self.idx = []
self.color = np.copy(color)
self.id = img_map.max_point
img_map.max_point += 1
img_map.points.append(self)
def orb(self):
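        # collect this point's descriptor from every frame that observes it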
des = []
for f in self.frames:
des.append(f.des[f.pts.index(self)])
return des
# class method to add a frame and index from video
# feed to the Point object
def add_observation(self, frame, index):
frame.pts[index] = self
self.frames.append(frame)
self.idx.append(index)
# class method to delete a point from a frame
def delete_point(self):
for f in self.frames:
f.pts[f.pts.index(self)] = None
del self
def homogenous(self):
return np.array([self.point[0], self.point[1], self.point[2], 1.0])
|
[
"numpy.array",
"numpy.copy"
] |
[((253, 267), 'numpy.copy', 'np.copy', (['color'], {}), '(color)\n', (260, 267), True, 'import numpy as np\n'), ((948, 1008), 'numpy.array', 'np.array', (['[self.point[0], self.point[1], self.point[2], 1.0]'], {}), '([self.point[0], self.point[1], self.point[2], 1.0])\n', (956, 1008), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Remove unnecessary information
import numpy as np
# cpu_count = 4
# the server has no display, so the Agg backend must be used
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# pretty-print helper
def fancy_print(n = None, c = None, s = '#'):
print(s * 40)
print(n)
print(c)
print(s * 40)
    print() # print a blank line to avoid confusion
# import all model definitions
from model import *
# image data generator
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import keras
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import Callback
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.model_selection import train_test_split
from sklearn import metrics
most_epoches = 500 # maximum number of training epochs: 500 (use 2-10 for quick tests)
def train_cnn_dense_resnet(gen_name, model_name, gene_length):
    # print the configuration for easy checking
fancy_print('gen_name', gen_name)
fancy_print('model_name', model_name)
##############################
#
# png reader in iterator
#
##############################
    # train : validation : test = 8 : 1 : 1
    train_datagen = ImageDataGenerator(rescale = 1./255, validation_split = 0.11) # set validation split
    BATCH_SIZE = 32 # batch size
train_generator = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/png_train/',
target_size = (gene_length*2, 5),
color_mode = 'grayscale',
class_mode = 'categorical',
batch_size = BATCH_SIZE,
subset = 'training', # set as training data
shuffle = True, # must shuffle
seed = 42,
)
val_generator = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/png_train/', # same directory as training data
target_size = (gene_length*2, 5),
color_mode = 'grayscale',
class_mode = 'categorical',
batch_size = BATCH_SIZE,
subset = 'validation', # set as validation data
shuffle = True, # must shuffle
seed = 42,
)
##############################
#
    # visualize loss/accuracy during training
#
##############################
class PlotProgress(keras.callbacks.Callback):
def __init__(self, entity = ['loss', 'accuracy']):
self.entity = entity
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.accs = []
self.val_accs = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
            # loss
self.losses.append(logs.get('{}'.format(self.entity[0])))
self.val_losses.append(logs.get('val_{}'.format(self.entity[0])))
            # accuracy
self.accs.append(logs.get('{}'.format(self.entity[1])))
self.val_accs.append(logs.get('val_{}'.format(self.entity[1])))
self.i += 1
# clear_output(wait=True)
plt.figure(0)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.losses, label="{}".format(self.entity[0]))
plt.plot(self.x, self.val_losses, label="val_{}".format(self.entity[0]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/loss.png')
# plt.pause(0.01)
# plt.show()
plt.figure(1)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.accs, label="{}".format(self.entity[1]))
plt.plot(self.x, self.val_accs, label="val_{}".format(self.entity[1]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/acc.png')
# plt.pause(0.01)
# plt.show()
##############################
#
# Model building
#
##############################
if model_name == 'onehot_cnn_one_branch':
clf = model_onehot_cnn_one_branch(gene_length)
if model_name == 'onehot_embedding_dense':
clf = model_onehot_embedding_dense(gene_length)
if model_name == 'onehot_dense':
clf = model_onehot_dense(gene_length)
if model_name == 'onehot_resnet18':
clf = model_onehot_resnet18(gene_length)
if model_name == 'onehot_resnet34':
clf = model_onehot_resnet34(gene_length)
clf.summary() # Print model structure
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 10, restore_best_weights = True)
    # plotting callback
plot_progress = PlotProgress(entity = ['loss', 'accuracy'])
##############################
#
# Model training
#
##############################
# No need to count how many epochs, keras can count
history = clf.fit_generator(generator = train_generator,
epochs = most_epoches,
validation_data = val_generator,
steps_per_epoch = train_generator.samples // BATCH_SIZE,
validation_steps = val_generator.samples // BATCH_SIZE,
callbacks = [plot_progress, early_stopping],
# max_queue_size = 64,
# workers = cpu_count,
# use_multiprocessing = True,
                                verbose = 2 # one log line per epoch
)
clf.save_weights('h5_weights/'+gen_name+'/'+model_name+'.h5')
    # print the save path for easy checking
fancy_print('save_weights', 'h5_weights/'+gen_name+'/'+model_name+'.h5', '=')
def train_cnn_separate(gen_name, model_name, gene_length):
##############################
#
    # build the data iterators
#
##############################
from keras.preprocessing.image import ImageDataGenerator
    # train_datagen = ImageDataGenerator(horizontal_flip = True, vertical_flip = True, rescale = 1. / 255) # vertical and horizontal flips
train_datagen = ImageDataGenerator(rescale = 1. / 255, validation_split = 0.11)
    BATCH_SIZE = 32 # batch size
def generator_two_train():
train_generator1 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_en/', target_size = (gene_length, 5),
color_mode = 'grayscale',
                                                            class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'training', # set as training data
shuffle = True,
                                                            seed = 42) # shuffle both branches the same way
train_generator2 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_pr/', target_size = (gene_length, 5),
color_mode = 'grayscale',
                                                            class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'training', # set as training data
shuffle = True,
                                                            seed = 42) # shuffle both branches the same way
while True:
out1 = train_generator1.next()
out2 = train_generator2.next()
            yield [out1[0], out2[0]], out1[1] # yield the paired inputs together with the labels
def generator_two_val():
val_generator1 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_en/', target_size = (gene_length, 5),
color_mode = 'grayscale',
                                                          class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'validation', # set as validation data
shuffle =True,
                                                          seed = 42) # shuffle both branches the same way
val_generator2 = train_datagen.flow_from_directory(directory = 'data/'+gen_name+'/train_pr/', target_size = (gene_length, 5),
color_mode = 'grayscale',
                                                          class_mode = 'categorical', # 'categorical' returns 2D one-hot labels, 'binary' returns 1D binary labels, 'sparse' returns 1D integer labels
batch_size = BATCH_SIZE,
subset = 'validation', # set as validation data
shuffle = True,
                                                          seed = 42) # shuffle both branches the same way
while True:
out1 = val_generator1.next()
out2 = val_generator2.next()
            yield [out1[0], out2[0]], out1[1] # yield the paired inputs together with the labels
##############################
#
    # model building
#
##############################
    # if a TF version incompatibility occurs, uncomment these two lines to avoid warnings
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
from sklearn import metrics
from keras.callbacks import ModelCheckpoint
##############################
#
# Model building
#
##############################
if model_name == 'onehot_cnn_two_branch':
clf = model_onehot_cnn_two_branch(gene_length)
    clf.summary() # print the model structure
'''
filename = 'best_model.h5'
modelCheckpoint = ModelCheckpoint(filename, monitor = 'val_accuracy', save_best_only = True, mode = 'max')
'''
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 10, restore_best_weights = True)
'''
fancy_print('train_generator.next()[0]', train_generator.next()[0], '+')
fancy_print('train_generator.next()[1]', train_generator.next()[1], '+')
fancy_print('train_generator.next()[0].shape', train_generator.next()[0].shape, '+')
fancy_print('train_generator.next()[1].shape', train_generator.next()[1].shape, '+')
fancy_print('val_generator.next()[0]', val_generator.next()[0], '-')
fancy_print('val_generator.next()[1]', val_generator.next()[1], '-')
fancy_print('val_generator.next()[0].shape', val_generator.next()[0].shape, '-')
fancy_print('val_generator.next()[1].shape', val_generator.next()[1].shape, '-')
'''
##############################
#
    # model training
#
##############################
    # no need to count epochs manually; Keras keeps track
history = clf.fit_generator(generator = generator_two_train(),
epochs = most_epoches,
validation_data = generator_two_val(),
                                steps_per_epoch = 24568 * 2 // BATCH_SIZE, # use all training samples
                                validation_steps = 3071 * 2 // BATCH_SIZE, # use all validation samples
callbacks = [early_stopping],
                                shuffle = True, # shuffle again
# max_queue_size = 64,
# workers = cpu_count,
# use_multiprocessing = True,
                                verbose = 2) # one log line per epoch
clf.save_weights('h5_weights/'+gen_name+'/'+model_name+'.h5')
    # print the save path for easy checking
fancy_print('save_weights', 'h5_weights/'+gen_name+'/'+model_name+'.h5', '=')
def train_embedding(gen_name, model_name):
    # print the configuration for easy checking
fancy_print('gen_name', gen_name)
fancy_print('model_name', model_name)
'''
2021-04-11 16:53:06.007063: E tensorflow/stream_executor/dnn.cc:616] CUDNN_STATUS_INTERNAL_ERROR
in tensorflow/stream_executor/cuda/cuda_dnn.cc(2011): 'cudnnRNNBackwardData( cudnn.handle(), rnn_desc.handle(),
model_dims.max_seq_length, output_desc.handles(), output_data.opaque(), output_desc.handles(), output_backprop_data.opaque(),
output_h_desc.handle(), output_h_backprop_data.opaque(), output_c_desc.handle(), output_c_backprop_data.opaque(),
rnn_desc.params_handle(), params.opaque(), input_h_desc.handle(), input_h_data.opaque(), input_c_desc.handle(),
input_c_data.opaque(), input_desc.handles(), input_backprop_data->opaque(), input_h_desc.handle(), input_h_backprop_data->opaque(),
input_c_desc.handle(), input_c_backprop_data->opaque(), workspace.opaque(), workspace.size(), reserve_space_data->opaque(), reserve_space_data->size())'
2021-04-11 16:53:06.007530: W tensorflow/core/framework/op_kernel.cc:1767] OP_REQUIRES failed at cudnn_rnn_ops.cc:1922:
Internal: Failed to call ThenRnnBackward with model config: [rnn_mode, rnn_input_mode, rnn_direction_mode]: 3, 0, 0 ,
[num_layers, input_size, num_units, dir_count, max_seq_length, batch_size, cell_num_units]: [1, 64, 50, 1, 100, 32, 0]
2021-04-11 16:53:06.007077: F tensorflow/stream_executor/cuda/cuda_dnn.cc:190] Check failed: status == CUDNN_STATUS_SUCCESS (7 vs. 0)Failed to set cuDNN stream.
    Workaround: restrict CUDA to GPU 0 and enable memory growth (done below).
'''
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
##############################
#
    # visualize loss/accuracy during training
#
##############################
class PlotProgress(keras.callbacks.Callback):
def __init__(self, entity = ['loss', 'accuracy']):
self.entity = entity
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.accs = []
self.val_accs = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
            # loss
self.losses.append(logs.get('{}'.format(self.entity[0])))
self.val_losses.append(logs.get('val_{}'.format(self.entity[0])))
            # accuracy
self.accs.append(logs.get('{}'.format(self.entity[1])))
self.val_accs.append(logs.get('val_{}'.format(self.entity[1])))
self.i += 1
plt.figure(0)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.losses, label="{}".format(self.entity[0]))
plt.plot(self.x, self.val_losses, label="val_{}".format(self.entity[0]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/loss.png')
# plt.pause(0.01)
# plt.show()
plt.figure(1)
            plt.clf() # clear the previous figure
plt.plot(self.x, self.accs, label="{}".format(self.entity[1]))
plt.plot(self.x, self.val_accs, label="val_{}".format(self.entity[1]))
plt.legend()
plt.savefig('result/'+gen_name+'/'+model_name+'/acc.png')
# plt.pause(0.01)
# plt.show()
train = np.load('data/'+gen_name+'/embedding_train.npz')
X_en_tra, X_pr_tra, y_tra = train['X_en_tra'], train['X_pr_tra'], train['y_tra']
##############################
#
# Model building
#
##############################
if model_name == 'embedding_cnn_one_branch':
model = model_embedding_cnn_one_branch()
if model_name == 'embedding_cnn_two_branch':
model = model_embedding_cnn_two_branch()
if model_name == 'embedding_dense':
model = model_embedding_dense()
if model_name == 'onehot_embedding_cnn_one_branch':
model = model_onehot_embedding_cnn_one_branch()
if model_name == 'onehot_embedding_cnn_two_branch':
model = model_onehot_embedding_cnn_two_branch()
model.summary()
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 20, restore_best_weights = True)
    # plotting callback
plot_progress = PlotProgress(entity = ['loss', 'accuracy'])
history = model.fit([X_en_tra, X_pr_tra], y_tra, epochs=most_epoches, batch_size=32, validation_split=0.11,
callbacks=[early_stopping, plot_progress],
# max_queue_size = 64,
# workers = cpu_count,
# use_multiprocessing = True,
                        verbose = 2 # one log line per epoch
)
model.save_weights('h5_weights/'+gen_name+'/'+model_name+'.h5')
    # print the save path for easy checking
fancy_print('save_weights', 'h5_weights/'+gen_name+'/'+model_name+'.h5', '=')
########################################
#
# no code runs when this module is executed directly
#
########################################
if __name__ == '__main__':
pass
|
[
"keras.preprocessing.image.ImageDataGenerator",
"numpy.load",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"tensorflow.config.experimental.set_memory_growth",
"matplotlib.pyplot.figure",
"matplotlib.use",
"keras.callbacks.EarlyStopping",
"tensorflow.config.experimental.list_physical_devices",
"matplotlib.pyplot.savefig"
] |
[((205, 226), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (219, 226), False, 'import matplotlib\n'), ((1274, 1334), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': '(0.11)'}), '(rescale=1.0 / 255, validation_split=0.11)\n', (1292, 1334), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((5417, 5494), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(10)', 'restore_best_weights': '(True)'}), "(monitor='val_accuracy', patience=10, restore_best_weights=True)\n", (5430, 5494), False, 'from keras.callbacks import EarlyStopping\n'), ((7054, 7114), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': '(0.11)'}), '(rescale=1.0 / 255, validation_split=0.11)\n', (7072, 7114), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((11269, 11346), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(10)', 'restore_best_weights': '(True)'}), "(monitor='val_accuracy', patience=10, restore_best_weights=True)\n", (11282, 11346), False, 'from keras.callbacks import EarlyStopping\n'), ((14817, 14868), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (14861, 14868), True, 'import tensorflow as tf\n'), ((14874, 14941), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (14914, 14941), True, 'import tensorflow as tf\n'), ((16747, 16799), 'numpy.load', 'np.load', (["('data/' + gen_name + '/embedding_train.npz')"], {}), "('data/' + gen_name + '/embedding_train.npz')\n", (16754, 16799), True, 'import numpy as np\n'), ((17569, 17646), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(20)', 'restore_best_weights': '(True)'}), "(monitor='val_accuracy', patience=20, restore_best_weights=True)\n", (17582, 17646), False, 'from keras.callbacks import EarlyStopping\n'), ((3429, 3441), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3439, 3441), True, 'from matplotlib import pyplot as plt\n'), ((4010, 4023), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (4020, 4023), True, 'from matplotlib import pyplot as plt\n'), ((4037, 4046), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4044, 4046), True, 'from matplotlib import pyplot as plt\n'), ((4233, 4245), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4243, 4245), True, 'from matplotlib import pyplot as plt\n'), ((4259, 4325), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('result/' + gen_name + '/' + model_name + '/loss.png')"], {}), "('result/' + gen_name + '/' + model_name + '/loss.png')\n", (4270, 4325), True, 'from matplotlib import pyplot as plt\n'), ((4390, 4403), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4400, 4403), True, 'from matplotlib import pyplot as plt\n'), ((4417, 4426), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4424, 4426), True, 'from matplotlib import pyplot as plt\n'), ((4610, 4622), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4620, 4622), True, 'from matplotlib import pyplot as plt\n'), ((4636, 4701), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('result/' + gen_name + '/' + 
model_name + '/acc.png')"], {}), "('result/' + gen_name + '/' + model_name + '/acc.png')\n", (4647, 4701), True, 'from matplotlib import pyplot as plt\n'), ((15443, 15455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15453, 15455), True, 'from matplotlib import pyplot as plt\n'), ((15987, 16000), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (15997, 16000), True, 'from matplotlib import pyplot as plt\n'), ((16014, 16023), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (16021, 16023), True, 'from matplotlib import pyplot as plt\n'), ((16210, 16222), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16220, 16222), True, 'from matplotlib import pyplot as plt\n'), ((16236, 16302), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('result/' + gen_name + '/' + model_name + '/loss.png')"], {}), "('result/' + gen_name + '/' + model_name + '/loss.png')\n", (16247, 16302), True, 'from matplotlib import pyplot as plt\n'), ((16367, 16380), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (16377, 16380), True, 'from matplotlib import pyplot as plt\n'), ((16394, 16403), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (16401, 16403), True, 'from matplotlib import pyplot as plt\n'), ((16587, 16599), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16597, 16599), True, 'from matplotlib import pyplot as plt\n'), ((16613, 16678), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('result/' + gen_name + '/' + model_name + '/acc.png')"], {}), "('result/' + gen_name + '/' + model_name + '/acc.png')\n", (16624, 16678), True, 'from matplotlib import pyplot as plt\n')]
|
import numpy as np
import cv2
import pdb
# https://github.com/zju3dv/clean-pvnet/blob/master/lib/datasets/augmentation.py
def debug_visualize(image, mask, pts2d, sym_cor, name_prefix='debug'):
from random import sample
cv2.imwrite('{}_image.png'.format(name_prefix), image * 255)
cv2.imwrite('{}_mask.png'.format(name_prefix), mask * 255)
img_pts = image.copy() * 255
for i in range(pts2d.shape[0]):
x = int(round(pts2d[i, 0]))
y = int(round(pts2d[i, 1]))
img_pts = cv2.circle(img_pts, (x, y), 2, (0, 0, 255), thickness=-1)
cv2.imwrite('{}_pts.png'.format(name_prefix), img_pts)
img_sym = image.copy() * 255
ys, xs = np.nonzero(mask)
for i_pt in sample([i for i in range(len(ys))], min(100, len(ys))):
y = int(round(ys[i_pt]))
x = int(round(xs[i_pt]))
x_cor, y_cor = sym_cor[y, x]
x_cor = int(round(x + x_cor))
y_cor = int(round(y + y_cor))
img_sym = cv2.line(img_sym, (x, y), (x_cor, y_cor), (0, 0, 255), 1)
cv2.imwrite('{}_sym.png'.format(name_prefix), img_sym)
def rotate_sym_cor(sym_cor, mask, R):
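    # Rotate the per-pixel symmetry-correspondence offsets: both the foreground
    # pixels and their correspondence targets are transformed by R, and the new
    # offsets are stored at the rotated pixel locations (out-of-bounds ones dropped).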
h, w = sym_cor.shape[:2]
ys, xs = np.nonzero(mask)
source = np.float32(np.stack([xs, ys], axis=-1))
delta = np.float32(sym_cor[ys, xs])
target = source + delta
last_col = np.ones((source.shape[0], 1), dtype=np.float32)
source = np.concatenate([source, last_col], axis=-1)
target = np.concatenate([target, last_col], axis=-1)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
source = np.matmul(source, R)[:, :2]
target = np.matmul(target, R)[:, :2]
source = np.uint32(np.round(source))
delta = target - source
# remove invalid indices
xs, ys = source[:, 0], source[:, 1]
valid = (xs > 0) & (xs < w) & (ys > 0) & (ys < h)
xs, ys, delta = xs[valid], ys[valid], delta[valid]
sym_cor = np.zeros_like(sym_cor)
sym_cor[ys, xs] = delta
return sym_cor
def rotate_instance(img, mask, hcoords, sym_cor, rot_ang_min, rot_ang_max):
h, w = img.shape[0], img.shape[1]
degree = np.random.uniform(rot_ang_min, rot_ang_max)
hs, ws = np.nonzero(mask)
R = cv2.getRotationMatrix2D((np.mean(ws), np.mean(hs)), degree, 1)
sym_cor = rotate_sym_cor(sym_cor, mask, R)
mask = cv2.warpAffine(mask, R, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
img = cv2.warpAffine(img, R, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
last_row = np.asarray([[0, 0, 1]], dtype=np.float32)
R = np.concatenate([R, last_row], axis=0).transpose()
last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, last_col], axis=1)
hcoords = np.float32(np.matmul(hcoords, R))
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_resize_instance_v1(img, mask, hcoords, sym_cor, imheight, imwidth,
overlap_ratio=0.5, ratio_min=0.8, ratio_max=1.2):
'''
crop a region with [imheight*resize_ratio,imwidth*resize_ratio]
which at least overlap with foreground bbox with overlap
'''
hcoords_last_col = np.ones((hcoords.shape[0], 1), dtype=np.float32)
hcoords = np.concatenate([hcoords, hcoords_last_col], axis=1)
resize_ratio = np.random.uniform(ratio_min, ratio_max)
target_height = int(imheight * resize_ratio)
target_width = int(imwidth * resize_ratio)
img, mask, hcoords, sym_cor = crop_or_padding_to_fixed_size_instance(
img, mask, hcoords, sym_cor, target_height, target_width, overlap_ratio)
img = cv2.resize(img, (imwidth, imheight), interpolation=cv2.INTER_LINEAR)
mask = cv2.resize(mask, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor = cv2.resize(sym_cor, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)
sym_cor /= resize_ratio
hcoords[:, 0] = hcoords[:, 0] / resize_ratio
hcoords[:, 1] = hcoords[:, 1] / resize_ratio
hcoords = hcoords[:, :2]
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size_instance(img, mask, hcoords, sym_cor, th, tw,
overlap_ratio=0.5):
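    # Crop (or zero-pad) to a fixed (th, tw) window positioned so the crop overlaps
    # the foreground bounding box by roughly `overlap_ratio`, shifting the
    # homogeneous keypoint coordinates accordingly.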
h, w, _ = img.shape
hs, ws = np.nonzero(mask)
hmin, hmax = np.min(hs), np.max(hs)
wmin, wmax = np.min(ws), np.max(ws)
fh, fw = hmax - hmin, wmax - wmin
hpad, wpad = th >= h, tw >= w
    hrmax = int(min(hmin + overlap_ratio * fh, h - th)) # h must be > target_height, otherwise hrmax < 0
    hrmin = int(max(hmin + overlap_ratio * fh - th, 0))
    wrmax = int(min(wmin + overlap_ratio * fw, w - tw)) # w must be > target_width, otherwise wrmax < 0
wrmin = int(max(wmin + overlap_ratio * fw - tw, 0))
hbeg = 0 if (hpad or hrmin == hrmax) else np.random.randint(hrmin, hrmax)
hend = hbeg + th
    wbeg = 0 if (wpad or wrmin == wrmax) else np.random.randint(wrmin, wrmax) # if padding, [0, wend] extends beyond [0, w]; indexing it is still safe
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
hcoords[:, 0] -= wbeg * hcoords[:, 2]
hcoords[:, 1] -= hbeg * hcoords[:, 2]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
hcoords[:, 0] += wbeg * hcoords[:, 2]
hcoords[:, 1] += hbeg * hcoords[:, 2]
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, hcoords, sym_cor
def crop_or_padding_to_fixed_size(img, mask, sym_cor, th, tw):
h, w, _ = img.shape
hpad, wpad = th >= h, tw >= w
hbeg = 0 if hpad else np.random.randint(0, h - th)
    wbeg = 0 if wpad else np.random.randint(0, w - tw) # if padding, [0, wend] extends beyond [0, w]; indexing it is still safe
hend = hbeg + th
wend = wbeg + tw
img = img[hbeg:hend, wbeg:wend]
mask = mask[hbeg:hend, wbeg:wend]
sym_cor = sym_cor[hbeg:hend, wbeg:wend]
if hpad or wpad:
nh, nw, _ = img.shape
new_img = np.zeros([th, tw, 3], dtype=img.dtype)
new_mask = np.zeros([th, tw], dtype=mask.dtype)
new_sym_cor = np.zeros([th, tw, 2], dtype=sym_cor.dtype)
hbeg = 0 if not hpad else (th - h) // 2
wbeg = 0 if not wpad else (tw - w) // 2
new_img[hbeg:hbeg + nh, wbeg:wbeg + nw] = img
new_mask[hbeg:hbeg + nh, wbeg:wbeg + nw] = mask
new_sym_cor[hbeg:hbeg + nh, wbeg:wbeg + nw] = sym_cor
img, mask, sym_cor = new_img, new_mask, new_sym_cor
return img, mask, sym_cor
|
[
"numpy.ones",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.mean",
"numpy.round",
"cv2.line",
"numpy.zeros_like",
"numpy.max",
"cv2.resize",
"numpy.stack",
"cv2.circle",
"numpy.asarray",
"numpy.min",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.float32",
"numpy.zeros",
"numpy.nonzero",
"numpy.matmul"
] |
[((675, 691), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (685, 691), True, 'import numpy as np\n'), ((1159, 1175), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (1169, 1175), True, 'import numpy as np\n'), ((1241, 1268), 'numpy.float32', 'np.float32', (['sym_cor[ys, xs]'], {}), '(sym_cor[ys, xs])\n', (1251, 1268), True, 'import numpy as np\n'), ((1312, 1359), 'numpy.ones', 'np.ones', (['(source.shape[0], 1)'], {'dtype': 'np.float32'}), '((source.shape[0], 1), dtype=np.float32)\n', (1319, 1359), True, 'import numpy as np\n'), ((1373, 1416), 'numpy.concatenate', 'np.concatenate', (['[source, last_col]'], {'axis': '(-1)'}), '([source, last_col], axis=-1)\n', (1387, 1416), True, 'import numpy as np\n'), ((1430, 1473), 'numpy.concatenate', 'np.concatenate', (['[target, last_col]'], {'axis': '(-1)'}), '([target, last_col], axis=-1)\n', (1444, 1473), True, 'import numpy as np\n'), ((1489, 1530), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 1]], dtype=np.float32)\n', (1499, 1530), True, 'import numpy as np\n'), ((1932, 1954), 'numpy.zeros_like', 'np.zeros_like', (['sym_cor'], {}), '(sym_cor)\n', (1945, 1954), True, 'import numpy as np\n'), ((2130, 2173), 'numpy.random.uniform', 'np.random.uniform', (['rot_ang_min', 'rot_ang_max'], {}), '(rot_ang_min, rot_ang_max)\n', (2147, 2173), True, 'import numpy as np\n'), ((2187, 2203), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (2197, 2203), True, 'import numpy as np\n'), ((2333, 2441), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'R', '(w, h)'], {'flags': 'cv2.INTER_NEAREST', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(0)'}), '(mask, R, (w, h), flags=cv2.INTER_NEAREST, borderMode=cv2.\n BORDER_CONSTANT, borderValue=0)\n', (2347, 2441), False, 'import cv2\n'), ((2447, 2553), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'R', '(w, h)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(0)'}), '(img, R, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.\n BORDER_CONSTANT, borderValue=0)\n', (2461, 2553), False, 'import cv2\n'), ((2564, 2605), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 1]], dtype=np.float32)\n', (2574, 2605), True, 'import numpy as np\n'), ((2679, 2727), 'numpy.ones', 'np.ones', (['(hcoords.shape[0], 1)'], {'dtype': 'np.float32'}), '((hcoords.shape[0], 1), dtype=np.float32)\n', (2686, 2727), True, 'import numpy as np\n'), ((2742, 2785), 'numpy.concatenate', 'np.concatenate', (['[hcoords, last_col]'], {'axis': '(1)'}), '([hcoords, last_col], axis=1)\n', (2756, 2785), True, 'import numpy as np\n'), ((3225, 3273), 'numpy.ones', 'np.ones', (['(hcoords.shape[0], 1)'], {'dtype': 'np.float32'}), '((hcoords.shape[0], 1), dtype=np.float32)\n', (3232, 3273), True, 'import numpy as np\n'), ((3288, 3339), 'numpy.concatenate', 'np.concatenate', (['[hcoords, hcoords_last_col]'], {'axis': '(1)'}), '([hcoords, hcoords_last_col], axis=1)\n', (3302, 3339), True, 'import numpy as np\n'), ((3360, 3399), 'numpy.random.uniform', 'np.random.uniform', (['ratio_min', 'ratio_max'], {}), '(ratio_min, ratio_max)\n', (3377, 3399), True, 'import numpy as np\n'), ((3663, 3731), 'cv2.resize', 'cv2.resize', (['img', '(imwidth, imheight)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (imwidth, imheight), interpolation=cv2.INTER_LINEAR)\n', (3673, 3731), False, 'import cv2\n'), ((3743, 3813), 'cv2.resize', 'cv2.resize', (['mask', '(imwidth, imheight)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask, 
(imwidth, imheight), interpolation=cv2.INTER_NEAREST)\n', (3753, 3813), False, 'import cv2\n'), ((3828, 3901), 'cv2.resize', 'cv2.resize', (['sym_cor', '(imwidth, imheight)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(sym_cor, (imwidth, imheight), interpolation=cv2.INTER_NEAREST)\n', (3838, 3901), False, 'import cv2\n'), ((4278, 4294), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4288, 4294), True, 'import numpy as np\n'), ((512, 569), 'cv2.circle', 'cv2.circle', (['img_pts', '(x, y)', '(2)', '(0, 0, 255)'], {'thickness': '(-1)'}), '(img_pts, (x, y), 2, (0, 0, 255), thickness=-1)\n', (522, 569), False, 'import cv2\n'), ((961, 1018), 'cv2.line', 'cv2.line', (['img_sym', '(x, y)', '(x_cor, y_cor)', '(0, 0, 255)', '(1)'], {}), '(img_sym, (x, y), (x_cor, y_cor), (0, 0, 255), 1)\n', (969, 1018), False, 'import cv2\n'), ((1200, 1227), 'numpy.stack', 'np.stack', (['[xs, ys]'], {'axis': '(-1)'}), '([xs, ys], axis=-1)\n', (1208, 1227), True, 'import numpy as np\n'), ((1602, 1622), 'numpy.matmul', 'np.matmul', (['source', 'R'], {}), '(source, R)\n', (1611, 1622), True, 'import numpy as np\n'), ((1643, 1663), 'numpy.matmul', 'np.matmul', (['target', 'R'], {}), '(target, R)\n', (1652, 1663), True, 'import numpy as np\n'), ((1694, 1710), 'numpy.round', 'np.round', (['source'], {}), '(source)\n', (1702, 1710), True, 'import numpy as np\n'), ((2811, 2832), 'numpy.matmul', 'np.matmul', (['hcoords', 'R'], {}), '(hcoords, R)\n', (2820, 2832), True, 'import numpy as np\n'), ((4313, 4323), 'numpy.min', 'np.min', (['hs'], {}), '(hs)\n', (4319, 4323), True, 'import numpy as np\n'), ((4325, 4335), 'numpy.max', 'np.max', (['hs'], {}), '(hs)\n', (4331, 4335), True, 'import numpy as np\n'), ((4353, 4363), 'numpy.min', 'np.min', (['ws'], {}), '(ws)\n', (4359, 4363), True, 'import numpy as np\n'), ((4365, 4375), 'numpy.max', 'np.max', (['ws'], {}), '(ws)\n', (4371, 4375), True, 'import numpy as np\n'), ((4797, 4828), 'numpy.random.randint', 'np.random.randint', (['hrmin', 'hrmax'], {}), '(hrmin, hrmax)\n', (4814, 4828), True, 'import numpy as np\n'), ((4896, 4927), 'numpy.random.randint', 'np.random.randint', (['wrmin', 'wrmax'], {}), '(wrmin, wrmax)\n', (4913, 4927), True, 'import numpy as np\n'), ((5291, 5329), 'numpy.zeros', 'np.zeros', (['[th, tw, 3]'], {'dtype': 'img.dtype'}), '([th, tw, 3], dtype=img.dtype)\n', (5299, 5329), True, 'import numpy as np\n'), ((5349, 5385), 'numpy.zeros', 'np.zeros', (['[th, tw]'], {'dtype': 'mask.dtype'}), '([th, tw], dtype=mask.dtype)\n', (5357, 5385), True, 'import numpy as np\n'), ((5408, 5450), 'numpy.zeros', 'np.zeros', (['[th, tw, 2]'], {'dtype': 'sym_cor.dtype'}), '([th, tw, 2], dtype=sym_cor.dtype)\n', (5416, 5450), True, 'import numpy as np\n'), ((6063, 6091), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - th)'], {}), '(0, h - th)\n', (6080, 6091), True, 'import numpy as np\n'), ((6118, 6146), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - tw)'], {}), '(0, w - tw)\n', (6135, 6146), True, 'import numpy as np\n'), ((6490, 6528), 'numpy.zeros', 'np.zeros', (['[th, tw, 3]'], {'dtype': 'img.dtype'}), '([th, tw, 3], dtype=img.dtype)\n', (6498, 6528), True, 'import numpy as np\n'), ((6548, 6584), 'numpy.zeros', 'np.zeros', (['[th, tw]'], {'dtype': 'mask.dtype'}), '([th, tw], dtype=mask.dtype)\n', (6556, 6584), True, 'import numpy as np\n'), ((6607, 6649), 'numpy.zeros', 'np.zeros', (['[th, tw, 2]'], {'dtype': 'sym_cor.dtype'}), '([th, tw, 2], dtype=sym_cor.dtype)\n', (6615, 6649), True, 'import numpy as np\n'), ((1539, 1576), 
'numpy.concatenate', 'np.concatenate', (['[R, last_row]'], {'axis': '(0)'}), '([R, last_row], axis=0)\n', (1553, 1576), True, 'import numpy as np\n'), ((2237, 2248), 'numpy.mean', 'np.mean', (['ws'], {}), '(ws)\n', (2244, 2248), True, 'import numpy as np\n'), ((2250, 2261), 'numpy.mean', 'np.mean', (['hs'], {}), '(hs)\n', (2257, 2261), True, 'import numpy as np\n'), ((2614, 2651), 'numpy.concatenate', 'np.concatenate', (['[R, last_row]'], {'axis': '(0)'}), '([R, last_row], axis=0)\n', (2628, 2651), True, 'import numpy as np\n')]
|
import numpy as np
import os
import gym
import torch
import torch.nn as nn
import collections
import copy
import random
# hyper-parameters
learn_freq = 5  # train only every few steps so the replay buffer keeps accumulating experience
buffer_size = 20000  # replay buffer capacity
buffer_init_size = 200  # minimum number of experiences before training starts
batch_size = 32  # number of samples drawn per update
learning_rate = 0.001  # learning rate
GAMMA = 0.99  # reward discount factor
class Model(nn.Module):
def __init__(self, act_dim, state_dim):
super(Model, self).__init__()
hidden1_size = 128
hidden2_size = 128
self.input_layer = nn.Linear(state_dim, hidden1_size)
self.input_layer.weight.data.normal_(0, 0.1)
self.hidden_layer = nn.Linear(hidden1_size, hidden2_size)
self.hidden_layer.weight.data.normal_(0, 0.1)
self.output_layer = nn.Linear(hidden2_size, act_dim)
self.output_layer.weight.data.normal_(0, 0.1)
def forward(self, state):
h1 = nn.functional.relu(self.input_layer(state))
h2 = nn.functional.relu(self.hidden_layer(h1))
Q = self.output_layer(h2)
return Q
class DQN:
def __init__(self, model, act_dim=None, gamma=None, lr=None):
self.model = model
self.target_model = copy.deepcopy(model)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
self.loss = nn.MSELoss()
self.act_dim = act_dim
self.lr = lr
self.gamma = gamma
def predict(self, state):
return self.model.forward(state) # shape: batch_size x act_dim
    def learn(self, state, action, reward, state_next, done): # each input has shape: batch_size
        # compute the target Q value from the target network
        next_values = self.target_model.forward(state_next).detach() # detach blocks gradients through the target net, shape: batch_size x act_dim
        target_value = (reward + (1.0 - done)*self.gamma*next_values.max(1)[0]).unsqueeze(1) # shape: batch_size x 1
        # get Q(s, a) from the online network
        curr_value = self.model.forward(state)
        action = action.unsqueeze(1)
        pred_value = torch.gather(curr_value, 1, action.long()) # gather the Q value of the taken action along dim 1, giving batch_size x 1
cost = self.loss(pred_value, target_value)
self.optimizer.zero_grad()
cost.backward()
self.optimizer.step()
return cost
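    # Illustrative note (added, not from the original author): with gamma = 0.99, a sampled
    # transition with reward = 1.0, done = 0 and max_a' Q_target(s', a') = 2.0 gives a TD
    # target of 1.0 + 0.99 * 2.0 = 2.98, while a terminal transition (done = 1) uses the
    # bare reward 1.0 as its target.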
def update_target(self):
        self.target_model.load_state_dict(self.model.state_dict()) # copy the online network parameters into the target network
class ReplayMemory(object):
def __init__(self, max_size):
self.buffer = collections.deque(maxlen=max_size)
def append(self, exp):
self.buffer.append(exp)
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
state_batch, action_batch, reward_batch, state_netx_batch, done_batch = [], [], [], [], []
for exp in batch:
s, a, r, s_next, done = exp
state_batch.append(s)
action_batch.append(a)
reward_batch.append(r)
state_netx_batch.append(s_next)
done_batch.append(done)
return torch.from_numpy(np.array(state_batch).astype('float32')), \
torch.from_numpy(np.array(action_batch).astype('int32')), \
torch.from_numpy(np.array(reward_batch).astype('float32')), \
torch.from_numpy(np.array(state_netx_batch).astype('float32')), \
torch.from_numpy(np.array(done_batch).astype('float32'))
def __len__(self):
return len(self.buffer)
class Agent:
def __init__(self, algorithm, state_dim, act_dim, epsilon=0.1, epsilon_fade=0.0):
self.dqn = algorithm
self.state_dim = state_dim
self.act_dim = act_dim
self.steps = 0
self.update_target_steps = 200
self.epsilon = epsilon
self.epsilon_fade = epsilon_fade
def explore(self, state):
sample = np.random.rand()
if sample < self.epsilon:
action = np.random.randint(self.act_dim)
else:
action = self.greedy(state)
self.epsilon = max(0.01, self.epsilon - self.epsilon_fade)
return action
def greedy(self, state):
        state = torch.tensor(state, dtype=torch.float32)
pred_value = self.dqn.target_model.forward(state)
values = pred_value.detach().numpy()
values = np.squeeze(values, axis=None)
        action = np.argmax(values) # pick the index of the largest Q value
return action
def learn(self, state, action, reward, state_next, done):
if self.steps % self.update_target_steps == 0:
self.dqn.update_target()
self.steps += 1
cost = self.dqn.learn(state, action, reward, state_next, done)
return cost
def evaluate(env, agent, render=True):
eval_reward = []
for i in range(10):
state = env.reset()
episode_reward = 0
while True:
action = agent.greedy(state)
state, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
if __name__ == '__main__':
env = gym.make('CartPole-v0')
action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
exp_buffer = ReplayMemory(buffer_size)
model = Model(act_dim=action_dim, state_dim=state_dim)
algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=learning_rate)
agent = Agent(algorithm, state_dim=state_dim, act_dim=action_dim, epsilon=0.1, epsilon_fade=1e-6)
state = env.reset()
while(len(exp_buffer)<buffer_init_size):
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
exp_buffer.append((state, action, reward, state_next, done))
state = state_next
if done:
state = env.reset()
episode = 0
while episode < 20000:
for i in range(0, 100):
episode += 1
total_reward = 0
state = env.reset()
step = 0
while True:
step += 1
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
# env.render()
exp_buffer.append((state, action, reward, state_next, done))
# train
if len(exp_buffer) > buffer_init_size and step%learn_freq == 0:
(state_batch, action_batch, reward_batch, state_next_batch, done_batch) = exp_buffer.sample(batch_size)
loss = agent.learn(state_batch, action_batch, reward_batch, state_next_batch, done_batch)
total_reward += reward
state = state_next
if done:
break
eval_reward = evaluate(env, agent, render=True)
print('episode: %d e_greed: %.5f test_reward: %.1f' %(episode, agent.epsilon, eval_reward))
torch.save(agent.dqn.target_model, './dqn.pkl')
|
[
"copy.deepcopy",
"torch.nn.MSELoss",
"gym.make",
"numpy.argmax",
"random.sample",
"torch.save",
"numpy.mean",
"numpy.random.randint",
"numpy.array",
"torch.nn.Linear",
"numpy.random.rand",
"torch.tensor",
"numpy.squeeze",
"collections.deque",
"torch.from_numpy"
] |
[((5045, 5065), 'numpy.mean', 'np.mean', (['eval_reward'], {}), '(eval_reward)\n', (5052, 5065), True, 'import numpy as np\n'), ((5104, 5127), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (5112, 5127), False, 'import gym\n'), ((6878, 6925), 'torch.save', 'torch.save', (['agent.dqn.target_model', '"""./dqn.pkl"""'], {}), "(agent.dqn.target_model, './dqn.pkl')\n", (6888, 6925), False, 'import torch\n'), ((497, 531), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'hidden1_size'], {}), '(state_dim, hidden1_size)\n', (506, 531), True, 'import torch.nn as nn\n'), ((613, 650), 'torch.nn.Linear', 'nn.Linear', (['hidden1_size', 'hidden2_size'], {}), '(hidden1_size, hidden2_size)\n', (622, 650), True, 'import torch.nn as nn\n'), ((733, 765), 'torch.nn.Linear', 'nn.Linear', (['hidden2_size', 'act_dim'], {}), '(hidden2_size, act_dim)\n', (742, 765), True, 'import torch.nn as nn\n'), ((1148, 1168), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (1161, 1168), False, 'import copy\n'), ((1274, 1286), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1284, 1286), True, 'import torch.nn as nn\n'), ((2391, 2425), 'collections.deque', 'collections.deque', ([], {'maxlen': 'max_size'}), '(maxlen=max_size)\n', (2408, 2425), False, 'import collections\n'), ((2537, 2575), 'random.sample', 'random.sample', (['self.buffer', 'batch_size'], {}), '(self.buffer, batch_size)\n', (2550, 2575), False, 'import random\n'), ((3740, 3756), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3754, 3756), True, 'import numpy as np\n'), ((4033, 4056), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (4049, 4056), False, 'import torch\n'), ((4073, 4113), 'torch.tensor', 'torch.tensor', (['state'], {'dtype': 'torch.float32'}), '(state, dtype=torch.float32)\n', (4085, 4113), False, 'import torch\n'), ((4234, 4263), 'numpy.squeeze', 'np.squeeze', (['values'], {'axis': 'None'}), '(values, axis=None)\n', (4244, 4263), True, 'import numpy as np\n'), ((4281, 4298), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (4290, 4298), True, 'import numpy as np\n'), ((3812, 3843), 'numpy.random.randint', 'np.random.randint', (['self.act_dim'], {}), '(self.act_dim)\n', (3829, 3843), True, 'import numpy as np\n'), ((2958, 2979), 'numpy.array', 'np.array', (['state_batch'], {}), '(state_batch)\n', (2966, 2979), True, 'import numpy as np\n'), ((3034, 3056), 'numpy.array', 'np.array', (['action_batch'], {}), '(action_batch)\n', (3042, 3056), True, 'import numpy as np\n'), ((3109, 3131), 'numpy.array', 'np.array', (['reward_batch'], {}), '(reward_batch)\n', (3117, 3131), True, 'import numpy as np\n'), ((3186, 3212), 'numpy.array', 'np.array', (['state_netx_batch'], {}), '(state_netx_batch)\n', (3194, 3212), True, 'import numpy as np\n'), ((3267, 3287), 'numpy.array', 'np.array', (['done_batch'], {}), '(done_batch)\n', (3275, 3287), True, 'import numpy as np\n')]
|
##
# @file electric_overflow.py
# @author <NAME>
# @date Aug 2018
#
import math
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from torch.nn import functional as F
import dreamplace.ops.electric_potential.electric_potential_cpp as electric_potential_cpp
import dreamplace.configure as configure
if configure.compile_configurations["CUDA_FOUND"] == "TRUE":
import dreamplace.ops.electric_potential.electric_potential_cuda as electric_potential_cuda
import pdb
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class ElectricDensityMapFunction(Function):
"""
@brief compute density overflow.
    @param ctx pytorch API to store data for backward propagation
@param pos location of cells, x and then y
@param node_size_x_clamped stretched size, max(bin_size*sqrt2, node_size)
@param node_size_y_clamped stretched size, max(bin_size*sqrt2, node_size)
@param offset_x (stretched size - node_size) / 2
@param offset_y (stretched size - node_size) / 2
@param ratio original area / stretched area
@param initial_density_map density_map for fixed cells
@param target_density target density
@param xl left boundary
@param yl lower boundary
@param xh right boundary
@param yh upper boundary
@param bin_size_x bin width
    @param bin_size_y bin height
@param num_movable_nodes number of movable cells
@param num_filler_nodes number of filler cells
@param padding bin padding to boundary of placement region
    @param padding_mask 0/1 mask over all bins; entries inside the padding region are 1
@param num_bins_x number of bins in horizontal direction
@param num_bins_y number of bins in vertical direction
@param num_movable_impacted_bins_x number of impacted bins for any movable cell in x direction
@param num_movable_impacted_bins_y number of impacted bins for any movable cell in y direction
@param num_filler_impacted_bins_x number of impacted bins for any filler cell in x direction
@param num_filler_impacted_bins_y number of impacted bins for any filler cell in y direction
@param sorted_node_map the indices of the movable node map
"""
@staticmethod
def forward(
pos,
node_size_x_clamped,
node_size_y_clamped,
offset_x,
offset_y,
ratio,
bin_center_x,
bin_center_y,
initial_density_map,
target_density,
xl,
yl,
xh,
yh,
bin_size_x,
bin_size_y,
num_movable_nodes,
num_filler_nodes,
padding,
padding_mask, # same dimensions as density map, with padding regions to be 1
num_bins_x,
num_bins_y,
num_movable_impacted_bins_x,
num_movable_impacted_bins_y,
num_filler_impacted_bins_x,
num_filler_impacted_bins_y,
deterministic_flag,
sorted_node_map):
if pos.is_cuda:
output = electric_potential_cuda.density_map(
pos.view(pos.numel()), node_size_x_clamped,
node_size_y_clamped, offset_x, offset_y, ratio, bin_center_x,
bin_center_y, initial_density_map, target_density, xl, yl, xh,
yh, bin_size_x, bin_size_y, num_movable_nodes,
num_filler_nodes, padding, num_bins_x, num_bins_y,
num_movable_impacted_bins_x, num_movable_impacted_bins_y,
num_filler_impacted_bins_x, num_filler_impacted_bins_y,
deterministic_flag, sorted_node_map)
else:
output = electric_potential_cpp.density_map(
pos.view(pos.numel()), node_size_x_clamped,
node_size_y_clamped, offset_x, offset_y, ratio, bin_center_x,
bin_center_y, initial_density_map, target_density, xl, yl, xh,
yh, bin_size_x, bin_size_y, num_movable_nodes,
num_filler_nodes, padding, num_bins_x, num_bins_y,
num_movable_impacted_bins_x, num_movable_impacted_bins_y,
num_filler_impacted_bins_x, num_filler_impacted_bins_y,
deterministic_flag)
density_map = output.view([num_bins_x, num_bins_y])
# set padding density
if padding > 0:
density_map.masked_fill_(padding_mask,
target_density * bin_size_x * bin_size_y)
return density_map
class ElectricOverflow(nn.Module):
def __init__(
self,
node_size_x,
node_size_y,
bin_center_x,
bin_center_y,
target_density,
xl,
yl,
xh,
yh,
bin_size_x,
bin_size_y,
num_movable_nodes,
num_terminals,
num_filler_nodes,
padding,
deterministic_flag, # control whether to use deterministic routine
sorted_node_map,
movable_macro_mask=None):
super(ElectricOverflow, self).__init__()
self.node_size_x = node_size_x
self.node_size_y = node_size_y
self.bin_center_x = bin_center_x
self.bin_center_y = bin_center_y
self.target_density = target_density
self.xl = xl
self.yl = yl
self.xh = xh
self.yh = yh
self.bin_size_x = bin_size_x
self.bin_size_y = bin_size_y
self.num_movable_nodes = num_movable_nodes
self.num_terminals = num_terminals
self.num_filler_nodes = num_filler_nodes
self.padding = padding
self.sorted_node_map = sorted_node_map
self.movable_macro_mask = movable_macro_mask
self.deterministic_flag = deterministic_flag
self.reset()
def reset(self):
sqrt2 = math.sqrt(2)
# clamped means stretch a cell to bin size
# clamped = max(bin_size*sqrt2, node_size)
# offset means half of the stretch size
# ratio means the original area over the stretched area
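        # illustrative numbers (added note): with bin_size_x = bin_size_y = 2 and a 1 x 1 cell,
        # the clamped size is 2*sqrt(2) ~ 2.83 per axis, offset ~ (1 - 2.83)/2 ~ -0.91, and
        # ratio = 1 / (2.83 * 2.83) ~ 0.125, so the stretched cell still deposits exactly its
        # original area into the density map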
self.node_size_x_clamped = self.node_size_x.clamp(min=self.bin_size_x *
sqrt2)
self.offset_x = (self.node_size_x - self.node_size_x_clamped).mul(0.5)
self.node_size_y_clamped = self.node_size_y.clamp(min=self.bin_size_y *
sqrt2)
self.offset_y = (self.node_size_y - self.node_size_y_clamped).mul(0.5)
node_areas = self.node_size_x * self.node_size_y
self.ratio = node_areas / (self.node_size_x_clamped *
self.node_size_y_clamped)
# detect movable macros and scale down the density to avoid halos
# the definition of movable macros should be different according to algorithms
self.num_movable_macros = 0
if self.target_density < 1 and self.movable_macro_mask is not None:
self.num_movable_macros = self.movable_macro_mask.sum().data.item()
self.ratio[:self.num_movable_nodes][
self.movable_macro_mask] = self.target_density
# compute maximum impacted bins
self.num_bins_x = int(math.ceil((self.xh - self.xl) / self.bin_size_x))
self.num_bins_y = int(math.ceil((self.yh - self.yl) / self.bin_size_y))
if self.num_movable_nodes:
self.num_movable_impacted_bins_x = int(
((self.node_size_x[:self.num_movable_nodes].max() +
2 * sqrt2 * self.bin_size_x) /
self.bin_size_x).ceil().clamp(max=self.num_bins_x))
self.num_movable_impacted_bins_y = int(
((self.node_size_y[:self.num_movable_nodes].max() +
2 * sqrt2 * self.bin_size_y) /
self.bin_size_y).ceil().clamp(max=self.num_bins_y))
else:
self.num_movable_impacted_bins_x = 0
self.num_movable_impacted_bins_y = 0
if self.num_filler_nodes:
self.num_filler_impacted_bins_x = (
(self.node_size_x[-self.num_filler_nodes:].max() +
2 * sqrt2 * self.bin_size_x) /
self.bin_size_x).ceil().clamp(max=self.num_bins_x)
self.num_filler_impacted_bins_y = (
(self.node_size_y[-self.num_filler_nodes:].max() +
2 * sqrt2 * self.bin_size_y) /
self.bin_size_y).ceil().clamp(max=self.num_bins_y)
else:
self.num_filler_impacted_bins_x = 0
self.num_filler_impacted_bins_y = 0
if self.padding > 0:
self.padding_mask = torch.ones(self.num_bins_x,
self.num_bins_y,
dtype=torch.uint8,
device=self.node_size_x.device)
self.padding_mask[self.padding:self.num_bins_x - self.padding,
self.padding:self.num_bins_y -
self.padding].fill_(0)
else:
self.padding_mask = torch.zeros(self.num_bins_x,
self.num_bins_y,
dtype=torch.uint8,
device=self.node_size_x.device)
# initial density_map due to fixed cells
self.initial_density_map = None
def compute_initial_density_map(self, pos):
if self.num_terminals == 0:
num_fixed_impacted_bins_x = 0
num_fixed_impacted_bins_y = 0
else:
max_size_x = self.node_size_x[self.num_movable_nodes:self.
num_movable_nodes +
self.num_terminals].max()
max_size_y = self.node_size_y[self.num_movable_nodes:self.
num_movable_nodes +
self.num_terminals].max()
num_fixed_impacted_bins_x = ((max_size_x + self.bin_size_x) /
self.bin_size_x).ceil().clamp(
max=self.num_bins_x)
num_fixed_impacted_bins_y = ((max_size_y + self.bin_size_y) /
self.bin_size_y).ceil().clamp(
max=self.num_bins_y)
if pos.is_cuda:
func = electric_potential_cuda.fixed_density_map
else:
func = electric_potential_cpp.fixed_density_map
self.initial_density_map = func(
pos, self.node_size_x, self.node_size_y, self.bin_center_x,
self.bin_center_y, self.xl, self.yl, self.xh, self.yh,
self.bin_size_x, self.bin_size_y, self.num_movable_nodes,
self.num_terminals, self.num_bins_x, self.num_bins_y,
num_fixed_impacted_bins_x, num_fixed_impacted_bins_y,
self.deterministic_flag)
# scale density of fixed macros
self.initial_density_map.mul_(self.target_density)
def forward(self, pos):
if self.initial_density_map is None:
self.compute_initial_density_map(pos)
density_map = ElectricDensityMapFunction.forward(
pos, self.node_size_x_clamped, self.node_size_y_clamped,
self.offset_x, self.offset_y, self.ratio, self.bin_center_x,
self.bin_center_y, self.initial_density_map, self.target_density,
self.xl, self.yl, self.xh, self.yh, self.bin_size_x,
self.bin_size_y, self.num_movable_nodes, self.num_filler_nodes,
self.padding, self.padding_mask, self.num_bins_x, self.num_bins_y,
self.num_movable_impacted_bins_x, self.num_movable_impacted_bins_y,
self.num_filler_impacted_bins_x, self.num_filler_impacted_bins_y,
self.deterministic_flag, self.sorted_node_map)
bin_area = self.bin_size_x * self.bin_size_y
density_cost = (density_map -
self.target_density * bin_area).clamp_(min=0.0).sum().unsqueeze(0)
return density_cost, density_map.max().unsqueeze(0) / bin_area
def plot(plot_count, density_map, padding, name):
"""
density map contour and heat map
"""
density_map = density_map[padding:density_map.shape[0] - padding,
padding:density_map.shape[1] - padding]
print("max density = %g @ %s" %
(np.amax(density_map),
np.unravel_index(np.argmax(density_map), density_map.shape)))
print("mean density = %g" % (np.mean(density_map)))
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.arange(density_map.shape[0])
y = np.arange(density_map.shape[1])
x, y = np.meshgrid(x, y)
# looks like x and y should be swapped
ax.plot_surface(y, x, density_map, alpha=0.8)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('density')
# plt.tight_layout()
plt.savefig(name + ".3d.png")
plt.close()
# plt.clf()
#fig, ax = plt.subplots()
# ax.pcolor(density_map)
# Loop over data dimensions and create text annotations.
# for i in range(density_map.shape[0]):
# for j in range(density_map.shape[1]):
# text = ax.text(j, i, density_map[i, j],
# ha="center", va="center", color="w")
# fig.tight_layout()
#plt.savefig(name+".2d.%d.png" % (plot_count))
# plt.close()
|
[
"torch.ones",
"numpy.meshgrid",
"math.sqrt",
"math.ceil",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.amax",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.arange",
"numpy.mean",
"torch.zeros",
"matplotlib.pyplot.savefig"
] |
[((530, 551), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (544, 551), False, 'import matplotlib\n'), ((12631, 12643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12641, 12643), True, 'import matplotlib.pyplot as plt\n'), ((12687, 12718), 'numpy.arange', 'np.arange', (['density_map.shape[0]'], {}), '(density_map.shape[0])\n', (12696, 12718), True, 'import numpy as np\n'), ((12727, 12758), 'numpy.arange', 'np.arange', (['density_map.shape[1]'], {}), '(density_map.shape[1])\n', (12736, 12758), True, 'import numpy as np\n'), ((12771, 12788), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (12782, 12788), True, 'import numpy as np\n'), ((12988, 13017), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.3d.png')"], {}), "(name + '.3d.png')\n", (12999, 13017), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13033), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13031, 13033), True, 'import matplotlib.pyplot as plt\n'), ((5809, 5821), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5818, 5821), False, 'import math\n'), ((7201, 7249), 'math.ceil', 'math.ceil', (['((self.xh - self.xl) / self.bin_size_x)'], {}), '((self.xh - self.xl) / self.bin_size_x)\n', (7210, 7249), False, 'import math\n'), ((7281, 7329), 'math.ceil', 'math.ceil', (['((self.yh - self.yl) / self.bin_size_y)'], {}), '((self.yh - self.yl) / self.bin_size_y)\n', (7290, 7329), False, 'import math\n'), ((8619, 8719), 'torch.ones', 'torch.ones', (['self.num_bins_x', 'self.num_bins_y'], {'dtype': 'torch.uint8', 'device': 'self.node_size_x.device'}), '(self.num_bins_x, self.num_bins_y, dtype=torch.uint8, device=self\n .node_size_x.device)\n', (8629, 8719), False, 'import torch\n'), ((9079, 9180), 'torch.zeros', 'torch.zeros', (['self.num_bins_x', 'self.num_bins_y'], {'dtype': 'torch.uint8', 'device': 'self.node_size_x.device'}), '(self.num_bins_x, self.num_bins_y, dtype=torch.uint8, device=\n self.node_size_x.device)\n', (9090, 9180), False, 'import torch\n'), ((12597, 12617), 'numpy.mean', 'np.mean', (['density_map'], {}), '(density_map)\n', (12604, 12617), True, 'import numpy as np\n'), ((12469, 12489), 'numpy.amax', 'np.amax', (['density_map'], {}), '(density_map)\n', (12476, 12489), True, 'import numpy as np\n'), ((12519, 12541), 'numpy.argmax', 'np.argmax', (['density_map'], {}), '(density_map)\n', (12528, 12541), True, 'import numpy as np\n')]
|
"""
Dependencies:
tensorflow: 1.2.0
matplotlib
numpy
"""
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.set_random_seed(1)
np.random.seed(1)
#fake data
n_data = np.ones((100,2))
x0 = np.random.normal(2*n_data, 1)   # class0 x, shape=(100, 2)
y0 = np.zeros(100)                   # class0 y, shape=(100,)
x1 = np.random.normal(-2*n_data, 1)  # class1 x, shape=(100, 2)
y1 = np.ones(100)                    # class1 y, shape=(100,)
x = np.vstack((x0, x1))  # shape (200, 2) + some noise
y = np.hstack((y0, y1))  # shape (200,)
#plot data
plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')
plt.show()
tf_x = tf.placeholder(tf.float32, x.shape) #input x
tf_y = tf.placeholder(tf.int32, y.shape)
#neural network layers
l1 = tf.layers.dense(tf_x, 10, tf.nn.relu)
output = tf.layers.dense(l1, 2)
loss = tf.losses.sparse_softmax_cross_entropy(labels = tf_y, logits =output) #compute cost
accuracy = tf.metrics.accuracy(labels = tf.squeeze(tf_y), predictions = tf.argmax(output, axis=1), )[1]
optimizer = tf.train.GradientDescentOptimizer(learning_rate= 0.05)
train_op = optimizer.minimize(loss)
sess = tf.Session()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
plt.ion()
for step in range(100):
#train and net output
_, acc, pred = sess.run([train_op, accuracy, output], {tf_x: x, tf_y:y })
if step %2 == 0:
#plot and show learning process
plt.cla()
plt.scatter(x[:, 0], x[:, 1], c = pred.argmax(1), s = 100, lw = 0, cmap = 'RdYlGn')
plt.text(1.5, -4, 'Accuracy = %.2f'% acc, fontdict = {'size':20, 'color':'red'})
plt.pause(0.1)
plt.ioff()
plt.show()
|
[
"numpy.random.seed",
"numpy.ones",
"tensorflow.local_variables_initializer",
"numpy.random.normal",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"matplotlib.pyplot.cla",
"tensorflow.squeeze",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.hstack",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ion",
"tensorflow.train.GradientDescentOptimizer",
"numpy.vstack",
"tensorflow.losses.sparse_softmax_cross_entropy",
"matplotlib.pyplot.ioff",
"tensorflow.argmax",
"matplotlib.pyplot.scatter",
"tensorflow.layers.dense",
"numpy.zeros"
] |
[((134, 155), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (152, 155), True, 'import tensorflow as tf\n'), ((156, 173), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (170, 173), True, 'import numpy as np\n'), ((195, 212), 'numpy.ones', 'np.ones', (['(100, 2)'], {}), '((100, 2))\n', (202, 212), True, 'import numpy as np\n'), ((217, 248), 'numpy.random.normal', 'np.random.normal', (['(2 * n_data)', '(1)'], {}), '(2 * n_data, 1)\n', (233, 248), True, 'import numpy as np\n'), ((289, 302), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (297, 302), True, 'import numpy as np\n'), ((361, 393), 'numpy.random.normal', 'np.random.normal', (['(-2 * n_data)', '(1)'], {}), '(-2 * n_data, 1)\n', (377, 393), True, 'import numpy as np\n'), ((431, 443), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (438, 443), True, 'import numpy as np\n'), ((500, 519), 'numpy.vstack', 'np.vstack', (['(x0, x1)'], {}), '((x0, x1))\n', (509, 519), True, 'import numpy as np\n'), ((575, 594), 'numpy.hstack', 'np.hstack', (['(y0, y1)'], {}), '((y0, y1))\n', (584, 594), True, 'import numpy as np\n'), ((643, 705), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': 'y', 's': '(100)', 'lw': '(0)', 'cmap': '"""RdYlGn"""'}), "(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')\n", (654, 705), True, 'import matplotlib.pyplot as plt\n'), ((706, 716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (714, 716), True, 'import matplotlib.pyplot as plt\n'), ((725, 760), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'x.shape'], {}), '(tf.float32, x.shape)\n', (739, 760), True, 'import tensorflow as tf\n'), ((782, 815), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'y.shape'], {}), '(tf.int32, y.shape)\n', (796, 815), True, 'import tensorflow as tf\n'), ((846, 883), 'tensorflow.layers.dense', 'tf.layers.dense', (['tf_x', '(10)', 'tf.nn.relu'], {}), '(tf_x, 10, tf.nn.relu)\n', (861, 883), True, 'import tensorflow as tf\n'), ((893, 915), 'tensorflow.layers.dense', 'tf.layers.dense', (['l1', '(2)'], {}), '(l1, 2)\n', (908, 915), True, 'import tensorflow as tf\n'), ((924, 990), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'tf_y', 'logits': 'output'}), '(labels=tf_y, logits=output)\n', (962, 990), True, 'import tensorflow as tf\n'), ((1135, 1188), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.05)'}), '(learning_rate=0.05)\n', (1168, 1188), True, 'import tensorflow as tf\n'), ((1234, 1246), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1244, 1246), True, 'import tensorflow as tf\n'), ((1354, 1363), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1361, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1803), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1801, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1812, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1299), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1297, 1299), True, 'import tensorflow as tf\n'), ((1301, 1333), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (1331, 1333), True, 'import tensorflow as tf\n'), ((1561, 1570), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1568, 1570), True, 'import matplotlib.pyplot as 
plt\n'), ((1671, 1756), 'matplotlib.pyplot.text', 'plt.text', (['(1.5)', '(-4)', "('Accuracy = %.2f' % acc)"], {'fontdict': "{'size': 20, 'color': 'red'}"}), "(1.5, -4, 'Accuracy = %.2f' % acc, fontdict={'size': 20, 'color':\n 'red'})\n", (1679, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1774), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1769, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1075), 'tensorflow.squeeze', 'tf.squeeze', (['tf_y'], {}), '(tf_y)\n', (1069, 1075), True, 'import tensorflow as tf\n'), ((1091, 1116), 'tensorflow.argmax', 'tf.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (1100, 1116), True, 'import tensorflow as tf\n')]
|
import numpy as N
import win32com.client
# generate and import apogee ActiveX module
apogee_module = win32com.client.gencache.EnsureModule(
'{A2882C73-7CFB-11D4-9155-0060676644C1}', 0, 1, 0)
if apogee_module is None:
raise ImportError # prevent plugin from being imported
from win32com.client import constants as Constants
from traits.api import Str, Int, Enum, Float, Bool
from traitsui.api import View, Item
from Camera import Camera, CameraError
class ApogeeCam(Camera):
'''Apogee Alta or Ascent camera'''
plugin_info = {
'name': 'Apogee',
'description': 'Apogee Alta or Ascent camera',
'author': '<NAME>',
'copyright year': '2011',
}
camera_num2 = Int(0)
camera_model = Str()
driver_version = Str()
interface = Enum('usb', 'net')
expose_time = Float(0.05)
open_shutter = Bool(True)
view = View(
Item('interface'),
Item('camera_number'),
Item('camera_num2'),
Item('expose_time'),
Item('open_shutter'))
def __init__(self, **traits):
super(ApogeeCam, self).__init__(camera_number=0, **traits)
self._cam = win32com.client.Dispatch('Apogee.Camera2')
self._interface_constants = {
'usb': Constants.Apn_Interface_USB,
'net': Constants.Apn_Interface_NET}
self._reverse_constants = dict((v, k)
            for k, v in self._interface_constants.items())
self._buffer = None
def open(self):
self._cam.Init(self._interface_constants[self.interface],
self.camera_number, self.camera_num2, 0)
self._buffer = N.zeros(self.roi[-1:-3:-1], dtype=N.uint16)
def close(self):
self._cam.Close()
def query_frame(self, expose_time=None, open_shutter=None):
"""
Start an exposure and wait for it to finish.
Pass @expose_time or @open_shutter to override the camera object's
default parameters.
"""
if expose_time is None:
expose_time = self.expose_time
if open_shutter is None:
open_shutter = self.open_shutter
try:
self._cam.Expose(expose_time, open_shutter)
while self._cam.ImagingStatus != Constants.Apn_Status_ImageReady:
pass
self._cam.GetImage(self._buffer.ctypes.data)
finally:
if self._cam.ImagingStatus < 0:
self.reset()
self.frame = N.copy(self._buffer)
def choose_camera(self):
discover = win32com.client.Dispatch('Apogee.CamDiscover')
discover.DlgCheckUsb = True
discover.ShowDialog(True)
if not discover.ValidSelection:
raise ValueError('No camera selected')
self.interface = self._reverse_constants[discover.SelectedInterface]
self.camera_number = discover.SelectedCamIdOne
self.camera_num2 = discover.SelectedCamIdTwo
def reset(self):
self._cam.ResetState()
# if error status persists, raise an exception
if self._cam.ImagingStatus < 0:
raise CameraError('Error not cleared by reset', self.camera_number)
def _resolution_default(self):
return self._cam.ImagingColumns, self._cam.ImagingRows
def _camera_model_default(self):
return self._cam.CameraModel
def _driver_version_default(self):
return self._cam.DriverVersion
def _id_string_default(self):
return 'Apogee {} Driver version: {}'.format(
self.camera_model,
self.driver_version)
def _roi_default(self):
return (self._cam.RoiStartX,
self._cam.RoiStartY,
self._cam.RoiPixelsH,
self._cam.RoiPixelsV)
def _roi_changed(self, value):
x, y, w, h = value
self._cam.RoiStartX = x
self._cam.RoiStartY = y
self._cam.RoiPixelsH = w
self._cam.RoiPixelsV = h
self._buffer = N.zeros((h, w), dtype=N.uint16)
|
[
"traits.api.Float",
"Camera.CameraError",
"numpy.copy",
"traits.api.Int",
"numpy.zeros",
"traits.api.Bool",
"traits.api.Str",
"traitsui.api.Item",
"traits.api.Enum"
] |
[((715, 721), 'traits.api.Int', 'Int', (['(0)'], {}), '(0)\n', (718, 721), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((741, 746), 'traits.api.Str', 'Str', ([], {}), '()\n', (744, 746), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((768, 773), 'traits.api.Str', 'Str', ([], {}), '()\n', (771, 773), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((790, 808), 'traits.api.Enum', 'Enum', (['"""usb"""', '"""net"""'], {}), "('usb', 'net')\n", (794, 808), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((827, 838), 'traits.api.Float', 'Float', (['(0.05)'], {}), '(0.05)\n', (832, 838), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((858, 868), 'traits.api.Bool', 'Bool', (['(True)'], {}), '(True)\n', (862, 868), False, 'from traits.api import Str, Int, Enum, Float, Bool\n'), ((895, 912), 'traitsui.api.Item', 'Item', (['"""interface"""'], {}), "('interface')\n", (899, 912), False, 'from traitsui.api import View, Item\n'), ((922, 943), 'traitsui.api.Item', 'Item', (['"""camera_number"""'], {}), "('camera_number')\n", (926, 943), False, 'from traitsui.api import View, Item\n'), ((953, 972), 'traitsui.api.Item', 'Item', (['"""camera_num2"""'], {}), "('camera_num2')\n", (957, 972), False, 'from traitsui.api import View, Item\n'), ((982, 1001), 'traitsui.api.Item', 'Item', (['"""expose_time"""'], {}), "('expose_time')\n", (986, 1001), False, 'from traitsui.api import View, Item\n'), ((1011, 1031), 'traitsui.api.Item', 'Item', (['"""open_shutter"""'], {}), "('open_shutter')\n", (1015, 1031), False, 'from traitsui.api import View, Item\n'), ((1642, 1685), 'numpy.zeros', 'N.zeros', (['self.roi[-1:-3:-1]'], {'dtype': 'N.uint16'}), '(self.roi[-1:-3:-1], dtype=N.uint16)\n', (1649, 1685), True, 'import numpy as N\n'), ((2468, 2488), 'numpy.copy', 'N.copy', (['self._buffer'], {}), '(self._buffer)\n', (2474, 2488), True, 'import numpy as N\n'), ((3948, 3979), 'numpy.zeros', 'N.zeros', (['(h, w)'], {'dtype': 'N.uint16'}), '((h, w), dtype=N.uint16)\n', (3955, 3979), True, 'import numpy as N\n'), ((3097, 3158), 'Camera.CameraError', 'CameraError', (['"""Error not cleared by reset"""', 'self.camera_number'], {}), "('Error not cleared by reset', self.camera_number)\n", (3108, 3158), False, 'from Camera import Camera, CameraError\n')]
|
"""
The pypositioning.system.load_files.py module contains functions for loading measurement results from various
types of files. The currently available functions load **.psd** files collected with the TI Packet Sniffer and
results obtained with the IONIS localization system.
Copyright (C) 2020 <NAME>
"""
import numpy as np
import pandas as pd
def load_ionis_file(filepath, normalize_ts=False):
""" Load measurement file from IONIS system
Parameters
----------
filepath: str
path to measurement file
normalize_ts: bool
if True set ts base to 0 (rounds ts to full seconds)
Returns
-------
ble_df: DataFrame
data frame with ble rssi results (contains whole ble packets received by the anchors)
uwb_df: Dataframe
data frame with uwb-based toa results
ts_0: float
timestamp of the first received packet
"""
# open psd file
f = open(filepath, 'r')
ble_res = []
uwb_res = []
# split and decode each line
for line in f:
s = line.split('\t')
# if int(s[3]) == 19:
# ... # the packet is empty
if int(s[5]) * 12 + int(s[6]) * 8 + 19 > int(s[3]):
print("faulty packet, ts: " + s[0])
else:
# get ble and uwb packets number
ble_n = int(s[5])
uwb_n = int(s[6])
# for each ble packet
for k in range(ble_n):
bps = 6 + k * 8
# append array [ts, an_id, an_sqn, an_pressure, BLE packet contents]
ble_res.append(s[:3] + [s[4]] + s[bps + 1:bps + 9])
for j in range(uwb_n):
ups = 6 + ble_n * 8 + j * 4
# append array [ts, an_id, an_sqn, an_pressure, BLE packet contents]
uwb_res.append(s[:3] + s[ups + 1:ups + 5])
# reshape the arrays
ble_res = np.array(ble_res)
uwb_res = np.array(uwb_res)
if ble_res.size > 0:
ble_df = pd.DataFrame(data=ble_res,
columns=['ts', 'an_id', 'an_sqn', 'an_p', 'rx_id', 'tag_id', 'ble_ts', 'rssi',
'pres', 'volt', 'steps', 'alert'])
ble_df = ble_df.astype(dtype={'ts': 'float', 'an_id': 'int32', 'an_sqn': 'int32', 'an_p': 'int32',
'rx_id': 'int32', 'tag_id': 'int32', 'ble_ts': 'int32',
'rssi': 'float', 'pres': 'int32', 'volt': 'int32',
'steps': 'int32', 'alert': 'int32'})
ble_df.loc[ble_df['rssi'] == 0, 'rssi'] = np.nan
else:
ble_df = None
if uwb_res.size > 0:
uwb_df = pd.DataFrame(data=uwb_res, columns=['ts', 'an_id', 'an_sqn', 'rx_id', 'tag_id', 'uwb_sqn', 'toa'])
uwb_df = uwb_df.astype({'ts': 'float', 'an_id': 'int32', 'an_sqn': 'int32',
'rx_id': 'int32', 'tag_id': 'int32', 'uwb_sqn': 'int32', 'toa': 'float'})
uwb_df['toa'] = uwb_df['toa'].values * 15.65e-12
else:
uwb_df = None
if normalize_ts:
ts_min = 0
if (uwb_res.size > 0 and ble_res.size > 0):
ts_min = np.minimum(ble_df.ts.min(), uwb_df.ts.min())
ble_df.ts = np.rint((ble_df.ts - ts_min).values / 1000)
uwb_df.ts = np.rint((uwb_df.ts - ts_min).values / 1000)
elif uwb_res.size > 0:
ts_min = uwb_df.ts.min()
uwb_df.ts = np.rint((uwb_df.ts - ts_min).values / 1000)
print('no ble results in a file - normalizing uwb ts only')
elif ble_res.size > 0:
ts_min = ble_df.ts.min()
ble_df.ts = np.rint((ble_df.ts - ts_min).values / 1000)
print('no uwb results in a file - normalizing ble ts only')
return ble_df, uwb_df, ts_min / 1000
return ble_df, uwb_df, 0
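# Minimal usage sketch (added for illustration; the file name below is hypothetical):
#
#     ble_df, uwb_df, ts_0 = load_ionis_file('ionis_measurement.txt', normalize_ts=True)
#     if ble_df is not None:
#         print(ble_df[['ts', 'an_id', 'rssi']].head())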
def synchronize_toa_ionis(m_uwb, an, an_r):
""" Synchronize toa values according to IONIS synchronization scheme
Parameters
----------
m_uwb: DataFrame
data frame with uwb measurement results
an: ndarray
anchor nodes coordinates [id,x,y,z]
an_r: ndarray
reference anchor node coordinates [x,y,z]
Returns
-------
m_uwb: DataFrame
m_uwb data frame with toa values synchronized
"""
# initialize array with empty rows for missing anchors
an_f = np.empty((int(an[:, 0].max()), 3))
an_f[:] = np.NaN
for a in an:
an_f[int(a[0]) - 1, :] = a[1:]
m_uwb["toa"] = m_uwb.toa + np.linalg.norm(an_f[m_uwb.an_id - 1] - an_r, axis=1) / 3e8
return m_uwb
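# Worked example (added note): an anchor located 3 m from the reference anchor gets
# 3 / 3e8 = 10 ns added to each of its toa values, which puts all anchors on the
# reference anchor's time base.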
def distribute_packets_ionis(df):
""" Distribute packets that could be delayed and came to the system controller at the same time
Parameters
----------
df: DataFrame
dataframe containing measurement results with timestamps and sqns. It must include columns:
[ts, an_sqn, an_id]. Timestamp must be rounded to full seconds (might be float)
Returns
-------
df_d: DataFrame
dataframe, where the packet ts were corrected to the reception times, which would occur
without the delay
"""
# copy the dataframe
df_d = df.copy()
# get unique anchor ids
anchor_ids = df.an_id.unique()
# for each anchor search for delayed packets and distribute them
for an_id in anchor_ids:
mask_an_id = df.an_id == an_id
uts = df[mask_an_id].ts.unique()
for i in range(uts.size):
ts = df[mask_an_id & (df.ts == uts[i])]
an_sqns = ts.an_sqn.unique()
            # if packets with different anchor sqns share this timestamp, they arrived in a delayed batch
if an_sqns.size > 1:
# find last properly received packet
pi = 1
while df[mask_an_id & (df.ts == uts[i - pi])].an_sqn.unique().size > 1:
pi = pi + 1
prev_ts = uts[i - pi]
prev_an_sqn = df_d[(df_d.an_id == an_id) & (df_d.ts == uts[i - pi])].an_sqn.values[0]
# correct timestamps
tse = distribute_packet_batch(ts.ts, ts.an_sqn, prev_ts, prev_an_sqn)
                df_d.loc[(df_d.an_id == an_id) & (df_d.ts == uts[i]), 'ts'] = tse
return df_d
def distribute_packet_batch(ts, an_sqn, ts_p, an_sqn_p):
"""Correct timestamps of the packets, which were received in a batch due to delay introduced in the WiFi interface.
Parameters
----------
ts: array_like
timestamps of packets received in a batch [in seconds]
an_sqn: array_like
anchor sqns of packets received in a batch [0-255]
ts_p: float
the timestamp of the last properly received packet
an_sqn_p: int
the anchor sqn of the last properly received packet
Returns
-------
tse: ndarray
timestamps corrected to the reception times, which would occur without the delay
"""
# empty list for collected packets
tse = []
for t, ans in zip(ts, an_sqn):
# check if anchor sqn is higher than the previous one or the counter has turned
if ans >= an_sqn_p:
te = ts_p + ans - an_sqn_p
else:
te = ts_p + (256 + ans - an_sqn_p)
tse.append(te)
return np.array(tse)
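# Worked example (added note): if the last properly received packet had ts_p = 100 s and
# an_sqn_p = 250, a delayed batch with an_sqn values [251, 252, 0] is re-timestamped to
# [101, 102, 106] s, because the 8-bit anchor sqn wraps around after 255.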
def rearrange_timestamps_ble(m_ble, tag_id, packet_rate, distribute_delayed=False):
"""Change timestamps values so that the consecutive packets sent at different times do not have the same ts.
The timestamps are changed as follows: \
new_ts = ts + 1/packet_rate * N \
where N is the sequential number of BLE packet inside WiFi frame.
Parameters
----------
m_ble: DataFrame
dataframe with measurement results
tag_id: int
tag identifier
packet_rate: float
packet rate set in the systems [packets per second]
distribute_delayed: bool, optional
if True call distribute_packets_ionis
Returns
-------
m_b: DataFrame
Input m_ble DataFrame with rearranged timestamps.
"""
# filter tag id
m_b = m_ble[m_ble.tag_id == tag_id]
if distribute_delayed: # distribute delayed packets
m_b = distribute_packets_ionis(m_b)
# group and bin by BLE ts
grouped = m_b.groupby(by=['ts', 'an_id', 'an_sqn', 'tag_id'])
bins = []
for n, g in grouped:
bins.append(pd.cut(g.ble_ts, packet_rate, labels=False))
m_b['bin'] = pd.concat(bins)
# group and average power per BLE receiver
grouped = m_b.groupby(by=['ts', 'an_id', 'tag_id', 'bin'])
m_b = grouped.agg({'rssi': log_mean})
m_b = m_b.reset_index()
# get ts with 1/rate
m_b['ts'] = m_b['ts'] + m_b['bin'] / packet_rate
return m_b
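# Example (added note): with packet_rate = 5, five BLE packets that arrived inside a single
# WiFi frame stamped ts = 10 s are spread to 10.0, 10.2, 10.4, 10.6 and 10.8 s, i.e. one
# packet per 1/packet_rate interval.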
def rearrange_timestamps_uwb(m_uwb, tag_id, packet_rate, distribute_delayed=False):
"""Change timestamps values so that the consecutive packets sent at different times do not have the same ts.
The timestamps are changed as follows: \
new_ts = ts + 1/packet_rate * N \
where N is the sequential number of UWB packet inside WiFi frame.
Parameters
----------
m_uwb: DataFrame
dataframe with measurement results
tag_id: int
tag identifier
packet_rate: float
packet rate set in the systems [packets per second]
distribute_delayed: bool, optional
if True call distribute_packets_ionis
Returns
-------
m_u: DataFrame
Input m_uwb DataFrame with rearranged timestamps.
"""
# filter tag id
m_u = m_uwb[m_uwb.tag_id == tag_id].copy()
if distribute_delayed: # distribute delayed packets
m_u = distribute_packets_ionis(m_u)
# group and bin by reception ts (in this case toa value)
grouped = m_u.groupby(by=['ts', 'an_sqn'])
bins = []
for n, g in grouped:
bins.append(pd.cut(g.toa, packet_rate, labels=False))
m_u['bin'] = pd.concat(bins)
# get ts with 1/rate
m_u['ts'] = m_u['ts'] + m_u['bin'] / packet_rate
return m_u
def measurement_array(m_df, mtype, data_frame=False):
"""Create measurement array [ts, meas values...]
Parameters
----------
m_df: DataFrame
measurement dataframe
mtype: str
measurement type: 'ble', 'uwb'
data_frame: bool
return dataframe, None tuple if true
Returns
-------
array: ndarray
measurement array in format [ts, mx, my, mz ...]
an_ids: ndarray
anchor_ids: [x,y,z ...]
df: DataFrame, optional
measurement array stored as dataframe (returned when data_frame==True)
"""
if mtype == 'uwb':
m = m_df[['ts', 'uwb_sqn', 'toa', 'an_id']].copy()
elif mtype == 'ble':
m = m_df[['ts', 'rssi', 'an_id']].copy()
else:
print("Unknown type")
return None, None
df = None
# get unique anchor ids
anchor_ids = np.sort(m.an_id.unique())
# create array
if mtype == 'uwb':
for i in anchor_ids:
mp = m[m.an_id == i].rename(columns={'toa': 'toa_' + str(i)}).drop(columns='an_id')
if df is None:
df = mp
else:
df = df.merge(mp, how='outer', on=['ts', 'uwb_sqn'])
df = df.sort_values(['ts', 'uwb_sqn'], ascending=[True, True]).reset_index(drop=True)
df = df.drop(columns='uwb_sqn')
elif mtype == 'ble':
for i in anchor_ids:
mp = m[m.an_id == i].rename(columns={'rssi': 'rssi_' + str(i)}).drop(columns='an_id')
if df is None:
df = mp
else:
df = df.merge(mp, how='outer', on=['ts'])
df = df.sort_values(['ts'], ascending=[True]).reset_index(drop=True)
array = df.values
anchor_ids = np.r_[0, anchor_ids] # add 0 for ts column
if data_frame:
return array, anchor_ids, df
return array, anchor_ids
def hybrid_array(dfs, on='ts', how='outer'):
""" Create a hybrid measurement array
Parameters
----------
dfs: iterable
DataFrames which would be merged into a hybrid frame
on: str, default: 'ts'
on which column the frames will be merged
how: str, default: 'outer'
how the frames will be merged
Returns
-------
m: ndarray
measurement array in format [ts, results...]
m_type: ndarray
type of data in each of the columns e.g. ['ts', 'rssi', 'toa]
m_id: ndarray
anchor ids of the columns df. Default id is 0 - for 'ts' and other parameter
not associated with any particular anchor.
df: DataFrame
hybrid DataFrame
"""
df = dfs[0]
for d in dfs[1:]:
df = df.merge(d, on=on, how=how)
m_type= np.array([x.split('_')[0] for x in df.columns[:]])
m_id= np.array([x.split('_')[1] if '_' in x else 0 for x in df.columns[:] ]).astype('int')
return df.values, m_type, m_id, df
def log_mean(v_db, axis=None):
""" Calculate average for values in log scale by converting to linear and back to log.
Parameters
----------
v_db: ndarray
values in log scale
axis: {int, None}, optional
axis along which the mean will be calculated
Returns
-------
avg: {ndarray, double}
mean value
"""
v_lin = 10 ** (v_db / 10) # Power in mW
l_mean = np.nanmean(v_lin, axis=axis)
db_mean = 10 * np.log10(l_mean)
return db_mean
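# Worked example (added note): log_mean(np.array([-90.0, -80.0])) converts to 1e-9 mW and
# 1e-8 mW, averages them to 5.5e-9 mW and returns 10*log10(5.5e-9) ~ -82.6 dBm, instead of
# the naive arithmetic mean of -85 dBm.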
|
[
"pandas.DataFrame",
"pandas.cut",
"numpy.rint",
"numpy.array",
"numpy.linalg.norm",
"numpy.log10",
"pandas.concat",
"numpy.nanmean"
] |
[((1886, 1903), 'numpy.array', 'np.array', (['ble_res'], {}), '(ble_res)\n', (1894, 1903), True, 'import numpy as np\n'), ((1918, 1935), 'numpy.array', 'np.array', (['uwb_res'], {}), '(uwb_res)\n', (1926, 1935), True, 'import numpy as np\n'), ((7188, 7201), 'numpy.array', 'np.array', (['tse'], {}), '(tse)\n', (7196, 7201), True, 'import numpy as np\n'), ((8358, 8373), 'pandas.concat', 'pd.concat', (['bins'], {}), '(bins)\n', (8367, 8373), True, 'import pandas as pd\n'), ((9834, 9849), 'pandas.concat', 'pd.concat', (['bins'], {}), '(bins)\n', (9843, 9849), True, 'import pandas as pd\n'), ((13234, 13262), 'numpy.nanmean', 'np.nanmean', (['v_lin'], {'axis': 'axis'}), '(v_lin, axis=axis)\n', (13244, 13262), True, 'import numpy as np\n'), ((1979, 2123), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ble_res', 'columns': "['ts', 'an_id', 'an_sqn', 'an_p', 'rx_id', 'tag_id', 'ble_ts', 'rssi',\n 'pres', 'volt', 'steps', 'alert']"}), "(data=ble_res, columns=['ts', 'an_id', 'an_sqn', 'an_p',\n 'rx_id', 'tag_id', 'ble_ts', 'rssi', 'pres', 'volt', 'steps', 'alert'])\n", (1991, 2123), True, 'import pandas as pd\n'), ((2687, 2789), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'uwb_res', 'columns': "['ts', 'an_id', 'an_sqn', 'rx_id', 'tag_id', 'uwb_sqn', 'toa']"}), "(data=uwb_res, columns=['ts', 'an_id', 'an_sqn', 'rx_id',\n 'tag_id', 'uwb_sqn', 'toa'])\n", (2699, 2789), True, 'import pandas as pd\n'), ((13282, 13298), 'numpy.log10', 'np.log10', (['l_mean'], {}), '(l_mean)\n', (13290, 13298), True, 'import numpy as np\n'), ((3249, 3292), 'numpy.rint', 'np.rint', (['((ble_df.ts - ts_min).values / 1000)'], {}), '((ble_df.ts - ts_min).values / 1000)\n', (3256, 3292), True, 'import numpy as np\n'), ((3317, 3360), 'numpy.rint', 'np.rint', (['((uwb_df.ts - ts_min).values / 1000)'], {}), '((uwb_df.ts - ts_min).values / 1000)\n', (3324, 3360), True, 'import numpy as np\n'), ((4524, 4576), 'numpy.linalg.norm', 'np.linalg.norm', (['(an_f[m_uwb.an_id - 1] - an_r)'], {'axis': '(1)'}), '(an_f[m_uwb.an_id - 1] - an_r, axis=1)\n', (4538, 4576), True, 'import numpy as np\n'), ((8296, 8339), 'pandas.cut', 'pd.cut', (['g.ble_ts', 'packet_rate'], {'labels': '(False)'}), '(g.ble_ts, packet_rate, labels=False)\n', (8302, 8339), True, 'import pandas as pd\n'), ((9757, 9797), 'pandas.cut', 'pd.cut', (['g.toa', 'packet_rate'], {'labels': '(False)'}), '(g.toa, packet_rate, labels=False)\n', (9763, 9797), True, 'import pandas as pd\n'), ((3454, 3497), 'numpy.rint', 'np.rint', (['((uwb_df.ts - ts_min).values / 1000)'], {}), '((uwb_df.ts - ts_min).values / 1000)\n', (3461, 3497), True, 'import numpy as np\n'), ((3663, 3706), 'numpy.rint', 'np.rint', (['((ble_df.ts - ts_min).values / 1000)'], {}), '((ble_df.ts - ts_min).values / 1000)\n', (3670, 3706), True, 'import numpy as np\n')]
|
# A collection of various tools to help estimate and analyze the tail exponent.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from FatTailedTools.plotting import plot_survival_function
from FatTailedTools.survival import get_survival_function
def fit_alpha_linear(series, tail_start_mad=2.5, plot=True, return_loc=False):
'''
Estimates the tail parameter by fitting a linear function to the log-log tail of the survival function.
'tail_start_mad' defines where the tail starts in terms of the mean absolute deviation (typically between 2-4 MADs).
    The estimated location of the Pareto (with the estimated tail exponent) will also be returned if 'return_loc' is True.
'''
# Get survival function values
if plot:
survival, ax = plot_survival_function(series, tail_zoom=False)
else:
survival = get_survival_function(series)
# Estimate tail start (= everything beyond 'tail_start_mad' mean absolute deviations)
tail_start = get_tail_start(series, tail_start_mad)
# Get tail
survival_tail = np.log10(survival.loc[survival['Values'] >= tail_start].iloc[:-1])
# Fit the tail
tail_fit = np.polyfit(survival_tail['Values'], survival_tail['P'], 1)
lin_func = np.poly1d(tail_fit)
# Get tail parameter and location/scale
tail = -tail_fit[0]
location = (1 - tail_fit[1]) / tail_fit[0]
# Get MSE (mean squared error)
    mse_error = np.mean(np.square(np.subtract(lin_func(survival_tail['Values']), survival_tail['P'])))
# Plot the fit
if plot:
ax.plot(10**survival_tail['Values'], 10**lin_func(survival_tail['Values']), 'r');
ax.legend(['Fit (MSE = {:.2f})'.format(mse_error), 'Data']);
plt.title('Tail exponent fitted to tail (alpha = {:.2f}, loc = {:.2f})'.format(tail, location));
# Construct result
    result = (tail, location) if return_loc else tail
return result
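# Minimal usage sketch (added for illustration; assumes a synthetic heavy-tailed sample):
#
#     sample = pd.Series(np.random.standard_t(df=3, size=10000), name='returns')
#     alpha, loc = fit_alpha_linear(sample.abs(), plot=False, return_loc=True)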
def get_tail_start(series, tail_start_mad):
'''
Returns the start of the tail of 'series' based on 'tail_start_mad'.
'tail_start_mad' defines where the tail starts in terms of the mean absolute deviation (typically between 2-4 MADs).
'''
return tail_start_mad * series.abs().mad()
from scipy.stats import t
def fit_alpha(series, plot=True):
'''
    Estimates the tail parameter by fitting a Student-t distribution to the data.
If the passed data is from a one-sided distribution, it will first be mirrored at 0 to make it symmetrical.
'''
# Is the data only one-sided?
if (series.dropna() < 0).sum() * (series.dropna() > 0).sum() == 0:
# ... then construct a two-sided distribution
series = pd.concat([-series.dropna().abs(), series.dropna().abs()])
# Fit the distribution
params = t.fit(series.dropna())
if plot:
_, ax = plot_survival_function(series, distribution=(t, params));
plt.title('Tail exponent estimated from fitting (alpha = {:.2f})'.format(params[0]));
return params[0]
import seaborn as sns
def fit_alpha_subsampling(series, frac=0.7, n_subsets=100, n_tail_start_samples=1, plot=True, return_loc=False):
'''
Estimates the tail parameter by fitting a linear function to the log-log tail of the survival function.
Uses 'n_subsets' subsamples to average results over subsets with a fraction 'frac' of samples kept.
If return_loc is True, also returns where the tail of the distribution is assumed to start (using random subsampling with 'n_tail_start_samples' samples per subset).
'''
# Set up lists
_results_both = []
_results_left = []
_results_right = []
# Subsample and fit
for subsample in [series.sample(frac=frac) for i in range(n_subsets)]:
for tail_start_mad in np.random.normal(2.5, 0.5, n_tail_start_samples):
_results_both.append(subsample.abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
_results_left.append(subsample.where(subsample < 0).abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
_results_right.append(subsample.where(subsample >= 0).abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
# Assemble into DataFrame
alphas = pd.DataFrame.from_records(np.hstack([_results_both, _results_left, _results_right]), columns=pd.MultiIndex.from_product([['Both', 'Left', 'Right'], ['Tail Exponent', 'Location']]))
# Plot
if plot:
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle('Tail exponents for {} with random subsamples'.format(series.name))
for idx, name in enumerate(['Both', 'Left', 'Right']):
sns.histplot(data=alphas[(name, 'Tail Exponent')], color=['C7', 'C3', 'C0'][idx], stat='probability', bins=10, ax=ax[idx]);
ax[idx].set_title('Median = {:.1f} | Mean = {:.1f} ({})'.format(alphas[(name, 'Tail Exponent')].median(), alphas[(name, 'Tail Exponent')].mean(), ['both', 'left', 'right'][idx]));
ax[idx].set_xlabel('Tail exponent ({})'.format(['both', 'left', 'right'][idx]));
plt.show();
# Also plot locations if return_loc
if return_loc:
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle('Locations for {} with random subsamples'.format(series.name))
for idx, name in enumerate(['Both', 'Left', 'Right']):
sns.histplot(data=alphas[(name, 'Location')], color=['C7', 'C3', 'C0'][idx], stat='probability', bins=10, ax=ax[idx]);
ax[idx].set_title('Median = {:.1f} | Mean = {:.1f} ({})'.format(alphas[(name, 'Location')].median(), alphas[(name, 'Location')].mean(), ['both', 'left', 'right'][idx]));
ax[idx].set_xlabel('Location ({})'.format(['both', 'left', 'right'][idx]));
plt.show();
# Construct result
result = alphas if return_loc else alphas.loc[:, (slice(None), 'Tail Exponent')]
return result
|
[
"numpy.poly1d",
"seaborn.histplot",
"matplotlib.pyplot.show",
"numpy.polyfit",
"FatTailedTools.survival.get_survival_function",
"numpy.hstack",
"pandas.MultiIndex.from_product",
"FatTailedTools.plotting.plot_survival_function",
"numpy.random.normal",
"numpy.log10",
"matplotlib.pyplot.subplots"
] |
[((1093, 1159), 'numpy.log10', 'np.log10', (["survival.loc[survival['Values'] >= tail_start].iloc[:-1]"], {}), "(survival.loc[survival['Values'] >= tail_start].iloc[:-1])\n", (1101, 1159), True, 'import numpy as np\n'), ((1199, 1257), 'numpy.polyfit', 'np.polyfit', (["survival_tail['Values']", "survival_tail['P']", '(1)'], {}), "(survival_tail['Values'], survival_tail['P'], 1)\n", (1209, 1257), True, 'import numpy as np\n'), ((1273, 1292), 'numpy.poly1d', 'np.poly1d', (['tail_fit'], {}), '(tail_fit)\n', (1282, 1292), True, 'import numpy as np\n'), ((795, 842), 'FatTailedTools.plotting.plot_survival_function', 'plot_survival_function', (['series'], {'tail_zoom': '(False)'}), '(series, tail_zoom=False)\n', (817, 842), False, 'from FatTailedTools.plotting import plot_survival_function\n'), ((872, 901), 'FatTailedTools.survival.get_survival_function', 'get_survival_function', (['series'], {}), '(series)\n', (893, 901), False, 'from FatTailedTools.survival import get_survival_function\n'), ((2873, 2929), 'FatTailedTools.plotting.plot_survival_function', 'plot_survival_function', (['series'], {'distribution': '(t, params)'}), '(series, distribution=(t, params))\n', (2895, 2929), False, 'from FatTailedTools.plotting import plot_survival_function\n'), ((3831, 3879), 'numpy.random.normal', 'np.random.normal', (['(2.5)', '(0.5)', 'n_tail_start_samples'], {}), '(2.5, 0.5, n_tail_start_samples)\n', (3847, 3879), True, 'import numpy as np\n'), ((4419, 4476), 'numpy.hstack', 'np.hstack', (['[_results_both, _results_left, _results_right]'], {}), '([_results_both, _results_left, _results_right])\n', (4428, 4476), True, 'import numpy as np\n'), ((4638, 4673), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (4650, 4673), True, 'import matplotlib.pyplot as plt\n'), ((5299, 5309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5307, 5309), True, 'import matplotlib.pyplot as plt\n'), ((4486, 4576), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Both', 'Left', 'Right'], ['Tail Exponent', 'Location']]"], {}), "([['Both', 'Left', 'Right'], ['Tail Exponent',\n 'Location']])\n", (4512, 4576), True, 'import pandas as pd\n'), ((4869, 4994), 'seaborn.histplot', 'sns.histplot', ([], {'data': "alphas[name, 'Tail Exponent']", 'color': "['C7', 'C3', 'C0'][idx]", 'stat': '"""probability"""', 'bins': '(10)', 'ax': 'ax[idx]'}), "(data=alphas[name, 'Tail Exponent'], color=['C7', 'C3', 'C0'][\n idx], stat='probability', bins=10, ax=ax[idx])\n", (4881, 4994), True, 'import seaborn as sns\n'), ((5418, 5453), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (5430, 5453), True, 'import matplotlib.pyplot as plt\n'), ((6090, 6100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6098, 6100), True, 'import matplotlib.pyplot as plt\n'), ((5664, 5783), 'seaborn.histplot', 'sns.histplot', ([], {'data': "alphas[name, 'Location']", 'color': "['C7', 'C3', 'C0'][idx]", 'stat': '"""probability"""', 'bins': '(10)', 'ax': 'ax[idx]'}), "(data=alphas[name, 'Location'], color=['C7', 'C3', 'C0'][idx],\n stat='probability', bins=10, ax=ax[idx])\n", (5676, 5783), True, 'import seaborn as sns\n')]
|
import types
import warnings
from collections.abc import Iterable
from inspect import getfullargspec
import numpy as np
class _DatasetApply:
"""
    Helper class to apply a function to
`pysprint.core.bases.dataset.Dataset` objects.
"""
def __init__(
self,
obj,
func,
axis=None,
args=None,
kwargs=None
):
self.obj = obj
self.args = args or ()
self.kwargs = kwargs or {}
self.f = func
self.axis = axis
if self.axis == "x" or self.axis == 0:
self.target = "x"
elif self.axis == "y" or self.axis == 1:
self.target = "y"
else:
raise ValueError("Axis must be 'x', 'y', '0' or '1'.")
self.shape = len(getattr(self.obj, self.target))
def perform(self):
"""
Apply the specified function.
"""
if isinstance(self.f, str):
func = getattr(self.obj, self.f)
sig = getfullargspec(func)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
# Let's assume we don't mess up the shape internally
func(*self.args, **self.kwargs)
return self.obj # we need to return this because of `inplacify` deco.
elif isinstance(self.f, np.ufunc):
target = getattr(self.obj, self.target)
retval = self.f(target, *self.args, **self.kwargs)
value = self._validate(retval)
setattr(self.obj, self.target, value)
if self.target == "y":
setattr(self.obj, "y_norm", value)
return value
elif isinstance(self.f, types.FunctionType):
sig = getfullargspec(self.f)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
# we can safely vectorize it here
self.f = np.vectorize(self.f)
target = getattr(self.obj, self.target)
retval = self.f(target, *self.args, **self.kwargs)
value = self._validate(retval)
setattr(self.obj, self.target, value)
if self.target == "y":
setattr(self.obj, "y_norm", value)
return value
def _validate(self, val):
if isinstance(val, (Iterable, np.ndarray)):
val = np.asarray(val, dtype=np.float64)
if val.ndim != 1:
val = np.concatenate(val).ravel()
warnings.warn("Function return value was flattened.")
if len(val) != len(np.unique(val)):
if len(np.unique(val)) == self.shape:
return val
else:
if self.target == "x":
raise ValueError(
f"Function returned duplicated values which is not allowed when"
" modifying the x axis. After filtering to unique values "
f"a {len(np.unique(val))}-length array was produced, "
f"but {self.shape} was expected."
)
return val
if len(val) != self.shape:
retval = self._broadcast(val)
return retval
return val
else:
raise TypeError("Function should return a number or Iterable type.")
def _broadcast(self, val):
if len(val) > self.shape:
return val[:self.shape]
elif len(val) < self.shape:
if not self.shape % len(val) == 0:
raise ValueError("Cannot broadcast safely to the desired shape.")
else:
                # `%` is always 0 here (checked above) and would yield an empty array;
                # integer division gives the repeat count that matches self.shape
                return np.repeat(val, self.shape // len(val))
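# --- Illustrative sketch (not part of pysprint): how two of the dispatch branches behave. ---
# `_FakeDataset` is a hypothetical stand-in for pysprint.core.bases.dataset.Dataset,
# assumed only to expose `x`, `y` and `y_norm` arrays.
if __name__ == "__main__":
    class _FakeDataset:
        def __init__(self):
            self.x = np.linspace(1.0, 10.0, 10)
            self.y = np.ones(10)
            self.y_norm = self.y
    ds = _FakeDataset()
    # np.ufunc branch: np.sqrt is applied directly to the selected axis.
    _DatasetApply(ds, np.sqrt, axis="x").perform()
    # plain-function branch: the callable is vectorized before being applied to y (and y_norm).
    _DatasetApply(ds, lambda v: v + 1.0, axis="y").perform()
    print(ds.x[:3], ds.y[:3])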
|
[
"numpy.vectorize",
"inspect.getfullargspec",
"numpy.concatenate",
"numpy.asarray",
"warnings.warn",
"numpy.unique"
] |
[((1015, 1035), 'inspect.getfullargspec', 'getfullargspec', (['func'], {}), '(func)\n', (1029, 1035), False, 'from inspect import getfullargspec\n'), ((2363, 2396), 'numpy.asarray', 'np.asarray', (['val'], {'dtype': 'np.float64'}), '(val, dtype=np.float64)\n', (2373, 2396), True, 'import numpy as np\n'), ((2494, 2547), 'warnings.warn', 'warnings.warn', (['"""Function return value was flattened."""'], {}), "('Function return value was flattened.')\n", (2507, 2547), False, 'import warnings\n'), ((1748, 1770), 'inspect.getfullargspec', 'getfullargspec', (['self.f'], {}), '(self.f)\n', (1762, 1770), False, 'from inspect import getfullargspec\n'), ((1921, 1941), 'numpy.vectorize', 'np.vectorize', (['self.f'], {}), '(self.f)\n', (1933, 1941), True, 'import numpy as np\n'), ((2580, 2594), 'numpy.unique', 'np.unique', (['val'], {}), '(val)\n', (2589, 2594), True, 'import numpy as np\n'), ((2450, 2469), 'numpy.concatenate', 'np.concatenate', (['val'], {}), '(val)\n', (2464, 2469), True, 'import numpy as np\n'), ((2620, 2634), 'numpy.unique', 'np.unique', (['val'], {}), '(val)\n', (2629, 2634), True, 'import numpy as np\n'), ((3006, 3020), 'numpy.unique', 'np.unique', (['val'], {}), '(val)\n', (3015, 3020), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 13:22:41 2022
@author: sampasmann
"""
import numpy as np
from time import process_time
def reeds_data(Nx=1000, LB=-8.0, RB=8.0):
G = 1 # number of energy groups
sigt = np.empty((Nx,G))
sigs = np.empty((Nx,G,G))
source = np.empty((Nx,G))
dx = (RB-LB)/Nx
xspan = np.linspace(LB+dx/2, RB-dx/2, Nx)
count = 0
for x in xspan:
if (x < -6):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 0.0
elif (-6 < x) and (x < -5):
sigt[count,:] = 1.0
sigs[count,:] = 0.9
source[count,:] = 1.0
elif (-5 < x < -3): #vacuum region 1
sigt[count,:] = 0.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (-3 < x < -2):
sigt[count,:] = 5.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (-2 < x < 2):
sigt[count,:] = 50.0
sigs[count,:,:] = 0.0
source[count,:] = 50.0
elif (2 < x < 3):
sigt[count,:] = 5.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (3 < x < 5): # vacuum region 2
sigt[count,:] = 0.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (5 < x < 6):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 1.0
elif (6 < x):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 0.0
count += 1
siga = (sigt - sigs)
return sigt, sigs, siga, source, G
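# --- Illustrative shape check (not part of the original file) ---
# With G = 1 energy group, reeds_data returns (Nx, G) cross sections and an (Nx, G, G)
# scattering matrix. Note (a point worth verifying downstream): because sigt is (Nx, G)
# and sigs is (Nx, G, G), `siga = sigt - sigs` broadcasts to shape (Nx, Nx, G).
def _check_reeds_shapes(Nx=16):
    sigt, sigs, siga, source, G = reeds_data(Nx)
    assert sigt.shape == (Nx, G)
    assert sigs.shape == (Nx, G, G)
    assert source.shape == (Nx, G)
    return sigt.shape, sigs.shape, siga.shape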
if __name__ == "__main__":
"""
start = process_time()
sigt, sigs, siga, source, G = reeds_data(1)
stop = process_time()
time1 = stop-start
print("Elapsed time with Compliation:", time1)
"""
start = process_time()
sigt, sigs, siga, source, G = reeds_data(100)
stop = process_time()
time2 = stop-start
#print("Elapsed time After Compliation:", time2)
# print("A {}x speed up".format(round(time1/time2)))
|
[
"numpy.empty",
"time.process_time",
"numpy.linspace"
] |
[((251, 268), 'numpy.empty', 'np.empty', (['(Nx, G)'], {}), '((Nx, G))\n', (259, 268), True, 'import numpy as np\n'), ((279, 299), 'numpy.empty', 'np.empty', (['(Nx, G, G)'], {}), '((Nx, G, G))\n', (287, 299), True, 'import numpy as np\n'), ((311, 328), 'numpy.empty', 'np.empty', (['(Nx, G)'], {}), '((Nx, G))\n', (319, 328), True, 'import numpy as np\n'), ((360, 401), 'numpy.linspace', 'np.linspace', (['(LB + dx / 2)', '(RB - dx / 2)', 'Nx'], {}), '(LB + dx / 2, RB - dx / 2, Nx)\n', (371, 401), True, 'import numpy as np\n'), ((1922, 1936), 'time.process_time', 'process_time', ([], {}), '()\n', (1934, 1936), False, 'from time import process_time\n'), ((1998, 2012), 'time.process_time', 'process_time', ([], {}), '()\n', (2010, 2012), False, 'from time import process_time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 21:52:54 2019
@author: USER
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, roc_auc_score,f1_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from utilities import load_census_data
from fairness_metrics import computeEDFforData
from DNN_model import NeuralNet,training_fair_model
#The function below ensures that we seed all random generators with the same value to get reproducible results
def set_random_seed(state=1):
gens = (np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
for set_state in gens:
set_state(state)
RANDOM_STATE = 1
set_random_seed(RANDOM_STATE)
#%% data loading and pre-processing
# load the train dataset
X, y, S = load_census_data('data/adult.data',1)
# Define all the "intersectional groups" to maintain stochastic update of p(y|S) correctly among different batches
intersectionalGroups = np.unique(S,axis=0) # all intersecting groups, i.e. black-women, white-man etc
# load the test dataset
test_X, test_y, test_S = load_census_data('data/adult.test',0)
# scale/normalize train & test data and shuffle train data
scaler = StandardScaler().fit(X)
scale_df = lambda df, scaler: pd.DataFrame(scaler.transform(df), columns=df.columns, index=df.index)
X = X.pipe(scale_df, scaler)
test_X = test_X.pipe(scale_df, scaler)
X, y, S = sk.utils.shuffle(X, y, S, random_state=0)
X = X.values
y = y.values
S = S.values
test_X = test_X.values
test_y = test_y.values
test_S = test_S.values
X, dev_X, y, dev_y, S, dev_S = train_test_split(X, y, S, test_size=0.30,stratify=y, random_state=7)
#%%
# deep neural network using pytorch
trainData = torch.from_numpy(X)
trainLabel = torch.from_numpy(y.reshape((-1,1)))
#devData = torch.from_numpy(devData)
testData = torch.from_numpy(test_X)
devData = torch.from_numpy(dev_X)
# hyperparameters
input_size = trainData.size()[1]
hidden1 = 16
hidden2 = 16
hidden3 = 16
output_size = 1
num_epochs = 500
#burnIn = 50
stepSize = 0.1
learning_rate = 0.001
burnIn = 50
epsilonBase = torch.tensor(0.0) # To protect the 80%-rule in intersectional setting, set this variable to: - log(0.8) = 0.2231
#%%
import sys
sys.stdout=open("batch_DF_EPS0.txt","w")
#%% training DNN model with fairness constraint
# Train a fair classifier
lamda = torch.tensor(0.01) # λ is a hyper-parameter that balances between the prediction loss and fairness.
# Select λ for fair learning algorithms via rigorous grid search on the development set. See paper for details.
DF_Model = training_fair_model(input_size,hidden1,hidden2,hidden3,output_size,learning_rate,num_epochs,trainData,
trainLabel,S,intersectionalGroups,burnIn,stepSize,epsilonBase,lamda)
#%%
# Validate the model
with torch.no_grad():
devData = Variable(devData.float())
predictProb = DF_Model(devData)
predicted = ((predictProb>0.5).numpy()).reshape((-1,))
Accuracy = sum(predicted == dev_y)/len(dev_y)
# Save results
predictProb = (predictProb.numpy()).reshape((-1,))
print(f"DF classifier dev accuracy: {Accuracy: .3f}")
aucScore = roc_auc_score(dev_y,predictProb)
print(f"DF classifier dev ROC AUC: {aucScore: .3f}")
nn_f1 = f1_score(dev_y,predicted)
print(f"DF classifier dev F1 score: {nn_f1: .2f}")
epsilon_hard,epsilon_soft,gamma_hard,gamma_soft,p_rule_hard,p_rule_soft = computeEDFforData(dev_S,predicted,predictProb,intersectionalGroups)
print(f"DF classifier dev epsilon_hard: {epsilon_hard: .3f}")
print(f"DF classifier dev epsilon_soft: {epsilon_soft: .3f}")
print(f"DF classifier dev gamma_hard: {gamma_hard: .3f}")
print(f"DF classifier dev gamma_soft: {gamma_soft: .3f}")
print(f"DF classifier dev p_rule_hard: {p_rule_hard: .3f}")
print(f"DF classifier dev p_rule_soft: {p_rule_soft: .3f}")
#%%
# Test the model
with torch.no_grad():
testData = Variable(testData.float())
predictProb = DF_Model(testData)
predicted = ((predictProb>0.5).numpy()).reshape((-1,))
Accuracy = sum(predicted == test_y)/len(test_y)
# Save results
predictProb = (predictProb.numpy()).reshape((-1,))
print(f"DF_Classifier accuracy: {Accuracy: .3f}")
aucScore = roc_auc_score(test_y,predictProb)
print(f"DF_Classifier ROC AUC: {aucScore: .3f}")
nn_f1 = f1_score(test_y,predicted)
print(f"DF_Classifier F1 score: {nn_f1: .2f}")
epsilon_hard,epsilon_soft,gamma_hard,gamma_soft,p_rule_hard,p_rule_soft = computeEDFforData(test_S,predicted,predictProb,intersectionalGroups)
print(f"DF_Classifier epsilon_hard: {epsilon_hard: .3f}")
print(f"DF_Classifier epsilon_soft: {epsilon_soft: .3f}")
print(f"DF_Classifier gamma_hard: {gamma_hard: .3f}")
print(f"DF_Classifier gamma_soft: {gamma_soft: .3f}")
print(f"DF_Classifier p_rule_hard: {p_rule_hard: .3f}")
print(f"DF_Classifier p_rule_soft: {p_rule_soft: .3f}")
|
[
"torch.tensor",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"utilities.load_census_data",
"sklearn.metrics.roc_auc_score",
"DNN_model.training_fair_model",
"sklearn.metrics.f1_score",
"fairness_metrics.computeEDFforData",
"sklearn.utils.shuffle",
"torch.no_grad",
"numpy.unique",
"torch.from_numpy"
] |
[((971, 1009), 'utilities.load_census_data', 'load_census_data', (['"""data/adult.data"""', '(1)'], {}), "('data/adult.data', 1)\n", (987, 1009), False, 'from utilities import load_census_data\n'), ((1148, 1168), 'numpy.unique', 'np.unique', (['S'], {'axis': '(0)'}), '(S, axis=0)\n', (1157, 1168), True, 'import numpy as np\n'), ((1278, 1316), 'utilities.load_census_data', 'load_census_data', (['"""data/adult.test"""', '(0)'], {}), "('data/adult.test', 0)\n", (1294, 1316), False, 'from utilities import load_census_data\n'), ((1591, 1632), 'sklearn.utils.shuffle', 'sk.utils.shuffle', (['X', 'y', 'S'], {'random_state': '(0)'}), '(X, y, S, random_state=0)\n', (1607, 1632), True, 'import sklearn as sk\n'), ((1778, 1846), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y', 'S'], {'test_size': '(0.3)', 'stratify': 'y', 'random_state': '(7)'}), '(X, y, S, test_size=0.3, stratify=y, random_state=7)\n', (1794, 1846), False, 'from sklearn.model_selection import train_test_split\n'), ((1900, 1919), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (1916, 1919), False, 'import torch\n'), ((2041, 2065), 'torch.from_numpy', 'torch.from_numpy', (['test_X'], {}), '(test_X)\n', (2057, 2065), False, 'import torch\n'), ((2076, 2099), 'torch.from_numpy', 'torch.from_numpy', (['dev_X'], {}), '(dev_X)\n', (2092, 2099), False, 'import torch\n'), ((2300, 2317), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2312, 2317), False, 'import torch\n'), ((2555, 2573), 'torch.tensor', 'torch.tensor', (['(0.01)'], {}), '(0.01)\n', (2567, 2573), False, 'import torch\n'), ((2806, 2998), 'DNN_model.training_fair_model', 'training_fair_model', (['input_size', 'hidden1', 'hidden2', 'hidden3', 'output_size', 'learning_rate', 'num_epochs', 'trainData', 'trainLabel', 'S', 'intersectionalGroups', 'burnIn', 'stepSize', 'epsilonBase', 'lamda'], {}), '(input_size, hidden1, hidden2, hidden3, output_size,\n learning_rate, num_epochs, trainData, trainLabel, S,\n intersectionalGroups, burnIn, stepSize, epsilonBase, lamda)\n', (2825, 2998), False, 'from DNN_model import NeuralNet, training_fair_model\n'), ((3376, 3409), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['dev_y', 'predictProb'], {}), '(dev_y, predictProb)\n', (3389, 3409), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((3470, 3496), 'sklearn.metrics.f1_score', 'f1_score', (['dev_y', 'predicted'], {}), '(dev_y, predicted)\n', (3478, 3496), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((3622, 3692), 'fairness_metrics.computeEDFforData', 'computeEDFforData', (['dev_S', 'predicted', 'predictProb', 'intersectionalGroups'], {}), '(dev_S, predicted, predictProb, intersectionalGroups)\n', (3639, 3692), False, 'from fairness_metrics import computeEDFforData\n'), ((4414, 4448), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['test_y', 'predictProb'], {}), '(test_y, predictProb)\n', (4427, 4448), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((4505, 4532), 'sklearn.metrics.f1_score', 'f1_score', (['test_y', 'predicted'], {}), '(test_y, predicted)\n', (4513, 4532), False, 'from sklearn.metrics import accuracy_score, roc_auc_score, f1_score\n'), ((4654, 4725), 'fairness_metrics.computeEDFforData', 'computeEDFforData', (['test_S', 'predicted', 'predictProb', 'intersectionalGroups'], {}), '(test_S, predicted, predictProb, intersectionalGroups)\n', (4671, 4725), False, 'from fairness_metrics import computeEDFforData\n'), ((3040, 
3055), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3053, 3055), False, 'import torch\n'), ((4077, 4092), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4090, 4092), False, 'import torch\n'), ((1385, 1401), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1399, 1401), False, 'from sklearn.preprocessing import StandardScaler\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 2 13:42:37 2019
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import cycler
def spectral_decay(case = 4,
vname = 'example_0',
plot_type = 'val',
save = False):
## Parameters & Settings
# 0: Barbell
# 1: Tree
# 2: Gaussian Mix
# 3: Hyperuniform Circle
# 4: Hyperuniform Ellipse
## Parameters & Settings
# case = 4 # 0: Barbell; 1: Tree; 2: Gaussian Mix; 3/4: Hyperuniform Circle/Ellipse
# vname = 'example_0' # spectial tag for video file
colormap = cm.viridis # cmap for viz
    # plot_type = 'val' # 'vec': right singular vec embed; 'val': singular val; '3d': 3D
# save = False # Save figure?
save_type = '.pdf'
psi_min = 2 # min right singular vector to plot; 2, 3, ..., 11
psi_max = psi_min + 1 # max right singular vector to plot
psi3d_min = 2 # 2, 3, ..., 11
psi3d_mid = psi3d_min + 1
psi3d_max = psi3d_min + 2
for sig in np.arange(2,12):
if case == 0:
fname = 'barbell'
cdir = 'barbell/'
plot_title = 'Barbell'
elif case == 1:
fname = 'tree'
cdir = 'tree/'
plot_title = 'Tree'
elif case == 2:
fname = 'gauss'
cdir = 'gauss/'
plot_title = 'Gauss'
elif case == 3:
fname = 'hyperuni_circle'
cdir = 'h_circle/'
plot_title = 'Hyperuniform Circle'
elif case == 4:
fname = 'hyperuni_ellipse'
cdir = 'ellipse/'
plot_title = 'Hyperuniform Ellipse'
sname = fname + '_' + vname # save name tag
fname += vname # load name tag
# Get # of Iterations
iter_name = 'dm/'+cdir+'iterations_'+fname+'.npy'
iterations = np.load(iter_name)
# Get Diffusion Maps Spectra
eDM_name = 'dm/'+cdir+'E_'+fname+'.npy'; eDM_sig = np.load(eDM_name)[sig - 2]
# Initialize Specra Lists
ei = []; et = []; eDM = []
# Get Epsilon (shape = (2, #iterations), 0th axis #eps doublings, 1st axis eps)
eps_name = 'dm/'+cdir+'epsilon_list_'+fname+'.npy'; eps_adjust = np.load(eps_name)
# Get Number of Points in Dataset & Color
datasize_name = 'dm/'+cdir+'V_'+fname+'.npy'; N = np.load(datasize_name).shape[0]
C_name = 'dm/'+cdir+'C_'+fname+'.npy'; C = np.load(C_name)
#%%
for i in np.arange(1, 1+iterations):
'''Singular Values (DM for Changing Data, TCDM) & Eigenvalues (DM)'''
pi_name = 'p_i/'+cdir+'Ei_'+str(i)+'_'+fname+'.npy'
pt_name = 'p_t/'+cdir+'Et_'+str(i)+'_'+fname+'.npy'
ei.append([i, np.load(pi_name)[sig - 2]]) # Operator P_i
et.append([i, np.load(pt_name)[sig - 2]]) # Composed Operator P^((t))
eDM.append([i, eDM_sig**i]) # Diffusion Maps Operator P^{t}
if plot_type == 'val':
plt.subplot(311)
plt.plot(np.asarray(ei).T[0], np.asarray(ei).T[1], marker='o', label=r'$P_{\epsilon,i}$', color = 'c')
plt.subplot(312)
plt.plot(np.asarray(et).T[0], np.asarray(et).T[1], marker='o', label=r'$P_{\epsilon}^{(t)}$', color = 'purple')
plt.ylabel(r"$\sigma_{k}$")
plt.subplot(313)
plt.plot(np.asarray(eDM).T[0], np.asarray(eDM).T[1], marker='o', label=r'$P_{\epsilon}^{t}$', color = 'g')
plt.xlabel("Iteration")
# plt.show()
save_dir = 'figs/spectral/'+cdir
save_name = sname+'_sigma'+str(sig)+'_N-'+str(N)
elif plot_type == 'vec':
# Set Singular Vectors
psiDM_name = 'p_i/'+cdir+'Vi_1_'+fname+'.npy'
psiDM = np.load(psiDM_name)
# Generate Figure
plt.title(r'$\psi_{'+str(psi_min)+'}$ & $\psi_{'+str(psi_max)+'}$ for '+plot_title+' (N = '+str(N)+')')
plt.scatter(psiDM[:,psi_min-2], psiDM[:,psi_max-2], c=C, cmap = colormap,
vmin=np.amin(C), vmax=np.amax(C), label=r'$P_{\epsilon}^{t}$')
plt.xlabel(r'$\psi_{'+str(psi_min)+'}$')
plt.ylabel(r'$\psi_{'+str(psi_max)+'}$')
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
plt.show()
save_dir = 'figs/embed/'+cdir
save_name = sname+'_psi'+str(psi_min)+'_psi'+str(psi_max)+'_N-'+str(N)
elif plot_type == '3d':
# Set Singular Vectors and Plot
from mpl_toolkits.mplot3d import Axes3D
psiDM_name = 'p_i/'+cdir+'Vi_1_'+fname+'.npy'
psiDM = np.load(psiDM_name)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.title(r'$\psi_{'+str(psi3d_min)+'}$, $\psi_{'+str(psi3d_mid)+
'}$, & $\psi_{'+str(psi3d_max)+'}$ Embedding of '
+plot_title+' (N = '+str(N)+')')
ax.scatter(psiDM[:, psi3d_min - 2], psiDM[:, psi3d_mid - 2],
psiDM[:, psi3d_max - 2], c=C, cmap=colormap)
ax.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
plt.show()
save_dir = 'figs/embed/'+cdir
save_name = sname+'_psi'+str(psi3d_min)+'_psi'+str(
psi3d_mid)+'_psi'+str(psi3d_max)+'_N-'+str(N)
# Save Figure
if save == True:
save_name = save_dir+save_name+save_type
plt.savefig(save_name, bbox_inches='tight', transparent=True, dpi=300)
return plt.show()
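# --- Illustrative usage sketch (assumes the dm/, p_i/ and p_t/ .npy inputs exist on disk) ---
def _demo_all_plot_types(case=4, vname='example_0'):
    for ptype in ('val', 'vec', '3d'):
        spectral_decay(case=case, vname=vname, plot_type=ptype, save=False)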
if "__name__" == "__main__":
spectral_decay()
# Stray duplicate of the in-function save block: `save`, `save_dir`, `save_name` and
# `save_type` are not defined at module level, so running it would raise NameError.
# if save == True:
#     save_name = save_dir+save_name+save_type
#     plt.savefig(save_name, bbox_inches='tight', transparent=True, dpi=300)
|
[
"matplotlib.pyplot.subplot",
"numpy.load",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.asarray",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1133, 1149), 'numpy.arange', 'np.arange', (['(2)', '(12)'], {}), '(2, 12)\n', (1142, 1149), True, 'import numpy as np\n'), ((6195, 6205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6203, 6205), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2028), 'numpy.load', 'np.load', (['iter_name'], {}), '(iter_name)\n', (2017, 2028), True, 'import numpy as np\n'), ((2409, 2426), 'numpy.load', 'np.load', (['eps_name'], {}), '(eps_name)\n', (2416, 2426), True, 'import numpy as np\n'), ((2627, 2642), 'numpy.load', 'np.load', (['C_name'], {}), '(C_name)\n', (2634, 2642), True, 'import numpy as np\n'), ((2681, 2709), 'numpy.arange', 'np.arange', (['(1)', '(1 + iterations)'], {}), '(1, 1 + iterations)\n', (2690, 2709), True, 'import numpy as np\n'), ((6104, 6174), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': '(300)'}), "(save_name, bbox_inches='tight', transparent=True, dpi=300)\n", (6115, 6174), True, 'import matplotlib.pyplot as plt\n'), ((6372, 6442), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'dpi': '(300)'}), "(save_name, bbox_inches='tight', transparent=True, dpi=300)\n", (6383, 6442), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2151), 'numpy.load', 'np.load', (['eDM_name'], {}), '(eDM_name)\n', (2141, 2151), True, 'import numpy as np\n'), ((3216, 3232), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (3227, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3376), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (3371, 3376), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3540), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma_{k}$"""'], {}), "('$\\\\sigma_{k}$')\n", (3523, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3553, 3569), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (3564, 3569), True, 'import matplotlib.pyplot as plt\n'), ((3701, 3724), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (3711, 3724), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2566), 'numpy.load', 'np.load', (['datasize_name'], {}), '(datasize_name)\n', (2551, 2566), True, 'import numpy as np\n'), ((4045, 4064), 'numpy.load', 'np.load', (['psiDM_name'], {}), '(psiDM_name)\n', (4052, 4064), True, 'import numpy as np\n'), ((4547, 4679), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)', 'right': '(False)', 'left': '(False)', 'labelleft': '(False)'}), "(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False, labelleft=False)\n", (4562, 4679), True, 'import matplotlib.pyplot as plt\n'), ((4725, 4735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4733, 4735), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5137), 'numpy.load', 'np.load', (['psiDM_name'], {}), '(psiDM_name)\n', (5125, 5137), True, 'import numpy as np\n'), ((5173, 5185), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5183, 5185), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5797, 5799), True, 'import matplotlib.pyplot as plt\n'), ((2958, 2974), 'numpy.load', 'np.load', (['pi_name'], {}), '(pi_name)\n', (2965, 2974), True, 'import numpy as np\n'), ((3027, 3043), 'numpy.load', 
'np.load', (['pt_name'], {}), '(pt_name)\n', (3034, 3043), True, 'import numpy as np\n'), ((3254, 3268), 'numpy.asarray', 'np.asarray', (['ei'], {}), '(ei)\n', (3264, 3268), True, 'import numpy as np\n'), ((3275, 3289), 'numpy.asarray', 'np.asarray', (['ei'], {}), '(ei)\n', (3285, 3289), True, 'import numpy as np\n'), ((3398, 3412), 'numpy.asarray', 'np.asarray', (['et'], {}), '(et)\n', (3408, 3412), True, 'import numpy as np\n'), ((3419, 3433), 'numpy.asarray', 'np.asarray', (['et'], {}), '(et)\n', (3429, 3433), True, 'import numpy as np\n'), ((3591, 3606), 'numpy.asarray', 'np.asarray', (['eDM'], {}), '(eDM)\n', (3601, 3606), True, 'import numpy as np\n'), ((3613, 3628), 'numpy.asarray', 'np.asarray', (['eDM'], {}), '(eDM)\n', (3623, 3628), True, 'import numpy as np\n'), ((4359, 4369), 'numpy.amin', 'np.amin', (['C'], {}), '(C)\n', (4366, 4369), True, 'import numpy as np\n'), ((4376, 4386), 'numpy.amax', 'np.amax', (['C'], {}), '(C)\n', (4383, 4386), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import mediapipe as mp
import glob
import os
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
#import os
os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp'
# wait for process: "W016","W017","W018","W019","W023","W024","W025","W026","W028","W029","W033","W035","W036","W037","W038","W040"
# M028 M029 M030 M031 M032 M033 M034 M035 M037 M039 M040 M041 W009 W011 W014 M042 W015
# M012 M013 M022 M026 M027 M031 M037 M041 W014
# W016 W018 W019 W023 W024 W025 W026 W028 W029 W033 W035 W036
# W040 W038 W037
in_path = glob.glob('/data3/MEAD/W036/video/front/*/level_*/0*.mp4')
#in_path = glob.glob('/data3/MEAD/M012/video/front/disgusted/level_2/027.mp4')
#print(in_path)
out_path = []
out_path_initlmk = []
out_path_motion = []
for pid,path in enumerate(in_path):
#print(pid,path)
p,f = os.path.split(path)
na,ext = os.path.splitext(f)
#print(p+"/"+na+"_multiland.npy")
out_path.append(p+"/"+na+"_multiland.npy")
out_path_initlmk.append(p+"/"+na+"_initlmk_multiland.npy")
out_path_motion.append(p+"/"+na+"_motion_multiland.npy")
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
def vis_landmark_on_img(img, shape, linewidth=2):
'''
    Visualize landmarks on an image.
'''
def draw_curve(idx_list, color=(0, 255, 0), loop=False, lineWidth=linewidth):
for i in idx_list:
cv2.line(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]), color, lineWidth)
if (loop):
cv2.line(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]),
(shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth)
draw_curve(list(range(0, 16)), color=(255, 144, 25)) # jaw
draw_curve(list(range(17, 21)), color=(50, 205, 50)) # eye brow
draw_curve(list(range(22, 26)), color=(50, 205, 50))
draw_curve(list(range(27, 35)), color=(208, 224, 63)) # nose
draw_curve(list(range(36, 41)), loop=True, color=(71, 99, 255)) # eyes
draw_curve(list(range(42, 47)), loop=True, color=(71, 99, 255))
draw_curve(list(range(48, 59)), loop=True, color=(238, 130, 238)) # mouth
draw_curve(list(range(60, 67)), loop=True, color=(238, 130, 238))
return img
with mp_face_mesh.FaceMesh(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as face_mesh:
for vid,vpath in enumerate(in_path):
videoReader = cv2.VideoCapture(in_path[vid])
fs = videoReader.get(cv2.CAP_PROP_FPS)
sz = (int(videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)), int(videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
#vw = cv2.VideoWriter('./output/video.mp4',cv2.VideoWriter_fourcc('M','P','E','G'), fs, sz)
        land_res = [] # n_frames * 3 * n_landmarks
motion_res = []
initlmk_res = []
success, frame = videoReader.read()
idx = 0
k = 0
while success:
#print(success)
#print(k)
k += 1
image = frame.copy()
#cv2.imwrite("./imgs/"+str(k)+"_im.png",image)
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
                success, frame = videoReader.read() # read the next frame
continue
face_landmarks = results.multi_face_landmarks[0]
land_loc = []
xlis = []
ylis = []
zlis = []
for lm in face_landmarks.landmark:
x = lm.x * sz[0]
y = lm.y * sz[1]
xlis.append(x)
ylis.append(y)
zlis.append(lm.z)
#print(x,y,lm.z)
land_loc.append(xlis)
land_loc.append(ylis)
land_loc.append(zlis)
land_res.append(land_loc)
if idx == 0 : initlmk_res.append(land_loc)
            # NOTE: land_loc was appended to land_res on the previous line, so the element at
            # index len(land_res)-1 *is* the current frame and this difference is always zero;
            # the previous frame (index -2, for idx > 0) was presumably intended.
            motion_res.append( list( np.array(land_loc) - np.array(land_res[ len(land_res) - 1 ]) ) )
idx += 1
# for face_landmarks in results.multi_face_landmarks:
# mp_drawing.draw_landmarks(
# image=image,
# landmark_list=face_landmarks,
# connections=mp_face_mesh.FACEMESH_CONTOURS,
# landmark_drawing_spec=drawing_spec,
# connection_drawing_spec=drawing_spec)
#cv2.imwrite('./output/video' + str(idx) + '.png', image)
            #vw.write(image) # write the video frame
            success, frame = videoReader.read() # read the next frame
videoReader.release()
#vw.release()
res = np.array(land_res)
np.save(out_path[vid],res)
#np.save(out_path_initlmk[vid],initlmk_res)
#np.save(out_path_motion[vid],motion_res)
print("out:"+out_path[vid])
|
[
"cv2.line",
"numpy.save",
"cv2.cvtColor",
"cv2.VideoCapture",
"numpy.array",
"os.path.splitext",
"glob.glob",
"os.path.split"
] |
[((612, 670), 'glob.glob', 'glob.glob', (['"""/data3/MEAD/W036/video/front/*/level_*/0*.mp4"""'], {}), "('/data3/MEAD/W036/video/front/*/level_*/0*.mp4')\n", (621, 670), False, 'import glob\n'), ((891, 910), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (904, 910), False, 'import os\n'), ((924, 943), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (940, 943), False, 'import os\n'), ((2471, 2501), 'cv2.VideoCapture', 'cv2.VideoCapture', (['in_path[vid]'], {}), '(in_path[vid])\n', (2487, 2501), False, 'import cv2\n'), ((4695, 4713), 'numpy.array', 'np.array', (['land_res'], {}), '(land_res)\n', (4703, 4713), True, 'import numpy as np\n'), ((4722, 4749), 'numpy.save', 'np.save', (['out_path[vid]', 'res'], {}), '(out_path[vid], res)\n', (4729, 4749), True, 'import numpy as np\n'), ((1445, 1545), 'cv2.line', 'cv2.line', (['img', '(shape[i, 0], shape[i, 1])', '(shape[i + 1, 0], shape[i + 1, 1])', 'color', 'lineWidth'], {}), '(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]\n ), color, lineWidth)\n', (1453, 1545), False, 'import cv2\n'), ((1572, 1714), 'cv2.line', 'cv2.line', (['img', '(shape[idx_list[0], 0], shape[idx_list[0], 1])', '(shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1])', 'color', 'lineWidth'], {}), '(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]), (shape[\n idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth)\n', (1580, 1714), False, 'import cv2\n'), ((3157, 3195), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3169, 3195), False, 'import cv2\n'), ((3971, 3989), 'numpy.array', 'np.array', (['land_loc'], {}), '(land_loc)\n', (3979, 3989), True, 'import numpy as np\n')]
|
"""
Performance Comparison with Commercial APIs like Face++, Google, MS and Amazon
"""
import sys
import os
import requests
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
sys.path.append('../')
from config.cfg import cfg
def prepare_test_imgs(type='basic'):
face_with_emt = {}
    manual_annotation_dir = r'E:\DataSet\CV\TreeCNN\RAF-Face/basic\Annotation\manual'  # raw string avoids invalid escape sequences
emotion_label_txt_path = os.path.join(cfg['root'], 'RAF-Face', "%s/EmoLabel/list_patition_label.txt" % type)
    emotion_dict = dict(np.loadtxt(emotion_label_txt_path, dtype=str))  # np.str was removed in NumPy 1.24; builtin str is equivalent here
for _ in os.listdir(manual_annotation_dir):
if _.startswith('test_'):
face_fname = _.replace('_manu_attri', '_aligned').replace('.txt', '.jpg')
face_with_emt[os.path.join(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)] = int(
emotion_dict[face_fname.replace('_aligned', '')].strip()) - 1
return face_with_emt
def facepp(img_path):
"""
Recognition with Face++ Emotion Recognition API
:param img_path:
:return:
"""
apikey = ''
apisecret = ''
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
files = {'image_file': open(img_path, 'rb')}
payload = {'api_key': apikey, 'api_secret': apisecret,
# 'return_landmark': 0, 'face_tokens': 'none',
'return_attributes': 'emotion'}
response = requests.post(url, data=payload, files=files)
if response.status_code == 200:
res_json = response.json()
max_k = ''
max_v = 0
for k, v in res_json['faces'][0]['attributes']['emotion'].items():
if v > max_v:
max_v = v
max_k = k
return max_k
else:
print(response)
return None
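# --- Illustrative stub (hypothetical; mirrors the parsing inside facepp above) ---
# facepp() takes the arg-max over res_json['faces'][0]['attributes']['emotion'], i.e. a dict
# of emotion name -> confidence, so the selection step can be tested offline:
def argmax_emotion(emotion_scores):
    return max(emotion_scores, key=emotion_scores.get)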
if __name__ == '__main__':
img_files = prepare_test_imgs()
print(img_files)
basic_emt_map = {
'surprise': 0,
'fear': 1,
'disgust': 2,
'happiness': 3,
'sadness': 4,
'anger': 5,
'neutral': 6
}
gt = []
pred = []
for imgf, e in img_files.items():
try:
emt = facepp(imgf)
print(emt)
gt.append(e)
pred.append(basic_emt_map[emt])
except:
pass
print('Accuracy of Emotion Recognition: %s' % str(accuracy_score(gt, pred)))
print('Confusion Matrix on FER: ')
print(confusion_matrix(np.array(gt).ravel().tolist(), np.array(pred).ravel().tolist()))
|
[
"sys.path.append",
"sklearn.metrics.accuracy_score",
"numpy.array",
"numpy.loadtxt",
"requests.post",
"os.path.join",
"os.listdir"
] |
[((254, 276), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (269, 276), False, 'import sys\n'), ((481, 568), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/EmoLabel/list_patition_label.txt' % type)"], {}), "(cfg['root'], 'RAF-Face', '%s/EmoLabel/list_patition_label.txt' %\n type)\n", (493, 568), False, 'import os\n'), ((653, 686), 'os.listdir', 'os.listdir', (['manual_annotation_dir'], {}), '(manual_annotation_dir)\n', (663, 686), False, 'import os\n'), ((1480, 1525), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'files': 'files'}), '(url, data=payload, files=files)\n', (1493, 1525), False, 'import requests\n'), ((589, 637), 'numpy.loadtxt', 'np.loadtxt', (['emotion_label_txt_path'], {'dtype': 'np.str'}), '(emotion_label_txt_path, dtype=np.str)\n', (599, 637), True, 'import numpy as np\n'), ((834, 910), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/Image/aligned' % type)", 'face_fname'], {}), "(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)\n", (846, 910), False, 'import os\n'), ((2421, 2445), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['gt', 'pred'], {}), '(gt, pred)\n', (2435, 2445), False, 'from sklearn.metrics import accuracy_score\n'), ((2515, 2527), 'numpy.array', 'np.array', (['gt'], {}), '(gt)\n', (2523, 2527), True, 'import numpy as np\n'), ((2546, 2560), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (2554, 2560), True, 'import numpy as np\n')]
|
from scipy.signal import find_peaks
import numpy as np
import math
def search_peaks(x_data, y_data, height=0.1, distance=10):
prominence = np.mean(y_data)
peak_list = find_peaks(y_data, height=height, prominence=prominence, distance=distance)
peaks = []
for i in peak_list[0]:
peak = (x_data[i], y_data[i])
peaks.append(peak)
return peaks
def search_database_peaks(all_spectrum, height=0.1, distance=10):
peaks_database = {}
for key in list(all_spectrum.keys()):
x_data = all_spectrum[key][0]
y_data = all_spectrum[key][1]
peaks = search_peaks(x_data, y_data, height=height, distance=distance)
peaks_database.update({key: peaks})
return peaks_database
def compare_peaks(peaks_database, peaks, abs_tol=5):
coincide_information = {}
for key in list(peaks_database.keys()):
coincide_list = []
for peak_d in peaks_database[key]:
for peak in peaks:
if math.isclose(peak[0], peak_d[0], abs_tol=abs_tol):
coincide_list.append([peak_d[0], peak[0]])
coincide_information.update(
{key: {'coincide_list': coincide_list, 'coincide_number': [len(peaks_database[key]), len(coincide_list)]}})
return coincide_information
def judge_matter(coincide_information, criterion=0.99):
contain_dict = {}
for key in list(coincide_information.keys()):
coincide_number = coincide_information[key]['coincide_number']
key_criterion = coincide_number[1] / coincide_number[0]
if key_criterion >= criterion:
contain_dict.update({key: key_criterion})
return contain_dict
def classify(x_data, y_data, all_spectrum):
peaks = search_peaks(x_data,y_data)
database_peaks = search_database_peaks(all_spectrum)
print(database_peaks)
compare_result = compare_peaks(database_peaks,peaks)
# pass
# print(compare_result)
return compare_result
    # unreachable (it follows the return above); call judge_matter(compare_result) at the call site instead
    # compare_result = judge_matter(compare_result)
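# --- Illustrative end-to-end sketch on synthetic spectra (not part of the original module) ---
if __name__ == "__main__":
    x = np.linspace(0, 100, 1001)
    y = np.exp(-(x - 30) ** 2 / 2) + 0.8 * np.exp(-(x - 70) ** 2 / 2)
    database = {"reference": (x, y)}
    compare_result = classify(x, y, database)
    print(judge_matter(compare_result))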
|
[
"numpy.mean",
"scipy.signal.find_peaks",
"math.isclose"
] |
[((144, 159), 'numpy.mean', 'np.mean', (['y_data'], {}), '(y_data)\n', (151, 159), True, 'import numpy as np\n'), ((176, 251), 'scipy.signal.find_peaks', 'find_peaks', (['y_data'], {'height': 'height', 'prominence': 'prominence', 'distance': 'distance'}), '(y_data, height=height, prominence=prominence, distance=distance)\n', (186, 251), False, 'from scipy.signal import find_peaks\n'), ((990, 1039), 'math.isclose', 'math.isclose', (['peak[0]', 'peak_d[0]'], {'abs_tol': 'abs_tol'}), '(peak[0], peak_d[0], abs_tol=abs_tol)\n', (1002, 1039), False, 'import math\n')]
|
import pandas as pd
from transformers import BertTokenizer, RobertaTokenizer, AutoTokenizer
import os
import numpy as np
import re
import glob
from nltk import sent_tokenize
from utils import num_tokens
import math
def read_generic_file(filepath):
""" reads any generic text file into
list containing one line as element
"""
text = []
with open(filepath, 'r') as f:
for line in f.read().splitlines():
text.append(line.strip())
return text
def offset_str2list(offset):
return [[int(start_end) for start_end in offset.split(',')] for offset in offset.split(';')]
def offset_decreaseSentOffset(sentOffset, scu_offsets):
return [[start_end[0] - sentOffset, start_end[1] - sentOffset] for start_end in scu_offsets]
def insert_string(string, index, value):
return string[:index] + value + string[index:]
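# --- Illustrative sanity check for the offset helpers above (not used by the pipeline) ---
# A 'docSpanOffsets' string such as "12,20;31,40" uses ';' between span parts and ',' between
# the character start/end of each part (inferred from the two parsers above).
def _offset_helpers_example():
    assert offset_str2list("12,20;31,40") == [[12, 20], [31, 40]]
    assert offset_decreaseSentOffset(10, [[12, 20], [31, 40]]) == [[2, 10], [21, 30]]
    return True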
# the next *four* functions are taken from PreSumm implementation
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
# words = _split_into_words(sentences)
words = sum(sentences, [])
# words = [w for w in words if w not in stopwords]
return _get_ngrams(n, words)
def cal_rouge(evaluated_ngrams, reference_ngrams):
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
if evaluated_count == 0:
precision = 0.0
else:
precision = overlapping_count / evaluated_count
if reference_count == 0:
recall = 0.0
else:
recall = overlapping_count / reference_count
f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
return {"f": f1_score, "p": precision, "r": recall}
def greedy_selection(doc_sent_list, abstract_sent_list, summary_size=1000):
def _rouge_clean(s):
return re.sub(r'[^a-zA-Z0-9 ]', '', s)
max_rouge = 0.0
abstract = sum(abstract_sent_list, [])
abstract = _rouge_clean(' '.join(abstract)).split()
sents = [_rouge_clean(' '.join(s)).split() for s in doc_sent_list]
evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
reference_1grams = _get_word_ngrams(1, [abstract])
evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
reference_2grams = _get_word_ngrams(2, [abstract])
selected = []
for s in range(summary_size):
cur_max_rouge = max_rouge
cur_id = -1
for i in range(len(sents)):
if (i in selected):
continue
c = selected + [i]
candidates_1 = [evaluated_1grams[idx] for idx in c]
candidates_1 = set.union(*map(set, candidates_1))
candidates_2 = [evaluated_2grams[idx] for idx in c]
candidates_2 = set.union(*map(set, candidates_2))
rouge_1 = cal_rouge(candidates_1, reference_1grams)['f']
rouge_2 = cal_rouge(candidates_2, reference_2grams)['f']
rouge_score = rouge_1 + rouge_2
if rouge_score > cur_max_rouge:
cur_max_rouge = rouge_score
cur_id = i
if (cur_id == -1):
return selected
selected.append(cur_id)
max_rouge = cur_max_rouge
return sorted(selected)
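# --- Toy example (illustrative only) of the greedy ROUGE oracle above ---
# greedy_selection keeps adding the document sentence that most increases ROUGE-1 + ROUGE-2
# overlap with the abstract; on this tiny input the first sentence is selected.
def _greedy_selection_toy_example():
    doc = [["the", "cat", "sat"], ["dogs", "bark", "loudly"], ["the", "cat", "slept"]]
    abstract = [["the", "cat", "sat"]]
    return greedy_selection(doc, abstract, summary_size=1)  # expected: [0]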
def greedy_selection_MDS(doc_sent_list, abstracts, summary_size=1000):
def _rouge_clean(s):
return re.sub(r'[^a-zA-Z0-9 ]', '', s)
max_rouge = 0.0
# abstract = sum(abstract_sent_list, [])
abstracts = [_rouge_clean(abstract.lower().replace('...',' ... ')).split() for abstract in abstracts]
# abstract = _rouge_clean(' '.join(abstract)).split()
sents = [_rouge_clean(s.lower().replace('...',' ... ')).split() for s in doc_sent_list]
evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
references_1grams = []
references_2grams = []
for abstract in abstracts:
references_1grams.append(_get_word_ngrams(1, [abstract]))
references_2grams.append(_get_word_ngrams(2, [abstract]))
selected = []
for s in range(summary_size):
cur_max_rouge = max_rouge
cur_id = -1
for i in range(len(sents)):
if (i in selected):
continue
c = selected + [i]
candidates_1 = [evaluated_1grams[idx] for idx in c]
candidates_1 = set.union(*map(set, candidates_1))
candidates_2 = [evaluated_2grams[idx] for idx in c]
candidates_2 = set.union(*map(set, candidates_2))
rouge_1 = np.mean([cal_rouge(candidates_1, reference_1grams)['f'] for reference_1grams in references_1grams])
rouge_2 = np.mean([cal_rouge(candidates_2, reference_2grams)['f'] for reference_2grams in references_2grams])
rouge_score = rouge_1 + rouge_2
if rouge_score > cur_max_rouge:
cur_max_rouge = rouge_score
cur_id = i
if (cur_id == -1):
return selected
selected.append(cur_id)
max_rouge = cur_max_rouge
return sorted(selected)
def add_sent_special_tok(document, OIE_row = None):
doc_sents = sent_tokenize(document)#[:20]
if OIE_row is not None: #if main document
doc_sents = doc_sents[:MAX_SENT_MAIN_DOC]
sent_found_flag = False
for sent_idx, sent in enumerate(doc_sents):
if sent == OIE_row['docSentText']:
sent_found_flag = True
doc_sents[sent_idx] = add_OIE_special_tok(OIE_row['docSpanOffsets'], OIE_row['docSentCharIdx'], sent)
if num_tokens('<doc-s> ' + '<s> ' + ' </s> <s> '.join(doc_sents[:sent_idx+1]) + ' </s>' + ' </doc-s>', tokenizer,
add_special_tokens=True)> MAX_TOKENS:
return None
break
if not sent_found_flag:
return None
else: #if context document
doc_sents = doc_sents[:MAX_SENT_CONTEXT_DOC]
document = '<s> ' + ' </s> <s> '.join(doc_sents) + ' </s>'
return document
def adding_files_context(file_context_combination, data_path, topic_dir):
documents = []
for file_context in file_context_combination:
text = read_generic_file(os.path.join(data_path, topic_dir, file_context))
document = " ".join(text)
document = add_sent_special_tok(document)
document = add_doc_special_tok(document)
documents.append(document)
context = ' '.join(documents)
return context
def add_special_tok(row, document):
document_tmp = document[:]#add_OIE_special_tok(docSpanOffsets, document)
document_tmp = add_sent_special_tok(document_tmp, row)
if document_tmp is not None:
document_tmp = add_doc_special_tok(document_tmp)
return document_tmp
def add_doc_special_tok(document_tmp):
return '<doc-s> ' + document_tmp + ' </doc-s>'
def add_OIE_special_tok(docSpanOffsets, docSentCharIdx, sent, special_tokens_for_global_attn = True):
# document_tmp = document[:]
span_offsets = offset_str2list(docSpanOffsets)
offsets = offset_decreaseSentOffset(docSentCharIdx, span_offsets)
# assume we have max 2 parts
if special_tokens_for_global_attn:
for offset in offsets[::-1]: #[::-1] start from the end so the remain offsets won't be shifted
sent = insert_string(sent, offset[1], ' <OIE1_END> ')
sent = insert_string(sent, offset[0], ' <OIE1_START> ')
else:
for offset in offsets[::-1]: #[::-1] start from the end so the remain offsets won't be shifted
sent = insert_string(sent, offset[1], ' > ')
sent = insert_string(sent, offset[0], ' < ')
return sent
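# --- Illustrative example of the span marking above (hypothetical offsets) ---
# With docSpanOffsets "104,109" and docSentCharIdx 100, characters 4-9 of the sentence
# ("cheap") are wrapped in the OIE special tokens used for global attention.
def _add_OIE_special_tok_example():
    sent = "The cheap prices stay."
    return add_OIE_special_tok("104,109", 100, sent)
    # -> 'The  <OIE1_START> cheap <OIE1_END>  prices stay.'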
def read_abstracts(DATASET, data_path, topic_dir):
abstracts = []
if DATASET.startswith('TAC'):
# for summary_path in glob.iglob(
# data_path + '/summaries/' + topic_dir[:-3].upper() + topic_dir[-2:].upper() + '.*'):
for summary_path in glob.iglob(
data_path + '/summaries/' + topic_dir[:-3].upper() + '*'):
summary = ' '.join(read_generic_file(summary_path))
abstracts.append(summary)
else:
for summary_path in glob.iglob(data_path + '/summaries/' + topic_dir[:-1].upper() + '.*'):
summary = ' '.join(read_generic_file(summary_path))
abstracts.append(summary)
return abstracts
def add_instance(full_instance, tokenizer, row, highlights_list, highlights_metadata_list, file_context_combination, alignment_label='alignment_label'):
full_instance, global_attention_idx = extract_global_attention_idx(full_instance, tokenizer)
print('num tokens:', num_tokens(full_instance, tokenizer, add_special_tokens=False))
highlights_list.append([full_instance, row[alignment_label], global_attention_idx, row['greedyMaxRouge']])
highlights_metadata_list.append(row.tolist()+ [file_context_combination])
def replace_special_token(text, special_token_char_idxes, old_special_token, new_special_token):
text = text[:special_token_char_idxes[-1]] + new_special_token + text[special_token_char_idxes[-1] + len(
old_special_token):] # replace '<OIE1_START>' with '<'
special_token_char_idxes[-1] += 1 # include new special token '<'
return text, special_token_char_idxes
def extract_global_attention_idx(text, tokenizer, model_max_tokens = None):
if model_max_tokens is None:
model_max_tokens = MAX_TOKENS
#and replace new special tokens with '<' '>' so the model wont have to learn new tokens.
special_tokens_idx_list = []
special_token_char_idxes = []
mark_start_idx = text.find('<OIE1_START>')
while mark_start_idx > -1:
# find special_token_char_idxes
special_token_char_idxes.append(mark_start_idx)
text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_START>', '<')
special_token_char_idxes.append(text.find('<OIE1_END>'))
text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_END>', '>')
mark_start_idx = text.find('<OIE1_START>')
# #find special_token_char_idxes
# special_token_char_idxes = []
# special_token_char_idxes.append(text.find('<OIE1_START>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_START>', '<')
# special_token_char_idxes.append(text.find('<OIE1_END>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_END>', '>')
# start_idx2 = text.find('<OIE2_START>')
# if start_idx2 > -1: #if exists
# special_token_char_idxes.append(start_idx2)
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE2_START>', '<')
# special_token_char_idxes.append(text.find('<OIE2_END>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE2_END>', '>')
# find special token idxes
for special_token_char_idx in special_token_char_idxes:
special_token_prev_text = text[:special_token_char_idx]
special_token_idx = num_tokens(special_token_prev_text, tokenizer) # special token start sent included as we take len of tokens which is the idx+1
assert(('<' in tokenizer.tokenize(text)[special_token_idx-1]) or ('>' in tokenizer.tokenize(text)[special_token_idx-1])) # check it finds the special token. special_token_idx-1 as we omit special start sent token, as tokemize function doesnt include it.
assert(special_token_idx < model_max_tokens) #it shouldnt be longer then 2048 (0-2047), and the last token is special end of sentence token.
special_tokens_idx_list.append(special_token_idx)
return text, special_tokens_idx_list
def createGT_labels(OIEs_topic, data_path, topic_dir, DATASET):
labels_column_name = 'greedyMaxRouge'
OIEs_topic['original_idx'] = range(len(OIEs_topic))
abstracts = read_abstracts(DATASET, data_path, topic_dir)
docFile_summSpan_cands = list(OIEs_topic['docSpanText'].values)
positive_summSpan_idx = greedy_selection_MDS(docFile_summSpan_cands, abstracts)
positive_summSpan_original_idx = [OIEs_topic['original_idx'].values[cand_idx] for cand_idx in positive_summSpan_idx]
scnd_filter_label = np.zeros(len(OIEs_topic), dtype=int)
scnd_filter_label[positive_summSpan_original_idx] = 1
if labels_column_name in OIEs_topic.columns:
scnd_filter_label = np.array(OIEs_topic[labels_column_name].to_list()) + scnd_filter_label
OIEs_topic[labels_column_name] = scnd_filter_label
##validation for correct indexes
positive_labeled_spans = OIEs_topic[OIEs_topic[labels_column_name] == 1]['docSpanText'].to_list()
positive_labeled_spans_validation = [docFile_summSpan_cands[cand_idx] in positive_labeled_spans for cand_idx in positive_summSpan_idx]
assert(all(positive_labeled_spans_validation))
return OIEs_topic
def add_sent_in_file_idx(OIEs_topic, data_path, topic_dir):
doc_sent_idx = np.zeros(len(OIEs_topic), dtype=int)
OIEs_topic['original_idx'] = range(len(OIEs_topic))
topic_files = os.listdir(os.path.join(data_path, topic_dir))
for file_idx, file in enumerate(topic_files):
OIEs_topic_file = OIEs_topic[OIEs_topic['documentFile']==file]
text = read_generic_file(os.path.join(data_path, topic_dir, file))
document = " ".join(text)
doc_sents = sent_tokenize(document)
for sent_idx, doc_sent in enumerate(doc_sents):
OIEs_topic_file_sent_original_idx = (OIEs_topic_file['original_idx'][OIEs_topic_file['docSentText'] == doc_sent]).values
doc_sent_idx[OIEs_topic_file_sent_original_idx] = sent_idx
OIEs_topic['inFile_sentIdx'] = doc_sent_idx
return OIEs_topic
def positive_augmentation(num_negative, num_positive, highlights_df, highlights_metadata_df, label_tag = 'label', SAFE_BUFFER = 100):
original_len_highlights_df = len(highlights_df)
augmentation_factor = (num_negative- num_positive - SAFE_BUFFER)/num_positive
if label_tag != 'label':
augmentation_factor = (num_negative - num_positive - SAFE_BUFFER) / len(highlights_df[highlights_df[label_tag]==1])
#threshold = 0.75
augmentation_factor = math.floor(augmentation_factor) #if augmentation_factor < (math.floor(augmentation_factor) + threshold) else math.ceil(augmentation_factor)
positive_highlights_df = highlights_df[highlights_df[label_tag] == 1]
positive_highlights_metadata_df = highlights_metadata_df.loc[positive_highlights_df.index, :]
if augmentation_factor >= 1:
for i in range(augmentation_factor):
highlights_df = highlights_df.append(positive_highlights_df)
highlights_metadata_df = highlights_metadata_df.append(positive_highlights_metadata_df)
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
# augmentation_factor = (num_negative - num_positive) / num_positive # if still not equal- add part of positive samples.
# if augmentation_factor > 0.5:
if num_negative - num_positive > SAFE_BUFFER:
selected_index = np.random.choice(positive_highlights_df.index.to_list(),num_negative - num_positive -SAFE_BUFFER,replace=False)
selected_positive_highlights_df = highlights_df[:original_len_highlights_df].loc[selected_index, :] #copy from original highlights_df (before augmentation) so rows won't be double augmented by their index
selected_positive_highlights_metadata_df = highlights_metadata_df[:original_len_highlights_df].loc[selected_index, :]
highlights_df = highlights_df.append(selected_positive_highlights_df)
highlights_metadata_df = highlights_metadata_df.append(selected_positive_highlights_metadata_df)
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
return highlights_df, highlights_metadata_df
##################################
###### main ##############
##################################
if __name__ == "__main__":
np.random.seed(42)
SET = 'train'
DATASETS = ['TAC2008','TAC2009','TAC2010']
NUM_CONTEXT_FILES = 9
MAX_TOKENS = 4096
filter_negative = False
FILTER_RATE = 0.4
over_sample_positive = False
MAX_SENT_MAIN_DOC = 20
MAX_SENT_CONTEXT_DOC = 9
sentences_level = False
if SET == 'train':
filter_negative = True
over_sample_positive = True
positive_label = 'greedyMaxRouge'
if filter_negative:
filter_negative_label = '_filter_negative'
else:
filter_negative_label = ''
if over_sample_positive:
over_sample_positive_label = '_over_sample_positive'
else:
over_sample_positive_label = ''
if sentences_level:
sentences_level_label = '_sentence_based'
else:
sentences_level_label = ''
OUTPUT_PATH = 'OIE_highlights/{}_{}_CDLM{}{}{}_fixed_truncated.csv'.format("_".join(DATASETS), SET,
filter_negative_label,
over_sample_positive_label,
sentences_level_label)
highlights_list = []
highlights_metadata_list = []
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
tokenizer = AutoTokenizer.from_pretrained('./CDLM/')
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<OIE1_START>', '<OIE1_END>', '<OIE2_START>', '<OIE2_END>']})
for DATASET in DATASETS:
data_path = 'data/{}/'.format(DATASET)
OIEs = pd.read_csv('OIE_cands/OIE_cands_{}.csv'.format(DATASET))
if sentences_level:
OIEs['docSpanText'] = OIEs['docSentText']
OIEs['docSpanOffsets'] = OIEs['docSentCharIdx'].apply(str) + ', ' + (
OIEs['docSentCharIdx'] + OIEs['docSentText'].apply(len)).apply(str)
used_positive_spans = 0
for topic_dir in os.listdir(data_path):
print(topic_dir)
if topic_dir == 'summaries':
continue
OIEs_topic = OIEs[OIEs['topic'] == topic_dir]
if DATASET.startswith('TAC'):
topic_dir_tac2011 = topic_dir[:-3].upper() + topic_dir[-2:].upper()
OIEs_topic = OIEs[OIEs['topic'] == topic_dir_tac2011]
OIEs_topic = add_sent_in_file_idx(OIEs_topic, data_path, topic_dir)
OIEs_topic = OIEs_topic[OIEs_topic['inFile_sentIdx'] < MAX_SENT_MAIN_DOC]
OIEs_topic = createGT_labels(OIEs_topic, data_path, topic_dir, DATASET)
topic_files = os.listdir(os.path.join(data_path, topic_dir))
topic_dates = [topic[re.search(r"\d", topic).start():] for topic in topic_files]#[topic_file[3:] for topic_file in topic_files]
topic_files = [x for _, x in sorted(zip(topic_dates, topic_files))]
for file_idx, file in enumerate(topic_files):
text = read_generic_file(os.path.join(data_path, topic_dir, file))
document = " ".join(text)
post_context_files = topic_files[file_idx + 1:file_idx + 1 + NUM_CONTEXT_FILES]
pre_context_files = []
if len(post_context_files) < NUM_CONTEXT_FILES:
diff_len = NUM_CONTEXT_FILES - len(post_context_files)
pre_context_files = topic_files[max(0, file_idx - diff_len):file_idx] # + context_files
assert (len(post_context_files + pre_context_files) == min(NUM_CONTEXT_FILES, len(topic_files) - 1))
# trunced_document = truncated_text_for_openie(document, tokenizer)
OIEs_topic_docFile = OIEs_topic[
OIEs_topic['documentFile'] == file]
for index, row in OIEs_topic_docFile.iterrows():
main_document = add_special_tok(row, document)
if main_document is None:
continue
if row[positive_label]:
used_positive_spans += 1
else:
if filter_negative:
if np.random.choice([0, 1], p=[FILTER_RATE,
1 - FILTER_RATE]): # 'continue' in random (1 - FILTER_RATE) of negative cases.
continue
# for file_context_combination in [context_files]:# combinations(topic_files_tmp,NUM_CONTEXT_FILES): # all context combinations of 2 files
pre_documents_context = adding_files_context(pre_context_files, data_path, topic_dir)
post_documents_context = adding_files_context(post_context_files, data_path, topic_dir)
file_context_combination = pre_context_files + post_context_files
full_instance = pre_documents_context + ' ' + main_document + ' ' + post_documents_context
add_instance(full_instance, tokenizer, row, highlights_list,
highlights_metadata_list, file_context_combination, alignment_label=positive_label)
print(len(highlights_list))
highlights_df = pd.DataFrame(highlights_list, columns=['', 'label', 'global_attention_idx', 'greedyMaxRouge'])
highlights_metadata_df = pd.DataFrame(highlights_metadata_list,
columns=OIEs_topic.columns.tolist() + ['doc_context'])
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
if over_sample_positive:
highlights_df, highlights_metadata_df = positive_augmentation(num_negative, num_positive, highlights_df,
highlights_metadata_df)
highlights_df = highlights_df[['', 'label', 'global_attention_idx']]
highlights_df.to_csv(OUTPUT_PATH, index=False)
highlights_metadata_df.to_csv(OUTPUT_PATH[:-4] + '_metadata.csv', index=False)
|
[
"pandas.DataFrame",
"utils.num_tokens",
"numpy.random.seed",
"os.path.join",
"nltk.sent_tokenize",
"math.floor",
"transformers.AutoTokenizer.from_pretrained",
"re.search",
"numpy.random.choice",
"re.sub",
"os.listdir"
] |
[((5885, 5908), 'nltk.sent_tokenize', 'sent_tokenize', (['document'], {}), '(document)\n', (5898, 5908), False, 'from nltk import sent_tokenize\n'), ((15283, 15314), 'math.floor', 'math.floor', (['augmentation_factor'], {}), '(augmentation_factor)\n', (15293, 15314), False, 'import math\n'), ((17416, 17434), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (17430, 17434), True, 'import numpy as np\n'), ((18964, 19004), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""./CDLM/"""'], {}), "('./CDLM/')\n", (18993, 19004), False, 'from transformers import BertTokenizer, RobertaTokenizer, AutoTokenizer\n'), ((22954, 23052), 'pandas.DataFrame', 'pd.DataFrame', (['highlights_list'], {'columns': "['', 'label', 'global_attention_idx', 'greedyMaxRouge']"}), "(highlights_list, columns=['', 'label', 'global_attention_idx',\n 'greedyMaxRouge'])\n", (22966, 23052), True, 'import pandas as pd\n'), ((2459, 2489), 're.sub', 're.sub', (['"""[^a-zA-Z0-9 ]"""', '""""""', 's'], {}), "('[^a-zA-Z0-9 ]', '', s)\n", (2465, 2489), False, 'import re\n'), ((4016, 4046), 're.sub', 're.sub', (['"""[^a-zA-Z0-9 ]"""', '""""""', 's'], {}), "('[^a-zA-Z0-9 ]', '', s)\n", (4022, 4046), False, 'import re\n'), ((9490, 9552), 'utils.num_tokens', 'num_tokens', (['full_instance', 'tokenizer'], {'add_special_tokens': '(False)'}), '(full_instance, tokenizer, add_special_tokens=False)\n', (9500, 9552), False, 'from utils import num_tokens\n'), ((12071, 12117), 'utils.num_tokens', 'num_tokens', (['special_token_prev_text', 'tokenizer'], {}), '(special_token_prev_text, tokenizer)\n', (12081, 12117), False, 'from utils import num_tokens\n'), ((14149, 14183), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir'], {}), '(data_path, topic_dir)\n', (14161, 14183), False, 'import os\n'), ((14440, 14463), 'nltk.sent_tokenize', 'sent_tokenize', (['document'], {}), '(document)\n', (14453, 14463), False, 'from nltk import sent_tokenize\n'), ((19627, 19648), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (19637, 19648), False, 'import os\n'), ((6985, 7033), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir', 'file_context'], {}), '(data_path, topic_dir, file_context)\n', (6997, 7033), False, 'import os\n'), ((14342, 14382), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir', 'file'], {}), '(data_path, topic_dir, file)\n', (14354, 14382), False, 'import os\n'), ((20311, 20345), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir'], {}), '(data_path, topic_dir)\n', (20323, 20345), False, 'import os\n'), ((20672, 20712), 'os.path.join', 'os.path.join', (['data_path', 'topic_dir', 'file'], {}), '(data_path, topic_dir, file)\n', (20684, 20712), False, 'import os\n'), ((21883, 21941), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': '[FILTER_RATE, 1 - FILTER_RATE]'}), '([0, 1], p=[FILTER_RATE, 1 - FILTER_RATE])\n', (21899, 21941), True, 'import numpy as np\n'), ((20381, 20404), 're.search', 're.search', (['"""\\\\d"""', 'topic'], {}), "('\\\\d', topic)\n", (20390, 20404), False, 'import re\n')]
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer utilities."""
import copy
from typing import List, Optional
from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf
def generate_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
*,
random_seed: int = -1,
prune_rms_thresh: float = -1.0,
max_iter: int = -1,
fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
"""Generates conformers for a given molecule.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
prune_rms_thresh: RMSD threshold which allows to prune conformers that are
too similar.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
Returns:
Copy of a `molecule` with added hydrogens. The returned molecule contains
force field-optimised conformers. The number of conformers is guaranteed to
be <= max_num_conformers.
"""
mol = copy.deepcopy(molecule)
mol = Chem.AddHs(mol)
mol = _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=False)
if max_iter > 0:
mol_with_conformers = _minimize_by_mmff(mol, max_iter)
if mol_with_conformers is None:
mol_with_conformers = _minimize_by_uff(mol, max_iter)
else:
mol_with_conformers = mol
# Aligns conformations in a molecule to each other using the first
# conformation as the reference.
AllChem.AlignMolConformers(mol_with_conformers)
# We remove hydrogens to keep the number of atoms consistent with the graph
# nodes.
mol_with_conformers = Chem.RemoveHs(mol_with_conformers)
return mol_with_conformers
def atom_to_feature_vector(
atom: rdkit.Chem.rdchem.Atom,
conformer: Optional[np.ndarray] = None,
) -> List[float]:
"""Converts rdkit atom object to feature list of indices.
Args:
atom: rdkit atom object.
    conformer: Generated conformer. NaN positions are returned if set to None.
  Returns:
    List containing the (x, y, z) position of the atom taken from the conformer.
"""
if conformer:
pos = conformer.GetAtomPosition(atom.GetIdx())
return [pos.x, pos.y, pos.z]
return [np.nan, np.nan, np.nan]
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
"""Computes conformer.
Args:
smile: Smile string.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
Returns:
    An array of conformer positions with one (x, y, z) entry per atom.
Raises:
RuntimeError: If unable to convert smile string to RDKit mol.
"""
mol = rdkit.Chem.MolFromSmiles(smile)
if not mol:
raise RuntimeError('Unable to convert smile to molecule: %s' % smile)
conformer_failed = False
try:
mol = generate_conformers(
mol,
max_num_conformers=1,
random_seed=45,
prune_rms_thresh=0.01,
max_iter=max_iter)
except IOError as e:
logging.exception('Failed to generate conformers for %s . IOError %s.',
smile, e)
conformer_failed = True
except ValueError:
logging.error('Failed to generate conformers for %s . ValueError', smile)
conformer_failed = True
except: # pylint: disable=bare-except
logging.error('Failed to generate conformers for %s.', smile)
conformer_failed = True
atom_features_list = []
conformer = None if conformer_failed else list(mol.GetConformers())[0]
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom, conformer))
conformer_features = np.array(atom_features_list, dtype=np.float32)
return conformer_features
def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor:
"""Returns a single random rotation matrix."""
rotation_matrix = _get_random_rotation_3d()
if include_mirror_symmetry:
random_mirror_symmetry = _get_random_mirror_symmetry()
rotation_matrix = tf.matmul(rotation_matrix, random_mirror_symmetry)
return rotation_matrix
def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor:
"""Batch of vectors on a single rotation matrix."""
return tf.matmul(vectors, rotation_matrix)
def _embed_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
random_seed: int,
prune_rms_thresh: float,
fallback_to_random: bool,
*,
use_random: bool = False,
) -> Chem.rdchem.Mol:
"""Embeds conformers into a copy of a molecule.
If random coordinates allowed, tries not to use random coordinates at first,
and uses random only if fails.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
prune_rms_thresh: RMSD threshold which allows to prune conformers that are
too similar.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
*:
use_random: Use random coordinates. Shouldn't be set by any caller except
this function itself.
Returns:
A copy of a molecule with embedded conformers.
Raises:
ValueError: if conformers cannot be obtained for a given molecule.
"""
mol = copy.deepcopy(molecule)
# Obtains parameters for conformer generation.
# In particular, ETKDG is experimental-torsion basic knowledge distance
# geometry, which allows to randomly generate an initial conformation that
# satisfies various geometric constraints such as lower and upper bounds on
# the distances between atoms.
params = AllChem.ETKDGv3()
params.randomSeed = random_seed
params.pruneRmsThresh = prune_rms_thresh
params.numThreads = -1
params.useRandomCoords = use_random
conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params)
if not conf_ids:
if not fallback_to_random or use_random:
      raise ValueError('Cannot get conformers')
return _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=True)
return mol
def _minimize_by_mmff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Optional[Chem.rdchem.Mol]:
"""Minimizes forcefield for conformers using MMFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers; or None if MMFF
cannot be performed.
"""
molecule_props = AllChem.MMFFGetMoleculeProperties(molecule)
if molecule_props is None:
return None
mol = copy.deepcopy(molecule)
for conf_id in range(mol.GetNumConformers()):
ff = AllChem.MMFFGetMoleculeForceField(
mol, molecule_props, confId=conf_id, ignoreInterfragInteractions=False)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _minimize_by_uff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Chem.rdchem.Mol:
"""Minimizes forcefield for conformers using UFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers.
"""
mol = copy.deepcopy(molecule)
conf_ids = range(mol.GetNumConformers())
for conf_id in conf_ids:
ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor:
"""Returns the 2d/3d matrix for mirror symmetry."""
zero = tf.zeros_like(sign)
one = tf.ones_like(sign)
# pylint: disable=bad-whitespace,bad-continuation
rot = [sign, zero, zero,
zero, one, zero,
zero, zero, one]
# pylint: enable=bad-whitespace,bad-continuation
shape = (3, 3)
rot = tf.stack(rot, axis=-1)
rot = tf.reshape(rot, shape)
return rot
def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor:
"""Converts a batch of quaternions to a batch of rotation matrices."""
q0 = quaternion[0]
q1 = quaternion[1]
q2 = quaternion[2]
q3 = quaternion[3]
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
matrix = tf.stack([r00, r01, r02,
r10, r11, r12,
r20, r21, r22], axis=-1)
return tf.reshape(matrix, [3, 3])
def _get_random_rotation_3d() -> tf.Tensor:
random_quaternions = tf.random.normal(
shape=[4], dtype=tf.float32)
random_quaternions /= tf.linalg.norm(
random_quaternions, axis=-1, keepdims=True)
return _quaternion_to_rotation_matrix(random_quaternions)
def _get_random_mirror_symmetry() -> tf.Tensor:
random_0_1 = tf.random.uniform(
shape=(), minval=0, maxval=2, dtype=tf.int32)
random_signs = tf.cast((2 * random_0_1) - 1, tf.float32)
return _get_symmetry_rotation_matrix(random_signs)
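# Minimal usage sketch (illustrative only, not part of the library; the SMILES string below is an
# arbitrary example):
#
#   conformer = compute_conformer('CCO', max_iter=200)                  # array of shape (num_atoms, 3)
#   rotation = get_random_rotation_matrix(include_mirror_symmetry=True)  # random 3x3 rotation
#   rotated = rotate(tf.constant(conformer), rotation)                   # randomly rotated coordinates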
|
[
"tensorflow.compat.v2.reshape",
"rdkit.Chem.RemoveHs",
"rdkit.Chem.AllChem.UFFGetMoleculeForceField",
"absl.logging.exception",
"tensorflow.compat.v2.matmul",
"copy.deepcopy",
"rdkit.Chem.AllChem.ETKDGv3",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.linalg.norm",
"tensorflow.compat.v2.cast",
"rdkit.Chem.AllChem.MMFFGetMoleculeForceField",
"tensorflow.compat.v2.random.normal",
"rdkit.Chem.AllChem.EmbedMultipleConfs",
"absl.logging.error",
"rdkit.Chem.AllChem.AlignMolConformers",
"tensorflow.compat.v2.random.uniform",
"numpy.array",
"tensorflow.compat.v2.ones_like",
"rdkit.Chem.AllChem.MMFFGetMoleculeProperties",
"rdkit.Chem.AddHs",
"rdkit.Chem.MolFromSmiles"
] |
[((1968, 1991), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (1981, 1991), False, 'import copy\n'), ((2000, 2015), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (2010, 2015), False, 'from rdkit import Chem\n'), ((2492, 2539), 'rdkit.Chem.AllChem.AlignMolConformers', 'AllChem.AlignMolConformers', (['mol_with_conformers'], {}), '(mol_with_conformers)\n', (2518, 2539), False, 'from rdkit.Chem import AllChem\n'), ((2654, 2688), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol_with_conformers'], {}), '(mol_with_conformers)\n', (2667, 2688), False, 'from rdkit import Chem\n'), ((3676, 3707), 'rdkit.Chem.MolFromSmiles', 'rdkit.Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (3700, 3707), False, 'import rdkit\n'), ((4631, 4677), 'numpy.array', 'np.array', (['atom_features_list'], {'dtype': 'np.float32'}), '(atom_features_list, dtype=np.float32)\n', (4639, 4677), True, 'import numpy as np\n'), ((5205, 5240), 'tensorflow.compat.v2.matmul', 'tf.matmul', (['vectors', 'rotation_matrix'], {}), '(vectors, rotation_matrix)\n', (5214, 5240), True, 'import tensorflow.compat.v2 as tf\n'), ((6405, 6428), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (6418, 6428), False, 'import copy\n'), ((6752, 6769), 'rdkit.Chem.AllChem.ETKDGv3', 'AllChem.ETKDGv3', ([], {}), '()\n', (6767, 6769), False, 'from rdkit.Chem import AllChem\n'), ((6925, 6984), 'rdkit.Chem.AllChem.EmbedMultipleConfs', 'AllChem.EmbedMultipleConfs', (['mol', 'max_num_conformers', 'params'], {}), '(mol, max_num_conformers, params)\n', (6951, 6984), False, 'from rdkit.Chem import AllChem\n'), ((7729, 7772), 'rdkit.Chem.AllChem.MMFFGetMoleculeProperties', 'AllChem.MMFFGetMoleculeProperties', (['molecule'], {}), '(molecule)\n', (7762, 7772), False, 'from rdkit.Chem import AllChem\n'), ((7827, 7850), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (7840, 7850), False, 'import copy\n'), ((8518, 8541), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (8531, 8541), False, 'import copy\n'), ((8915, 8934), 'tensorflow.compat.v2.zeros_like', 'tf.zeros_like', (['sign'], {}), '(sign)\n', (8928, 8934), True, 'import tensorflow.compat.v2 as tf\n'), ((8943, 8961), 'tensorflow.compat.v2.ones_like', 'tf.ones_like', (['sign'], {}), '(sign)\n', (8955, 8961), True, 'import tensorflow.compat.v2 as tf\n'), ((9177, 9199), 'tensorflow.compat.v2.stack', 'tf.stack', (['rot'], {'axis': '(-1)'}), '(rot, axis=-1)\n', (9185, 9199), True, 'import tensorflow.compat.v2 as tf\n'), ((9208, 9230), 'tensorflow.compat.v2.reshape', 'tf.reshape', (['rot', 'shape'], {}), '(rot, shape)\n', (9218, 9230), True, 'import tensorflow.compat.v2 as tf\n'), ((9788, 9852), 'tensorflow.compat.v2.stack', 'tf.stack', (['[r00, r01, r02, r10, r11, r12, r20, r21, r22]'], {'axis': '(-1)'}), '([r00, r01, r02, r10, r11, r12, r20, r21, r22], axis=-1)\n', (9796, 9852), True, 'import tensorflow.compat.v2 as tf\n'), ((9904, 9930), 'tensorflow.compat.v2.reshape', 'tf.reshape', (['matrix', '[3, 3]'], {}), '(matrix, [3, 3])\n', (9914, 9930), True, 'import tensorflow.compat.v2 as tf\n'), ((10000, 10045), 'tensorflow.compat.v2.random.normal', 'tf.random.normal', ([], {'shape': '[4]', 'dtype': 'tf.float32'}), '(shape=[4], dtype=tf.float32)\n', (10016, 10045), True, 'import tensorflow.compat.v2 as tf\n'), ((10077, 10135), 'tensorflow.compat.v2.linalg.norm', 'tf.linalg.norm', (['random_quaternions'], {'axis': '(-1)', 'keepdims': '(True)'}), '(random_quaternions, axis=-1, keepdims=True)\n', (10091, 10135), True, 
'import tensorflow.compat.v2 as tf\n'), ((10268, 10331), 'tensorflow.compat.v2.random.uniform', 'tf.random.uniform', ([], {'shape': '()', 'minval': '(0)', 'maxval': '(2)', 'dtype': 'tf.int32'}), '(shape=(), minval=0, maxval=2, dtype=tf.int32)\n', (10285, 10331), True, 'import tensorflow.compat.v2 as tf\n'), ((10356, 10395), 'tensorflow.compat.v2.cast', 'tf.cast', (['(2 * random_0_1 - 1)', 'tf.float32'], {}), '(2 * random_0_1 - 1, tf.float32)\n', (10363, 10395), True, 'import tensorflow.compat.v2 as tf\n'), ((4990, 5040), 'tensorflow.compat.v2.matmul', 'tf.matmul', (['rotation_matrix', 'random_mirror_symmetry'], {}), '(rotation_matrix, random_mirror_symmetry)\n', (4999, 5040), True, 'import tensorflow.compat.v2 as tf\n'), ((7908, 8017), 'rdkit.Chem.AllChem.MMFFGetMoleculeForceField', 'AllChem.MMFFGetMoleculeForceField', (['mol', 'molecule_props'], {'confId': 'conf_id', 'ignoreInterfragInteractions': '(False)'}), '(mol, molecule_props, confId=conf_id,\n ignoreInterfragInteractions=False)\n', (7941, 8017), False, 'from rdkit.Chem import AllChem\n'), ((8621, 8674), 'rdkit.Chem.AllChem.UFFGetMoleculeForceField', 'AllChem.UFFGetMoleculeForceField', (['mol'], {'confId': 'conf_id'}), '(mol, confId=conf_id)\n', (8653, 8674), False, 'from rdkit.Chem import AllChem\n'), ((4013, 4098), 'absl.logging.exception', 'logging.exception', (['"""Failed to generate conformers for %s . IOError %s."""', 'smile', 'e'], {}), "('Failed to generate conformers for %s . IOError %s.',\n smile, e)\n", (4030, 4098), False, 'from absl import logging\n'), ((4170, 4243), 'absl.logging.error', 'logging.error', (['"""Failed to generate conformers for %s . ValueError"""', 'smile'], {}), "('Failed to generate conformers for %s . ValueError', smile)\n", (4183, 4243), False, 'from absl import logging\n'), ((4317, 4378), 'absl.logging.error', 'logging.error', (['"""Failed to generate conformers for %s."""', 'smile'], {}), "('Failed to generate conformers for %s.', smile)\n", (4330, 4378), False, 'from absl import logging\n')]
|
"""Module for handling plotting functions
This module contains plotting classes to plot :class:`.Binning` objects.
Examples
--------
::
plt = plotting.get_plotter(binning)
plt.plot_values()
plt.savefig('output.png')
"""
from itertools import cycle
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from . import binning
def get_plotter(obj, *args, **kwargs):
"""Return a suitable plotting class instance for the object.
Parameters
----------
obj : object
The object for which a plotter should be returned.
*args : optional
**kwargs : optional
Additional arguments are passed to the init method of the plotter.
"""
if isinstance(obj, binning.RectilinearBinning):
return RectilinearBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.LinearBinning):
return LinearBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.CartesianProductBinning):
return CartesianProductBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.Binning):
return BinningPlotter(obj, *args, **kwargs)
if isinstance(obj, np.ndarray):
return ArrayPlotter(obj, *args, **kwargs)
raise TypeError(f"No known Plotter class for type {type(obj)}")
class Plotter:
"""Plotting base class.
Parameters
----------
figax : (Figure, Axes), optional
The figure and axis to plot in.
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
"""
def __init__(self, figax=None):
self.figax = figax
self.color = cycle("C%d" % (i,) for i in range(0, 10))
self.hatch = cycle([r"//", r"\\", r"O", "*"])
def __del__(self):
"""Clean up figures."""
if self.figax is not None:
plt.close(self.figax[0])
def subplots(self, *args, **kwargs):
"""Return the ``(Figure, Axes)`` tuple of the binning.
Creates one using Matplotlib's ``subplots``, if necessary.
"""
if self.figax is None:
self.figax = plt.subplots(*args, **kwargs)
return self.figax
def savefig(self, *args, **kwargs):
"""Save the figure."""
kwargs2 = {"bbox_inches": "tight"}
kwargs2.update(kwargs)
self.figax[0].savefig(*args, **kwargs2)
class ArrayPlotter(Plotter):
"""Plotting class for numpy arrays.
Parameters
----------
array : ndarray
The ndarray to be plotted.
bins_per_row : int, optional
How many bins are going to be plotted per row.
**kwargs : optional
        Additional keyword arguments are passed to :class:`Plotter`.
See also
--------
Plotter
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
array : ndarray
The ndarray to be plotted.
bins_per_row : int, optional
How many bins are going to be plotted per row.
"""
def __init__(self, array, bins_per_row=25, **kwargs):
self.array = array
self.bins_per_row = bins_per_row
Plotter.__init__(self, **kwargs)
def _get_array(self, array):
if array is None:
array = self.array
else:
array = np.asarray(array)
if array.shape != self.array.shape:
raise TypeError("Array must be of equal shape as the initial one.")
return array
def _get_arrays(self, arrays):
try:
ret = [self._get_array(a) for a in arrays]
except (TypeError, IndexError):
ret = [self._get_array(arrays)]
return np.array(ret)
def get_bin_edges(self, i_min, i_max):
"""Get the bin edges corresponding to bins i_min to i_max."""
x = np.arange(i_min, i_max)
return np.append(x - 0.5, x[-1] + 0.5) # Bins centred on integers
def get_axis_label(self):
"""Return the default label for the axis."""
return "Bin #"
@staticmethod
def _get_stack_functions(stack_function):
try:
# A number?
np.isfinite(stack_function)
except TypeError:
# Nope
pass
else:
# A number.
lobound = (1.0 - stack_function) / 2.0
hibound = 1.0 - lobound
def lower(x, axis=0, bound=lobound):
return np.quantile(x, bound, axis=axis)
def upper(x, axis=0, bound=hibound):
return np.quantile(x, bound, axis=axis)
return lower, upper
# No number
try:
# Tuple of functions?
lower, upper = stack_function
except TypeError:
# Nope
def lower(x, axis=0):
return np.sum(np.zeros_like(x), axis=axis)
upper = stack_function
return lower, upper
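    # Illustrative note (not part of the public API): passing a float such as
    # stack_function=0.68 resolves to the 16% and 84% quantiles above, i.e. an
    # equal-tailed 68% interval across the stacked arrays; passing a single
    # function (e.g. np.mean) uses a zero lower bound and that function as the
    # upper bound of the plotted band.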
def plot_array(
self,
array=None,
density=False,
stack_function=np.mean,
margin_function=None,
**kwargs,
):
"""Plot an array.
Parameters
----------
array : ndarray
The thing to plot.
density : bool, optional
Divide the data by the relative bin width: ``width / total_plot_range``.
stack_function : float or function or (lower_function, function)
How to deal with multiple arrays.
When `float`, plot the respective quantile as equal-tailed interval.
When `function`, apply this function to the stack after marginalisation.
When `(function, function)`, use these functions to calculate lower and
upper bounds of the area to be plotted respectively.
Functions must accept ``axis`` keyword argument.
"""
# The `margin_function` parameter is only here so it can be
# safely used with all plotting methods
arrays = self._get_arrays(array)
lower, upper = self._get_stack_functions(stack_function)
bins_per_row = self.bins_per_row
if bins_per_row >= 1:
n_rows = int(np.ceil(arrays.shape[-1] / bins_per_row))
else:
n_rows = 1
bins_per_row = arrays.shape[-1]
figax = self.subplots(
nrows=n_rows,
sharey=True,
figsize=(6.4, max(2.4 * n_rows, 4.8)),
squeeze=False,
)
color = kwargs.get("color", next(self.color))
hatch = kwargs.get("hatch", next(self.hatch))
for i, ax in enumerate(figax[1][:, 0]):
i_min = i * bins_per_row
i_max = min((i + 1) * bins_per_row, arrays.shape[-1])
y_hi = np.asfarray(upper(arrays[:, i_min:i_max], axis=0))
y_lo = np.asfarray(lower(arrays[:, i_min:i_max], axis=0))
bins = np.asfarray(self.get_bin_edges(i_min, i_max))
# Divide by relative bin widths
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
y_hi /= np.asfarray(rel_widths)
y_lo /= np.asfarray(rel_widths)
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
y_lo = np.append(y_lo, y_lo[-1])
y_hi = np.append(y_hi, y_hi[-1])
poly = ax.fill_between(bins, y_hi, y_lo, **args)
# Add sticky y edge so histograms get plotted more beautifully
poly.sticky_edges.y.append(np.min(y_lo))
ax.autoscale_view()
ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax.set_xlabel(self.get_axis_label())
def legend(self, **kwargs):
"""Draw a legend in the first axis."""
args = {
"loc": "best",
}
args.update(kwargs)
self.figax[1][0, 0].legend(**args)
class BinningPlotter(ArrayPlotter):
"""Plotting class for the simplest :class:`.Binning` class.
Parameters
----------
binning : Binning
The binning to be plotted.
marginalize_subbinnings : bool, optional
Plot the contents of subbinnings as a single bin.
**kwargs : optional
        Additional keyword arguments are passed to :class:`ArrayPlotter`.
See also
--------
ArrayPlotter
.Binning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : Binning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
"""
def __init__(self, binning, marginalize_subbinnings=False, **kwargs):
self.binning = binning
self.marginalize_subbinnings = marginalize_subbinnings
array = self.binning.value_array
if marginalize_subbinnings:
array = self.binning.marginalize_subbinnings_on_ndarray(array)
ArrayPlotter.__init__(self, array, **kwargs)
def _get_array(self, array):
if array is None:
array = self.array
else:
array = np.asarray(array)
# Marginalize subbinnings if necessary
if self.marginalize_subbinnings and array.shape != self.array.shape:
array = self.binning.marginalize_subbinnings_on_ndarray(array)
if array.shape != self.array.shape:
raise TypeError("Array must be of equal shape as the initial one.")
return array
def _get_binning(self, binning):
if binning is None:
binning = self.binning
return binning
def plot_values(self, binning=None, **kwargs):
"""Plot the values of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.value_array, **kwargs)
def plot_entries(self, binning=None, **kwargs):
"""Plot the entries of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.entries_array, **kwargs)
def plot_sumw2(self, binning=None, **kwargs):
"""Plot the sumw2 of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.sumw2_array, **kwargs)
class CartesianProductBinningPlotter(BinningPlotter):
"""Plotting class for :class:`.CartesianProductBinning`
Parameters
----------
binning : CartesianProductBinning
        The binning to be plotted
x_axis_binnings : list of int, optional
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int, optional
The indices of binnings to be plotted on the y-axis.
**kwargs : optional
Additional keyword arguments are passed to :class:`BinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
BinningPlotter
.CartesianProductBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : CartesianProductBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
x_axis_binnings : list of int
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int
The indices of binnings to be plotted on the y-axis.
"""
def __init__(self, binning, x_axis_binnings=None, y_axis_binnings=None, **kwargs):
if x_axis_binnings is None:
x_axis_binnings = list(range(int(np.ceil(len(binning.binnings) / 2.0))))
self.x_axis_binnings = x_axis_binnings
if y_axis_binnings is None:
y_axis_binnings = list(
range(int(np.ceil(len(binning.binnings) / 2.0)), len(binning.binnings))
)
self.y_axis_binnings = y_axis_binnings
kwargs["marginalize_subbinnings"] = True
kwargs["bins_per_row"] = -1
BinningPlotter.__init__(self, binning, **kwargs)
def get_bin_edges(self, i_min, i_max, j_binning):
"""Get the bin edges corresponding to bins i_min to i_max."""
x = np.arange(i_min, i_max)
return np.append(x - 0.5, x[-1] + 0.5) # Bins centred on integers
def get_axis_label(self, j_binning):
"""Return the default label for the axis."""
return "Binning %d Bin #" % (j_binning,)
def plot_array(
self,
array=None,
density=True,
stack_function=np.mean,
margin_function=np.sum,
scatter=-1,
**kwargs,
):
"""Plot an array.
Parameters
----------
array : ndarray, optional
The data to be plotted.
density : bool, optional
Divide the data by the relative bin width: ``width / total_plot_range``.
Dividing by the relative bin width, rather than the bin width directly,
ensures that the maximum values in all 1D projections are comparable.
stack_function : float or function or (lower_function, function)
How to deal with multiple arrays.
When `float`, plot the respective quantile as equal-tailed interval.
When `function`, apply this function to the stack after marginalisation.
When `(function, function)`, use these functions to calculate lower and
upper bounds of the area to be plotted respectively.
Functions must accept ``axis`` keyword argument.
margin_function : function, optional
The function used to marginalize the data.
scatter : int, optional
Use a pseudo scatter plot with `scatter` number of points instead
of a 2D histogram. Allows to draw multiple sets of 2D data in the
same plot. The number of points in each cell is proportional to
the value being plotted. Using the `scatter` option is thus
implicitly replicating the behaviour of the `density` option for
the 2D plots. The `density` argument has no effect on the scatter
plots.
"""
arrays = self._get_arrays(array)
lower, upper = self._get_stack_functions(stack_function)
shape = self.binning.bins_shape
arrays = arrays.reshape(arrays.shape[:1] + shape)
n_col = len(self.x_axis_binnings) + 1 # "+1" for the 1D projections
n_row = len(self.y_axis_binnings) + 1
# Widths and heights according to number of bins,
# 10 px (= 0.1") per bin
widths = [
0.1 * self.binning.binnings[i].data_size for i in self.x_axis_binnings
]
heights = [
0.1 * self.binning.binnings[i].data_size for i in self.y_axis_binnings
]
# Axes are counted top to bottom, but we want binnings bottom to top
heights.reverse()
# Total figure size
total_width = np.sum(widths)
total_height = np.sum(heights)
scale = 4.0 / min(max(total_width, total_height), 4.0)
# Room for the 1D histograms
if total_width == 0.0:
widths.append(6 / scale)
else:
widths.append(1.5 / scale)
if total_height == 0.0:
heights.insert(0, 4 / scale)
else:
heights.insert(0, 1.5 / scale)
# Update total sizes
total_width = np.sum(widths)
total_height = np.sum(heights)
fig_x = total_width * scale
fig_y = total_height * scale
# Subplot spacing is specified as multiple of average axis size
# We want it to be relative to the 1D projections
wspace = 0.1 * widths[-1] / (total_width / len(widths))
hspace = 0.1 * heights[0] / (total_height / len(heights))
figax = self.subplots(
nrows=n_row,
ncols=n_col,
sharex="col",
sharey="row",
figsize=(fig_x, fig_y),
gridspec_kw={
"width_ratios": widths,
"height_ratios": heights,
"wspace": wspace,
"hspace": hspace,
},
squeeze=False,
)
color = kwargs.get("color", next(self.color))
hatch = kwargs.get("hatch", next(self.hatch))
# 2D histograms
for x, i in enumerate(self.x_axis_binnings):
for y, j in enumerate(self.y_axis_binnings):
# Get axis to plot in
ax = figax[1][-y - 1, x] # rows are counted top to bottom
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
for k in sorted((i, j), reverse=True):
del axis[k]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# 2D plots only show upper limit of stack
data = upper(data, axis=0)
# Flip axes if necessary
if i < j:
data = data.T
# Bin edges
x_edg = self.get_bin_edges(0, data.shape[1], i)
y_edg = self.get_bin_edges(0, data.shape[0], j)
# Plot the data
if scatter >= 0:
# Draw a set of random points and plot these
# Get bin numbers
csum = np.asfarray(data.cumsum())
csum /= np.max(csum)
indices = np.digitize(np.random.uniform(size=scatter), csum)
# Get x and y bin numbers
x_indices = indices % data.shape[1]
y_indices = indices // data.shape[1]
# Throw X and Y for each event
x = []
y = []
for ix, iy in zip(x_indices, y_indices):
x_min = x_edg[ix]
x_max = x_edg[ix + 1]
y_min = y_edg[iy]
y_max = y_edg[iy + 1]
x.append(np.random.uniform(x_min, x_max))
y.append(np.random.uniform(y_min, y_max))
# Plot the points
if data.sum() > 0:
# Only actually draw something if we have some events
ax.scatter(x, y, 1, color=color, marker=",")
else:
# Plot a regular 2D histogram
# Bin centres
x = np.convolve(x_edg, np.ones(2) / 2, mode="valid")
y = np.convolve(y_edg, np.ones(2) / 2, mode="valid")
xx = np.broadcast_to(x, (len(y), len(x))).flatten()
yy = np.repeat(y, len(x))
# Plot it
if data.sum() == 0:
# Empty data messes with the normalisation
data.fill(0.001)
ax.hist2d(
xx, yy, weights=data.flat, bins=(x_edg, y_edg), density=density
)
# 1D vertical histograms
for x, i in enumerate(self.x_axis_binnings):
# Get axis to plot in
ax = figax[1][0, x]
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
del axis[i]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# Upper and lower limit of area
data_hi = upper(data, axis=0)
data_lo = lower(data, axis=0)
# Divide by relative bin widths
bins = np.asfarray(self.get_bin_edges(0, data.shape[1], i))
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
data_hi /= rel_widths
data_lo /= rel_widths
# Plot the data
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
data_lo = np.append(data_lo, data_lo[-1])
data_hi = np.append(data_hi, data_hi[-1])
poly = ax.fill_between(bins, data_hi, data_lo, **args)
# Add sticky y edge so histograms get plotted more beautifully
poly.sticky_edges.y.append(np.min(data_lo))
ax.autoscale_view()
# Only int tick label
ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
# Add labels at the appropriate axes
ax = figax[1][-1, x]
ax.set_xlabel(self.get_axis_label(i))
# 1D horizontal histograms
for y, i in enumerate(self.y_axis_binnings):
# Get axis to plot in
ax = figax[1][-y - 1, -1] # Rows are counted top to bottom
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
del axis[i]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# Upper and lower limit of area
data_hi = upper(data, axis=0)
data_lo = lower(data, axis=0)
# Divide by relative bin widths
bins = np.asfarray(self.get_bin_edges(0, data.shape[1], i))
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
data_hi /= rel_widths
data_lo /= rel_widths
# Plot the data
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
data_lo = np.append(data_lo, data_lo[-1])
data_hi = np.append(data_hi, data_hi[-1])
poly = ax.fill_betweenx(bins, data_hi, data_lo, **args)
# Add sticky x edge so histograms get plotted more beautifully
poly.sticky_edges.x.append(np.min(data_lo))
ax.autoscale_view()
# Only int tick label
ax.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
# Add labels at the appropriate axes
ax = figax[1][-y - 1, 0] # Rows are counted top to bottom
ax.set_ylabel(self.get_axis_label(i))
# Hide empty axes
figax[1][0, -1].set_axis_off()
def legend(self, **kwargs):
"""Draw a legend in the upper right corner of the plot."""
handles, labels = self.figax[1][0, 0].get_legend_handles_labels()
args = {
"loc": "center",
"borderaxespad": 0.0,
"frameon": False,
}
args.update(kwargs)
self.figax[1][0, -1].legend(handles, labels, **args)
class LinearBinningPlotter(BinningPlotter):
"""Plotting class for :class:`.LinearBinning`
Parameters
----------
binning : LinearBinning
        The binning to be plotted
**kwargs : optional
Additional keyword arguments are passed to :class:`BinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
BinningPlotter
.LinearBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : LinearBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
"""
def __init__(self, binning, **kwargs):
kwargs["marginalize_subbinnings"] = True
args = {
"bins_per_row": -1,
}
args.update(kwargs)
BinningPlotter.__init__(self, binning, **args)
def plot_array(self, *args, **kwargs):
"""Plot an array.
See :meth:`ArrayPlotter.plot_array`.
"""
# Change default behaviour of `density`
kwargs["density"] = kwargs.get("density", True)
return ArrayPlotter.plot_array(self, *args, **kwargs)
def get_bin_edges(self, i_min, i_max):
"""Get the finite bin edges."""
bins = self.binning.bin_edges[i_min : i_max + 1]
ret = list(bins)
if not np.isfinite(ret[0]):
if len(ret) >= 3 and np.isfinite(ret[2]):
ret[0] = ret[1] - (ret[2] - ret[1])
elif np.isfinite(ret[1]):
ret[0] = ret[1] - 1
else:
ret[0] = -0.5
if not np.isfinite(ret[-1]):
if len(ret) >= 3 and np.isfinite(ret[-3]):
ret[-1] = ret[-2] + (ret[-2] - ret[-3])
else:
ret[-1] = ret[-2] + 1
return np.array(ret)
def get_axis_label(self):
"""Return variable name."""
return self.binning.variable
class RectilinearBinningPlotter(CartesianProductBinningPlotter):
"""Plotting class for :class:`.RectilinearBinning`
Parameters
----------
binning : RectilinearBinning
        The binning to be plotted
x_axis_binnings : list of int/str, optional
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int/str, optional
The indices of binnings to be plotted on the y-axis.
**kwargs : optional
Additional keyword arguments are passed to :class:`CartesianProductBinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
CartesianProductBinningPlotter
.RectilinearBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : RectilinearBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
x_axis_binnings : list of int or str
        The indices or variable names to be plotted on the x-axis.
y_axis_binnings : list of int or str
The indices or variable names to be plotted on the y-axis.
"""
def __init__(self, binning, x_axis_binnings=None, y_axis_binnings=None, **kwargs):
if x_axis_binnings is None:
x_axis_binnings = list(range(int(np.ceil(len(binning.binnings) / 2.0))))
else:
            # Materialise as a list so the indices support len() and repeated iteration.
            x_axis_binnings = list(map(binning.get_variable_index, x_axis_binnings))
if y_axis_binnings is None:
y_axis_binnings = list(
range(int(np.ceil(len(binning.binnings) / 2.0)), len(binning.binnings))
)
else:
            # Materialise as a list so the indices support len() and repeated iteration.
            y_axis_binnings = list(map(binning.get_variable_index, y_axis_binnings))
kwargs["x_axis_binnings"] = x_axis_binnings
kwargs["y_axis_binnings"] = y_axis_binnings
kwargs["marginalize_subbinnings"] = True
kwargs["bins_per_row"] = -1
CartesianProductBinningPlotter.__init__(self, binning, **kwargs)
def get_bin_edges(self, i_min, i_max, j_binning):
"""Get the finite bin edges."""
bins = self.binning.binnings[j_binning].bin_edges[i_min : i_max + 1]
ret = list(bins)
if not np.isfinite(ret[0]):
if len(ret) >= 3 and np.isfinite(ret[2]):
ret[0] = ret[1] - (ret[2] - ret[1])
elif np.isfinite(ret[1]):
ret[0] = ret[1] - 1
else:
ret[0] = -0.5
if not np.isfinite(ret[-1]):
if len(ret) >= 3 and np.isfinite(ret[-3]):
ret[-1] = ret[-2] + (ret[-2] - ret[-3])
else:
ret[-1] = ret[-2] + 1
return np.array(ret)
def get_axis_label(self, j_binning):
"""Return variable name."""
return self.binning.binnings[j_binning].variable
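# Minimal usage sketch (illustrative only; assumes `my_binning` is an already filled
# Binning instance created elsewhere):
#
#   plotter = get_plotter(my_binning)
#   plotter.plot_values(label="values")    # filled-area plot of the bin values
#   plotter.plot_entries(label="entries")  # overlay the bin entries
#   plotter.legend()
#   plotter.savefig("binning.png")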
|
[
"numpy.random.uniform",
"numpy.quantile",
"numpy.sum",
"numpy.ceil",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"numpy.asarray",
"numpy.asfarray",
"matplotlib.ticker.MaxNLocator",
"numpy.isfinite",
"numpy.ones",
"numpy.append",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.arange",
"itertools.cycle",
"matplotlib.pyplot.subplots"
] |
[((1942, 1973), 'itertools.cycle', 'cycle', (["['//', '\\\\\\\\', 'O', '*']"], {}), "(['//', '\\\\\\\\', 'O', '*'])\n", (1947, 1973), False, 'from itertools import cycle\n'), ((4113, 4126), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4121, 4126), True, 'import numpy as np\n'), ((4253, 4276), 'numpy.arange', 'np.arange', (['i_min', 'i_max'], {}), '(i_min, i_max)\n', (4262, 4276), True, 'import numpy as np\n'), ((4292, 4323), 'numpy.append', 'np.append', (['(x - 0.5)', '(x[-1] + 0.5)'], {}), '(x - 0.5, x[-1] + 0.5)\n', (4301, 4323), True, 'import numpy as np\n'), ((13183, 13206), 'numpy.arange', 'np.arange', (['i_min', 'i_max'], {}), '(i_min, i_max)\n', (13192, 13206), True, 'import numpy as np\n'), ((13222, 13253), 'numpy.append', 'np.append', (['(x - 0.5)', '(x[-1] + 0.5)'], {}), '(x - 0.5, x[-1] + 0.5)\n', (13231, 13253), True, 'import numpy as np\n'), ((15951, 15965), 'numpy.sum', 'np.sum', (['widths'], {}), '(widths)\n', (15957, 15965), True, 'import numpy as np\n'), ((15989, 16004), 'numpy.sum', 'np.sum', (['heights'], {}), '(heights)\n', (15995, 16004), True, 'import numpy as np\n'), ((16409, 16423), 'numpy.sum', 'np.sum', (['widths'], {}), '(widths)\n', (16415, 16423), True, 'import numpy as np\n'), ((16447, 16462), 'numpy.sum', 'np.sum', (['heights'], {}), '(heights)\n', (16453, 16462), True, 'import numpy as np\n'), ((26238, 26251), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (26246, 26251), True, 'import numpy as np\n'), ((29330, 29343), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (29338, 29343), True, 'import numpy as np\n'), ((2078, 2102), 'matplotlib.pyplot.close', 'plt.close', (['self.figax[0]'], {}), '(self.figax[0])\n', (2087, 2102), True, 'from matplotlib import pyplot as plt\n'), ((2346, 2375), 'matplotlib.pyplot.subplots', 'plt.subplots', (['*args'], {}), '(*args, **kwargs)\n', (2358, 2375), True, 'from matplotlib import pyplot as plt\n'), ((3739, 3756), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (3749, 3756), True, 'import numpy as np\n'), ((4573, 4600), 'numpy.isfinite', 'np.isfinite', (['stack_function'], {}), '(stack_function)\n', (4584, 4600), True, 'import numpy as np\n'), ((7842, 7867), 'numpy.append', 'np.append', (['y_lo', 'y_lo[-1]'], {}), '(y_lo, y_lo[-1])\n', (7851, 7867), True, 'import numpy as np\n'), ((7887, 7912), 'numpy.append', 'np.append', (['y_hi', 'y_hi[-1]'], {}), '(y_hi, y_hi[-1])\n', (7896, 7912), True, 'import numpy as np\n'), ((9924, 9941), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (9934, 9941), True, 'import numpy as np\n'), ((21306, 21337), 'numpy.append', 'np.append', (['data_lo', 'data_lo[-1]'], {}), '(data_lo, data_lo[-1])\n', (21315, 21337), True, 'import numpy as np\n'), ((21360, 21391), 'numpy.append', 'np.append', (['data_hi', 'data_hi[-1]'], {}), '(data_hi, data_hi[-1])\n', (21369, 21391), True, 'import numpy as np\n'), ((23054, 23085), 'numpy.append', 'np.append', (['data_lo', 'data_lo[-1]'], {}), '(data_lo, data_lo[-1])\n', (23063, 23085), True, 'import numpy as np\n'), ((23108, 23139), 'numpy.append', 'np.append', (['data_hi', 'data_hi[-1]'], {}), '(data_hi, data_hi[-1])\n', (23117, 23139), True, 'import numpy as np\n'), ((25769, 25788), 'numpy.isfinite', 'np.isfinite', (['ret[0]'], {}), '(ret[0])\n', (25780, 25788), True, 'import numpy as np\n'), ((26033, 26053), 'numpy.isfinite', 'np.isfinite', (['ret[-1]'], {}), '(ret[-1])\n', (26044, 26053), True, 'import numpy as np\n'), ((28861, 28880), 'numpy.isfinite', 'np.isfinite', (['ret[0]'], {}), '(ret[0])\n', (28872, 
28880), True, 'import numpy as np\n'), ((29125, 29145), 'numpy.isfinite', 'np.isfinite', (['ret[-1]'], {}), '(ret[-1])\n', (29136, 29145), True, 'import numpy as np\n'), ((4861, 4893), 'numpy.quantile', 'np.quantile', (['x', 'bound'], {'axis': 'axis'}), '(x, bound, axis=axis)\n', (4872, 4893), True, 'import numpy as np\n'), ((4967, 4999), 'numpy.quantile', 'np.quantile', (['x', 'bound'], {'axis': 'axis'}), '(x, bound, axis=axis)\n', (4978, 4999), True, 'import numpy as np\n'), ((6578, 6618), 'numpy.ceil', 'np.ceil', (['(arrays.shape[-1] / bins_per_row)'], {}), '(arrays.shape[-1] / bins_per_row)\n', (6585, 6618), True, 'import numpy as np\n'), ((7546, 7569), 'numpy.asfarray', 'np.asfarray', (['rel_widths'], {}), '(rel_widths)\n', (7557, 7569), True, 'import numpy as np\n'), ((7594, 7617), 'numpy.asfarray', 'np.asfarray', (['rel_widths'], {}), '(rel_widths)\n', (7605, 7617), True, 'import numpy as np\n'), ((8089, 8101), 'numpy.min', 'np.min', (['y_lo'], {}), '(y_lo)\n', (8095, 8101), True, 'import numpy as np\n'), ((8181, 8213), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (8199, 8213), False, 'from matplotlib import ticker\n'), ((21574, 21589), 'numpy.min', 'np.min', (['data_lo'], {}), '(data_lo)\n', (21580, 21589), True, 'import numpy as np\n'), ((21703, 21735), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (21721, 21735), False, 'from matplotlib import ticker\n'), ((23323, 23338), 'numpy.min', 'np.min', (['data_lo'], {}), '(data_lo)\n', (23329, 23338), True, 'import numpy as np\n'), ((23452, 23484), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (23470, 23484), False, 'from matplotlib import ticker\n'), ((25823, 25842), 'numpy.isfinite', 'np.isfinite', (['ret[2]'], {}), '(ret[2])\n', (25834, 25842), True, 'import numpy as np\n'), ((25913, 25932), 'numpy.isfinite', 'np.isfinite', (['ret[1]'], {}), '(ret[1])\n', (25924, 25932), True, 'import numpy as np\n'), ((26088, 26108), 'numpy.isfinite', 'np.isfinite', (['ret[-3]'], {}), '(ret[-3])\n', (26099, 26108), True, 'import numpy as np\n'), ((28915, 28934), 'numpy.isfinite', 'np.isfinite', (['ret[2]'], {}), '(ret[2])\n', (28926, 28934), True, 'import numpy as np\n'), ((29005, 29024), 'numpy.isfinite', 'np.isfinite', (['ret[1]'], {}), '(ret[1])\n', (29016, 29024), True, 'import numpy as np\n'), ((29180, 29200), 'numpy.isfinite', 'np.isfinite', (['ret[-3]'], {}), '(ret[-3])\n', (29191, 29200), True, 'import numpy as np\n'), ((18518, 18530), 'numpy.max', 'np.max', (['csum'], {}), '(csum)\n', (18524, 18530), True, 'import numpy as np\n'), ((5252, 5268), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5265, 5268), True, 'import numpy as np\n'), ((18573, 18604), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'scatter'}), '(size=scatter)\n', (18590, 18604), True, 'import numpy as np\n'), ((19148, 19179), 'numpy.random.uniform', 'np.random.uniform', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (19165, 19179), True, 'import numpy as np\n'), ((19214, 19245), 'numpy.random.uniform', 'np.random.uniform', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (19231, 19245), True, 'import numpy as np\n'), ((19623, 19633), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (19630, 19633), True, 'import numpy as np\n'), ((19696, 19706), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (19703, 19706), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
import h5py  # h5py is a common package to interact with
# a dataset that is stored on an H5 file.
#from lr_utils import load_dataset
#load datasets
#Define load_dataset (in place of lr_utils) for loading the train and test datasets
def load_dataset():
train_dataset = h5py.File('/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 50
example = train_set_x_orig[index]
plt.imshow(train_set_x_orig[index])
plt.show()
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# Reshape the training and test data sets so that images of size (num_px, num_px, 3)
# are flattened into single vectors of shape (num_px * num_px * 3, 1).
# A trick when you want to flatten a matrix X of shape (a, b, c, d) to a matrix X_flatten of shape (b * c * d, a) is to use:
# X_flatten = X.reshape(X.shape[0], -1).T  # X.T is the transpose of X
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
#To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel
# value is actually a vector of three numbers ranging from 0 to 255.
# One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you
# subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation
# of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to
# just divide every row of the dataset by 255 (the maximum value of a pixel channel). Let's standardize our dataset.
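# For example (illustrative numbers only), a pixel [17, 31, 245] becomes roughly
# [0.067, 0.122, 0.961] after dividing by 255.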
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
print('number of train datasets =' + str(train_set_x.shape))
print('number of test datasets =' + str (test_set_x.shape))
#Key steps:
# 1. Initialize the parameters of the model
# 2. Learn the parameters for the model by minimizing the cost
# 3. Use the learned parameters to make predictions (on the test set)
# 4. Analyse the results and conclude
#algorithm building:
# The main steps for building a Neural Network are:
# Define the model structure (such as number of input features)
# Initialize the model's parameters
# Loop:
# Calculate current loss (forward propagation)
# Calculate current gradient (backward propagation)
# Update parameters (gradient descent)
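# As a sketch (toy numbers, not part of the dataset), one such update with learning rate 0.01 is:
#   w_new = w - 0.01 * dw,   b_new = b - 0.01 * db
# which is exactly the rule that optimize() below applies on every iteration.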
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1. / (1 + np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###initialize_with_zeros
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
w = np.zeros(shape=(dim, 1), dtype=np.float32)
b = 0
assert (w.shape == (dim, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
#forward and backward propagation
#Implement a function propagate() that computes the cost function and its gradient.
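# For reference, with activations A = sigmoid(w.T @ X + b) and m examples, the quantities
# implemented below are:
#   cost = -(1/m) * sum( Y * log(A) + (1 - Y) * log(1 - A) )
#   dw   =  (1/m) * X @ (A - Y).T
#   db   =  (1/m) * sum(A - Y)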
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
A = sigmoid(np.dot(w.T, X) + b) # compute activation
cost = (-1. / m) * np.sum((Y * np.log(A) + (1 - Y) * np.log(1 - A)), axis=1) # compute cost
# BACKWARD PROPAGATION (TO FIND GRAD)
dw = (1. / m) * np.dot(X, ((A - Y).T))
db = (1. / m) * np.sum(A - Y, axis=1)
assert (dw.shape == w.shape)
assert (db.dtype == float)
cost = np.squeeze(cost)
assert (cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
##OPTIMIZATION
# initialized your parameters.
# to compute a cost function and its gradient.
# update the parameters using gradient descent
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
grads, cost = propagate(w=w, b=b, X=X, Y=Y)
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
w = w - learning_rate * dw
b = b - learning_rate * db
# Record the costs
if i % 100 == 0:
costs.append(cost)
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
## PREDICTION PART
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1, m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
A = sigmoid(np.dot(w.T, X) + b)
[print(x) for x in A]
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
if A[0, i] >= 0.5:
Y_prediction[0, i] = 1
else:
Y_prediction[0, i] = 0
assert (Y_prediction.shape == (1, m))
return Y_prediction
print ("predictions = " + str(predict(w, b, X)))
## MERGE ALL FUNCTIONS INTO A MODEL
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train": Y_prediction_train,
"w": w,
"b": b,
"learning_rate": learning_rate,
"num_iterations": num_iterations}
return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y,
num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# Example of a picture that was wrongly classified.
index = 49
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
plt.show()
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \""
+ classes[int(d["Y_prediction_test"][0,index])].decode("utf-8")
+ "\" picture.")
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
|
[
"h5py.File",
"matplotlib.pyplot.show",
"numpy.dot",
"matplotlib.pyplot.plot",
"numpy.sum",
"numpy.log",
"matplotlib.pyplot.imshow",
"numpy.abs",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1383, 1418), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_set_x_orig[index]'], {}), '(train_set_x_orig[index])\n', (1393, 1418), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1428, 1430), True, 'import matplotlib.pyplot as plt\n'), ((12747, 12757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12755, 12757), True, 'import matplotlib.pyplot as plt\n'), ((12983, 13005), 'numpy.squeeze', 'np.squeeze', (["d['costs']"], {}), "(d['costs'])\n", (12993, 13005), True, 'import numpy as np\n'), ((13007, 13022), 'matplotlib.pyplot.plot', 'plt.plot', (['costs'], {}), '(costs)\n', (13015, 13022), True, 'import matplotlib.pyplot as plt\n'), ((13024, 13042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (13034, 13042), True, 'import matplotlib.pyplot as plt\n'), ((13044, 13083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations (per hundreds)"""'], {}), "('iterations (per hundreds)')\n", (13054, 13083), True, 'import matplotlib.pyplot as plt\n'), ((13141, 13151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13149, 13151), True, 'import matplotlib.pyplot as plt\n'), ((335, 422), 'h5py.File', 'h5py.File', (['"""/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5"""', '"""r"""'], {}), "('/Users/surat/PycharmProjects/start87/datasets/train_catvnoncat.h5',\n 'r')\n", (344, 422), False, 'import h5py\n'), ((443, 484), 'numpy.array', 'np.array', (["train_dataset['train_set_x'][:]"], {}), "(train_dataset['train_set_x'][:])\n", (451, 484), True, 'import numpy as np\n'), ((536, 577), 'numpy.array', 'np.array', (["train_dataset['train_set_y'][:]"], {}), "(train_dataset['train_set_y'][:])\n", (544, 577), True, 'import numpy as np\n'), ((625, 711), 'h5py.File', 'h5py.File', (['"""/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5"""', '"""r"""'], {}), "('/Users/surat/PycharmProjects/start87/datasets/test_catvnoncat.h5',\n 'r')\n", (634, 711), False, 'import h5py\n'), ((731, 770), 'numpy.array', 'np.array', (["test_dataset['test_set_x'][:]"], {}), "(test_dataset['test_set_x'][:])\n", (739, 770), True, 'import numpy as np\n'), ((820, 859), 'numpy.array', 'np.array', (["test_dataset['test_set_y'][:]"], {}), "(test_dataset['test_set_y'][:])\n", (828, 859), True, 'import numpy as np\n'), ((901, 942), 'numpy.array', 'np.array', (["test_dataset['list_classes'][:]"], {}), "(test_dataset['list_classes'][:])\n", (909, 942), True, 'import numpy as np\n'), ((5344, 5386), 'numpy.zeros', 'np.zeros', ([], {'shape': '(dim, 1)', 'dtype': 'np.float32'}), '(shape=(dim, 1), dtype=np.float32)\n', (5352, 5386), True, 'import numpy as np\n'), ((6842, 6858), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (6852, 6858), True, 'import numpy as np\n'), ((6982, 7002), 'numpy.array', 'np.array', (['[[1], [2]]'], {}), '([[1], [2]])\n', (6990, 7002), True, 'import numpy as np\n'), ((7006, 7032), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (7014, 7032), True, 'import numpy as np\n'), ((7031, 7049), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (7039, 7049), True, 'import numpy as np\n'), ((9838, 9854), 'numpy.zeros', 'np.zeros', (['(1, m)'], {}), '((1, m))\n', (9846, 9854), True, 'import numpy as np\n'), ((6694, 6714), 'numpy.dot', 'np.dot', (['X', '(A - Y).T'], {}), '(X, (A - Y).T)\n', (6700, 6714), True, 'import numpy as np\n'), ((6738, 6759), 'numpy.sum', 'np.sum', (['(A - Y)'], {'axis': '(1)'}), '(A - Y, axis=1)\n', 
(6744, 6759), True, 'import numpy as np\n'), ((4779, 4789), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (4785, 4789), True, 'import numpy as np\n'), ((6482, 6496), 'numpy.dot', 'np.dot', (['w.T', 'X'], {}), '(w.T, X)\n', (6488, 6496), True, 'import numpy as np\n'), ((10003, 10017), 'numpy.dot', 'np.dot', (['w.T', 'X'], {}), '(w.T, X)\n', (10009, 10017), True, 'import numpy as np\n'), ((4877, 4893), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (4885, 4893), True, 'import numpy as np\n'), ((6560, 6569), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (6566, 6569), True, 'import numpy as np\n'), ((6582, 6595), 'numpy.log', 'np.log', (['(1 - A)'], {}), '(1 - A)\n', (6588, 6595), True, 'import numpy as np\n'), ((1500, 1533), 'numpy.squeeze', 'np.squeeze', (['train_set_y[:, index]'], {}), '(train_set_y[:, index])\n', (1510, 1533), True, 'import numpy as np\n'), ((12060, 12096), 'numpy.abs', 'np.abs', (['(Y_prediction_train - Y_train)'], {}), '(Y_prediction_train - Y_train)\n', (12066, 12096), True, 'import numpy as np\n'), ((12160, 12194), 'numpy.abs', 'np.abs', (['(Y_prediction_test - Y_test)'], {}), '(Y_prediction_test - Y_test)\n', (12166, 12194), True, 'import numpy as np\n')]
|
import os
import unittest
import torch
import numpy as np
from PIL import Image
from embryovision import util
from embryovision.tests.common import get_loadable_filenames
class TestReadImage(unittest.TestCase):
def test_read_image_returns_numpy(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertIsInstance(image, np.ndarray)
def test_read_image_returns_correct_shape(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertEqual(image.ndim, 3)
self.assertEqual(image.shape[2], 3)
def test_read_image_returns_float_on_01(self):
filename = get_loadable_filenames()[0]
image = util.read_image(filename)
self.assertGreaterEqual(image.min(), 0)
self.assertLessEqual(image.max(), 1)
class TestReadImageForTorch(unittest.TestCase):
def test_read_image_for_torch_returns_torch(self):
filenames = get_loadable_filenames()
as_torch = util.read_images_for_torch(filenames)
self.assertIsInstance(as_torch, torch.Tensor)
def test_read_image_for_torch_returns_correct_shape(self):
# torch expects (n_images, channels, size
filenames = get_loadable_filenames()
as_torch = util.read_images_for_torch(filenames)
n_channels = 3
self.assertEqual(as_torch.size()[:2], (len(filenames), n_channels))
class TestLoadAndCropImage(unittest.TestCase):
def test_returns_pil_image(self):
filename = get_loadable_filenames()[0]
box = (1, 1, 2, 2)
image = util.load_and_crop_image(filename, box)
self.assertIsInstance(image, Image.Image)
def test_output_image_is_correct_shape(self):
filename = get_loadable_filenames()[0]
box = (1, 1, 100, 100)
shape = (150, 140)
image = util.load_and_crop_image(filename, box, output_shape=shape)
self.assertEqual(image.size, shape)
def test_crop_box_is_used_with_resize_nearest(self):
# we crop to a 1 px image, and check that all image values
# are the same value
filename = get_loadable_filenames()[0]
box = (1, 1, 2, 2)
image = util.load_and_crop_image(filename, box)
correct_px_value = np.array(Image.open(filename))[box[0], box[1]]
self.assertTrue(np.all(np.array(image) == correct_px_value))
class TestLoadImageIntoRam(unittest.TestCase):
def test_load_image_as_bytes_io(self):
filename = get_loadable_filenames()[0]
loaded_into_ram = util.load_image_into_ram(filename)
image0 = util.read_image(filename)
image1 = util.read_image(loaded_into_ram)
self.assertTrue(np.all(image0 == image1))
class TestTransformingCollection(unittest.TestCase):
def test_getitem_transforms(self):
np.random.seed(400)
data = np.random.randn(20)
transform = lambda x: -2 * x
loader = util.TransformingCollection(data, transform)
index = 0
self.assertEqual(transform(data[index]), loader[index])
def test_len(self):
data = np.random.randn(20)
transform = lambda x: -2 * x
loader = util.TransformingCollection(data, transform)
self.assertEqual(len(loader), data.size)
def test_on_images(self):
filenames = get_loadable_filenames()
images_ram = [util.load_image_into_ram(nm) for nm in filenames]
loader = util.TransformingCollection(images_ram, util.read_image)
index = 0
image_filename = util.read_image(filenames[index])
image_loader = loader[index]
self.assertTrue(np.all(image_filename == image_loader))
class TestMisc(unittest.TestCase):
def test_split_all(self):
dummy_folder = '/some/long/directory/structure/'
filename = 'D2017_05_05_S1477_I313_pdb/WELL06/F0/016.jpg'
fullname = os.path.join(dummy_folder, filename)
fullname_f0_split = util.split_all(fullname)
correct_answer = (
'/', 'some', 'long', 'directory', 'structure',
'D2017_05_05_S1477_I313_pdb', 'WELL06', 'F0', '016.jpg')
self.assertEqual(fullname_f0_split, correct_answer)
def test_augment_focus(self):
filename = get_loadable_filenames()[0]
augmented = util.augment_focus(filename)
for foundname, focus_correct in zip(augmented, ['F-15', 'F0', 'F15']):
*head, focus_found, image_number = util.split_all(foundname)
self.assertTrue(os.path.exists(foundname))
self.assertEqual(focus_found, focus_correct)
def test_augment_focus_raises_error_when_no_filename(self):
unloadable_filename = '/some/wrong/directory/structure/001.jpg'
assert not os.path.exists(unloadable_filename)
self.assertRaises(
FileNotFoundError,
util.augment_focus,
unloadable_filename,)
def make_loader():
filenames = get_loadable_filenames()
return util.ImageTransformingCollection(filenames)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"embryovision.util.augment_focus",
"numpy.random.seed",
"numpy.random.randn",
"embryovision.util.read_image",
"os.path.exists",
"embryovision.util.split_all",
"PIL.Image.open",
"embryovision.util.ImageTransformingCollection",
"embryovision.util.TransformingCollection",
"numpy.array",
"embryovision.util.read_images_for_torch",
"embryovision.util.load_and_crop_image",
"os.path.join",
"embryovision.util.load_image_into_ram",
"numpy.all",
"embryovision.tests.common.get_loadable_filenames"
] |
[((4960, 4984), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (4982, 4984), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((4996, 5039), 'embryovision.util.ImageTransformingCollection', 'util.ImageTransformingCollection', (['filenames'], {}), '(filenames)\n', (5028, 5039), False, 'from embryovision import util\n'), ((5073, 5088), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5086, 5088), False, 'import unittest\n'), ((323, 348), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (338, 348), False, 'from embryovision import util\n'), ((515, 540), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (530, 540), False, 'from embryovision import util\n'), ((740, 765), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (755, 765), False, 'from embryovision import util\n'), ((984, 1008), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1006, 1008), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1028, 1065), 'embryovision.util.read_images_for_torch', 'util.read_images_for_torch', (['filenames'], {}), '(filenames)\n', (1054, 1065), False, 'from embryovision import util\n'), ((1254, 1278), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1276, 1278), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1298, 1335), 'embryovision.util.read_images_for_torch', 'util.read_images_for_torch', (['filenames'], {}), '(filenames)\n', (1324, 1335), False, 'from embryovision import util\n'), ((1612, 1651), 'embryovision.util.load_and_crop_image', 'util.load_and_crop_image', (['filename', 'box'], {}), '(filename, box)\n', (1636, 1651), False, 'from embryovision import util\n'), ((1874, 1933), 'embryovision.util.load_and_crop_image', 'util.load_and_crop_image', (['filename', 'box'], {'output_shape': 'shape'}), '(filename, box, output_shape=shape)\n', (1898, 1933), False, 'from embryovision import util\n'), ((2222, 2261), 'embryovision.util.load_and_crop_image', 'util.load_and_crop_image', (['filename', 'box'], {}), '(filename, box)\n', (2246, 2261), False, 'from embryovision import util\n'), ((2571, 2605), 'embryovision.util.load_image_into_ram', 'util.load_image_into_ram', (['filename'], {}), '(filename)\n', (2595, 2605), False, 'from embryovision import util\n'), ((2623, 2648), 'embryovision.util.read_image', 'util.read_image', (['filename'], {}), '(filename)\n', (2638, 2648), False, 'from embryovision import util\n'), ((2666, 2698), 'embryovision.util.read_image', 'util.read_image', (['loaded_into_ram'], {}), '(loaded_into_ram)\n', (2681, 2698), False, 'from embryovision import util\n'), ((2851, 2870), 'numpy.random.seed', 'np.random.seed', (['(400)'], {}), '(400)\n', (2865, 2870), True, 'import numpy as np\n'), ((2886, 2905), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (2901, 2905), True, 'import numpy as np\n'), ((2960, 3004), 'embryovision.util.TransformingCollection', 'util.TransformingCollection', (['data', 'transform'], {}), '(data, transform)\n', (2987, 3004), False, 'from embryovision import util\n'), ((3128, 3147), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (3143, 3147), True, 'import numpy as np\n'), ((3202, 3246), 'embryovision.util.TransformingCollection', 'util.TransformingCollection', (['data', 'transform'], {}), 
'(data, transform)\n', (3229, 3246), False, 'from embryovision import util\n'), ((3347, 3371), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (3369, 3371), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((3461, 3517), 'embryovision.util.TransformingCollection', 'util.TransformingCollection', (['images_ram', 'util.read_image'], {}), '(images_ram, util.read_image)\n', (3488, 3517), False, 'from embryovision import util\n'), ((3562, 3595), 'embryovision.util.read_image', 'util.read_image', (['filenames[index]'], {}), '(filenames[index])\n', (3577, 3595), False, 'from embryovision import util\n'), ((3906, 3942), 'os.path.join', 'os.path.join', (['dummy_folder', 'filename'], {}), '(dummy_folder, filename)\n', (3918, 3942), False, 'import os\n'), ((3972, 3996), 'embryovision.util.split_all', 'util.split_all', (['fullname'], {}), '(fullname)\n', (3986, 3996), False, 'from embryovision import util\n'), ((4314, 4342), 'embryovision.util.augment_focus', 'util.augment_focus', (['filename'], {}), '(filename)\n', (4332, 4342), False, 'from embryovision import util\n'), ((279, 303), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (301, 303), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((471, 495), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (493, 495), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((696, 720), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (718, 720), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1541, 1565), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1563, 1565), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((1772, 1796), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (1794, 1796), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((2151, 2175), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (2173, 2175), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((2517, 2541), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (2539, 2541), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((2723, 2747), 'numpy.all', 'np.all', (['(image0 == image1)'], {}), '(image0 == image1)\n', (2729, 2747), True, 'import numpy as np\n'), ((3394, 3422), 'embryovision.util.load_image_into_ram', 'util.load_image_into_ram', (['nm'], {}), '(nm)\n', (3418, 3422), False, 'from embryovision import util\n'), ((3657, 3695), 'numpy.all', 'np.all', (['(image_filename == image_loader)'], {}), '(image_filename == image_loader)\n', (3663, 3695), True, 'import numpy as np\n'), ((4266, 4290), 'embryovision.tests.common.get_loadable_filenames', 'get_loadable_filenames', ([], {}), '()\n', (4288, 4290), False, 'from embryovision.tests.common import get_loadable_filenames\n'), ((4469, 4494), 'embryovision.util.split_all', 'util.split_all', (['foundname'], {}), '(foundname)\n', (4483, 4494), False, 'from embryovision import util\n'), ((4763, 4798), 'os.path.exists', 'os.path.exists', (['unloadable_filename'], {}), '(unloadable_filename)\n', (4777, 4798), False, 'import os\n'), ((2299, 2319), 'PIL.Image.open', 'Image.open', 
(['filename'], {}), '(filename)\n', (2309, 2319), False, 'from PIL import Image\n'), ((4523, 4548), 'os.path.exists', 'os.path.exists', (['foundname'], {}), '(foundname)\n', (4537, 4548), False, 'import os\n'), ((2368, 2383), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2376, 2383), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
# Number of Epochs
num_epochs = 2
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 100
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# =====================================================================================
# =====================================================================================
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
# =====================================================================================
# =====================================================================================
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
inp = loaded_graph.get_tensor_by_name("input:0")
istate = loaded_graph.get_tensor_by_name("initial_state:0")
fstate = loaded_graph.get_tensor_by_name("final_state:0")
probs = loaded_graph.get_tensor_by_name("probs:0")
return inp, istate, fstate, probs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
# =====================================================================================
# =====================================================================================
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
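    # np.random.choice samples an index according to the given probability distribution,
    # which yields more varied generated text than always picking the argmax.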
index = np.random.choice(len(probabilities), p=probabilities)
return int_to_vocab[index]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
# =====================================================================================
# =====================================================================================
gen_length = 20
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length - 1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
# =====================================================================================
# =====================================================================================
|
[
"helper.save_params",
"tensorflow.train.import_meta_graph",
"helper.load_params",
"tensorflow.Session",
"problem_unittests.test_pick_word",
"problem_unittests.test_get_tensors",
"helper.load_preprocess",
"numpy.array",
"tensorflow.Graph"
] |
[((531, 573), 'helper.save_params', 'helper.save_params', (['(seq_length, save_dir)'], {}), '((seq_length, save_dir))\n', (549, 573), False, 'import helper\n'), ((839, 863), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (861, 863), False, 'import helper\n'), ((887, 907), 'helper.load_params', 'helper.load_params', ([], {}), '()\n', (905, 907), False, 'import helper\n'), ((1736, 1771), 'problem_unittests.test_get_tensors', 'tests.test_get_tensors', (['get_tensors'], {}), '(get_tensors)\n', (1758, 1771), True, 'import problem_unittests as tests\n'), ((2435, 2466), 'problem_unittests.test_pick_word', 'tests.test_pick_word', (['pick_word'], {}), '(pick_word)\n', (2455, 2466), True, 'import problem_unittests as tests\n'), ((2818, 2828), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2826, 2828), True, 'import tensorflow as tf\n'), ((2834, 2864), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'loaded_graph'}), '(graph=loaded_graph)\n', (2844, 2864), True, 'import tensorflow as tf\n'), ((2910, 2956), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(load_dir + '.meta')"], {}), "(load_dir + '.meta')\n", (2936, 2956), True, 'import tensorflow as tf\n'), ((3234, 3249), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (3242, 3249), True, 'import numpy as np\n')]
|
from os import listdir
from os.path import isdir, isfile, join
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from utils import shelf
def dlist(key, dat):
r"""Runs over a list of dictionaries and outputs a list of values corresponding to `key`
Short version (no checks): return np.array([d[key] for d in dat])
"""
ret = []
for i, d in enumerate(dat):
if key in d:
ret.append(d[key])
else:
print('key {} is not in dat[{}]. Skip.'.format(key, i))
return np.array(ret)
def get_data(select_dict, ARGS, key_list, DAT):
data = []
for sel, key in zip(select_dict, key_list):
# Select DAT
k, v = next(iter(sel.items()))
dat = [da[0] for da in zip(DAT, ARGS) if k in da[1] and da[1][k] == v][0]
data.append(dlist(key, dat))
return data
def color_bplot(bplot, colors):
r"""Color the boxplots"""
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for median in bplot['medians']:
median.set(color='k', linewidth=1.5,)
def label_axis(ax, labels, xpos, ypos, fontsize=16, target_fdr=0.1):
# Partially remove frame
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# y label
ax.set_ylabel('Power and FDR', fontsize=fontsize)
ax.set_ylim([-0.05, 1.05])
# Hortizontal line for target fdr
if target_fdr:
ax.plot(ax.get_xlim(), [target_fdr, target_fdr], '--r')
# New Axis
new_ax = ax.twiny()
new_ax.set_xticks(xpos)
new_ax.set_xticklabels(labels)
new_ax.xaxis.set_ticks_position('bottom') # set the position of the second x-axis to bottom
new_ax.xaxis.set_label_position('bottom') # set the position of the second x-axis to bottom
new_ax.spines['bottom'].set_position(('outward', ypos)) # positions below
# Remove frame for new_ax
new_ax.spines['bottom'].set_visible(False)
new_ax.spines['top'].set_visible(False)
new_ax.spines['left'].set_visible(False)
new_ax.spines['right'].set_visible(False)
new_ax.tick_params(length=0, labelsize=fontsize)
new_ax.set_xlim(ax.get_xlim())
return new_ax
if __name__ == "__main__":
# Load data
PATH = 'output/'
DIRS = [d for d in listdir(PATH) if isdir(join(PATH, d))]
FILES = [join(PATH, d, f) for d in DIRS for f in listdir(join(PATH, d))
if isfile(join(PATH, d, f)) and f[-3:]=='.pt']
ARGS, DAT, MODELS = [], [], []
for f in FILES:
sh = shelf()._load(f)
ARGS.append(sh.args)
if 'd' in sh:
DAT.append(sh['d'])
MODELS.append(sh.args['model'])
else:
print("WARNING: There is no data field d field in file {}. Skip.".format(f))
continue
# ---------------------------
# Process data
# ---------------------------
select_dict, key_list, labels, positions, ax_labels, ax_positions = [], [], [], [-2], [], [-2]
# Baseline models
for m, l in zip(['en', 'rf'], ['Elastic Net', 'Random Forest']):
if m in MODELS:
select_dict += 4*[{'model': m}]
key_list += ['tpr_selected', 'fdr_selected', 'hrt_tpr_selected', 'hrt_fdr_selected']
labels += ['TPR', 'FDR', 'TPR\nHRT', 'FDR\nHRT']
p = positions[-1] + 2
positions += [1+p, 2+p, 4+p, 5+p]
ax_labels += [l]
ax_positions += [ax_positions[-1] + len(l)/2]
# Our models
for m, l, pos in zip(['sic_supervised', 'sic'], ['Sobolev Penalty', 'SIC'], [5.5, 4]):
if m in MODELS:
select_dict += 2*[{'model': m}]
key_list += ['hrt_tpr_selected', 'hrt_fdr_selected']
labels += ['TPR\nHRT', 'FDR\nHRT']
p = positions[-1] + 2
positions += [1+p, 2+p]
ax_labels += [l]
ax_positions += [ax_positions[-1] + pos]
    positions.pop(0)
    ax_positions.pop(0)
data = get_data(select_dict, ARGS, key_list, DAT)
# ---------------------------
# Plot
# ---------------------------
dataset = ARGS[0]['dataset'].upper()
n_samples = ARGS[0]['numSamples']
fig = plt.figure(figsize=(8, 3))
ax = plt.subplot(111)
bplot = plt.boxplot(data, positions=positions, labels=labels, patch_artist=True)
label_axis(ax, ax_labels, ax_positions, 32, fontsize=13)
color_bplot(bplot, len(positions)//2*['lightblue', 'orange'])
    fig.suptitle(f'Dataset {dataset}, N={n_samples}')
fig.tight_layout()
fig.savefig(f"output/{dataset}_{n_samples}.png", bbox_inches='tight')
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.figure",
"numpy.array",
"utils.shelf",
"os.path.join",
"os.listdir"
] |
[((554, 567), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (562, 567), True, 'import numpy as np\n'), ((4199, 4225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (4209, 4225), True, 'import matplotlib.pyplot as plt\n'), ((4235, 4251), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4246, 4251), True, 'import matplotlib.pyplot as plt\n'), ((4265, 4337), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['data'], {'positions': 'positions', 'labels': 'labels', 'patch_artist': '(True)'}), '(data, positions=positions, labels=labels, patch_artist=True)\n', (4276, 4337), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2368), 'os.path.join', 'join', (['PATH', 'd', 'f'], {}), '(PATH, d, f)\n', (2356, 2368), False, 'from os.path import isdir, isfile, join\n'), ((2300, 2313), 'os.listdir', 'listdir', (['PATH'], {}), '(PATH)\n', (2307, 2313), False, 'from os import listdir\n'), ((2323, 2336), 'os.path.join', 'join', (['PATH', 'd'], {}), '(PATH, d)\n', (2327, 2336), False, 'from os.path import isdir, isfile, join\n'), ((2400, 2413), 'os.path.join', 'join', (['PATH', 'd'], {}), '(PATH, d)\n', (2404, 2413), False, 'from os.path import isdir, isfile, join\n'), ((2544, 2551), 'utils.shelf', 'shelf', ([], {}), '()\n', (2549, 2551), False, 'from utils import shelf\n'), ((2438, 2454), 'os.path.join', 'join', (['PATH', 'd', 'f'], {}), '(PATH, d, f)\n', (2442, 2454), False, 'from os.path import isdir, isfile, join\n')]
|
""" Lower level layer for slicer.
Mom's spaghetti.
"""
# TODO: Consider boolean array indexing.
from typing import Any, AnyStr, Union, List, Tuple
from abc import abstractmethod
import numbers
class AtomicSlicer:
""" Wrapping object that will unify slicing across data structures.
What we support:
Basic indexing (return references):
- (start:stop:step) slicing
- support ellipses
Advanced indexing (return references):
- integer array indexing
Numpy Reference:
Basic indexing (return views):
- (start:stop:step) slicing
- support ellipses and newaxis (alias for None)
Advanced indexing (return copy):
- integer array indexing, i.e. X[[1,2], [3,4]]
- boolean array indexing
- mixed array indexing (has integer array, ellipses, newaxis in same slice)
"""
def __init__(self, o: Any, max_dim: Union[None, int, AnyStr] = "auto"):
""" Provides a consistent slicing API to the object provided.
Args:
o: Object to enable consistent slicing.
Currently supports numpy dense arrays, recursive lists ending with list or numpy.
max_dim: Max number of dimensions the wrapped object has.
If set to "auto", max dimensions will be inferred. This comes at compute cost.
"""
self.o = o
self.max_dim = max_dim
if self.max_dim == "auto":
self.max_dim = UnifiedDataHandler.max_dim(o)
def __repr__(self) -> AnyStr:
""" Override default repr for human readability.
Returns:
String to display.
"""
return f"{self.__class__.__name__}({self.o.__repr__()})"
def __getitem__(self, item: Any) -> Any:
""" Consistent slicing into wrapped object.
Args:
item: Slicing key of type integer or slice.
Returns:
Sliced object.
Raises:
ValueError: If slicing is not compatible with wrapped object.
"""
# Turn item into tuple if not already.
index_tup = unify_slice(item, self.max_dim)
# Slice according to object type.
return UnifiedDataHandler.slice(self.o, index_tup, self.max_dim)
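# Illustrative usage sketch (not part of the original module; assumes a plain 2-D nested list):
#   s = AtomicSlicer([[1, 2, 3], [4, 5, 6]])
#   s[0, 1:]      # basic (start:stop:step) indexing -> [2, 3]
#   s[[0, 1], 0]  # integer array indexing           -> [1, 4]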
def unify_slice(item: Any, max_dim: int, alias_lookup=None) -> Tuple:
""" Resolves aliases and ellipses in a slice item.
Args:
item: Slicing key that is passed to __getitem__.
max_dim: Max dimension of object to be sliced.
alias_lookup: AliasLookup structure.
Returns:
A tuple representation of the item.
"""
item = _normalize_slice_key(item)
index_tup = _normalize_subkey_types(item)
index_tup = _handle_newaxis_ellipses(index_tup, max_dim)
if alias_lookup:
index_tup = _handle_aliases(index_tup, alias_lookup)
return index_tup
def _normalize_subkey_types(index_tup: Tuple) -> Tuple:
""" Casts subkeys into basic types such as int.
Args:
key: Slicing key that is passed within __getitem__.
Returns:
Tuple with subkeys casted to basic types.
"""
new_index_tup = [] # Gets casted to tuple at the end
np_int_types = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
}
for subkey in index_tup:
if _safe_isinstance(subkey, "numpy", np_int_types):
new_subkey = int(subkey)
elif _safe_isinstance(subkey, "numpy", "ndarray"):
if len(subkey.shape) == 1:
new_subkey = subkey.tolist()
else:
raise ValueError(f"Cannot use array of shape {subkey.shape} as subkey.")
else:
new_subkey = subkey
new_index_tup.append(new_subkey)
return tuple(new_index_tup)
def _normalize_slice_key(key: Any) -> Tuple:
""" Normalizes slice key into always being a top-level tuple.
Args:
key: Slicing key that is passed within __getitem__.
Returns:
Expanded slice as a tuple.
"""
if not isinstance(key, tuple):
return (key,)
else:
return key
def _handle_newaxis_ellipses(index_tup: Tuple, max_dim: int) -> Tuple:
""" Expands newaxis and ellipses within a slice for simplification.
This code is mostly adapted from: https://github.com/clbarnes/h5py_like/blob/master/h5py_like/shape_utils.py#L111
Args:
index_tup: Slicing key as a tuple.
max_dim: Maximum number of dimensions in the respective sliceable object.
Returns:
Expanded slice as a tuple.
"""
non_indexes = (None, Ellipsis)
concrete_indices = sum(idx not in non_indexes for idx in index_tup)
index_list = []
# newaxis_at = []
has_ellipsis = False
int_count = 0
for item in index_tup:
if isinstance(item, numbers.Number):
int_count += 1
# NOTE: If we need locations of new axis, re-enable this.
if item is None: # pragma: no cover
pass
# newaxis_at.append(len(index_list) + len(newaxis_at) - int_count)
elif item == Ellipsis:
if has_ellipsis: # pragma: no cover
raise IndexError("an index can only have a single ellipsis ('...')")
has_ellipsis = True
initial_len = len(index_list)
while len(index_list) + (concrete_indices - initial_len) < max_dim:
index_list.append(slice(None))
else:
index_list.append(item)
if len(index_list) > max_dim: # pragma: no cover
raise IndexError("too many indices for array")
while len(index_list) < max_dim:
index_list.append(slice(None))
# return index_list, newaxis_at
return tuple(index_list)
def _handle_aliases(index_tup: Tuple, alias_lookup) -> Tuple:
new_index_tup = []
def resolve(item, dim):
if isinstance(item, slice):
return item
# Replace element if in alias lookup, otherwise use original.
item = alias_lookup.get(dim, item, item)
return item
# Go through each element within the index and resolve if needed.
for dim, item in enumerate(index_tup):
if isinstance(item, list):
new_item = []
for sub_item in item:
new_item.append(resolve(sub_item, dim))
else:
new_item = resolve(item, dim)
new_index_tup.append(new_item)
return tuple(new_index_tup)
class Tracked(AtomicSlicer):
""" Tracked defines an object that slicer wraps."""
def __init__(self, o: Any, dim: Union[int, List, tuple, None, str] = "auto"):
""" Defines an object that will be wrapped by slicer.
Args:
o: Object that will be tracked for slicer.
dim: Target dimension(s) slicer will index on for this object.
"""
super().__init__(o)
# Protected attribute that can be overriden.
self._name = None
# Place dim into coordinate form.
if dim == "auto":
self.dim = list(range(self.max_dim))
elif dim is None:
self.dim = []
elif isinstance(dim, int):
self.dim = [dim]
elif isinstance(dim, list):
self.dim = dim
elif isinstance(dim, tuple):
self.dim = list(dim)
else: # pragma: no cover
raise ValueError(f"Cannot handle dim of type: {type(dim)}")
class Obj(Tracked):
""" An object that slicer wraps. """
def __init__(self, o, dim="auto"):
super().__init__(o, dim)
class Alias(Tracked):
""" Defines a tracked object as well as additional __getitem__ keys. """
def __init__(self, o, dim):
if not (
isinstance(dim, int) or (isinstance(dim, (list, tuple)) and len(dim) <= 1)
): # pragma: no cover
raise ValueError("Aliases must track a single dimension")
super().__init__(o, dim)
class AliasLookup:
def __init__(self, aliases):
self._lookup = {}
# Populate lookup and merge indexes.
for _, alias in aliases.items():
self.update(alias)
def update(self, alias):
if alias.dim is None or len(alias.dim) == 0:
return
dim = alias.dim[0]
if dim not in self._lookup:
self._lookup[dim] = {}
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
if x not in dim_lookup:
dim_lookup[x] = set()
dim_lookup[x].add(i)
def delete(self, alias):
        '''Delete an existing alias from the lookup.'''
dim = alias.dim[0]
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
del dim_lookup[x]
def get(self, dim, target, default=None):
if dim not in self._lookup:
return default
indexes = self._lookup[dim].get(target, None)
if indexes is None:
return default
if len(indexes) == 1:
return next(iter(indexes))
else:
return list(indexes)
def resolve_dim(slicer_index: Tuple, slicer_dim: List) -> List:
""" Extracts new dim after applying slicing index and maps it back to the original index list. """
new_slicer_dim = []
reduced_mask = []
for _, curr_idx in enumerate(slicer_index):
if isinstance(curr_idx, (tuple, list, slice)):
reduced_mask.append(0)
else:
reduced_mask.append(1)
for curr_dim in slicer_dim:
if reduced_mask[curr_dim] == 0:
new_slicer_dim.append(curr_dim - sum(reduced_mask[:curr_dim]))
return new_slicer_dim
def reduced_o(tracked: Tracked) -> Union[List, Any]:
os = [t.o for t in tracked]
os = os[0] if len(os) == 1 else os
return os
class BaseHandler:
@classmethod
@abstractmethod
def head_slice(cls, o, index_tup, max_dim):
raise NotImplementedError() # pragma: no cover
@classmethod
@abstractmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
raise NotImplementedError() # pragma: no cover
@classmethod
@abstractmethod
def max_dim(cls, o):
raise NotImplementedError() # pragma: no cover
@classmethod
def default_alias(cls, o):
return []
class SeriesHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
is_element = True if isinstance(head_index, int) else False
sliced_o = o.iloc[head_index]
return is_element, sliced_o, 1
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
# NOTE: Series only has one dimension,
# call slicer again to end the recursion.
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
@classmethod
def max_dim(cls, o):
return len(o.shape)
@classmethod
def default_alias(cls, o):
index_alias = Alias(o.index.to_list(), 0)
index_alias._name = "index"
return [index_alias]
class DataFrameHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
# NOTE: At head slice, we know there are two fixed dimensions.
cut_index = index_tup
is_element = True if isinstance(cut_index[-1], int) else False
sliced_o = o.iloc[cut_index]
return is_element, sliced_o, 2
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
# NOTE: Dataframe has fixed dimensions,
# call slicer again to end the recursion.
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
@classmethod
def max_dim(cls, o):
return len(o.shape)
@classmethod
def default_alias(cls, o):
index_alias = Alias(o.index.to_list(), 0)
index_alias._name = "index"
column_alias = Alias(o.columns.to_list(), 1)
column_alias._name = "columns"
return [index_alias, column_alias]
class ArrayHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
# Check if head is string
head_index, tail_index = index_tup[0], index_tup[1:]
cut = 1
for sub_index in tail_index:
if isinstance(sub_index, str) or cut == len(o.shape):
break
cut += 1
# Process native array dimensions
cut_index = index_tup[:cut]
is_element = any([True if isinstance(x, int) else False for x in cut_index])
sliced_o = o[cut_index]
return is_element, sliced_o, cut
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
# NOTE: If we're dealing with a scipy matrix,
# we have to manually flatten it ourselves
# to keep consistent to the rest of slicer's API.
if _safe_isinstance(o, "scipy.sparse.csc", "csc_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.csr", "csr_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.dok", "dok_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
elif _safe_isinstance(o, "scipy.sparse.lil", "lil_matrix"):
return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
else:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
inner = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
if _safe_isinstance(o, "numpy", "ndarray"):
import numpy
if len(inner) > 0 and hasattr(inner[0], "__len__"):
ragged = not all(len(x) == len(inner[0]) for x in inner)
else:
ragged = False
if ragged:
return numpy.array(inner, dtype=numpy.object)
else:
return numpy.array(inner)
elif _safe_isinstance(o, "torch", "Tensor"):
import torch
if len(inner) > 0 and isinstance(inner[0], torch.Tensor):
return torch.stack(inner)
else:
return torch.tensor(inner)
elif _safe_isinstance(o, "scipy.sparse.csc", "csc_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='csc')
return out
elif _safe_isinstance(o, "scipy.sparse.csr", "csr_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='csr')
return out
elif _safe_isinstance(o, "scipy.sparse.dok", "dok_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='dok')
return out
elif _safe_isinstance(o, "scipy.sparse.lil", "lil_matrix"):
from scipy.sparse import vstack
out = vstack(inner, format='lil')
return out
else:
raise ValueError(f"Cannot handle type {type(o)}.") # pragma: no cover
@classmethod
def max_dim(cls, o):
if _safe_isinstance(o, "numpy", "ndarray") and o.dtype == "object":
return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
else:
return len(o.shape)
class DictHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
if isinstance(head_index, (tuple, list)) and len(index_tup) == 0:
return False, o, 1
if isinstance(head_index, (list, tuple)):
return (
False,
{
sub_index: AtomicSlicer(o, max_dim=max_dim)[sub_index]
for sub_index in head_index
},
1,
)
elif isinstance(head_index, slice):
if head_index == slice(None, None, None):
return False, o, 1
return False, o[head_index], 1
else:
return True, o[head_index], 1
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
return {
k: AtomicSlicer(e, max_dim=max_dim)[tail_index] for k, e in o.items()
}
@classmethod
def max_dim(cls, o):
return max([UnifiedDataHandler.max_dim(x) for x in o.values()], default=-1) + 1
class ListTupleHandler(BaseHandler):
@classmethod
def head_slice(cls, o, index_tup, max_dim):
head_index = index_tup[0]
if isinstance(head_index, (tuple, list)) and len(index_tup) == 0:
return False, o, 1
if isinstance(head_index, (list, tuple)):
if len(head_index) == 0:
return False, o, 1
else:
results = [
AtomicSlicer(o, max_dim=max_dim)[sub_index]
for sub_index in head_index
]
results = tuple(results) if isinstance(o, tuple) else results
return False, results, 1
elif isinstance(head_index, slice):
return False, o[head_index], 1
elif isinstance(head_index, int):
return True, o[head_index], 1
else: # pragma: no cover
raise ValueError(f"Invalid key {head_index} for {o}")
@classmethod
def tail_slice(cls, o, tail_index, max_dim, flatten=True):
if flatten:
return AtomicSlicer(o, max_dim=max_dim)[tail_index]
else:
results = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
return tuple(results) if isinstance(o, tuple) else results
@classmethod
def max_dim(cls, o):
return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
class UnifiedDataHandler:
""" Registry that maps types to their unified slice calls."""
""" Class attribute that maps type to their unified slice calls."""
type_map = {
("builtins", "list"): ListTupleHandler,
("builtins", "tuple"): ListTupleHandler,
("builtins", "dict"): DictHandler,
("torch", "Tensor"): ArrayHandler,
("numpy", "ndarray"): ArrayHandler,
("scipy.sparse.csc", "csc_matrix"): ArrayHandler,
("scipy.sparse.csr", "csr_matrix"): ArrayHandler,
("scipy.sparse.dok", "dok_matrix"): ArrayHandler,
("scipy.sparse.lil", "lil_matrix"): ArrayHandler,
("pandas.core.frame", "DataFrame"): DataFrameHandler,
("pandas.core.series", "Series"): SeriesHandler,
}
@classmethod
def slice(cls, o, index_tup, max_dim):
# NOTE: Unified handles base cases such as empty tuples, which
# specialized handlers do not.
if isinstance(index_tup, (tuple, list)) and len(index_tup) == 0:
return o
# Slice as delegated by data handler.
o_type = _type_name(o)
head_slice = cls.type_map[o_type].head_slice
tail_slice = cls.type_map[o_type].tail_slice
is_element, sliced_o, cut = head_slice(o, index_tup, max_dim)
out = tail_slice(sliced_o, index_tup[cut:], max_dim - cut, is_element)
return out
@classmethod
def max_dim(cls, o):
o_type = _type_name(o)
if o_type not in cls.type_map:
return 0
return cls.type_map[o_type].max_dim(o)
@classmethod
def default_alias(cls, o):
o_type = _type_name(o)
if o_type not in cls.type_map:
return {}
return cls.type_map[o_type].default_alias(o)
def _type_name(o: object) -> Tuple[str, str]:
return o.__class__.__module__, o.__class__.__name__
def _safe_isinstance(
o: object, module_name: str, type_name: Union[str, set, tuple]
) -> bool:
o_module, o_type = _type_name(o)
if isinstance(type_name, str):
return o_module == module_name and o_type == type_name
else:
return o_module == module_name and o_type in type_name
|
[
"scipy.sparse.vstack",
"numpy.array",
"torch.stack",
"torch.tensor"
] |
[((14349, 14387), 'numpy.array', 'numpy.array', (['inner'], {'dtype': 'numpy.object'}), '(inner, dtype=numpy.object)\n', (14360, 14387), False, 'import numpy\n'), ((14437, 14455), 'numpy.array', 'numpy.array', (['inner'], {}), '(inner)\n', (14448, 14455), False, 'import numpy\n'), ((14644, 14662), 'torch.stack', 'torch.stack', (['inner'], {}), '(inner)\n', (14655, 14662), False, 'import torch\n'), ((14712, 14731), 'torch.tensor', 'torch.tensor', (['inner'], {}), '(inner)\n', (14724, 14731), False, 'import torch\n'), ((14874, 14901), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""csc"""'}), "(inner, format='csc')\n", (14880, 14901), False, 'from scipy.sparse import vstack\n'), ((15071, 15098), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""csr"""'}), "(inner, format='csr')\n", (15077, 15098), False, 'from scipy.sparse import vstack\n'), ((15268, 15295), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""dok"""'}), "(inner, format='dok')\n", (15274, 15295), False, 'from scipy.sparse import vstack\n'), ((15465, 15492), 'scipy.sparse.vstack', 'vstack', (['inner'], {'format': '"""lil"""'}), "(inner, format='lil')\n", (15471, 15492), False, 'from scipy.sparse import vstack\n')]
|
# coding=utf-8
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from copy import deepcopy
import warnings as warnings
from collections import OrderedDict
import numpy as np
from pypint.solvers.i_iterative_time_solver import IIterativeTimeSolver
from pypint.solvers.i_parallel_solver import IParallelSolver
from pypint.communicators.message import Message
from pypint.integrators.integrator_base import IntegratorBase
from pypint.integrators.node_providers.gauss_lobatto_nodes import GaussLobattoNodes
from pypint.integrators.weight_function_providers.polynomial_weight_function import PolynomialWeightFunction
from pypint.problems import IInitialValueProblem, problem_has_exact_solution
from pypint.solvers.states.sdc_solver_state import SdcSolverState
from pypint.solvers.diagnosis import IDiagnosisValue
from pypint.solvers.diagnosis.norms import supremum_norm
from pypint.plugins.timers.timer_base import TimerBase
from pypint.utilities.threshold_check import ThresholdCheck
from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument
from pypint.utilities.logging import *
# General Notes on Implementation
# ===============================
#
# Names and Meaning of Indices
# ----------------------------
# T_max (num_time_steps) | number of time steps
# N (num_nodes) | number of integration nodes per time step
# t | index of current time step; interval: [0, T_max)
# n | index of current node of current time step; interval: [1, N)
# | the current node is always the next node, i.e. the node we are
# | calculating the value for
# i | index of current point in continuous array of points
class ParallelSdc(IIterativeTimeSolver, IParallelSolver):
"""*Spectral Deferred Corrections* method for solving first order ODEs.
The *Spectral Deferred Corrections* (SDC) method is described in [Minion2003]_ (Equation 2.7)
Default Values:
* :py:class:`.ThresholdCheck`
* ``max_threshold``: 10
* ``min_threshold``: 1e-7
* ``conditions``: ``('residual', 'iterations')``
* :py:attr:`.num_time_steps`: 1
* :py:attr:`.num_nodes`: 3
Given the total number of time steps :math:`T_{max}`, number of integration nodes per time
step :math:`N`, current time step :math:`t \\in [0,T_{max})` and the next integration node
to consider :math:`n \\in [0, N)`.
Let :math:`[a,b]` be the total time interval to integrate over.
For :math:`T_{max}=3` and :math:`N=4`, this can be visualized as::
a b
| |
| . . | . . | . . |
t 0 0 0 0 1 1 1 2 2 2
n 0 1 2 3 1 2 3 1 2 3
i 0 1 2 3 4 5 6 7 8 9
In general, the value at :math:`a` (i.e. :math:`t=n=i=0`) is the initial value.
See Also
--------
:py:class:`.IIterativeTimeSolver` :
implemented interface
:py:class:`.IParallelSolver` :
mixed-in interface
"""
def __init__(self, **kwargs):
super(ParallelSdc, self).__init__(**kwargs)
IParallelSolver.__init__(self, **kwargs)
del self._state
self.threshold = ThresholdCheck(min_threshold=1e-7, max_threshold=10, conditions=("residual", "iterations"))
self.timer = TimerBase()
self._num_time_steps = 1
self._dt = 0.0
self._deltas = {
't': 0.0,
'n': np.zeros(0)
}
self._classic = True
self.__nodes_type = GaussLobattoNodes
self.__weights_type = PolynomialWeightFunction
self.__num_nodes = 3
self.__exact = np.zeros(0)
self.__time_points = {
'steps': np.zeros(0),
'nodes': np.zeros(0)
}
def init(self, problem, integrator, **kwargs):
"""Initializes SDC solver with given problem and integrator.
Parameters
----------
num_time_steps : :py:class:`int`
Number of time steps to be used within the time interval of the problem.
num_nodes : :py:class:`int`
            *(optional)*
number of nodes per time step
nodes_type : :py:class:`.INodes`
*(optional)*
Type of integration nodes to be used (class name, **NOT instance**).
weights_type : :py:class:`.IWeightFunction`
*(optional)*
Integration weights function to be used (class name, **NOT instance**).
classic : :py:class:`bool`
*(optional)*
Flag for specifying the type of the SDC sweep.
:py:class:`True`: *(default)* For the classic SDC as known from the literature;
:py:class:`False`: For the modified SDC as developed by <NAME>.
Raises
------
ValueError :
* if given problem is not an :py:class:`.IInitialValueProblem`
* if number of nodes per time step is not given; neither through ``num_nodes``, ``nodes_type`` nor
``integrator``
See Also
--------
:py:meth:`.IIterativeTimeSolver.init`
overridden method (with further parameters)
:py:meth:`.IParallelSolver.init`
mixed in overridden method (with further parameters)
"""
assert_is_instance(problem, IInitialValueProblem, descriptor="Initial Value Problem", checking_obj=self)
assert_condition(issubclass(integrator, IntegratorBase),
ValueError, message="Integrator must be an IntegratorBase: NOT %s"
% integrator.__mro__[-2].__name__,
checking_obj=self)
super(ParallelSdc, self).init(problem, integrator=integrator, **kwargs)
if 'num_time_steps' in kwargs:
self._num_time_steps = kwargs['num_time_steps']
if 'num_nodes' in kwargs:
self.__num_nodes = kwargs['num_nodes']
elif 'nodes_type' in kwargs and kwargs['nodes_type'].num_nodes is not None:
self.__num_nodes = kwargs['nodes_type'].num_nodes
elif integrator.nodes_type is not None and integrator.nodes_type.num_nodes is not None:
self.__num_nodes = integrator.nodes_type.num_nodes
else:
raise ValueError(func_name(self) + "Number of nodes per time step not given.")
        if 'nodes_type' in kwargs:
            self.__nodes_type = kwargs['nodes_type']
if 'weights_type' in kwargs:
self.__weights_type = kwargs['weights_type']
if 'classic' in kwargs:
assert_is_instance(kwargs['classic'], bool, descriptor="Classic Flag", checking_obj=self)
self._classic = kwargs['classic']
# TODO: need to store the exact solution somewhere else
self.__exact = np.zeros(self.num_time_steps * (self.__num_nodes - 1) + 1, dtype=np.object)
def run(self, core, **kwargs):
"""Applies SDC solver to the initialized problem setup.
Solves the given problem with the explicit SDC algorithm.
Parameters
----------
core : :py:class:`.SdcSolverCore`
core solver stepping method
dt : :py:class:`float`
width of the interval to work on; this is devided into the number of given
time steps this solver has been initialized with
See Also
--------
:py:meth:`.IIterativeTimeSolver.run` : overridden method
"""
super(ParallelSdc, self).run(core, **kwargs)
assert_named_argument('dt', kwargs, types=float, descriptor="Width of Interval", checking_obj=self)
self._dt = kwargs['dt']
self._print_header()
# start iterations
# TODO: exact solution storage handling
self.__exact[0] = self.problem.initial_value
_has_work = True
_previous_flag = Message.SolverFlag.none
_current_flag = Message.SolverFlag.none
__work_loop_count = 1
while _has_work:
LOG.debug("Work Loop: %d" % __work_loop_count)
_previous_flag = _current_flag
_current_flag = Message.SolverFlag.none
# receive dedicated message
_msg = self._communicator.receive()
if _msg.flag == Message.SolverFlag.failed:
# previous solver failed
# --> pass on the failure and abort
_current_flag = Message.SolverFlag.failed
_has_work = False
LOG.debug("Previous Solver Failed")
else:
if _msg.flag == Message.SolverFlag.time_adjusted:
# the previous solver has adjusted its interval
# --> we need to recompute our interval
_current_flag = self._adjust_interval_width()
# we don't immediately start the computation of the newly computed interval
# but try to pass the new interval end to the next solver as soon as possible
# (this should avoid throwing away useless computation)
LOG.debug("Previous Solver Adjusted Time")
else:
if _previous_flag in \
[Message.SolverFlag.none, Message.SolverFlag.converged, Message.SolverFlag.finished,
Message.SolverFlag.time_adjusted]:
# we just started or finished our previous interval
# --> start a new interval
_has_work = self._init_new_interval(_msg.time_point)
if _has_work:
# set initial values
self.state.initial.solution.value = _msg.value.copy()
self.state.initial.solution.time_point = _msg.time_point
self.state.initial.done()
LOG.debug("New Interval Initialized")
# start logging output
self._print_interval_header()
# start global timing (per interval)
self.timer.start()
else:
# pass
LOG.debug("No New Interval Available")
elif _previous_flag == Message.SolverFlag.iterating:
LOG.debug("Next Iteration")
else:
LOG.warn("WARNING!!! Something went wrong here")
if _has_work:
# we are still on the same interval or have just successfully initialized a new interval
# --> do the real computation
LOG.debug("Starting New Solver Main Loop")
# initialize a new iteration state
self.state.proceed()
if _msg.time_point == self.state.initial.time_point:
if _previous_flag == Message.SolverFlag.iterating:
LOG.debug("Updating initial value")
# if the previous solver has a new initial value for us, we use it
self.state.current_iteration.initial.solution.value = _msg.value.copy()
_current_flag = self._main_solver_loop()
if _current_flag in \
[Message.SolverFlag.converged, Message.SolverFlag.finished, Message.SolverFlag.failed]:
_log_msgs = {'': OrderedDict()}
if self.state.last_iteration_index <= self.threshold.max_iterations:
_group = 'Converged after %d iteration(s)' % (self.state.last_iteration_index + 1)
_log_msgs[''][_group] = OrderedDict()
_log_msgs[''][_group] = self.threshold.has_reached(log=True)
_log_msgs[''][_group]['Final Residual'] = "{:.3e}"\
.format(supremum_norm(self.state.last_iteration.final_step.solution.residual))
_log_msgs[''][_group]['Solution Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.solution_reduction(self.state.last_iteration_index)))
if problem_has_exact_solution(self.problem, self):
_log_msgs[''][_group]['Error Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.error_reduction(self.state.last_iteration_index)))
else:
warnings.warn("{}: Did not converged: {:s}".format(self._core.name, self.problem))
_group = "FAILED: After maximum of {:d} iteration(s)"\
.format(self.state.last_iteration_index + 1)
_log_msgs[''][_group] = OrderedDict()
_log_msgs[''][_group]['Final Residual'] = "{:.3e}"\
.format(supremum_norm(self.state.last_iteration.final_step.solution.residual))
_log_msgs[''][_group]['Solution Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.solution_reduction(self.state.last_iteration_index)))
if problem_has_exact_solution(self.problem, self):
_log_msgs[''][_group]['Error Reduction'] = "{:.3e}"\
.format(supremum_norm(self.state.solution
.error_reduction(self.state.last_iteration_index)))
LOG.warn(" {} Failed: Maximum number iterations reached without convergence."
.format(self._core.name))
print_logging_message_tree(_log_msgs)
elif _previous_flag in [Message.SolverFlag.converged, Message.SolverFlag.finished]:
LOG.debug("Solver Finished.")
self.timer.stop()
self._print_footer()
else:
# something went wrong
# --> we failed
LOG.warn("Solver failed.")
_current_flag = Message.SolverFlag.failed
self._communicator.send(value=self.state.current_iteration.final_step.solution.value,
time_point=self.state.current_iteration.final_step.time_point,
flag=_current_flag)
__work_loop_count += 1
# end while:has_work is None
LOG.debug("Solver Main Loop Done")
return [_s.solution for _s in self._states]
@property
def state(self):
"""Read-only accessor for the sovler's state
Returns
-------
state : :py:class:`.ISolverState`
"""
if len(self._states) > 0:
return self._states[-1]
else:
return None
@property
def num_time_steps(self):
"""Accessor for the number of time steps within the interval.
Returns
-------
number_time_steps : :py:class:`int`
Number of time steps within the problem-given time interval.
"""
return self._num_time_steps
@property
def num_nodes(self):
"""Accessor for the number of integration nodes per time step.
Returns
-------
number_of_nodes : :py:class:`int`
Number of integration nodes used within one time step.
"""
return self.__num_nodes
@property
def classic(self):
"""Read-only accessor for the type of SDC
Returns
-------
is_classic : :py:class:`bool`
:py:class:`True` if it's the classic SDC as known from papers;
:py:class:`False` if it's the modified SDC by <NAME>
"""
return self._classic
def _init_new_state(self):
"""Initialize a new state for a work task
Usually, this starts a new work task.
The previous state, if applicable, is stored in a stack.
"""
if self.state:
# finalize the current state
self.state.finalize()
# initialize solver state
self._states.append(SdcSolverState(num_nodes=self.num_nodes - 1, num_time_steps=self.num_time_steps))
def _init_new_interval(self, start):
"""Initializes a new work interval
Parameters
----------
start : :py:class:`float`
start point of new interval
Returns
-------
has_work : :py:class:`bool`
            :py:class:`True` if a new interval has been initialized;
            :py:class:`False` if no new interval has been initialized (i.e. new interval end would exceed end of time
given by problem)
"""
assert_is_instance(start, float, descriptor="Time Point", checking_obj=self)
if start + self._dt > self.problem.time_end:
return False
if self.state and start == self.state.initial.time_point:
return False
self._init_new_state()
# set width of current interval
self.state.delta_interval = self._dt
# compute time step and node distances
self._deltas['t'] = self.state.delta_interval / self.num_time_steps # width of a single time step (equidistant)
# start time points of time steps
self.__time_points['steps'] = np.linspace(start, start + self._dt, self.num_time_steps + 1)
# initialize and transform integrator for time step width
self._integrator.init(self.__nodes_type, self.__num_nodes, self.__weights_type,
interval=np.array([self.__time_points['steps'][0], self.__time_points['steps'][1]],
dtype=np.float))
self.__time_points['nodes'] = np.zeros((self.num_time_steps, self.num_nodes), dtype=np.float)
_deltas_n = np.zeros(self.num_time_steps * (self.num_nodes - 1) + 1)
# copy the node provider so we do not alter the integrator's one
_nodes = deepcopy(self._integrator.nodes_type)
for _t in range(0, self.num_time_steps):
# transform Nodes (copy) onto new time step for retrieving actual integration nodes
_nodes.interval = np.array([self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]])
self.__time_points['nodes'][_t] = _nodes.nodes.copy()
for _n in range(0, self.num_nodes - 1):
_i = _t * (self.num_nodes - 1) + _n
_deltas_n[_i + 1] = _nodes.nodes[_n + 1] - _nodes.nodes[_n]
self._deltas['n'] = _deltas_n[1:].copy()
return True
def _adjust_interval_width(self):
"""Adjust width of time interval
"""
raise NotImplementedError("Time Adaptivity not yet implemented.")
# return Message.SolverFlag.time_adjusted
def _main_solver_loop(self):
# initialize iteration timer of same type as global timer
_iter_timer = self.timer.__class__()
self._print_iteration(self.state.current_iteration_index + 1)
# iterate on time steps
_iter_timer.start()
for _current_time_step in self.state.current_iteration:
# run this time step
self._time_step()
if self.state.current_time_step_index < len(self.state.current_iteration) - 1:
self.state.current_iteration.proceed()
_iter_timer.stop()
# check termination criteria
self.threshold.check(self.state)
# log this iteration's summary
if self.state.is_first_iteration:
# on first iteration we do not have comparison values
self._print_iteration_end(None, None, None, _iter_timer.past())
else:
if problem_has_exact_solution(self.problem, self) and not self.state.is_first_iteration:
# we could compute the correct error of our current solution
self._print_iteration_end(self.state.solution.solution_reduction(self.state.current_iteration_index),
self.state.solution.error_reduction(self.state.current_iteration_index),
self.state.current_step.solution.residual,
_iter_timer.past())
else:
self._print_iteration_end(self.state.solution.solution_reduction(self.state.current_iteration_index),
None,
self.state.current_step.solution.residual,
_iter_timer.past())
# finalize this iteration (i.e. TrajectorySolutionData.finalize())
self.state.current_iteration.finalize()
_reason = self.threshold.has_reached()
if _reason is None:
# LOG.debug("solver main loop done: no reason")
return Message.SolverFlag.iterating
elif _reason == ['iterations']:
# LOG.debug("solver main loop done: iterations")
self.state.finalize()
return Message.SolverFlag.finished
else:
# LOG.debug("solver main loop done: other")
self.state.finalize()
return Message.SolverFlag.converged
def _time_step(self):
self.state.current_time_step.delta_time_step = self._deltas['t']
for _step in range(0, len(self.state.current_time_step)):
_node_index = self.state.current_time_step_index * (self.num_nodes - 1) + _step
self.state.current_time_step[_step].delta_tau = self._deltas['n'][_node_index]
self.state.current_time_step[_step].solution.time_point = \
self.__time_points['nodes'][self.state.current_time_step_index][_step + 1]
self._print_time_step(self.state.current_time_step_index + 1,
self.state.current_time_step.initial.time_point,
self.state.current_time_step.last.time_point,
self.state.current_time_step.delta_time_step)
# for classic SDC compute integral
_integral = 0.0
_integrate_values = None
if self.classic:
if not self.state.current_time_step.initial.rhs_evaluated:
self.state.current_time_step.initial.rhs = \
self.problem.evaluate_wrt_time(self.state.current_time_step.initial.time_point,
self.state.current_time_step.initial.value)
_integrate_values = np.array([self.state.current_time_step.initial.rhs], dtype=self.problem.numeric_type)
for _step_index in range(0, len(self.state.current_time_step)):
if self.state.is_first_iteration:
_integrate_values = \
np.append(_integrate_values,
np.array([self.state.current_time_step.initial.rhs], dtype=self.problem.numeric_type),
axis=0)
else:
_step = self.state.previous_iteration[self.state.current_time_step_index][_step_index]
if not _step.rhs_evaluated:
_step.rhs = self.problem.evaluate_wrt_time(_step.time_point, _step.value)
_integrate_values = \
np.append(_integrate_values,
np.array([_step.rhs], dtype=self.problem.numeric_type),
axis=0)
assert_condition(_integrate_values.shape[0] == self.num_nodes,
ValueError, message="Number of integration values not correct: {:d} != {:d}"
.format(_integrate_values.shape[0], self.num_nodes),
checking_obj=self)
_full_integral = 0.0
# do the actual SDC steps of this SDC sweep
for _step_index in range(0, len(self.state.current_time_step)):
_current_step = self.state.current_time_step[_step_index]
if self.classic:
_integral = self._integrator.evaluate(_integrate_values,
from_node=_step_index, target_node=_step_index + 1)
# we successively compute the full integral, which is used for the residual at the end
_full_integral += _integral
_current_step.integral = _integral.copy()
# do the SDC step of this sweep
self._sdc_step()
if self.state.current_step_index < len(self.state.current_time_step) - 1:
self.state.current_time_step.proceed()
del _integrate_values
# compute residual and print step details
for _step_index in range(0, len(self.state.current_time_step)):
_step = self.state.current_time_step[_step_index]
self._core.compute_residual(self.state, step=_step, integral=_full_integral)
# finalize this step (i.e. StepSolutionData.finalize())
_step.done()
if _step_index > 0:
_previous_time = self.state.current_time_step[_step_index - 1].time_point
else:
_previous_time = self.state.current_time_step.initial.time_point
if problem_has_exact_solution(self.problem, self):
self._print_step(_step_index + 2,
_previous_time,
_step.time_point,
supremum_norm(_step.value),
_step.solution.residual,
_step.solution.error)
else:
self._print_step(_step_index + 2,
_previous_time,
_step.time_point,
supremum_norm(_step.value),
_step.solution.residual,
None)
self._print_time_step_end()
# finalizing the current time step (i.e. TrajectorySolutionData.finalize)
self.state.current_time_step.finalize()
def _sdc_step(self):
# helper variables
_current_time_step_index = self.state.current_time_step_index
_current_step_index = self.state.current_step_index
# copy solution of previous iteration to this one
if self.state.is_first_iteration:
self.state.current_step.value = self.state.initial.value.copy()
else:
self.state.current_step.value = \
self.state.previous_iteration[_current_time_step_index][_current_step_index].value.copy()
# TODO: review the custom modification
# if not self.classic:
# # gather values for integration and evaluate problem at given points
# # initial value for this time step
# _integrate_values = \
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step.initial.time_point,
# self.state.current_time_step.initial.value.copy())
# ], dtype=self.problem.numeric_type)
#
# if _current_step_index > 0:
# # values from this iteration (already calculated)
# _from_current_iteration_range = range(0, _current_step_index)
# for _index in _from_current_iteration_range:
# _integrate_values = \
# np.append(_integrate_values,
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step[_index].solution.time_point,
# self.state.current_time_step[_index].solution.value.copy())
# ], dtype=self.problem.numeric_type
# ), axis=0)
#
# # values from previous iteration
# _from_previous_iteration_range = range(_current_step_index, self.num_nodes - 1)
# for _index in _from_previous_iteration_range:
# if self.state.is_first_iteration:
# _this_value = self.problem.initial_value
# else:
# _this_value = self.state.previous_iteration[_current_time_step_index][_index].solution.value.copy()
# _integrate_values = \
# np.append(_integrate_values,
# np.array(
# [self.problem.evaluate_wrt_time(self.state.current_time_step[_index].solution.time_point,
# _this_value)
# ], dtype=self.problem.numeric_type
# ), axis=0)
# assert_condition(_integrate_values.shape[0] == self.num_nodes,
# ValueError, message="Number of integration values not correct: {:d} != {:d}"
# .format(_integrate_values.shape[0], self.num_nodes),
# checking_obj=self)
#
# # integrate
# self.state.current_step.integral = self._integrator.evaluate(_integrate_values,
# from_node=_current_step_index,
# target_node=_current_step_index + 1)
# del _integrate_values
# # END if not self.classic
# compute step
self._core.run(self.state, problem=self.problem)
# calculate error
self._core.compute_error(self.state, problem=self.problem)
# step gets finalized after computation of residual
def print_lines_for_log(self):
_lines = super(ParallelSdc, self).print_lines_for_log()
if 'Number Nodes per Time Step' not in _lines['Integrator']:
_lines['Integrator']['Number Nodes per Time Step'] = "%d" % self.__num_nodes
if 'Number Time Steps' not in _lines['Integrator']:
_lines['Integrator']['Number Time Steps'] = "%d" % self._num_time_steps
return _lines
def _print_interval_header(self):
LOG.info("%s%s" % (VERBOSITY_LVL1, SEPARATOR_LVL3))
LOG.info("{} Interval: [{:.3f}, {:.3f}]"
.format(VERBOSITY_LVL1, self.state.initial.time_point, self.state.initial.time_point + self._dt))
self._print_output_tree_header()
def _print_output_tree_header(self):
LOG.info("%s iter" % VERBOSITY_LVL1)
LOG.info("%s \\" % VERBOSITY_LVL2)
LOG.info("%s |- time start end delta" % VERBOSITY_LVL2)
LOG.info("%s | \\" % VERBOSITY_LVL3)
LOG.info("%s | |- step t_0 t_1 phi(t_1) resid err" % VERBOSITY_LVL3)
LOG.info("%s | \\_" % VERBOSITY_LVL2)
LOG.info("%s \\_ sol r.red err r.red resid time" % VERBOSITY_LVL1)
def _print_iteration(self, _iter):
_iter = self._output_format(_iter, 'int', width=5)
LOG.info("%s %s" % (VERBOSITY_LVL1, _iter))
LOG.info("%s \\" % VERBOSITY_LVL2)
def _print_iteration_end(self, solred, errred, resid, time):
_solred = self._output_format(solred, 'exp')
_errred = self._output_format(errred, 'exp')
_resid = self._output_format(resid, 'exp')
_time = self._output_format(time, 'float', width=6.3)
LOG.info("%s \\_ %s %s %s %s" % (VERBOSITY_LVL1, _solred, _errred, _resid, _time))
def _print_time_step(self, time_step, start, end, delta):
_time_step = self._output_format(time_step, 'int', width=3)
_start = self._output_format(start, 'float', width=6.3)
_end = self._output_format(end, 'float', width=6.3)
_delta = self._output_format(delta, 'float', width=6.3)
LOG.info("%s |- %s %s %s %s" % (VERBOSITY_LVL2, _time_step, _start, _end, _delta))
LOG.info("%s | \\" % VERBOSITY_LVL3)
self._print_step(1, None, self.state.current_time_step.initial.time_point,
supremum_norm(self.state.current_time_step.initial.solution.value),
None, None)
def _print_time_step_end(self):
LOG.info("%s | \\_" % VERBOSITY_LVL2)
def _print_step(self, step, t0, t1, phi, resid, err):
_step = self._output_format(step, 'int', width=2)
_t0 = self._output_format(t0, 'float', width=6.3)
_t1 = self._output_format(t1, 'float', width=6.3)
_phi = self._output_format(phi, 'float', width=6.3)
_resid = self._output_format(resid, 'exp')
_err = self._output_format(err, 'exp')
LOG.info("%s | |- %s %s %s %s %s %s"
% (VERBOSITY_LVL3, _step, _t0, _t1, _phi, _resid, _err))
def _output_format(self, value, _type, width=None):
def _value_to_numeric(val):
if isinstance(val, (np.ndarray, IDiagnosisValue)):
return supremum_norm(val)
else:
return val
if _type and width is None:
if _type == 'float':
width = 10.3
elif _type == 'int':
width = 10
elif _type == 'exp':
width = 9.2
else:
width = 10
if value is None:
_outstr = "{: ^{width}s}".format('na', width=int(width))
else:
if _type == 'float':
_outstr = "{: {width}f}".format(_value_to_numeric(value), width=width)
elif _type == 'int':
_outstr = "{: {width}d}".format(_value_to_numeric(value), width=width)
elif _type == 'exp':
_outstr = "{: {width}e}".format(_value_to_numeric(value), width=width)
else:
_outstr = "{: >{width}s}".format(value, width=width)
return _outstr
__all__ = ['ParallelSdc']
|
[
"pypint.solvers.diagnosis.norms.supremum_norm",
"copy.deepcopy",
"pypint.solvers.states.sdc_solver_state.SdcSolverState",
"pypint.solvers.i_parallel_solver.IParallelSolver.__init__",
"pypint.plugins.timers.timer_base.TimerBase",
"pypint.utilities.assert_is_instance",
"numpy.zeros",
"pypint.utilities.assert_named_argument",
"pypint.problems.problem_has_exact_solution",
"pypint.utilities.threshold_check.ThresholdCheck",
"numpy.array",
"numpy.linspace",
"collections.OrderedDict",
"pypint.utilities.func_name"
] |
[((3328, 3368), 'pypint.solvers.i_parallel_solver.IParallelSolver.__init__', 'IParallelSolver.__init__', (['self'], {}), '(self, **kwargs)\n', (3352, 3368), False, 'from pypint.solvers.i_parallel_solver import IParallelSolver\n'), ((3419, 3516), 'pypint.utilities.threshold_check.ThresholdCheck', 'ThresholdCheck', ([], {'min_threshold': '(1e-07)', 'max_threshold': '(10)', 'conditions': "('residual', 'iterations')"}), "(min_threshold=1e-07, max_threshold=10, conditions=(\n 'residual', 'iterations'))\n", (3433, 3516), False, 'from pypint.utilities.threshold_check import ThresholdCheck\n'), ((3532, 3543), 'pypint.plugins.timers.timer_base.TimerBase', 'TimerBase', ([], {}), '()\n', (3541, 3543), False, 'from pypint.plugins.timers.timer_base import TimerBase\n'), ((3870, 3881), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3878, 3881), True, 'import numpy as np\n'), ((5505, 5614), 'pypint.utilities.assert_is_instance', 'assert_is_instance', (['problem', 'IInitialValueProblem'], {'descriptor': '"""Initial Value Problem"""', 'checking_obj': 'self'}), "(problem, IInitialValueProblem, descriptor=\n 'Initial Value Problem', checking_obj=self)\n", (5523, 5614), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((7021, 7096), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps * (self.__num_nodes - 1) + 1)'], {'dtype': 'np.object'}), '(self.num_time_steps * (self.__num_nodes - 1) + 1, dtype=np.object)\n', (7029, 7096), True, 'import numpy as np\n'), ((7738, 7842), 'pypint.utilities.assert_named_argument', 'assert_named_argument', (['"""dt"""', 'kwargs'], {'types': 'float', 'descriptor': '"""Width of Interval"""', 'checking_obj': 'self'}), "('dt', kwargs, types=float, descriptor=\n 'Width of Interval', checking_obj=self)\n", (7759, 7842), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((17641, 17717), 'pypint.utilities.assert_is_instance', 'assert_is_instance', (['start', 'float'], {'descriptor': '"""Time Point"""', 'checking_obj': 'self'}), "(start, float, descriptor='Time Point', checking_obj=self)\n", (17659, 17717), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((18257, 18318), 'numpy.linspace', 'np.linspace', (['start', '(start + self._dt)', '(self.num_time_steps + 1)'], {}), '(start, start + self._dt, self.num_time_steps + 1)\n', (18268, 18318), True, 'import numpy as np\n'), ((18692, 18755), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, self.num_nodes)'], {'dtype': 'np.float'}), '((self.num_time_steps, self.num_nodes), dtype=np.float)\n', (18700, 18755), True, 'import numpy as np\n'), ((18776, 18832), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps * (self.num_nodes - 1) + 1)'], {}), '(self.num_time_steps * (self.num_nodes - 1) + 1)\n', (18784, 18832), True, 'import numpy as np\n'), ((18924, 18961), 'copy.deepcopy', 'deepcopy', (['self._integrator.nodes_type'], {}), '(self._integrator.nodes_type)\n', (18932, 18961), False, 'from copy import deepcopy\n'), ((3665, 3676), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3673, 3676), True, 'import numpy as np\n'), ((3934, 3945), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3942, 3945), True, 'import numpy as np\n'), ((3968, 3979), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3976, 3979), True, 'import numpy as np\n'), ((6797, 6890), 'pypint.utilities.assert_is_instance', 'assert_is_instance', (["kwargs['classic']", 'bool'], 
{'descriptor': '"""Classic Flag"""', 'checking_obj': 'self'}), "(kwargs['classic'], bool, descriptor='Classic Flag',\n checking_obj=self)\n", (6815, 6890), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((17055, 17140), 'pypint.solvers.states.sdc_solver_state.SdcSolverState', 'SdcSolverState', ([], {'num_nodes': '(self.num_nodes - 1)', 'num_time_steps': 'self.num_time_steps'}), '(num_nodes=self.num_nodes - 1, num_time_steps=self.num_time_steps\n )\n', (17069, 17140), False, 'from pypint.solvers.states.sdc_solver_state import SdcSolverState\n'), ((19137, 19222), 'numpy.array', 'np.array', (["[self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]]"], {}), "([self.__time_points['steps'][_t], self.__time_points['steps'][_t + 1]]\n )\n", (19145, 19222), True, 'import numpy as np\n'), ((23475, 23565), 'numpy.array', 'np.array', (['[self.state.current_time_step.initial.rhs]'], {'dtype': 'self.problem.numeric_type'}), '([self.state.current_time_step.initial.rhs], dtype=self.problem.\n numeric_type)\n', (23483, 23565), True, 'import numpy as np\n'), ((26265, 26311), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (26291, 26311), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((33389, 33455), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['self.state.current_time_step.initial.solution.value'], {}), '(self.state.current_time_step.initial.solution.value)\n', (33402, 33455), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((18513, 18607), 'numpy.array', 'np.array', (["[self.__time_points['steps'][0], self.__time_points['steps'][1]]"], {'dtype': 'np.float'}), "([self.__time_points['steps'][0], self.__time_points['steps'][1]],\n dtype=np.float)\n", (18521, 18607), True, 'import numpy as np\n'), ((20659, 20705), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (20685, 20705), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((34307, 34325), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['val'], {}), '(val)\n', (34320, 34325), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((26496, 26522), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['_step.value'], {}), '(_step.value)\n', (26509, 26522), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((26838, 26864), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['_step.value'], {}), '(_step.value)\n', (26851, 26864), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((23816, 23906), 'numpy.array', 'np.array', (['[self.state.current_time_step.initial.rhs]'], {'dtype': 'self.problem.numeric_type'}), '([self.state.current_time_step.initial.rhs], dtype=self.problem.\n numeric_type)\n', (23824, 23906), True, 'import numpy as np\n'), ((24349, 24403), 'numpy.array', 'np.array', (['[_step.rhs]'], {'dtype': 'self.problem.numeric_type'}), '([_step.rhs], dtype=self.problem.numeric_type)\n', (24357, 24403), True, 'import numpy as np\n'), ((6506, 6521), 'pypint.utilities.func_name', 'func_name', (['self'], {}), '(self)\n', (6515, 6521), False, 'from pypint.utilities import assert_is_instance, assert_condition, func_name, assert_named_argument\n'), ((11836, 11849), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11847, 11849), False, 'from collections import OrderedDict\n'), ((12119, 12132), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12130, 12132), False, 'from collections import OrderedDict\n'), ((12739, 12785), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (12765, 12785), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((13450, 13463), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13461, 13463), False, 'from collections import OrderedDict\n'), ((13977, 14023), 'pypint.problems.problem_has_exact_solution', 'problem_has_exact_solution', (['self.problem', 'self'], {}), '(self.problem, self)\n', (14003, 14023), False, 'from pypint.problems import IInitialValueProblem, problem_has_exact_solution\n'), ((12354, 12423), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['self.state.last_iteration.final_step.solution.residual'], {}), '(self.state.last_iteration.final_step.solution.residual)\n', (12367, 12423), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n'), ((13592, 13661), 'pypint.solvers.diagnosis.norms.supremum_norm', 'supremum_norm', (['self.state.last_iteration.final_step.solution.residual'], {}), '(self.state.last_iteration.final_step.solution.residual)\n', (13605, 13661), False, 'from pypint.solvers.diagnosis.norms import supremum_norm\n')]
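A minimal numpy-only sketch (not taken from the snippet) of the interval splitting done in _init_new_interval above: the work interval is cut into equidistant time steps with np.linspace, and the node-to-node widths within each step are collected into one flat delta array. The unit node positions are an assumption here (0, 0.5, 1 is the 3-point Gauss-Lobatto layout); the real code obtains them from its nodes/weights classes.

import numpy as np

def split_interval(start, dt, num_time_steps, unit_nodes):
    # boundaries of the equidistant time steps, as in self.__time_points['steps']
    steps = np.linspace(start, start + dt, num_time_steps + 1)
    deltas_n = []
    for t in range(num_time_steps):
        # map the unit nodes onto this time step and record node-to-node widths
        nodes = steps[t] + (steps[t + 1] - steps[t]) * unit_nodes
        deltas_n.extend(np.diff(nodes))
    return steps, np.array(deltas_n)

# example: interval [0, 1], 2 time steps, 3 nodes per step
steps, deltas = split_interval(0.0, 1.0, 2, np.array([0.0, 0.5, 1.0]))
print(steps)   # [0.  0.5 1. ]
print(deltas)   # [0.25 0.25 0.25 0.25]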
|
"""
Get the timestamps of all claims and plot the cumulative number vs. time!
"""
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import requests
import sqlite3
import time
def make_graph(mode, show=True):
"""
mode must be "claims" or "channels"
"""
if mode != "claims" and mode != "channels":
return
plt.close("all")
# Open the DB
db_file = "/home/brewer/local/lbry-sdk/lbry/lbryum-data/claims.db"
conn = sqlite3.connect(db_file)
c = conn.cursor()
# List for results
times = []
# Query
if mode == "claims":
x = "<>"
else:
x = "="
query = "SELECT creation_timestamp FROM claim\
WHERE claim_type {x} 2;".format(x=x)
# Iterate over query results
i = 0
for t in c.execute(query):
times.append(t)
i = i + 1
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
# Sort the times and convert to a numpy array
times = np.sort(np.array(times).flatten())
# Save some stats to JSON for Electron
now = time.time()
my_dict = {}
my_dict["unix_time"] = now
my_dict["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC"
my_dict["total_{mode}".format(mode=mode)] = int(\
len(times))
my_dict["new_{mode}_1_hour".format(mode=mode)] = int(\
np.sum(times > (now - 3600.0)))
my_dict["new_{mode}_24_hours".format(mode=mode)] = int(\
np.sum(times > (now - 86400.0)))
my_dict["new_{mode}_7_days".format(mode=mode)] = int(\
np.sum(times > (now - 7*86400.0)))
my_dict["new_{mode}_30_days".format(mode=mode)] = int(\
np.sum(times > (now - 30*86400.0)))
f = open("{mode}_stats.json".format(mode=mode), "w")
f.write(json.dumps(my_dict))
f.close()
# Count new claims this UTC day
count_today = np.sum(times > 86400.0*int(now/86400.0))
if mode == "claims":
string = "publications"
else:
string = "channels"
print("{K} {mode}, {n} from today so far (UTC). ".format(K=len(times), mode=string, n=count_today), end="", flush=True)
# Plotting stuff
plt.rcParams["font.family"] = "Liberation Sans"
plt.rcParams["font.size"] = 14
plt.style.use("dark_background")
plt.rcParams["axes.facecolor"] = "#3c3d3c"
plt.rcParams["savefig.facecolor"] = "#3c3d3c"
plt.figure(figsize=(15, 11))
plt.subplot(2, 1, 1)
times_in_days = (times - 1483228800)/86400.0
days = times_in_days.astype("int64")
plt.plot(times_in_days,
np.arange(len(times)), "w-", linewidth=1.5)
plt.ylabel("Cumulative number of {mode}".format(mode=string))
plt.title("Total number of {mode} = {n}.".format(n=len(times), mode=string))
plt.xlim([0.0, days.max() + 1])
plt.ylim(bottom=-100)
plt.gca().tick_params(labelright=True)
# Add vertical lines for new years (approximately)
new_years = np.arange(0, 5)*365.2425
for year in new_years:
plt.axvline(year, color="r", alpha=0.8, linestyle="--")
# Add text about years
year_names = [2017, 2018, 2019]
for i in range(len(year_names)):
year = new_years[i]
plt.text(year+5.0, 0.95*plt.gca().get_ylim()[1],
"{text} begins".format(text=year_names[i]),
fontsize=10)
# Add line and text about MH's video
plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g")
plt.text(890.0, 0.2*plt.gca().get_ylim()[1],
"@MH video\n\'Why I Left YouTube\'\ngoes viral",
fontsize=10)
plt.subplot(2, 1, 2)
bin_width = 1.0
# Bin edges including right edge of last bin
bins = np.arange(0, np.max(days)+2) - 0.5*bin_width
color = "#6b95ef"
counts = plt.hist(days, bins, alpha=0.9, color=color, label="Raw",
width=bin_width, align="mid")[0]
# Compute 10-day moving average
moving_average = np.zeros(len(bins)-1)
for i in range(len(moving_average)):
subset = counts[0:(i+1)]
if len(subset) >= 10:
subset = subset[-10:]
moving_average[i] = np.mean(subset)
plt.plot(bins[0:-2] + 0.5*bin_width, moving_average[0:-1], "w-",
label="10-day moving average", linewidth=1.5)
plt.xlim([0.0, days.max() + 1])
plt.xlabel("Time (days since 2017-01-01)")
plt.ylabel("New {mode} added each day".format(mode=string))
subset = counts[-31:-1]
plt.title("Recent average rate (last 30 days) = {n} {mode} per day.".\
format(n=int(np.sum(time.time() - times <= 30.0*86400.0)/30.0),
mode=string))
plt.gca().tick_params(labelright=True)
# Year lines
for year in new_years:
plt.axvline(year, color="r", alpha=0.8, linestyle="--")
# MH line
plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g")
# plt.gca().set_yticks([1.0, 10.0, 100.0, 1000.0, 10000.0])
# plt.gca().set_yticklabels(["1", "10", "100", "1000", "10000"])
plt.legend()
plt.savefig("{mode}.svg".format(mode=mode), bbox_inches="tight")
plt.savefig("{mode}.png".format(mode=mode), bbox_inches="tight", dpi=70)
print("Figure saved to {mode}.svg and {mode}.png.".format(mode=mode))
if show:
plt.show()
def aggregate_tips():
"""
Calculate tips over past X amount of time and write JSON output
"""
# The SQL query to perform
now = time.time()
print("Computing tip stats...", end="", flush=True)
labels = ["30_days", "7_days", "24_hours", "1_hour"]
windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0]
result = {}
result["unix_time"] = now
result["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC"
# Agrees with old method, but should it be SUM(amount)?
query = "SELECT support_id, amount, time, claim_name, claim_id, is_nsfw, SUM(to_claim_address) tot FROM (SELECT support.id as support_id, support.support_amount amount,\
transaction.transaction_time time, claim.is_nsfw is_nsfw,\
claim.claim_id claim_id, claim.name claim_name,\
(CASE WHEN (output.address_list LIKE CONCAT('%25', claim_address, '%25')) THEN '1' ELSE '0' END) to_claim_address\
FROM claim\
INNER JOIN support ON support.supported_claim_id = claim.claim_id\
INNER JOIN transaction ON support.transaction_hash_id = transaction.hash\
INNER JOIN output ON transaction.hash = output.transaction_hash \
WHERE transaction.transaction_time > ({now} - {window})\
AND transaction.transaction_time <= {now}) AS result\
GROUP BY support_id, amount;".format(now=now, window=windows[0])
request = requests.get("https://chainquery.lbry.com/api/sql?query=" + query)
the_dict = request.json()
# Get tips into numpy array
times = []
tips = []
is_tip = []
links = []
is_nsfw = []
for row in the_dict["data"]:
times.append(float(row["time"]))
tips.append(float(row["amount"]))
links.append("https://open.lbry.com/" + str(row["claim_name"]) + ":"\
+ str(row["claim_id"]))
is_nsfw.append(row["is_nsfw"])
if row["tot"] > 0:
is_tip.append(True)
else:
is_tip.append(False)
times = np.array(times)
tips = np.array(tips)
is_tip = np.array(is_tip)
links = np.array(links)
is_nsfw = np.array(is_nsfw)
# Write tips
for i in range(len(labels)):
keep = (times > (now - windows[i])) & is_tip
_times = times[keep]
_tips = tips[keep]
_links = links[keep]
_is_nsfw = is_nsfw[keep]
result["num_tips_{label}".format(label=labels[i])] = len(_tips)
result["lbc_tipped_{label}".format(label=labels[i])] = float(_tips.sum())
maxtip = 0
maxtip_link = None
maxtip_is_nsfw = None
if len(_tips) > 0:
maxtip = float(_tips.max())
index = np.argmax(_tips)
maxtip_link = _links[index]
maxtip_is_nsfw = _is_nsfw[index]
result["biggest_tip_{label}".format(label=labels[i])] = maxtip
result["biggest_tip_{label}_link".format(label=labels[i])] = maxtip_link
result["biggest_tip_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw)
# Write supports
for i in range(len(labels)):
keep = (times > (now - windows[i])) & (~is_tip)
_times = times[keep]
_tips = tips[keep]
_links = links[keep]
_is_nsfw = is_nsfw[keep]
result["num_supports_{label}".format(label=labels[i])] = len(_tips)
result["lbc_supports_{label}".format(label=labels[i])] = float(_tips.sum())
maxtip = 0
maxtip_link = None
maxtip_is_nsfw = None
if len(_tips) > 0:
maxtip = float(_tips.max())
index = np.argmax(_tips)
maxtip_link = _links[index]
maxtip_is_nsfw = _is_nsfw[index]
result["biggest_support_{label}".format(label=labels[i])] = maxtip
result["biggest_support_{label}_link".format(label=labels[i])] = maxtip_link
result["biggest_support_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw)
f = open("tips_stats.json", "w")
f.write(json.dumps(result))
f.close()
print("done. ", flush=True, end="")
def publish_files():
"""
Publish files to somewhere on the internet.
"""
print("Publishing files to the internet...", end="", flush=True)
import subprocess
try:
subprocess.run("./upload.sh", timeout=120.0)
print("done.\n")
except:
print("failed.\n")
if __name__ == "__main__":
# Do it manually once then enter the infinite loop
now = time.time()
print("The time is " + str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC.")
make_graph("claims")
make_graph("channels")
try:
aggregate_tips()
except:
pass
import os
try:
publish_files()
except:
pass
import time
while True:
print("", flush=True)
time.sleep(530.0)
now = time.time()
print("The time is " + str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC.")
make_graph("claims", show=False)
make_graph("channels", show=False)
try:
aggregate_tips()
except:
pass
try:
publish_files()
except:
pass
|
[
"numpy.sum",
"numpy.argmax",
"json.dumps",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.close",
"numpy.max",
"requests.get",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"time.sleep",
"sqlite3.connect",
"matplotlib.pyplot.subplot",
"subprocess.run",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"time.time",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((373, 389), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (382, 389), True, 'import matplotlib.pyplot as plt\n'), ((491, 515), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (506, 515), False, 'import sqlite3\n'), ((1203, 1214), 'time.time', 'time.time', ([], {}), '()\n', (1212, 1214), False, 'import time\n'), ((2409, 2441), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (2422, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 11)'}), '(figsize=(15, 11))\n', (2554, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2597), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2588, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2963, 2984), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-100)'}), '(bottom=-100)\n', (2971, 2984), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3607), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(890.0)'], {'linestyle': '"""dotted"""', 'linewidth': '(2)', 'color': '"""g"""'}), "(890.0, linestyle='dotted', linewidth=2, color='g')\n", (3556, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3759, 3768), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4428), 'matplotlib.pyplot.plot', 'plt.plot', (['(bins[0:-2] + 0.5 * bin_width)', 'moving_average[0:-1]', '"""w-"""'], {'label': '"""10-day moving average"""', 'linewidth': '(1.5)'}), "(bins[0:-2] + 0.5 * bin_width, moving_average[0:-1], 'w-', label=\n '10-day moving average', linewidth=1.5)\n", (4319, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4479, 4521), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days since 2017-01-01)"""'], {}), "('Time (days since 2017-01-01)')\n", (4489, 4521), True, 'import matplotlib.pyplot as plt\n'), ((4977, 5039), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(890.0)'], {'linestyle': '"""dotted"""', 'linewidth': '(2)', 'color': '"""g"""'}), "(890.0, linestyle='dotted', linewidth=2, color='g')\n", (4988, 5039), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5205), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5203, 5205), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5619), 'time.time', 'time.time', ([], {}), '()\n', (5617, 5619), False, 'import time\n'), ((6996, 7062), 'requests.get', 'requests.get', (["('https://chainquery.lbry.com/api/sql?query=' + query)"], {}), "('https://chainquery.lbry.com/api/sql?query=' + query)\n", (7008, 7062), False, 'import requests\n'), ((7607, 7622), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (7615, 7622), True, 'import numpy as np\n'), ((7634, 7648), 'numpy.array', 'np.array', (['tips'], {}), '(tips)\n', (7642, 7648), True, 'import numpy as np\n'), ((7662, 7678), 'numpy.array', 'np.array', (['is_tip'], {}), '(is_tip)\n', (7670, 7678), True, 'import numpy as np\n'), ((7691, 7706), 'numpy.array', 'np.array', (['links'], {}), '(links)\n', (7699, 7706), True, 'import numpy as np\n'), ((7721, 7738), 'numpy.array', 'np.array', (['is_nsfw'], {}), '(is_nsfw)\n', (7729, 7738), True, 'import numpy as np\n'), ((10059, 10070), 'time.time', 'time.time', ([], {}), '()\n', (10068, 10070), False, 'import time\n'), ((1511, 1539), 'numpy.sum', 'np.sum', (['(times > now - 3600.0)'], {}), '(times > now - 3600.0)\n', (1517, 1539), True, 'import numpy as 
np\n'), ((1620, 1649), 'numpy.sum', 'np.sum', (['(times > now - 86400.0)'], {}), '(times > now - 86400.0)\n', (1626, 1649), True, 'import numpy as np\n'), ((1728, 1761), 'numpy.sum', 'np.sum', (['(times > now - 7 * 86400.0)'], {}), '(times > now - 7 * 86400.0)\n', (1734, 1761), True, 'import numpy as np\n'), ((1839, 1873), 'numpy.sum', 'np.sum', (['(times > now - 30 * 86400.0)'], {}), '(times > now - 30 * 86400.0)\n', (1845, 1873), True, 'import numpy as np\n'), ((1944, 1963), 'json.dumps', 'json.dumps', (['my_dict'], {}), '(my_dict)\n', (1954, 1963), False, 'import json\n'), ((3100, 3115), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (3109, 3115), True, 'import numpy as np\n'), ((3160, 3215), 'matplotlib.pyplot.axvline', 'plt.axvline', (['year'], {'color': '"""r"""', 'alpha': '(0.8)', 'linestyle': '"""--"""'}), "(year, color='r', alpha=0.8, linestyle='--')\n", (3171, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3930, 4021), 'matplotlib.pyplot.hist', 'plt.hist', (['days', 'bins'], {'alpha': '(0.9)', 'color': 'color', 'label': '"""Raw"""', 'width': 'bin_width', 'align': '"""mid"""'}), "(days, bins, alpha=0.9, color=color, label='Raw', width=bin_width,\n align='mid')\n", (3938, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4306), 'numpy.mean', 'np.mean', (['subset'], {}), '(subset)\n', (4298, 4306), True, 'import numpy as np\n'), ((4902, 4957), 'matplotlib.pyplot.axvline', 'plt.axvline', (['year'], {'color': '"""r"""', 'alpha': '(0.8)', 'linestyle': '"""--"""'}), "(year, color='r', alpha=0.8, linestyle='--')\n", (4913, 4957), True, 'import matplotlib.pyplot as plt\n'), ((5448, 5458), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5456, 5458), True, 'import matplotlib.pyplot as plt\n'), ((9586, 9604), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (9596, 9604), False, 'import json\n'), ((9855, 9899), 'subprocess.run', 'subprocess.run', (['"""./upload.sh"""'], {'timeout': '(120.0)'}), "('./upload.sh', timeout=120.0)\n", (9869, 9899), False, 'import subprocess\n'), ((10413, 10430), 'time.sleep', 'time.sleep', (['(530.0)'], {}), '(530.0)\n', (10423, 10430), False, 'import time\n'), ((10446, 10457), 'time.time', 'time.time', ([], {}), '()\n', (10455, 10457), False, 'import time\n'), ((2989, 2998), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2996, 2998), True, 'import matplotlib.pyplot as plt\n'), ((4811, 4820), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4818, 4820), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8294), 'numpy.argmax', 'np.argmax', (['_tips'], {}), '(_tips)\n', (8287, 8294), True, 'import numpy as np\n'), ((9177, 9193), 'numpy.argmax', 'np.argmax', (['_tips'], {}), '(_tips)\n', (9186, 9193), True, 'import numpy as np\n'), ((1122, 1137), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (1130, 1137), True, 'import numpy as np\n'), ((3863, 3875), 'numpy.max', 'np.max', (['days'], {}), '(days)\n', (3869, 3875), True, 'import numpy as np\n'), ((3632, 3641), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3639, 3641), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3386), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3384, 3386), True, 'import matplotlib.pyplot as plt\n'), ((4725, 4736), 'time.time', 'time.time', ([], {}), '()\n', (4734, 4736), False, 'import time\n')]
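A small standalone sketch (illustrative names, not part of the snippet) of the 10-day trailing average computed in make_graph above; it reproduces the same loop over daily counts in a compact form.

import numpy as np

def trailing_average(counts, window=10):
    # average of at most the last `window` values seen so far
    out = np.empty(len(counts))
    for i in range(len(counts)):
        out[i] = np.mean(counts[max(0, i + 1 - window):i + 1])
    return out

print(trailing_average(np.array([1.0, 2.0, 3.0, 4.0]), window=2))  # [1.  1.5 2.5 3.5]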
|
import numpy as np
import scipy.sparse as sp
import datasets
import utils
import argparse
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cora', help='Datasets: cora, email, ssets')
parser.add_argument('--version', default='1', help='version for ssets, default 1 for others')
args = parser.parse_args()
if __name__ == '__main__':
A, labels = datasets.load_graph(args.dataset, args.version)
# dense
if not isinstance(A, np.ndarray):
A = np.array(A.todense())
L = utils.laplacian(A)
N = utils.normalized_laplacian(A)
# sparse
A = sp.csr_matrix(A)
L = sp.csr_matrix(L)
N = sp.csr_matrix(N)
matrices = {
'A': A,
'L': L,
'N': N
}
for matrix_id in matrices:
matrix = matrices[matrix_id]
eig_val, eig_vec = np.linalg.eigh(matrix.todense())
path = f"{args.dataset}/embeddings/{matrix_id}_{args.dataset}_v{args.version}.npy"
np.save(path, eig_vec)
|
[
"numpy.save",
"argparse.ArgumentParser",
"datasets.load_graph",
"scipy.sparse.csr_matrix",
"utils.laplacian",
"utils.normalized_laplacian"
] |
[((113, 138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (136, 138), False, 'import argparse\n'), ((390, 437), 'datasets.load_graph', 'datasets.load_graph', (['args.dataset', 'args.version'], {}), '(args.dataset, args.version)\n', (409, 437), False, 'import datasets\n'), ((531, 549), 'utils.laplacian', 'utils.laplacian', (['A'], {}), '(A)\n', (546, 549), False, 'import utils\n'), ((558, 587), 'utils.normalized_laplacian', 'utils.normalized_laplacian', (['A'], {}), '(A)\n', (584, 587), False, 'import utils\n'), ((610, 626), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['A'], {}), '(A)\n', (623, 626), True, 'import scipy.sparse as sp\n'), ((635, 651), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['L'], {}), '(L)\n', (648, 651), True, 'import scipy.sparse as sp\n'), ((660, 676), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['N'], {}), '(N)\n', (673, 676), True, 'import scipy.sparse as sp\n'), ((976, 998), 'numpy.save', 'np.save', (['path', 'eig_vec'], {}), '(path, eig_vec)\n', (983, 998), True, 'import numpy as np\n')]
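utils.laplacian and utils.normalized_laplacian are not shown above; assuming the standard definitions L = D - A and N = I - D^{-1/2} A D^{-1/2}, a self-contained sketch of the embedding step is:

import numpy as np

def laplacian(A):
    # unnormalized graph Laplacian L = D - A (assumed definition)
    return np.diag(A.sum(axis=1)) - A

def normalized_laplacian(A):
    # symmetric normalized Laplacian (assumed definition), assuming no isolated nodes
    d_inv_sqrt = 1.0 / np.sqrt(A.sum(axis=1))
    return np.eye(A.shape[0]) - d_inv_sqrt[:, None] * A * d_inv_sqrt[None, :]

# toy 3-node path graph
A = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
eig_val, eig_vec = np.linalg.eigh(laplacian(A))
print(eig_val)  # smallest eigenvalue is ~0 for a connected graph; eig_vec is what np.save would store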
|
from scipy.stats import multivariate_normal
from scipy.signal import convolve2d
import matplotlib
try:
matplotlib.pyplot.figure()
matplotlib.pyplot.close()
except Exception:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
DEFAULT_PATH = '/tmp/mujoco_terrains'
STEP = 0.1
def generate_hills(width, height, nhills):
'''
@param width float, terrain width
@param height float, terrain height
    @param nhills int, #hills to gen. #hills actually generated is sqrt(nhills)^2

'''
# setup coordinate grid
xmin, xmax = -width/2.0, width/2.0
ymin, ymax = -height/2.0, height/2.0
x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
# generate hilltops
xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
mu = np.c_[xm.flat, ym.flat]
sigma = float(width*height)/(nhills*8)
for i in range(mu.shape[0]):
mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
# generate hills
sigma = sigma + sigma*np.random.rand(mu.shape[0])
rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
return x, y, hfield
def clear_patch(hfield, box):
''' Clears a patch shaped like box, assuming robot is placed in center of hfield
@param box: rllab.spaces.Box-like
'''
if box.flat_dim > 2:
raise ValueError("Provide 2dim box")
# clear patch
h_center = int(0.5 * hfield.shape[0])
w_center = int(0.5 * hfield.shape[1])
fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
hfield[fromrow:torow, fromcol:tocol] = 0.0
# convolve to smoothen edges somewhat, in case hills were cut off
K = np.ones((10,10)) / 100.0
s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s
return hfield
def _checkpath(path_):
if path_ is None:
path_ = DEFAULT_PATH
if not os.path.exists(path_):
os.makedirs(path_)
return path_
def save_heightfield(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure the path + fname match the <file> attribute
of the <asset> element in the env XML where the height field is defined
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP) # terrain_cmap is necessary to make sure tops get light color
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
def save_texture(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
<compiler> element in the env XML
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
# for some reason plt.grid does not work here, so generate gridlines manually
for i in np.arange(xmin,xmax,0.5):
plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
for i in np.arange(ymin,ymax,0.5):
plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
|
[
"scipy.stats.multivariate_normal.rvs",
"scipy.signal.convolve2d",
"os.makedirs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.empty",
"os.path.exists",
"scipy.stats.multivariate_normal",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.use",
"matplotlib.pyplot.contourf",
"numpy.arange",
"numpy.random.rand",
"os.path.join",
"numpy.sqrt"
] |
[((107, 133), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', ([], {}), '()\n', (131, 133), False, 'import matplotlib\n'), ((138, 163), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (161, 163), False, 'import matplotlib\n'), ((789, 813), 'numpy.empty', 'np.empty', (['(x.shape + (2,))'], {}), '(x.shape + (2,))\n', (797, 813), True, 'import numpy as np\n'), ((2071, 2173), 'scipy.signal.convolve2d', 'convolve2d', (['hfield[fromrow - 9:torow + 9, fromcol - 9:tocol + 9]', 'K'], {'mode': '"""same"""', 'boundary': '"""symm"""'}), "(hfield[fromrow - 9:torow + 9, fromcol - 9:tocol + 9], K, mode=\n 'same', boundary='symm')\n", (2081, 2173), False, 'from scipy.signal import convolve2d\n'), ((2711, 2723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2721, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2779), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', '(-hfield)', '(100)'], {'cmap': 'TERRAIN_CMAP'}), '(x, y, -hfield, 100, cmap=TERRAIN_CMAP)\n', (2740, 2779), True, 'import matplotlib.pyplot as plt\n'), ((2910, 2921), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2919, 2921), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3194), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3192, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3250), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', '(-hfield)', '(100)'], {'cmap': 'TERRAIN_CMAP'}), '(x, y, -hfield, 100, cmap=TERRAIN_CMAP)\n', (3211, 3250), True, 'import matplotlib.pyplot as plt\n'), ((3414, 3440), 'numpy.arange', 'np.arange', (['xmin', 'xmax', '(0.5)'], {}), '(xmin, xmax, 0.5)\n', (3423, 3440), True, 'import numpy as np\n'), ((3510, 3536), 'numpy.arange', 'np.arange', (['ymin', 'ymax', '(0.5)'], {}), '(ymin, ymax, 0.5)\n', (3519, 3536), True, 'import numpy as np\n'), ((3660, 3671), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3669, 3671), True, 'import matplotlib.pyplot as plt\n'), ((186, 207), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (200, 207), False, 'import matplotlib\n'), ((1096, 1142), 'scipy.stats.multivariate_normal.rvs', 'multivariate_normal.rvs', ([], {'mean': 'mu[i]', 'cov': 'sigma'}), '(mean=mu[i], cov=sigma)\n', (1119, 1142), False, 'from scipy.stats import multivariate_normal\n'), ((1235, 1278), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu[i, :]'], {'cov': 'sigma[i]'}), '(mu[i, :], cov=sigma[i])\n', (1254, 1278), False, 'from scipy.stats import multivariate_normal\n'), ((2038, 2055), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (2045, 2055), True, 'import numpy as np\n'), ((2327, 2348), 'os.path.exists', 'os.path.exists', (['path_'], {}), '(path_)\n', (2341, 2348), False, 'import os\n'), ((2358, 2376), 'os.makedirs', 'os.makedirs', (['path_'], {}), '(path_)\n', (2369, 2376), False, 'import os\n'), ((2858, 2883), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (2870, 2883), False, 'import os\n'), ((3448, 3498), 'matplotlib.pyplot.plot', 'plt.plot', (['[i, i]', '[ymin, ymax]', '"""k"""'], {'linewidth': '(0.1)'}), "([i, i], [ymin, ymax], 'k', linewidth=0.1)\n", (3456, 3498), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3594), 'matplotlib.pyplot.plot', 'plt.plot', (['[xmin, xmax]', '[i, i]', '"""k"""'], {'linewidth': '(0.1)'}), "([xmin, xmax], [i, i], 'k', linewidth=0.1)\n", (3552, 3594), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3633), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), 
'(path, fname)\n', (3620, 3633), False, 'import os\n'), ((1195, 1222), 'numpy.random.rand', 'np.random.rand', (['mu.shape[0]'], {}), '(mu.shape[0])\n', (1209, 1222), True, 'import numpy as np\n'), ((920, 935), 'numpy.sqrt', 'np.sqrt', (['nhills'], {}), '(nhills)\n', (927, 935), True, 'import numpy as np\n'), ((954, 969), 'numpy.sqrt', 'np.sqrt', (['nhills'], {}), '(nhills)\n', (961, 969), True, 'import numpy as np\n')]
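The edge smoothing in clear_patch above is a plain box blur; a minimal standalone version with arbitrary toy sizes (the 10x10 averaging kernel matches the one in the snippet):

import numpy as np
from scipy.signal import convolve2d

# toy height field with a sharp cleared square in the middle
hfield = np.ones((40, 40))
hfield[15:25, 15:25] = 0.0

# 10x10 averaging kernel softens the hard edges of the cleared patch
K = np.ones((10, 10)) / 100.0
smoothed = convolve2d(hfield, K, mode='same', boundary='symm')
print(smoothed[20, 14])  # a value strictly between 0 and 1 along the former hard edge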
|
from datetime import datetime, timedelta
from pyclarify import APIClient
import orchest
import pandas as pd
import numpy as np
from merlion.utils import TimeSeries
from merlion.models.forecast.prophet import Prophet, ProphetConfig
from merlion.transform.base import Identity
def pipeline_data(times, values, new_id, new_name, original_id, original_name):
labels = {"source":["Orchest pipelines"], "original_id":[original_id]}
var_name = "clfy_"+new_id
data = {
"name" : new_name,
"labels" : labels,
"times" : times,
"series" : values,
"kargs" : {"sourceType" : "prediction",
"data-source": ["Orchest"],
"description" : f"Forecast for {original_name}"
}
}
return {var_name : data }
def generate_future_timestamps(n_future, timestamps, start):
deltas = [x-timestamps[0] for x in timestamps]
avg_delta=np.mean(deltas)
future = [(i+1)*avg_delta+start for i in range(n_future)]
return future
client = APIClient("./clarify-credentials.json")
inputs = orchest.get_inputs()
invars = [x for x in inputs.keys() if x.startswith("read_config_forecast")]
print(invars)
output_dict={}
for name in invars:
item_id = inputs[name]['item_id']
days = inputs[name]['lag_days']
test_lag = inputs[name]['time_split']
future = inputs[name]['future']
data_params = {
"items": {
"include": True,
"filter": {
"id": {
"$in": [
item_id
]
}
}
},
"data": {
"include": True,
"notBefore": (datetime.now() - timedelta(days=days)).astimezone().isoformat()
}
}
response = client.select_items(data_params)
signal_name = list(response.result.items.values())[0].name
print(f"Name {signal_name} and id {item_id}")
times = response.result.data.times
series = response.result.data.series
df = pd.DataFrame(series)
df.index = [time.replace(tzinfo=None) for time in times]
if len(times) > 0:
tzinfo = times[0].tzinfo
test_data = TimeSeries.from_pd(df[-test_lag:])
train_data = TimeSeries.from_pd(df[0:-test_lag])
config = ProphetConfig(max_forecast_steps=test_lag, add_seasonality="auto", transform=Identity())
model = Prophet(config)
model.train(train_data=train_data)
test_times = test_data.time_stamps
if future > 0:
test_times=test_times+generate_future_timestamps(future, test_data.time_stamps, start=test_data.time_stamps[-1])
test_pred, test_err = model.forecast(time_stamps=test_times)
col = test_pred.names[0]
col_err = test_err.names[0]
forecast_name=col+"_pred"
forecast_name_upper=col+"_upper"
forecast_name_lower=col+"_lower"
forecast_values = test_pred.univariates[col].values
forecast_upper_values= [x+y for x,y in zip(test_pred.univariates[col].values, test_err.univariates[col_err].values)]
forecast_lower_values= [x-y for x,y in zip(test_pred.univariates[col].values, test_err.univariates[col_err].values)]
output_dict.update(pipeline_data(test_pred.time_stamps,forecast_values, forecast_name, f"Forecast {signal_name}", col, signal_name ))
output_dict.update(pipeline_data(test_err.time_stamps,forecast_upper_values, forecast_name_upper, f"Forecast {signal_name} upper bound", col, signal_name ))
output_dict.update(pipeline_data(test_err.time_stamps,forecast_lower_values, forecast_name_lower, f"Forecast {signal_name} lower bound", col, signal_name ))
orchest.output(output_dict, "clfy_dict")
|
[
"pandas.DataFrame",
"orchest.output",
"orchest.get_inputs",
"merlion.utils.TimeSeries.from_pd",
"datetime.datetime.now",
"merlion.models.forecast.prophet.Prophet",
"numpy.mean",
"datetime.timedelta",
"merlion.transform.base.Identity",
"pyclarify.APIClient"
] |
[((1072, 1111), 'pyclarify.APIClient', 'APIClient', (['"""./clarify-credentials.json"""'], {}), "('./clarify-credentials.json')\n", (1081, 1111), False, 'from pyclarify import APIClient\n'), ((1122, 1142), 'orchest.get_inputs', 'orchest.get_inputs', ([], {}), '()\n', (1140, 1142), False, 'import orchest\n'), ((3609, 3649), 'orchest.output', 'orchest.output', (['output_dict', '"""clfy_dict"""'], {}), "(output_dict, 'clfy_dict')\n", (3623, 3649), False, 'import orchest\n'), ((965, 980), 'numpy.mean', 'np.mean', (['deltas'], {}), '(deltas)\n', (972, 980), True, 'import numpy as np\n'), ((2010, 2030), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (2022, 2030), True, 'import pandas as pd\n'), ((2169, 2203), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['df[-test_lag:]'], {}), '(df[-test_lag:])\n', (2187, 2203), False, 'from merlion.utils import TimeSeries\n'), ((2221, 2256), 'merlion.utils.TimeSeries.from_pd', 'TimeSeries.from_pd', (['df[0:-test_lag]'], {}), '(df[0:-test_lag])\n', (2239, 2256), False, 'from merlion.utils import TimeSeries\n'), ((2372, 2387), 'merlion.models.forecast.prophet.Prophet', 'Prophet', (['config'], {}), '(config)\n', (2379, 2387), False, 'from merlion.models.forecast.prophet import Prophet, ProphetConfig\n'), ((2347, 2357), 'merlion.transform.base.Identity', 'Identity', ([], {}), '()\n', (2355, 2357), False, 'from merlion.transform.base import Identity\n'), ((1679, 1693), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1691, 1693), False, 'from datetime import datetime, timedelta\n'), ((1696, 1716), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (1705, 1716), False, 'from datetime import datetime, timedelta\n')]
|
import tensorflow as tf
import numpy as np
class RBFolution(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size=(1, 3, 3, 1), padding="VALID", strides=(1, 1, 1, 1), name="RBFolution",
dilation_rate=(1,1),
ccs_initializer=tf.keras.initializers.RandomUniform(0,1),
beta_initilizer=tf.keras.initializers.RandomUniform(0,1)):
super(RBFolution, self).__init__(name=name)
self.padding = padding
self.strides = strides
self.filters = filters
self.kernel_size = kernel_size
self.ccs_initializer = ccs_initializer
self.beta_initilizer = beta_initilizer
self.dilation_rate = dilation_rate
def build(self, input_shape):
self.input_s = input_shape
self.output_s = self.compute_output_shape(input_shape)
patch_dim = np.prod(self.kernel_size[1:])
self.ccs_tensor = self.add_weight("cluster_centers", shape=(patch_dim, self.filters), dtype="float32", initializer=self.ccs_initializer)
self.beta = self.add_weight("beta", shape=[self.filters], dtype="float32", initializer=self.beta_initilizer)
def call(self, input, **kwargs):
return tf.reshape(self.__rbfolution(input), self.output_s)
def compute_output_shape(self, input_shape):
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = RBFolution.conv_output_length(
space[i],
self.kernel_size[1:-1][i],
padding=self.padding.lower(),
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim.value)
return (-1,) + tuple(new_space) + (self.filters,)
def __rbfolution(self, inputs):
batch_size = tf.shape(inputs)[0]
patch_dim = np.prod(self.kernel_size[1:])
# Patches extracted from the images (convolution-like).
# shape=[batch_size, new_height, new_width, patch_dim] (i. e. individual
# patches are flattened)
# tf.extract_image_patches "Only supports ksizes across space" -> we change
# kernel_size[3] to 1.
patches = tf.extract_image_patches(
inputs,
ksizes=list(self.kernel_size[:3]) + [1],
strides=self.strides,
rates=[1, 1, 1, 1],
padding=self.padding
)
patches_shape = tf.shape(patches)
new_height = patches_shape[1]
new_width = patches_shape[2]
# shape=[batch_size, num_patches, patch_dim]
reshaped_patches = tf.reshape(patches, [batch_size, -1, patch_dim])
# all_scores[i,j,k] = sum_{l=0}^{patch_dim-1} (
# (ccs_tensor[l,k] - reshaped_patches[i,j,l]) ** 2
# )
# shape=[batch_size, num_patches, filters]
all_scores = (
tf.reduce_sum(tf.square(reshaped_patches), 2, keepdims=True) -
2 * tf.einsum("aij,jk->aik", reshaped_patches, self.ccs_tensor) +
tf.reduce_sum(tf.square(self.ccs_tensor), 0, keepdims=True)
)
res = tf.reshape(
tf.exp(tf.multiply(-self.beta, all_scores)),
[batch_size, new_height, new_width, self.filters],
name="rbfolution_activation"
)
return res
@staticmethod
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full", "causal"
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full', 'causal'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ['same', 'causal']:
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
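# --- illustration only (not part of the original module) ---------------------
# A minimal worked example of conv_output_length: a length-32 input with a
# 3-wide kernel keeps 32 samples under 'same' padding but drops the border
# under 'valid' padding (32 - 3 + 1 = 30), both with stride 1.
def _conv_output_length_example():
    valid_len = RBFolution.conv_output_length(32, 3, padding='valid', stride=1)  # 30
    same_len = RBFolution.conv_output_length(32, 3, padding='same', stride=1)    # 32
    return valid_len, same_len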
|
[
"tensorflow.einsum",
"tensorflow.reshape",
"tensorflow.multiply",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.shape",
"tensorflow.square",
"numpy.prod"
] |
[((273, 314), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', (['(0)', '(1)'], {}), '(0, 1)\n', (308, 314), True, 'import tensorflow as tf\n'), ((348, 389), 'tensorflow.keras.initializers.RandomUniform', 'tf.keras.initializers.RandomUniform', (['(0)', '(1)'], {}), '(0, 1)\n', (383, 389), True, 'import tensorflow as tf\n'), ((866, 895), 'numpy.prod', 'np.prod', (['self.kernel_size[1:]'], {}), '(self.kernel_size[1:])\n', (873, 895), True, 'import numpy as np\n'), ((1863, 1892), 'numpy.prod', 'np.prod', (['self.kernel_size[1:]'], {}), '(self.kernel_size[1:])\n', (1870, 1892), True, 'import numpy as np\n'), ((2438, 2455), 'tensorflow.shape', 'tf.shape', (['patches'], {}), '(patches)\n', (2446, 2455), True, 'import tensorflow as tf\n'), ((2612, 2660), 'tensorflow.reshape', 'tf.reshape', (['patches', '[batch_size, -1, patch_dim]'], {}), '(patches, [batch_size, -1, patch_dim])\n', (2622, 2660), True, 'import tensorflow as tf\n'), ((1823, 1839), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1831, 1839), True, 'import tensorflow as tf\n'), ((3046, 3072), 'tensorflow.square', 'tf.square', (['self.ccs_tensor'], {}), '(self.ccs_tensor)\n', (3055, 3072), True, 'import tensorflow as tf\n'), ((3148, 3183), 'tensorflow.multiply', 'tf.multiply', (['(-self.beta)', 'all_scores'], {}), '(-self.beta, all_scores)\n', (3159, 3183), True, 'import tensorflow as tf\n'), ((2893, 2920), 'tensorflow.square', 'tf.square', (['reshaped_patches'], {}), '(reshaped_patches)\n', (2902, 2920), True, 'import tensorflow as tf\n'), ((2958, 3017), 'tensorflow.einsum', 'tf.einsum', (['"""aij,jk->aik"""', 'reshaped_patches', 'self.ccs_tensor'], {}), "('aij,jk->aik', reshaped_patches, self.ccs_tensor)\n", (2967, 3017), True, 'import tensorflow as tf\n')]
|
'''
Action policy methods for sampling actions
The algorithm provides a `calc_pdparam` which takes a state and does a forward pass through its net,
and the pdparam is used to construct an action probability distribution appropriate to the action type indicated by the body
Then the prob. dist. is used to sample action.
The default form looks like:
```
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
```
We can also augment pdparam before sampling - as in the case of Boltzmann sampling,
or do epsilon-greedy to use pdparam-sampling or random sampling.
'''
from slm_lab.env.wrapper import LazyFrames
from slm_lab.lib import logger, math_util, util
from torch import distributions
import numpy as np
import pydash as ps
import torch
logger = logger.get_logger(__name__)
# probability distributions constraints for different action types; the first in the list is the default
ACTION_PDS = {
'continuous': ['Normal', 'Beta', 'Gumbel', 'LogNormal'],
'multi_continuous': ['MultivariateNormal'],
'discrete': ['Categorical', 'Argmax'],
'multi_discrete': ['MultiCategorical'],
'multi_binary': ['Bernoulli'],
}
class Argmax(distributions.Categorical):
'''
Special distribution class for argmax sampling, where probability is always 1 for the argmax.
NOTE although argmax is not a sampling distribution, this implementation is for API consistency.
'''
def __init__(self, probs=None, logits=None, validate_args=None):
if probs is not None:
new_probs = torch.zeros_like(probs, dtype=torch.float)
new_probs[torch.argmax(probs, dim=0)] = 1.0
probs = new_probs
elif logits is not None:
new_logits = torch.full_like(logits, -1e8, dtype=torch.float)
max_idx = torch.argmax(logits, dim=0)
new_logits[max_idx] = logits[max_idx]
logits = new_logits
super(Argmax, self).__init__(probs=probs, logits=logits, validate_args=validate_args)
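# --- illustration only (not part of the original module) ---------------------
# Sketch of what the Argmax "distribution" does, assuming a 3-way logit vector:
# all probability mass collapses onto the argmax index, so sampling is deterministic.
def _argmax_example():
    pd = Argmax(logits=torch.tensor([0.1, 2.0, -1.0]))
    return pd.sample()  # always tensor(1), the index of the largest logit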
class MultiCategorical(distributions.Categorical):
'''MultiCategorical as collection of Categoricals'''
def __init__(self, probs=None, logits=None, validate_args=None):
self.categoricals = []
if probs is None:
probs = [None] * len(logits)
elif logits is None:
logits = [None] * len(probs)
else:
raise ValueError('Either probs or logits must be None')
for sub_probs, sub_logits in zip(probs, logits):
categorical = distributions.Categorical(probs=sub_probs, logits=sub_logits, validate_args=validate_args)
self.categoricals.append(categorical)
@property
def logits(self):
return [cat.logits for cat in self.categoricals]
@property
def probs(self):
return [cat.probs for cat in self.categoricals]
@property
def param_shape(self):
return [cat.param_shape for cat in self.categoricals]
@property
def mean(self):
return torch.stack([cat.mean for cat in self.categoricals])
@property
def variance(self):
return torch.stack([cat.variance for cat in self.categoricals])
def sample(self, sample_shape=torch.Size()):
return torch.stack([cat.sample(sample_shape=sample_shape) for cat in self.categoricals])
def log_prob(self, value):
return torch.stack([cat.log_prob(value[idx]) for idx, cat in enumerate(self.categoricals)])
def entropy(self):
return torch.stack([cat.entropy() for cat in self.categoricals])
def enumerate_support(self):
return [cat.enumerate_support() for cat in self.categoricals]
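# --- illustration only (not part of the original module) ---------------------
# Sketch of MultiCategorical as a list of independent Categoricals, assuming two
# discrete sub-actions with 2 and 3 choices respectively (uniform logits).
def _multi_categorical_example():
    pd = MultiCategorical(logits=[torch.zeros(2), torch.zeros(3)])
    action = pd.sample()  # shape (2,): one sampled index per sub-action
    return action, pd.log_prob(action)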
setattr(distributions, 'Argmax', Argmax)
setattr(distributions, 'MultiCategorical', MultiCategorical)
# base methods
def try_preprocess(state, algorithm, body, append=True):
'''Try calling preprocess as implemented in body's memory to use for net input'''
if isinstance(state, LazyFrames):
state = state.__array__() # from global env preprocessor
if hasattr(body.memory, 'preprocess_state'):
state = body.memory.preprocess_state(state, append=append)
# as float, and always as minibatch for net input
state = torch.from_numpy(state).float().unsqueeze(dim=0)
return state
def cond_squeeze(out):
'''Helper to squeeze output depending if it is tensor (discrete pdparam) or list of tensors (continuous pdparam of loc and scale)'''
if isinstance(out, list):
return [out_t.squeeze(dim=0) for out_t in out]
else:
return out.squeeze(dim=0)
def init_action_pd(state, algorithm, body, append=True):
'''
Build the proper action prob. dist. to use for action sampling.
state is passed through algorithm's net via calc_pdparam, which the algorithm must implement using its proper net.
This will return body, ActionPD and pdparam to allow augmentation, e.g. applying temperature tau to pdparam for boltzmann.
Then, output must be called with sample_action_pd(body, ActionPD, pdparam) to sample action.
@returns {cls, tensor, *} ActionPD, pdparam, body
'''
pdtypes = ACTION_PDS[body.action_type]
assert body.action_pdtype in pdtypes, f'Pdtype {body.action_pdtype} is not compatible/supported with action_type {body.action_type}. Options are: {ACTION_PDS[body.action_type]}'
ActionPD = getattr(distributions, body.action_pdtype)
state = try_preprocess(state, algorithm, body, append=append)
state = state.to(algorithm.net.device)
pdparam = algorithm.calc_pdparam(state, evaluate=False)
return ActionPD, pdparam, body
def sample_action_pd(ActionPD, pdparam, body):
'''
This uses the outputs from init_action_pd and an optionally augmented pdparam to construct a action_pd for sampling action
@returns {tensor, distribution} action, action_pd A sampled action, and the prob. dist. used for sampling to enable calculations like kl, entropy, etc. later.
'''
pdparam = cond_squeeze(pdparam)
if body.is_discrete:
action_pd = ActionPD(logits=pdparam)
else: # continuous outputs a list, loc and scale
assert len(pdparam) == 2, pdparam
# scale (stdev) must be >0, use softplus
if pdparam[1] < 5:
pdparam[1] = torch.log(1 + torch.exp(pdparam[1])) + 1e-8
action_pd = ActionPD(*pdparam)
action = action_pd.sample()
return action, action_pd
# interface action sampling methods
def default(state, algorithm, body):
'''Plain policy by direct sampling using outputs of net as logits and constructing ActionPD as appropriate'''
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
return action, action_pd
def random(state, algorithm, body):
'''Random action sampling that returns the same data format as default(), but without forward pass. Uses gym.space.sample()'''
state = try_preprocess(state, algorithm, body, append=True) # for consistency with init_action_pd inner logic
if body.action_type == 'discrete':
action_pd = distributions.Categorical(logits=torch.ones(body.action_space.high, device=algorithm.net.device))
elif body.action_type == 'continuous':
        # Possibly this should have a 'device' set
action_pd = distributions.Uniform(
low=torch.tensor(body.action_space.low).float(),
high=torch.tensor(body.action_space.high).float())
elif body.action_type == 'multi_discrete':
action_pd = distributions.Categorical(
logits=torch.ones(body.action_space.high.size, body.action_space.high[0], device=algorithm.net.device))
elif body.action_type == 'multi_continuous':
raise NotImplementedError
elif body.action_type == 'multi_binary':
raise NotImplementedError
else:
raise NotImplementedError
sample = body.action_space.sample()
action = torch.tensor(sample, device=algorithm.net.device)
return action, action_pd
def epsilon_greedy(state, algorithm, body):
'''Epsilon-greedy policy: with probability epsilon, do random action, otherwise do default sampling.'''
epsilon = body.explore_var
if epsilon > np.random.rand():
return random(state, algorithm, body)
else:
return default(state, algorithm, body)
def boltzmann(state, algorithm, body):
'''
Boltzmann policy: adjust pdparam with temperature tau; the higher the more randomness/noise in action.
'''
tau = body.explore_var
ActionPD, pdparam, body = init_action_pd(state, algorithm, body)
pdparam /= tau
action, action_pd = sample_action_pd(ActionPD, pdparam, body)
return action, action_pd
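# --- illustration only (not part of the original module) ---------------------
# Hedged sketch of the Boltzmann temperature effect: dividing the logits by a
# larger tau flattens the categorical distribution, i.e. more exploration.
def _boltzmann_tau_example():
    logits = torch.tensor([2.0, 0.0])
    sharp = distributions.Categorical(logits=logits / 0.5).probs  # ~[0.98, 0.02]
    flat = distributions.Categorical(logits=logits / 5.0).probs   # ~[0.60, 0.40]
    return sharp, flat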
# multi-body policy with a single forward pass to calc pdparam
def multi_default(states, algorithm, body_list, pdparam):
'''
Apply default policy body-wise
Note, for efficiency, do a single forward pass to calculate pdparam, then call this policy like:
@example
pdparam = self.calc_pdparam(state, evaluate=False)
action_a, action_pd_a = self.action_policy(pdparam, self, body_list)
'''
pdparam = pdparam.squeeze(dim=0)
# assert pdparam has been chunked
assert len(pdparam.shape) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_random(states, algorithm, body_list, pdparam):
'''Apply random policy body-wise.'''
pdparam = pdparam.squeeze(dim=0)
action_list, action_pd_a = [], []
    for idx, body in enumerate(body_list):
action, action_pd = random(states[idx], algorithm, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_epsilon_greedy(states, algorithm, body_list, pdparam):
'''Apply epsilon-greedy policy body-wise'''
assert len(pdparam) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
epsilon = body.explore_var
if epsilon > np.random.rand():
action, action_pd = random(states[idx], algorithm, body)
else:
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
def multi_boltzmann(states, algorithm, body_list, pdparam):
'''Apply Boltzmann policy body-wise'''
assert len(pdparam) > 1 and len(pdparam) == len(body_list), f'pdparam shape: {pdparam.shape}, bodies: {len(body_list)}'
action_list, action_pd_a = [], []
for idx, sub_pdparam in enumerate(pdparam):
body = body_list[idx]
try_preprocess(states[idx], algorithm, body, append=True) # for consistency with init_action_pd inner logic
tau = body.explore_var
sub_pdparam /= tau
ActionPD = getattr(distributions, body.action_pdtype)
action, action_pd = sample_action_pd(ActionPD, sub_pdparam, body)
action_list.append(action)
action_pd_a.append(action_pd)
action_a = torch.tensor(action_list, device=algorithm.net.device).unsqueeze(dim=1)
return action_a, action_pd_a
# action policy update methods
class VarScheduler:
'''
Variable scheduler for decaying variables such as explore_var (epsilon, tau) and entropy
e.g. spec
"explore_var_spec": {
"name": "linear_decay",
"start_val": 1.0,
"end_val": 0.1,
"start_step": 0,
"end_step": 800,
},
'''
def __init__(self, var_decay_spec=None):
self._updater_name = 'no_decay' if var_decay_spec is None else var_decay_spec['name']
self._updater = getattr(math_util, self._updater_name)
util.set_attr(self, dict(
start_val=np.nan,
))
util.set_attr(self, var_decay_spec, [
'start_val',
'end_val',
'start_step',
'end_step',
])
if not getattr(self, 'end_val', None):
self.end_val = self.start_val
def update(self, algorithm, clock):
'''Get an updated value for var'''
if (util.in_eval_lab_modes()) or self._updater_name == 'no_decay':
return self.end_val
step = clock.get(clock.max_tick_unit)
val = self._updater(self.start_val, self.end_val, self.start_step, self.end_step, step)
return val
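# --- illustration only (not part of the original module) ---------------------
# Hedged sketch of the "linear_decay" schedule from the spec in the docstring
# above, assuming math_util.linear_decay interpolates linearly between start_val
# and end_val over [start_step, end_step] and clips outside that window.
def _linear_decay_example(step=400):
    start_val, end_val, start_step, end_step = 1.0, 0.1, 0, 800
    frac = min(max((step - start_step) / (end_step - start_step), 0.0), 1.0)
    return start_val + frac * (end_val - start_val)  # 0.55 halfway through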
# misc calc methods
def guard_multi_pdparams(pdparams, body):
'''Guard pdparams for multi action'''
action_dim = body.action_dim
is_multi_action = ps.is_iterable(action_dim)
if is_multi_action:
assert ps.is_list(pdparams)
pdparams = [t.clone() for t in pdparams] # clone for grad safety
assert len(pdparams) == len(action_dim), pdparams
# transpose into (batch_size, [action_dims])
pdparams = [list(torch.split(t, action_dim, dim=0)) for t in torch.cat(pdparams, dim=1)]
return pdparams
def calc_log_probs(algorithm, net, body, batch):
'''
Method to calculate log_probs fresh from batch data
Body already stores log_prob from self.net. This is used for PPO where log_probs needs to be recalculated.
'''
states, actions = batch['states'], batch['actions']
action_dim = body.action_dim
is_multi_action = ps.is_iterable(action_dim)
# construct log_probs for each state-action
pdparams = algorithm.calc_pdparam(states, net=net)
pdparams = guard_multi_pdparams(pdparams, body)
assert len(pdparams) == len(states), f'batch_size of pdparams: {len(pdparams)} vs states: {len(states)}'
pdtypes = ACTION_PDS[body.action_type]
ActionPD = getattr(distributions, body.action_pdtype)
log_probs = []
for idx, pdparam in enumerate(pdparams):
if not is_multi_action: # already cloned for multi_action above
pdparam = pdparam.clone() # clone for grad safety
_action, action_pd = sample_action_pd(ActionPD, pdparam, body)
log_probs.append(action_pd.log_prob(actions[idx].float()).sum(dim=0))
log_probs = torch.stack(log_probs)
assert not torch.isnan(log_probs).any(), f'log_probs: {log_probs}, \npdparams: {pdparams} \nactions: {actions}'
logger.debug(f'log_probs: {log_probs}')
return log_probs
def update_online_stats(body, state):
'''
Method to calculate the running mean and standard deviation of the state space.
See https://www.johndcook.com/blog/standard_deviation/ for more details
for n >= 1
M_n = M_n-1 + (state - M_n-1) / n
S_n = S_n-1 + (state - M_n-1) * (state - M_n)
variance = S_n / (n - 1)
std_dev = sqrt(variance)
'''
logger.debug(f'mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}')
# Assumes only one state is given
if ('Atari' in util.get_class_name(body.memory)):
assert state.ndim == 3
elif getattr(body.memory, 'raw_state_dim', False):
assert state.size == body.memory.raw_state_dim
else:
assert state.size == body.state_dim or state.shape == body.state_dim
mean = body.state_mean
body.state_n += 1
if np.isnan(mean).any():
assert np.isnan(body.state_std_dev_int)
assert np.isnan(body.state_std_dev)
body.state_mean = state
body.state_std_dev_int = 0
body.state_std_dev = 0
else:
assert body.state_n > 1
body.state_mean = mean + (state - mean) / body.state_n
body.state_std_dev_int = body.state_std_dev_int + (state - mean) * (state - body.state_mean)
body.state_std_dev = np.sqrt(body.state_std_dev_int / (body.state_n - 1))
# Guard against very small std devs
if (body.state_std_dev < 1e-8).any():
body.state_std_dev[np.where(body.state_std_dev < 1e-8)] += 1e-8
logger.debug(f'new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}')
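# --- illustration only (not part of the original module) ---------------------
# Minimal numeric check of the running mean/variance recurrence documented above
# (Welford's method) for the scalar stream 2, 4, 6: mean -> 4.0, sample std -> 2.0.
def _welford_example():
    mean, s, n = 0.0, 0.0, 0
    for x in (2.0, 4.0, 6.0):
        n += 1
        old_mean = mean
        mean = old_mean + (x - old_mean) / n
        s = s + (x - old_mean) * (x - mean)
    return mean, np.sqrt(s / (n - 1))  # (4.0, 2.0)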
def normalize_state(body, state):
'''
Normalizes one or more states using a running mean and standard deviation
Details of the normalization from Deep RL Bootcamp, L6
https://www.youtube.com/watch?v=8EcdaCk9KaQ&feature=youtu.be
'''
same_shape = False if type(state) == list else state.shape == body.state_mean.shape
has_preprocess = getattr(body.memory, 'preprocess_state', False)
if ('Atari' in util.get_class_name(body.memory)):
# never normalize atari, it has its own normalization step
logger.debug('skipping normalizing for Atari, already handled by preprocess')
return state
elif ('Replay' in util.get_class_name(body.memory)) and has_preprocess:
# normalization handled by preprocess_state function in the memory
logger.debug('skipping normalizing, already handled by preprocess')
return state
elif same_shape:
# if not atari, always normalize the state the first time we see it during act
# if the shape is not transformed in some way
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
else:
# broadcastable sample from an un-normalized memory so we should normalize
logger.debug('normalizing sample from memory')
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
# TODO Not currently used, this will crash for more exotic memory structures
# def unnormalize_state(body, state):
# '''
# Un-normalizes one or more states using a running mean and new_std_dev
# '''
# return state * body.state_mean + body.state_std_dev
def update_online_stats_and_normalize_state(body, state):
'''
Convenience combination function for updating running state mean and std_dev and normalizing the state in one go.
'''
logger.debug(f'state: {state}')
update_online_stats(body, state)
state = normalize_state(body, state)
logger.debug(f'normalized state: {state}')
return state
def normalize_states_and_next_states(body, batch, episodic_flag=None):
'''
Convenience function for normalizing the states and next states in a batch of data
'''
logger.debug(f'states: {batch["states"]}')
logger.debug(f'next states: {batch["next_states"]}')
episodic = episodic_flag if episodic_flag is not None else body.memory.is_episodic
logger.debug(f'Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}')
if episodic:
normalized = []
for epi in batch['states']:
normalized.append(normalize_state(body, epi))
batch['states'] = normalized
normalized = []
for epi in batch['next_states']:
normalized.append(normalize_state(body, epi))
batch['next_states'] = normalized
else:
batch['states'] = normalize_state(body, batch['states'])
batch['next_states'] = normalize_state(body, batch['next_states'])
logger.debug(f'normalized states: {batch["states"]}')
logger.debug(f'normalized next states: {batch["next_states"]}')
return batch
|
[
"slm_lab.lib.util.get_class_name",
"torch.distributions.Categorical",
"slm_lab.lib.logger.debug",
"numpy.sum",
"torch.argmax",
"torch.cat",
"numpy.isnan",
"numpy.clip",
"slm_lab.lib.util.set_attr",
"torch.isnan",
"torch.ones",
"pydash.is_list",
"slm_lab.lib.logger.get_logger",
"torch.exp",
"torch.zeros_like",
"torch.split",
"torch.Size",
"torch.from_numpy",
"torch.full_like",
"torch.stack",
"slm_lab.lib.util.in_eval_lab_modes",
"pydash.is_iterable",
"numpy.where",
"numpy.random.rand",
"torch.tensor",
"numpy.sqrt"
] |
[((824, 851), 'slm_lab.lib.logger.get_logger', 'logger.get_logger', (['__name__'], {}), '(__name__)\n', (841, 851), False, 'from slm_lab.lib import logger, math_util, util\n'), ((7960, 8009), 'torch.tensor', 'torch.tensor', (['sample'], {'device': 'algorithm.net.device'}), '(sample, device=algorithm.net.device)\n', (7972, 8009), False, 'import torch\n'), ((13584, 13610), 'pydash.is_iterable', 'ps.is_iterable', (['action_dim'], {}), '(action_dim)\n', (13598, 13610), True, 'import pydash as ps\n'), ((14318, 14344), 'pydash.is_iterable', 'ps.is_iterable', (['action_dim'], {}), '(action_dim)\n', (14332, 14344), True, 'import pydash as ps\n'), ((15078, 15100), 'torch.stack', 'torch.stack', (['log_probs'], {}), '(log_probs)\n', (15089, 15100), False, 'import torch\n'), ((15221, 15260), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""log_probs: {log_probs}"""'], {}), "(f'log_probs: {log_probs}')\n", (15233, 15260), False, 'from slm_lab.lib import logger, math_util, util\n'), ((15679, 15786), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}"""'], {}), "(\n f'mean: {body.state_mean}, std: {body.state_std_dev}, num examples: {body.state_n}'\n )\n", (15691, 15786), False, 'from slm_lab.lib import logger, math_util, util\n'), ((16823, 16938), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}"""'], {}), "(\n f'new mean: {body.state_mean}, new std: {body.state_std_dev}, num examples: {body.state_n}'\n )\n", (16835, 16938), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19001, 19032), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""state: {state}"""'], {}), "(f'state: {state}')\n", (19013, 19032), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19115, 19157), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""normalized state: {state}"""'], {}), "(f'normalized state: {state}')\n", (19127, 19157), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19355, 19397), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""states: {batch[\'states\']}"""'], {}), '(f"states: {batch[\'states\']}")\n', (19367, 19397), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19402, 19454), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""next states: {batch[\'next_states\']}"""'], {}), '(f"next states: {batch[\'next_states\']}")\n', (19414, 19454), False, 'from slm_lab.lib import logger, math_util, util\n'), ((19546, 19665), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}"""'], {}), "(\n f'Episodic: {episodic}, episodic_flag: {episodic_flag}, body.memory: {body.memory.is_episodic}'\n )\n", (19558, 19665), False, 'from slm_lab.lib import logger, math_util, util\n'), ((20147, 20200), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""normalized states: {batch[\'states\']}"""'], {}), '(f"normalized states: {batch[\'states\']}")\n', (20159, 20200), False, 'from slm_lab.lib import logger, math_util, util\n'), ((20205, 20268), 'slm_lab.lib.logger.debug', 'logger.debug', (['f"""normalized next states: {batch[\'next_states\']}"""'], {}), '(f"normalized next states: {batch[\'next_states\']}")\n', (20217, 20268), False, 'from slm_lab.lib import logger, math_util, util\n'), ((3047, 3099), 'torch.stack', 'torch.stack', (['[cat.mean for cat in self.categoricals]'], {}), '([cat.mean for cat in 
self.categoricals])\n', (3058, 3099), False, 'import torch\n'), ((3154, 3210), 'torch.stack', 'torch.stack', (['[cat.variance for cat in self.categoricals]'], {}), '([cat.variance for cat in self.categoricals])\n', (3165, 3210), False, 'import torch\n'), ((3246, 3258), 'torch.Size', 'torch.Size', ([], {}), '()\n', (3256, 3258), False, 'import torch\n'), ((8241, 8257), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8255, 8257), True, 'import numpy as np\n'), ((12834, 12925), 'slm_lab.lib.util.set_attr', 'util.set_attr', (['self', 'var_decay_spec', "['start_val', 'end_val', 'start_step', 'end_step']"], {}), "(self, var_decay_spec, ['start_val', 'end_val', 'start_step',\n 'end_step'])\n", (12847, 12925), False, 'from slm_lab.lib import logger, math_util, util\n'), ((13650, 13670), 'pydash.is_list', 'ps.is_list', (['pdparams'], {}), '(pdparams)\n', (13660, 13670), True, 'import pydash as ps\n'), ((15834, 15866), 'slm_lab.lib.util.get_class_name', 'util.get_class_name', (['body.memory'], {}), '(body.memory)\n', (15853, 15866), False, 'from slm_lab.lib import logger, math_util, util\n'), ((16190, 16222), 'numpy.isnan', 'np.isnan', (['body.state_std_dev_int'], {}), '(body.state_std_dev_int)\n', (16198, 16222), True, 'import numpy as np\n'), ((16238, 16266), 'numpy.isnan', 'np.isnan', (['body.state_std_dev'], {}), '(body.state_std_dev)\n', (16246, 16266), True, 'import numpy as np\n'), ((16600, 16652), 'numpy.sqrt', 'np.sqrt', (['(body.state_std_dev_int / (body.state_n - 1))'], {}), '(body.state_std_dev_int / (body.state_n - 1))\n', (16607, 16652), True, 'import numpy as np\n'), ((17359, 17391), 'slm_lab.lib.util.get_class_name', 'util.get_class_name', (['body.memory'], {}), '(body.memory)\n', (17378, 17391), False, 'from slm_lab.lib import logger, math_util, util\n'), ((17469, 17546), 'slm_lab.lib.logger.debug', 'logger.debug', (['"""skipping normalizing for Atari, already handled by preprocess"""'], {}), "('skipping normalizing for Atari, already handled by preprocess')\n", (17481, 17546), False, 'from slm_lab.lib import logger, math_util, util\n'), ((1589, 1631), 'torch.zeros_like', 'torch.zeros_like', (['probs'], {'dtype': 'torch.float'}), '(probs, dtype=torch.float)\n', (1605, 1631), False, 'import torch\n'), ((2566, 2661), 'torch.distributions.Categorical', 'distributions.Categorical', ([], {'probs': 'sub_probs', 'logits': 'sub_logits', 'validate_args': 'validate_args'}), '(probs=sub_probs, logits=sub_logits, validate_args\n =validate_args)\n', (2591, 2661), False, 'from torch import distributions\n'), ((9816, 9870), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (9828, 9870), False, 'import torch\n'), ((10281, 10335), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (10293, 10335), False, 'import torch\n'), ((10797, 10813), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10811, 10813), True, 'import numpy as np\n'), ((11251, 11305), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (11263, 11305), False, 'import torch\n'), ((12100, 12154), 'torch.tensor', 'torch.tensor', (['action_list'], {'device': 'algorithm.net.device'}), '(action_list, device=algorithm.net.device)\n', (12112, 12154), False, 'import torch\n'), ((13166, 13190), 'slm_lab.lib.util.in_eval_lab_modes', 'util.in_eval_lab_modes', ([], {}), '()\n', 
(13188, 13190), False, 'from slm_lab.lib import logger, math_util, util\n'), ((16153, 16167), 'numpy.isnan', 'np.isnan', (['mean'], {}), '(mean)\n', (16161, 16167), True, 'import numpy as np\n'), ((17727, 17794), 'slm_lab.lib.logger.debug', 'logger.debug', (['"""skipping normalizing, already handled by preprocess"""'], {}), "('skipping normalizing, already handled by preprocess')\n", (17739, 17794), False, 'from slm_lab.lib import logger, math_util, util\n'), ((1654, 1680), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(0)'}), '(probs, dim=0)\n', (1666, 1680), False, 'import torch\n'), ((1776, 1832), 'torch.full_like', 'torch.full_like', (['logits', '(-100000000.0)'], {'dtype': 'torch.float'}), '(logits, -100000000.0, dtype=torch.float)\n', (1791, 1832), False, 'import torch\n'), ((1847, 1874), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(0)'}), '(logits, dim=0)\n', (1859, 1874), False, 'import torch\n'), ((7160, 7223), 'torch.ones', 'torch.ones', (['body.action_space.high'], {'device': 'algorithm.net.device'}), '(body.action_space.high, device=algorithm.net.device)\n', (7170, 7223), False, 'import torch\n'), ((13881, 13914), 'torch.split', 'torch.split', (['t', 'action_dim'], {'dim': '(0)'}), '(t, action_dim, dim=0)\n', (13892, 13914), False, 'import torch\n'), ((13925, 13951), 'torch.cat', 'torch.cat', (['pdparams'], {'dim': '(1)'}), '(pdparams, dim=1)\n', (13934, 13951), False, 'import torch\n'), ((15116, 15138), 'torch.isnan', 'torch.isnan', (['log_probs'], {}), '(log_probs)\n', (15127, 15138), False, 'import torch\n'), ((16774, 16810), 'numpy.where', 'np.where', (['(body.state_std_dev < 1e-08)'], {}), '(body.state_std_dev < 1e-08)\n', (16782, 16810), True, 'import numpy as np\n'), ((17590, 17622), 'slm_lab.lib.util.get_class_name', 'util.get_class_name', (['body.memory'], {}), '(body.memory)\n', (17609, 17622), False, 'from slm_lab.lib import logger, math_util, util\n'), ((18282, 18328), 'slm_lab.lib.logger.debug', 'logger.debug', (['"""normalizing sample from memory"""'], {}), "('normalizing sample from memory')\n", (18294, 18328), False, 'from slm_lab.lib import logger, math_util, util\n'), ((4242, 4265), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (4258, 4265), False, 'import torch\n'), ((17989, 18015), 'numpy.sum', 'np.sum', (['body.state_std_dev'], {}), '(body.state_std_dev)\n', (17995, 18015), True, 'import numpy as np\n'), ((18041, 18082), 'numpy.clip', 'np.clip', (['(state - body.state_mean)', '(-10)', '(10)'], {}), '(state - body.state_mean, -10, 10)\n', (18048, 18082), True, 'import numpy as np\n'), ((18116, 18180), 'numpy.clip', 'np.clip', (['((state - body.state_mean) / body.state_std_dev)', '(-10)', '(10)'], {}), '((state - body.state_mean) / body.state_std_dev, -10, 10)\n', (18123, 18180), True, 'import numpy as np\n'), ((18340, 18366), 'numpy.sum', 'np.sum', (['body.state_std_dev'], {}), '(body.state_std_dev)\n', (18346, 18366), True, 'import numpy as np\n'), ((18392, 18433), 'numpy.clip', 'np.clip', (['(state - body.state_mean)', '(-10)', '(10)'], {}), '(state - body.state_mean, -10, 10)\n', (18399, 18433), True, 'import numpy as np\n'), ((18467, 18531), 'numpy.clip', 'np.clip', (['((state - body.state_mean) / body.state_std_dev)', '(-10)', '(10)'], {}), '((state - body.state_mean) / body.state_std_dev, -10, 10)\n', (18474, 18531), True, 'import numpy as np\n'), ((6299, 6320), 'torch.exp', 'torch.exp', (['pdparam[1]'], {}), '(pdparam[1])\n', (6308, 6320), False, 'import torch\n'), ((7604, 7704), 'torch.ones', 'torch.ones', 
(['body.action_space.high.size', 'body.action_space.high[0]'], {'device': 'algorithm.net.device'}), '(body.action_space.high.size, body.action_space.high[0], device=\n algorithm.net.device)\n', (7614, 7704), False, 'import torch\n'), ((7383, 7418), 'torch.tensor', 'torch.tensor', (['body.action_space.low'], {}), '(body.action_space.low)\n', (7395, 7418), False, 'import torch\n'), ((7445, 7481), 'torch.tensor', 'torch.tensor', (['body.action_space.high'], {}), '(body.action_space.high)\n', (7457, 7481), False, 'import torch\n')]
|
"""Example case for particle travel times in a straight channel."""
import numpy as np
import matplotlib.pyplot as plt
import dorado.particle_track as pt
# fix the random seed so it stays the same as weights change
np.random.seed(1)
# create synthetic domain and flow field
domain = np.zeros((100, 50))
depth = np.zeros_like(domain)
stage = np.zeros_like(domain)
u = np.zeros_like(domain)
v = np.zeros_like(domain)
dx = 50.
Np_tracer = 500
seed_xloc = [10]
seed_yloc = [25]
# set up straight channel
depth[:, 10:40] = 1.0
stage[:, 10:40] = 1.0
v[:, 10:40] = -10.0
# choose number of iterations for particle to route
num_iter = 100
# define your 'known' or 'expected' travel time for this simple geometry
# picking expected time from location x=10 to x=70
# (really the boundary of row 70, so 1/2 a cell)
# 59.5 cells * 50 m/cell / 10 m/s = 297.5 seconds
target_row = 70
expected_time = 297.5
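# added sanity check (illustration only): the expected value is just distance/speed
# on this grid, (target_row - seed_xloc[0] - 0.5) cells * dx / |v|, with |v| = 10 m/s
assert np.isclose((target_row - seed_xloc[0] - 0.5) * dx / 10.0, expected_time)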
# assign particle parameters
params = pt.modelParams()
params.depth = depth
params.stage = stage
params.u = u
params.v = v
params.dx = dx
# set-up figure
plt.figure()
plt.imshow(np.sqrt(u**2 + v**2))
plt.colorbar()
plt.scatter(seed_yloc, seed_xloc, c='k', marker='o', s=5)
# plot the target line where time is measured
plt.plot(np.linspace(0, 50, 100), np.ones(100)*target_row, c='red')
plt.title('Velocity Field')
plt.legend(labels=['Target Row to Measure Times',
'Particle Seeding Location'],
loc='best')
plt.tight_layout()
plt.show()
# do the routing twice, once without any diffusivity added to the travel times
# (diff_coeff==0) then a second time with significant diffusion (diff_coeff==1)
for dc in list(range(0, 2)):
# set diff_coeff
if dc == 0:
params.diff_coeff = 0.0
else:
params.diff_coeff = 1.0
# make particle
particle = pt.Particles(params)
# walk it
particle.generate_particles(Np_tracer, seed_xloc, seed_yloc)
for i in list(range(0, num_iter)):
walk_data = particle.run_iteration()
# get travel times associated with particles when they are at coord x=70
# use the exposure_time function to measure this
roi = np.zeros_like(depth, dtype='int')
roi[0:target_row, :] = 1
target_times = pt.exposure_time(walk_data, roi)
# plot histogram
plt.subplot(1, 2, dc+1)
n, bins, _ = plt.hist(target_times, bins=100, range=(200, 400),
histtype='bar', density=True,
color=[0.5, 0.5, 1, 0.5])
# plot expected travel time to row 70
plt.scatter(expected_time, np.max(n),
s=75, c='green', marker='x', linewidths=20)
plt.legend(['Expected Travel Time',
'Histogram of Final Travel Times'], ncol=2,
loc='upper left', bbox_to_anchor=(0.0, -0.06), fontsize=16)
plt.title('Travel Time Distribution at Target Row \n'
'Diffusion Coefficient : ' + str(params.diff_coeff), fontsize=20)
plt.xlabel('Travel Time at Target Row [s]', fontsize=16)
plt.ylabel('Probability Density', fontsize=16)
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"dorado.particle_track.Particles",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.zeros_like",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"dorado.particle_track.exposure_time",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"dorado.particle_track.modelParams",
"numpy.sqrt"
] |
[((216, 233), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (230, 233), True, 'import numpy as np\n'), ((285, 304), 'numpy.zeros', 'np.zeros', (['(100, 50)'], {}), '((100, 50))\n', (293, 304), True, 'import numpy as np\n'), ((313, 334), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (326, 334), True, 'import numpy as np\n'), ((343, 364), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (356, 364), True, 'import numpy as np\n'), ((369, 390), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (382, 390), True, 'import numpy as np\n'), ((395, 416), 'numpy.zeros_like', 'np.zeros_like', (['domain'], {}), '(domain)\n', (408, 416), True, 'import numpy as np\n'), ((936, 952), 'dorado.particle_track.modelParams', 'pt.modelParams', ([], {}), '()\n', (950, 952), True, 'import dorado.particle_track as pt\n'), ((1053, 1065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1063, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1113), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1111, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1171), 'matplotlib.pyplot.scatter', 'plt.scatter', (['seed_yloc', 'seed_xloc'], {'c': '"""k"""', 'marker': '"""o"""', 's': '(5)'}), "(seed_yloc, seed_xloc, c='k', marker='o', s=5)\n", (1125, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1313), 'matplotlib.pyplot.title', 'plt.title', (['"""Velocity Field"""'], {}), "('Velocity Field')\n", (1295, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1409), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['Target Row to Measure Times', 'Particle Seeding Location']", 'loc': '"""best"""'}), "(labels=['Target Row to Measure Times',\n 'Particle Seeding Location'], loc='best')\n", (1324, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1454), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1452, 1454), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1463, 1465), True, 'import matplotlib.pyplot as plt\n'), ((3042, 3052), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3050, 3052), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1101), 'numpy.sqrt', 'np.sqrt', (['(u ** 2 + v ** 2)'], {}), '(u ** 2 + v ** 2)\n', (1084, 1101), True, 'import numpy as np\n'), ((1227, 1250), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(100)'], {}), '(0, 50, 100)\n', (1238, 1250), True, 'import numpy as np\n'), ((1802, 1822), 'dorado.particle_track.Particles', 'pt.Particles', (['params'], {}), '(params)\n', (1814, 1822), True, 'import dorado.particle_track as pt\n'), ((2128, 2161), 'numpy.zeros_like', 'np.zeros_like', (['depth'], {'dtype': '"""int"""'}), "(depth, dtype='int')\n", (2141, 2161), True, 'import numpy as np\n'), ((2210, 2242), 'dorado.particle_track.exposure_time', 'pt.exposure_time', (['walk_data', 'roi'], {}), '(walk_data, roi)\n', (2226, 2242), True, 'import dorado.particle_track as pt\n'), ((2269, 2294), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(dc + 1)'], {}), '(1, 2, dc + 1)\n', (2280, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2421), 'matplotlib.pyplot.hist', 'plt.hist', (['target_times'], {'bins': '(100)', 'range': '(200, 400)', 'histtype': '"""bar"""', 'density': '(True)', 'color': '[0.5, 0.5, 1, 0.5]'}), "(target_times, bins=100, range=(200, 400), histtype='bar', density=\n True, color=[0.5, 0.5, 1, 0.5])\n", (2318, 2421), 
True, 'import matplotlib.pyplot as plt\n'), ((2619, 2762), 'matplotlib.pyplot.legend', 'plt.legend', (["['Expected Travel Time', 'Histogram of Final Travel Times']"], {'ncol': '(2)', 'loc': '"""upper left"""', 'bbox_to_anchor': '(0.0, -0.06)', 'fontsize': '(16)'}), "(['Expected Travel Time', 'Histogram of Final Travel Times'],\n ncol=2, loc='upper left', bbox_to_anchor=(0.0, -0.06), fontsize=16)\n", (2629, 2762), True, 'import matplotlib.pyplot as plt\n'), ((2933, 2989), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Travel Time at Target Row [s]"""'], {'fontsize': '(16)'}), "('Travel Time at Target Row [s]', fontsize=16)\n", (2943, 2989), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability Density"""'], {'fontsize': '(16)'}), "('Probability Density', fontsize=16)\n", (3004, 3040), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1264), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (1259, 1264), True, 'import numpy as np\n'), ((2543, 2552), 'numpy.max', 'np.max', (['n'], {}), '(n)\n', (2549, 2552), True, 'import numpy as np\n')]
|
import numpy as np
import esutil as eu
def randcap(*,
rng,
nrand,
ra,
dec,
radius,
get_radius=False,
dorot=False):
"""
    Generate random points in a spherical cap
parameters
----------
nrand:
The number of random points
ra,dec:
The center of the cap in degrees. The ra should be within [0,360) and
dec from [-90,90]
radius: float
radius of the cap, same units as ra,dec
    get_radius: bool, optional
        if true, also return the radius drawn for each point, in degrees
dorot: bool
If dorot is True, generate the points on the equator and rotate them to
be centered at the desired location. This is the default when the dec
is within 0.1 degrees of the pole, to avoid calculation issues
"""
# generate uniformly in r**2
if dec >= 89.9 or dec <= -89.9:
dorot = True
if dorot:
tra, tdec = 90.0, 0.0
rand_ra, rand_dec, rand_r = randcap(
rng=rng,
nrand=nrand,
ra=90.0,
dec=0.0,
radius=radius,
get_radius=True,
)
rand_ra, rand_dec = eu.coords.rotate(
0.0,
dec-tdec,
0.0,
rand_ra,
rand_dec,
)
rand_ra, rand_dec = eu.coords.rotate(
ra-tra,
0.0,
0.0,
rand_ra,
rand_dec,
)
else:
rand_r = rng.uniform(size=nrand)
rand_r = np.sqrt(rand_r)*radius
    # convert to radians
np.deg2rad(rand_r, rand_r)
# generate position angle uniformly 0, 2*PI
rand_posangle = rng.uniform(low=0, high=2*np.pi, size=nrand)
theta = np.array(dec, dtype='f8', ndmin=1, copy=True)
phi = np.array(ra, dtype='f8', ndmin=1, copy=True)
theta += 90
np.deg2rad(theta, theta)
np.deg2rad(phi, phi)
sintheta = np.sin(theta)
costheta = np.cos(theta)
sinr = np.sin(rand_r)
cosr = np.cos(rand_r)
cospsi = np.cos(rand_posangle)
costheta2 = costheta*cosr + sintheta*sinr*cospsi
np.clip(costheta2, -1, 1, costheta2)
# gives [0,pi)
theta2 = np.arccos(costheta2)
sintheta2 = np.sin(theta2)
cos_dphi = (cosr - costheta*costheta2)/(sintheta*sintheta2)
np.clip(cos_dphi, -1, 1, cos_dphi)
dphi = np.arccos(cos_dphi)
# note fancy usage of where
phi2 = np.where(rand_posangle > np.pi, phi+dphi, phi-dphi)
np.rad2deg(phi2, phi2)
np.rad2deg(theta2, theta2)
rand_ra = phi2
rand_dec = theta2-90.0
eu.coords.atbound(rand_ra, 0.0, 360.0)
if get_radius:
np.rad2deg(rand_r, rand_r)
return rand_ra, rand_dec, rand_r
else:
return rand_ra, rand_dec
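# --- illustration only (not part of the original module) ---------------------
# Minimal usage sketch: draw 100 points in a 1-degree cap around ra=200, dec=35,
# assuming a numpy Generator (np.random.default_rng) is passed as `rng`.
def _randcap_example():
    rng = np.random.default_rng(42)
    ra, dec, r = randcap(rng=rng, nrand=100, ra=200.0, dec=35.0,
                         radius=1.0, get_radius=True)
    return ra, dec, r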
def randsphere(rng, num, ra_range=None, dec_range=None):
"""Generate random points on the sphere, possibly on a subset of it.
Routine due to Erin Sheldon.
Parameters
----------
num: integer
The number of randoms to generate
ra_range: list, optional
Should be within range [0,360]. Default [0,360]
dec_range: list, optional
Should be within range [-90,90]. Default [-90,90]
Returns
-------
ra : array-like
ra values for the random points
dec : array-like
dec values for the random points
"""
ra_range = _check_range(ra_range, [0.0, 360.0])
dec_range = _check_range(dec_range, [-90.0, 90.0])
ra = rng.uniform(
size=num,
low=ra_range[0],
high=ra_range[1],
)
cosdec_min = np.cos(np.radians(90.0+dec_range[0]))
cosdec_max = np.cos(np.radians(90.0+dec_range[1]))
v = rng.uniform(
size=num,
low=cosdec_min,
high=cosdec_max,
)
np.clip(v, -1.0, 1.0, v)
# Now this generates on [0,pi)
dec = np.arccos(v)
# convert to degrees
np.degrees(dec, dec)
# now in range [-90,90.0)
dec -= 90.0
return ra, dec
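# --- illustration only (not part of the original module) ---------------------
# Minimal usage sketch: 1000 points distributed uniformly over the whole sphere,
# again assuming a numpy Generator for `rng`.
def _randsphere_example():
    rng = np.random.default_rng(0)
    ra, dec = randsphere(rng, 1000)
    return ra, dec  # ra in [0, 360), dec in [-90, 90)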
def _check_range(rng, allowed):
if rng is None:
rng = allowed
else:
if not hasattr(rng, '__len__'):
raise ValueError("range input does not have len() method")
if rng[0] < allowed[0] or rng[1] > allowed[1]:
raise ValueError("%s should be within %s" % (rng, allowed))
return rng
|
[
"numpy.radians",
"esutil.coords.rotate",
"numpy.degrees",
"numpy.deg2rad",
"numpy.clip",
"numpy.rad2deg",
"numpy.sin",
"numpy.array",
"numpy.where",
"numpy.cos",
"esutil.coords.atbound",
"numpy.arccos",
"numpy.sqrt"
] |
[((3910, 3934), 'numpy.clip', 'np.clip', (['v', '(-1.0)', '(1.0)', 'v'], {}), '(v, -1.0, 1.0, v)\n', (3917, 3934), True, 'import numpy as np\n'), ((3981, 3993), 'numpy.arccos', 'np.arccos', (['v'], {}), '(v)\n', (3990, 3993), True, 'import numpy as np\n'), ((4024, 4044), 'numpy.degrees', 'np.degrees', (['dec', 'dec'], {}), '(dec, dec)\n', (4034, 4044), True, 'import numpy as np\n'), ((1220, 1277), 'esutil.coords.rotate', 'eu.coords.rotate', (['(0.0)', '(dec - tdec)', '(0.0)', 'rand_ra', 'rand_dec'], {}), '(0.0, dec - tdec, 0.0, rand_ra, rand_dec)\n', (1236, 1277), True, 'import esutil as eu\n'), ((1375, 1430), 'esutil.coords.rotate', 'eu.coords.rotate', (['(ra - tra)', '(0.0)', '(0.0)', 'rand_ra', 'rand_dec'], {}), '(ra - tra, 0.0, 0.0, rand_ra, rand_dec)\n', (1391, 1430), True, 'import esutil as eu\n'), ((1626, 1652), 'numpy.deg2rad', 'np.deg2rad', (['rand_r', 'rand_r'], {}), '(rand_r, rand_r)\n', (1636, 1652), True, 'import numpy as np\n'), ((1792, 1837), 'numpy.array', 'np.array', (['dec'], {'dtype': '"""f8"""', 'ndmin': '(1)', 'copy': '(True)'}), "(dec, dtype='f8', ndmin=1, copy=True)\n", (1800, 1837), True, 'import numpy as np\n'), ((1852, 1896), 'numpy.array', 'np.array', (['ra'], {'dtype': '"""f8"""', 'ndmin': '(1)', 'copy': '(True)'}), "(ra, dtype='f8', ndmin=1, copy=True)\n", (1860, 1896), True, 'import numpy as np\n'), ((1926, 1950), 'numpy.deg2rad', 'np.deg2rad', (['theta', 'theta'], {}), '(theta, theta)\n', (1936, 1950), True, 'import numpy as np\n'), ((1959, 1979), 'numpy.deg2rad', 'np.deg2rad', (['phi', 'phi'], {}), '(phi, phi)\n', (1969, 1979), True, 'import numpy as np\n'), ((2000, 2013), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2006, 2013), True, 'import numpy as np\n'), ((2033, 2046), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2039, 2046), True, 'import numpy as np\n'), ((2063, 2077), 'numpy.sin', 'np.sin', (['rand_r'], {}), '(rand_r)\n', (2069, 2077), True, 'import numpy as np\n'), ((2093, 2107), 'numpy.cos', 'np.cos', (['rand_r'], {}), '(rand_r)\n', (2099, 2107), True, 'import numpy as np\n'), ((2126, 2147), 'numpy.cos', 'np.cos', (['rand_posangle'], {}), '(rand_posangle)\n', (2132, 2147), True, 'import numpy as np\n'), ((2214, 2250), 'numpy.clip', 'np.clip', (['costheta2', '(-1)', '(1)', 'costheta2'], {}), '(costheta2, -1, 1, costheta2)\n', (2221, 2250), True, 'import numpy as np\n'), ((2292, 2312), 'numpy.arccos', 'np.arccos', (['costheta2'], {}), '(costheta2)\n', (2301, 2312), True, 'import numpy as np\n'), ((2333, 2347), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (2339, 2347), True, 'import numpy as np\n'), ((2426, 2460), 'numpy.clip', 'np.clip', (['cos_dphi', '(-1)', '(1)', 'cos_dphi'], {}), '(cos_dphi, -1, 1, cos_dphi)\n', (2433, 2460), True, 'import numpy as np\n'), ((2476, 2495), 'numpy.arccos', 'np.arccos', (['cos_dphi'], {}), '(cos_dphi)\n', (2485, 2495), True, 'import numpy as np\n'), ((2548, 2603), 'numpy.where', 'np.where', (['(rand_posangle > np.pi)', '(phi + dphi)', '(phi - dphi)'], {}), '(rand_posangle > np.pi, phi + dphi, phi - dphi)\n', (2556, 2603), True, 'import numpy as np\n'), ((2609, 2631), 'numpy.rad2deg', 'np.rad2deg', (['phi2', 'phi2'], {}), '(phi2, phi2)\n', (2619, 2631), True, 'import numpy as np\n'), ((2640, 2666), 'numpy.rad2deg', 'np.rad2deg', (['theta2', 'theta2'], {}), '(theta2, theta2)\n', (2650, 2666), True, 'import numpy as np\n'), ((2730, 2768), 'esutil.coords.atbound', 'eu.coords.atbound', (['rand_ra', '(0.0)', '(360.0)'], {}), '(rand_ra, 0.0, 360.0)\n', (2747, 2768), True, 'import esutil as 
eu\n'), ((2797, 2823), 'numpy.rad2deg', 'np.rad2deg', (['rand_r', 'rand_r'], {}), '(rand_r, rand_r)\n', (2807, 2823), True, 'import numpy as np\n'), ((3725, 3756), 'numpy.radians', 'np.radians', (['(90.0 + dec_range[0])'], {}), '(90.0 + dec_range[0])\n', (3735, 3756), True, 'import numpy as np\n'), ((3780, 3811), 'numpy.radians', 'np.radians', (['(90.0 + dec_range[1])'], {}), '(90.0 + dec_range[1])\n', (3790, 3811), True, 'import numpy as np\n'), ((1569, 1584), 'numpy.sqrt', 'np.sqrt', (['rand_r'], {}), '(rand_r)\n', (1576, 1584), True, 'import numpy as np\n')]
|
import os
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
import parcels
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import cartopy
import cartopy.util
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
u_filename = '/home/alir/hawaii_npac/0000969408_U_10800.8150.1_1080.3720.90'
v_filename = '/home/alir/hawaii_npac/0000969408_V_10800.8150.1_1080.3720.90'
level = 0
with open(u_filename, 'rb') as f:
    nx, ny = 1080, 3720  # grid dimensions of one horizontal slice
    record_length = 4  # [bytes]
    # advance the file pointer past the preceding vertical levels
    f.seek(level * record_length * nx*ny, os.SEEK_SET)
u_data = np.fromfile(f, dtype='>f4', count=nx*ny)
u_array = np.reshape(u_data, [ny, nx], order='F')
with open(v_filename, 'rb') as f:
    nx, ny = 1080, 3720  # grid dimensions of one horizontal slice
    record_length = 4  # [bytes]
    # advance the file pointer past the preceding vertical levels
    f.seek(level * record_length * nx*ny, os.SEEK_SET)
v_data = np.fromfile(f, dtype='>f4', count=nx*ny)
v_array = np.reshape(v_data, [ny, nx], order='F')
u_data = u_array
v_data = v_array
# u_data = np.ma.masked_where(u_array == 0, u_array)
# v_data = np.ma.masked_where(v_array == 0, v_array)
lats = np.arange(ny)/48
lons = np.arange(nx)/48
depth = np.array([0.0])
u_field = parcels.field.Field(name='U', data=u_data,
lon=lons, lat=lats, depth=depth, mesh='spherical')
v_field = parcels.field.Field(name='V', data=v_data,
lon=lons, lat=lats, depth=depth, mesh='spherical')
u_magnitude = np.sqrt(u_data*u_data + v_data*v_data)
fieldset = parcels.fieldset.FieldSet(u_field, v_field)
# fieldset.U.show()
lats_pset = np.tile(np.linspace(5, 70, 11), 11)
lons_pset = np.repeat(np.linspace(5, 15, 11), 11)
# species_field = -1 * np.ones((11,11), dtype=np.int32)
# for i, lat in enumerate(np.linspace(10, 50, 11)):
# for j, lon in enumerate(np.linspace(-170, -130, 11)):
# pass
# species_pfield = parcels.field.Field(name='species', data=species_field,
# lat=np.linspace(10, 50, 11), lon=np.linspace(-170, -130, 11), depth=depth, mesh='spherical')
class MicrobeParticle(parcels.JITParticle):
species = parcels.Variable('species', dtype=np.int32, initial=-1)
pset = parcels.ParticleSet.from_list(fieldset=fieldset, pclass=MicrobeParticle,
lon=lons_pset, lat=lats_pset)
for i, particle in enumerate(pset):
if 37.5 <= particle.lat <= 52.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 1
elif 37.5 <= particle.lat <= 52.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 2
elif 37.5 <= particle.lat <= 52.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 3
elif 22.5 <= particle.lat <= 37.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 3
elif 22.5 <= particle.lat <= 37.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 1
elif 22.5 <= particle.lat <= 37.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 2
elif 7.5 <= particle.lat <= 22.5 and -172.5 <= particle.lon <= -157.5:
particle.species = 2
elif 7.5 <= particle.lat <= 22.5 and -157.5 <= particle.lon <= -142.5:
particle.species = 3
elif 7.5 <= particle.lat <= 22.5 and -142.5 <= particle.lon <= -127.5:
particle.species = 1
particle.species = 1
print("Particle {:03d} @({:.2f},{:.2f}) [species={:d}]".format(i, particle.lat, particle.lon, particle.species))
def rock_paper_scissors_type(n):
if n == 1:
return "rock"
elif n == 2:
return "paper"
elif n == 3:
return "scissors"
return None
vector_crs = ccrs.PlateCarree()
land_50m = cartopy.feature.NaturalEarthFeature('physical', 'land', '50m',
edgecolor='face',facecolor='dimgray', linewidth=0)
t = datetime(2017, 1, 1)
dt = timedelta(hours=1)
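# Each pass advects the particle set for one hour using parcels' AdvectionRK4
# (a fourth-order Runge-Kutta kernel) and writes the trajectories to a NetCDF
# file; the commented-out block below would then resolve microbe interactions.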
for n in range(1):
print("Advecting: {:} -> {:}".format(t, t+dt))
nc_filename = "advected_microbes_" + str(n).zfill(4) + ".nc"
pset.execute(parcels.AdvectionRK4, runtime=dt, dt=dt, verbose_progress=True,
output_file=pset.ParticleFile(name=nc_filename, outputdt=dt))
# print("Computing microbe interactions...")
# N = len(pset)
# for i, p1 in enumerate(pset):
# for j, p2 in enumerate(pset[i+1:]):
# if np.abs(p1.lat - p2.lat) < 1 and np.abs(p1.lon - p2.lon) < 1:
# p1_type = rock_paper_scissors_type(p1.species)
# p2_type = rock_paper_scissors_type(p2.species)
# winner = None
# if p1_type == "rock" and p2_type == "scissors":
# winner = p1
# elif p1_type == "rock" and p2_type == "paper":
# winner = p2
# elif p1_type == "paper" and p2_type == "rock":
# winner = p1
# elif p1_type == "paper" and p2_type == "scissors":
# winner = p2
# elif p1_type == "scissors" and p2_type == "rock":
# winner = p2
# elif p1_type == "scissors" and p2_type == "paper":
# winner = p1
# else:
# winner = None
# if winner == p1:
# p2.species = p1.species
# print("[{:s}#{:d}] @({:.2f}, {:.2f}) vs. [{:s}#{:d}] @({:.2f}, {:.2f}): #{:d} wins!"
# .format(p1_type, i, p1.lat, p1.lon, p2_type, j+i, p2.lat, p2.lon, i))
# elif winner == p2:
# p1.species = p2.species
# print("[{:s}#{:d}] @({:.2f}, {:.2f}) vs. [{:s}#{:d}] @({:.2f}, {:.2f}): #{:d} wins!"
# .format(p1_type, i, p1.lat, p1.lon, p2_type, j+i, p2.lat, p2.lon, j+i))
# for i, p in enumerate(pset):
# if p.lat >= 59 or p.lat <= 1 or p.lon <= -179 or p.lon >= -121:
# print("Removing particle #{:d} @({:.2f},{:.2f}). Too close to boundary"
# .format(i, p.lat, p.lon))
# pset.remove(i)
t = t+dt
print("Plotting figure...")
fig = plt.figure(figsize=(16, 9))
matplotlib.rcParams.update({'font.size': 10})
crs_sps = ccrs.PlateCarree(central_longitude=-150)
crs_sps._threshold = 1000.0 # This solves https://github.com/SciTools/cartopy/issues/363
ax = plt.subplot(111, projection=crs_sps)
ax.add_feature(land_50m)
ax.set_extent([0, 22.5, 0, 77.5], ccrs.PlateCarree())
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color='black',
alpha=0.8, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlocator = mticker.FixedLocator([0, 7.5, 15, 22.5])
gl.ylocator = mticker.FixedLocator([0, 15.5, 31, 46.5, 62, 77.5])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
im = ax.pcolormesh(lons, lats, u_magnitude, transform=vector_crs, vmin=0, vmax=1, cmap='Blues_r')
clb = fig.colorbar(im, ax=ax, extend='max', fraction=0.046, pad=0.1)
clb.ax.set_title(r'm/s')
rock_lats, rock_lons = [], []
paper_lats, paper_lons = [], []
scissors_lats, scissors_lons = [], []
for microbe in pset:
if microbe.species == 1:
rock_lats.append(microbe.lat)
rock_lons.append(microbe.lon)
elif microbe.species == 2:
paper_lats.append(microbe.lat)
paper_lons.append(microbe.lon)
elif microbe.species == 3:
scissors_lats.append(microbe.lat)
scissors_lons.append(microbe.lon)
# ax.plot(rock_lons, rock_lats, marker='o', linestyle='', color='red', ms=4, label='Rocks', transform=vector_crs)
# ax.plot(paper_lons, paper_lats, marker='o', linestyle='', color='lime', ms=4, label='Papers', transform=vector_crs)
# ax.plot(scissors_lons, scissors_lats, marker='o', linestyle='', color='cyan', ms=4, label='Scissors', transform=vector_crs)
plt.title(str(t))
ax.legend()
# plt.show()
png_filename = "advected_microbes_" + str(n).zfill(4) + ".png"
print("Saving figure: {:s}".format(png_filename))
plt.savefig(png_filename, dpi=300, format='png', transparent=False)
plt.close('all')
|
[
"cartopy.feature.NaturalEarthFeature",
"matplotlib.pyplot.figure",
"parcels.field.Field",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"matplotlib.ticker.FixedLocator",
"datetime.timedelta",
"numpy.reshape",
"numpy.linspace",
"parcels.ParticleSet.from_list",
"datetime.datetime",
"parcels.Variable",
"matplotlib.pyplot.subplot",
"parcels.fieldset.FieldSet",
"numpy.fromfile",
"numpy.array",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((1328, 1343), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1336, 1343), True, 'import numpy as np\n'), ((1355, 1452), 'parcels.field.Field', 'parcels.field.Field', ([], {'name': '"""U"""', 'data': 'u_data', 'lon': 'lons', 'lat': 'lats', 'depth': 'depth', 'mesh': '"""spherical"""'}), "(name='U', data=u_data, lon=lons, lat=lats, depth=depth,\n mesh='spherical')\n", (1374, 1452), False, 'import parcels\n'), ((1463, 1560), 'parcels.field.Field', 'parcels.field.Field', ([], {'name': '"""V"""', 'data': 'v_data', 'lon': 'lons', 'lat': 'lats', 'depth': 'depth', 'mesh': '"""spherical"""'}), "(name='V', data=v_data, lon=lons, lat=lats, depth=depth,\n mesh='spherical')\n", (1482, 1560), False, 'import parcels\n'), ((1576, 1618), 'numpy.sqrt', 'np.sqrt', (['(u_data * u_data + v_data * v_data)'], {}), '(u_data * u_data + v_data * v_data)\n', (1583, 1618), True, 'import numpy as np\n'), ((1627, 1670), 'parcels.fieldset.FieldSet', 'parcels.fieldset.FieldSet', (['u_field', 'v_field'], {}), '(u_field, v_field)\n', (1652, 1670), False, 'import parcels\n'), ((2266, 2372), 'parcels.ParticleSet.from_list', 'parcels.ParticleSet.from_list', ([], {'fieldset': 'fieldset', 'pclass': 'MicrobeParticle', 'lon': 'lons_pset', 'lat': 'lats_pset'}), '(fieldset=fieldset, pclass=MicrobeParticle,\n lon=lons_pset, lat=lats_pset)\n', (2295, 2372), False, 'import parcels\n'), ((3677, 3695), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3693, 3695), True, 'import cartopy.crs as ccrs\n'), ((3707, 3826), 'cartopy.feature.NaturalEarthFeature', 'cartopy.feature.NaturalEarthFeature', (['"""physical"""', '"""land"""', '"""50m"""'], {'edgecolor': '"""face"""', 'facecolor': '"""dimgray"""', 'linewidth': '(0)'}), "('physical', 'land', '50m', edgecolor=\n 'face', facecolor='dimgray', linewidth=0)\n", (3742, 3826), False, 'import cartopy\n'), ((3830, 3850), 'datetime.datetime', 'datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (3838, 3850), False, 'from datetime import datetime, timedelta\n'), ((3856, 3874), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3865, 3874), False, 'from datetime import datetime, timedelta\n'), ((731, 773), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '""">f4"""', 'count': '(nx * ny)'}), "(f, dtype='>f4', count=nx * ny)\n", (742, 773), True, 'import numpy as np\n'), ((786, 825), 'numpy.reshape', 'np.reshape', (['u_data', '[ny, nx]'], {'order': '"""F"""'}), "(u_data, [ny, nx], order='F')\n", (796, 825), True, 'import numpy as np\n'), ((1035, 1077), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '""">f4"""', 'count': '(nx * ny)'}), "(f, dtype='>f4', count=nx * ny)\n", (1046, 1077), True, 'import numpy as np\n'), ((1090, 1129), 'numpy.reshape', 'np.reshape', (['v_data', '[ny, nx]'], {'order': '"""F"""'}), "(v_data, [ny, nx], order='F')\n", (1100, 1129), True, 'import numpy as np\n'), ((1279, 1292), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (1288, 1292), True, 'import numpy as np\n'), ((1303, 1316), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (1312, 1316), True, 'import numpy as np\n'), ((1712, 1734), 'numpy.linspace', 'np.linspace', (['(5)', '(70)', '(11)'], {}), '(5, 70, 11)\n', (1723, 1734), True, 'import numpy as np\n'), ((1762, 1784), 'numpy.linspace', 'np.linspace', (['(5)', '(15)', '(11)'], {}), '(5, 15, 11)\n', (1773, 1784), True, 'import numpy as np\n'), ((2202, 2257), 'parcels.Variable', 'parcels.Variable', (['"""species"""'], {'dtype': 'np.int32', 'initial': '(-1)'}), "('species', dtype=np.int32, 
initial=-1)\n", (2218, 2257), False, 'import parcels\n'), ((6134, 6161), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6144, 6161), True, 'import matplotlib.pyplot as plt\n'), ((6166, 6211), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 10}"], {}), "({'font.size': 10})\n", (6192, 6211), False, 'import matplotlib\n'), ((6231, 6271), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(-150)'}), '(central_longitude=-150)\n', (6247, 6271), True, 'import cartopy.crs as ccrs\n'), ((6376, 6412), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': 'crs_sps'}), '(111, projection=crs_sps)\n', (6387, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6701, 6741), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[0, 7.5, 15, 22.5]'], {}), '([0, 7.5, 15, 22.5])\n', (6721, 6741), True, 'import matplotlib.ticker as mticker\n'), ((6760, 6811), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[0, 15.5, 31, 46.5, 62, 77.5]'], {}), '([0, 15.5, 31, 46.5, 62, 77.5])\n', (6780, 6811), True, 'import matplotlib.ticker as mticker\n'), ((8159, 8226), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_filename'], {'dpi': '(300)', 'format': '"""png"""', 'transparent': '(False)'}), "(png_filename, dpi=300, format='png', transparent=False)\n", (8170, 8226), True, 'import matplotlib.pyplot as plt\n'), ((8236, 8252), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8245, 8252), True, 'import matplotlib.pyplot as plt\n'), ((6480, 6498), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6496, 6498), True, 'import cartopy.crs as ccrs\n'), ((6527, 6545), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6543, 6545), True, 'import cartopy.crs as ccrs\n')]
|
'''
GraphPy: Python Module for Graph-based learning algorithms. Efficient implementations of modern methods for graph-based semi-supervised learning, and graph clustering.
See README.md file for usage.
Author: <NAME>, 2020
'''
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib
import scipy.spatial as spatial
import scipy.optimize as opt
import numpy.random as random
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
import scipy.sparse.csgraph as csgraph
import sklearn.cluster as cluster
from sklearn.decomposition import PCA
import sys, getopt, time, csv, torch, os, multiprocessing
from joblib import Parallel, delayed
from utils.non_neg_qpsolver import non_negative_qpsolver
clustering_algorithms = ['incres', 'spectral', 'spectralshimalik', 'spectralngjordanweiss']
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
# Print New Line on Complete
if iteration == total:
print()
def load_mbo_eig(dataset, metric, k):
# Load eigenvector data if MBO selected
try:
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_" + metric + "_k%d" % k + "_spectrum.npz"
dataFile_path = os.path.join(location, 'MBOdata', dataFile)
M = np.load(dataFile_path, allow_pickle=True)
eigvals = M['eigenvalues']
eigvecs = M['eigenvectors']
except:
print("Could not find MBOdata/" + dataset + "_" + metric + "_k%d" % k + "_spectrum.npz")
print('You need to run ComputeEigenvectorsMBO.py first.')
sys.exit(2)
return eigvals, eigvecs
def load_label_permutation(dataset, label_perm='', t='-1'):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + label_perm + "_permutations.npz"
dataFile_path = os.path.join(location, 'LabelPermutations', dataFile)
# Load label permutation
try:
M = np.load(dataFile_path, allow_pickle=True)
perm = M['perm']
except:
print('Cannot find ' + dataFile)
print('You need to run CreateLabelPermutation.py first.')
sys.exit(2)
# Restrict trials
t = [int(e) for e in t.split(',')]
if t[0] > -1:
if len(t) == 1:
perm = perm[0:t[0]]
else:
perm = perm[(t[0] - 1):t[1]]
return perm
def load_dataset(dataset, metric='L2'):
# For variational autoencoder the vae data, e.g., Data/MNIST_vae.npz must exist.
if metric[0:3] == 'vae' or metric[0:3] == 'aet':
dataFile = dataset + "_" + metric + ".npz"
else:
dataFile = dataset + "_raw.npz"
location = os.path.dirname(os.path.realpath(__file__))
dataFile_path = os.path.join(location, 'Data', dataFile)
# Try to Load data
try:
M = np.load(dataFile_path, allow_pickle=True)
data = M['data']
except:
print('Cannot find ' + dataFile + '.')
sys.exit(2)
return data
def load_labels(dataset):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_labels.npz"
dataFile_path = os.path.join(location, 'Data', dataFile)
# Load labels
try:
M = np.load(dataFile_path, allow_pickle=True)
labels = M['labels']
except:
print('Cannot find dataset Data/' + dataFile)
sys.exit(2)
return labels
def load_kNN_data(dataset, metric='L2'):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_" + metric + ".npz"
dataFile_path = os.path.join(location, 'kNNData', dataFile)
# Load kNN data
try:
M = np.load(dataFile_path, allow_pickle=True)
I = M['I']
J = M['J']
D = M['D']
except:
print('Cannot find ' + dataFile)
print('You need to run ComputeKNN.py.')
sys.exit(2)
return I, J, D
# Compute sizes of each class
def label_proportions(labels):
L = np.unique(labels)
L = L[L >= 0]
k = len(L)
# n = len(labels)
n = np.sum(labels >= 0)
beta = np.zeros((k,))
for i in range(k):
beta[i] = np.sum(labels == L[i]) / n
return beta
# Constructs a weight matrix for graph on mxn grid with NSEW neighbors
# def grid_graph(m, n):
# X, Y = np.mgrid[:m, :n]
#
# return W
# Reweights the graph to use self-tuning weights
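# Each weight w_ij is rescaled by (d_i * d_j)^(-alpha), where d_i is the
# distance from x_i to its k-th nearest neighbor (the last column of D).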
def self_tuning(W, D, alpha):
if alpha != 0:
n = D.shape[0]
k = D.shape[1]
d = D[:, k - 1]
d = sparse.spdiags(d ** (-alpha), 0, n, n)
W = d * W * d
return W
# Reweights the graph based on a clustering prior
def cluster_prior(W, cluster_labels):
n = W.shape[0]
I, J, V = sparse.find(W)
K = cluster_labels[I] == cluster_labels[J]
V[K] = V[K] * 10
V = V / np.max(V)
W = sparse.coo_matrix((V, (I, J)), shape=(n, n)).tocsr()
return W
# Computes scattering transform of depth 2 of I
# Bruna, Joan, and <NAME>. "Invariant scattering convolution networks." IEEE transactions on pattern analysis and machine intelligence 35.8 (2013): 1872-1886.
def scattering_transform(I, n, m, depth=2):
from kymatio import Scattering2D
num_pts = I.shape[0]
K = torch.from_numpy(I.reshape((num_pts, n, m))).float().contiguous()
scattering = Scattering2D(J=depth, shape=(n, m))
Z = scattering(K).numpy()
l = Z.shape[1] * Z.shape[2] * Z.shape[3]
return Z.reshape((num_pts, l))
# Label permutations
# labels = labels
# T = number of trials
# r = label rate in (0,1)
def create_label_permutations_rate(labels, T, R):
perm = list()
n = labels.shape[0]
labelvals = np.unique(labels)
labelvals = labelvals[labelvals >= 0]
num_labels = len(labelvals)
num = np.zeros((num_labels,))
for i in range(num_labels):
num[i] = np.sum(labels == labelvals[i])
J = np.arange(n).astype(int)
for k in range(T):
for r in R:
L = []
for i in range(num_labels):
l = labelvals[i]
I = labels == l
K = J[I]
m = round(num[i] * r / 100)
L = L + random.choice(K, size=m.astype(int), replace=False).tolist()
L = np.array(L)
perm.append(L)
return perm
# Label permutations
# labels = labels
# T = number of trials
# m = vector of number of labels
def create_label_permutations(labels, T, m, multiplier=None):
# Find all unique labels >= 0
# Negative numbers indicate unlabeled nodes
unique_labels = np.unique(labels)
unique_labels = unique_labels[unique_labels >= 0]
perm = list()
n = labels.shape[0]
J = np.arange(n).astype(int)
for k in range(T):
for i in m:
L = []
ind = 0
for l in unique_labels:
I = labels == l
K = J[I]
if multiplier is None:
L = L + random.choice(K, size=i, replace=False).tolist()
else:
sze = int(np.round(i * multiplier[ind]))
L = L + random.choice(K, size=sze, replace=False).tolist()
ind = ind + 1
L = np.array(L)
perm.append(L)
return perm
# Randomly choose m labels per class
def randomize_labels(L, m):
perm = create_label_permutations(L, 1, [m])
return perm[0]
# Default function
def exp_weight(x):
return np.exp(-x)
# Pointwise max of non-negative sparse matrices A and B
def sparse_max(A, B):
I = (A + B) > 0
IB = B > A
IA = I - IB
return A.multiply(IA) + B.multiply(IB)
# Compute degrees of weight matrix W
def degrees(W):
return np.squeeze(np.array(np.sum(W, axis=1)))
# Scale the diagonal entries of W by the factor b
def diag_multiply(W, b):
n = W.shape[0] # Number of points
D = sparse.spdiags(W.diagonal(), 0, n, n)
return W - (1 - b) * D
# Compute degrees of weight matrix W
# Returns sparse matrix with degrees on diagonal
def degree_matrix(W, p=1):
n = W.shape[0] # Number of points
# Construct sparse degree matrix
d = degrees(W)
D = sparse.spdiags(d ** p, 0, n, n)
return D.tocsr()
# Construct robin boundary condition matrix
def robin_bc_matrix(X, nu, eps, gamma):
n = X.shape[0]
Xtree = spatial.cKDTree(X)
_, nn_ind = Xtree.query(X + eps * nu)
# nn_dist = np.linalg.norm(X - X[nn_ind,:],axis=1)
nn_dist = eps * np.ones((n,))
# Robin matrix
A = sparse.spdiags(gamma + (1 - gamma) / nn_dist, 0, n, n)
B = sparse.coo_matrix(((1 - gamma) / nn_dist, (range(n), nn_ind)), shape=(n, n))
R = (A - B).tocsr()
return R
# Laplace matrix
# W = weight matrix
# norm = type of normalization
# Options: none, randomwalk1, randomwalk2, normalized
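# 'none': L = D - W;  'randomwalk1': L = D^{-1}(D - W);
# 'randomwalk2': L = (D - W)D^{-1};  'normalized': L = D^{-1/2}(D - W)D^{-1/2}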
def graph_laplacian(W, norm="none"):
D = degree_matrix(W)
if norm == "none":
L = D - W
elif norm == "randomwalk1":
Dinv = degree_matrix(W, p=-1)
L = Dinv * (D - W)
elif norm == "randomwalk2":
Dinv = degree_matrix(W, p=-1)
L = (D - W) * Dinv
elif norm == "normalized":
Dinv2 = degree_matrix(W, p=-1 / 2)
L = Dinv2 * (D - W) * Dinv2
else:
print("Invalid option for graph Laplacian normalization. Returning unnormalized Laplacian.")
L = D - W
return L.tocsr()
# Graph infinity Laplacian
# W = sparse weight matrix
# u = function on graph
def graph_phi_laplacian(W, u, phi, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
w = u[J] - u[I]
a = np.absolute(w)
pa = phi(a)
m = pa / (a + 1e-13)
M = sparse.coo_matrix((V * pa / (a + 1e-13), (I, J)), shape=(n, n)).tocsr()
m = degrees(M)
M = sparse.coo_matrix((V * pa * np.sign(w), (I, J)), shape=(n, n)).tocsr()
M = np.squeeze(np.array(np.sum(M, axis=1)))
return M, m
# Graph infinity Laplacian
# W = sparse weight matrix
# u = function on graph
def graph_infinity_laplacian(W, u, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
M = sparse.coo_matrix((V * (u[J] - u[I]), (I, J)), shape=(n, n)).tocsr()
M = M.min(axis=1) + M.max(axis=1)
return M.toarray().flatten()
# Construct epsilon-graph sparse distance matrix
def eps_weight_matrix(X, eps, f=exp_weight):
n = X.shape[0] # Number of points
# Rangesearch to find nearest neighbors
Xtree = spatial.cKDTree(X)
M = Xtree.query_pairs(eps)
M = np.array(list(M))
# Differences between points and neighbors
V = X[M[:, 0], :] - X[M[:, 1], :]
D = np.sum(V * V, axis=1)
# Weights
D = f(4 * D / (eps * eps))
# Symmetrize weights and add diagonal entries
D = np.concatenate((D, D, f(0) * np.ones(n, )))
M1 = np.concatenate((M[:, 0], M[:, 1], np.arange(0, n)))
M2 = np.concatenate((M[:, 1], M[:, 0], np.arange(0, n)))
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (M1, M2)), shape=(n, n))
return W.tocsr()
# Exact knnsearch
def knnsearch(X, k):
# KDtree to find nearest neighbors
n = X.shape[0]
Xtree = spatial.cKDTree(X)
D, J = Xtree.query(X, k=k)
I = np.ones((n, k), dtype=int) * J[:, 0][:, None]
return I, J, D
# Perform approximate nearest neighbor search, returning indices I,J of neighbors, and distance D
# Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot".
def knnsearch_annoy(X, k, similarity='euclidean'):
from annoy import AnnoyIndex
n = X.shape[0] # Number of points
dim = X.shape[1] # Dimension
print('kNN search with Annoy approximate nearest neighbor package...')
printProgressBar(0, n, prefix='Progress:', suffix='Complete', length=50)
u = AnnoyIndex(dim, similarity) # Length of item vector that will be indexed
for i in range(n):
u.add_item(i, X[i, :])
u.build(10) # 10 trees
D = []
I = []
J = []
for i in range(n):
printProgressBar(i + 1, n, prefix='Progress:', suffix='Complete', length=50)
A = u.get_nns_by_item(i, k, include_distances=True, search_k=-1)
I.append([i] * k)
J.append(A[0])
D.append(A[1])
I = np.array(I)
J = np.array(J)
D = np.array(D)
return I, J, D
# Compute weight matrix from nearest neighbor indices I,J and distances D
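# Builds self-tuning Gaussian weights W_ij = exp(-4 * d_ij^2 / (sigma_i * sigma_j)),
# where sigma_i is the distance from x_i to its k-th nearest neighbor.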
def weight_matrix_selftuning(I, J, D):
n = I.shape[0]
k = I.shape[1]
# Distance to kth nearest neighbor as a matrix
sigma = D[:, k - 1]
sigma = sparse.spdiags(1 / sigma, 0, n, n)
sigma = sigma.tocsr()
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Symmetrize and remove redundant entries
M1 = np.vstack((I, J, D))
M2 = np.vstack((J, I, D))
M = np.concatenate((M1, M2), axis=1)
M = np.unique(M, axis=1)
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
I = M[0, :]
J = M[1, :]
D = M[2, :]
dist = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
B = sparse.coo_matrix((np.ones(len(D), ), (I, J)), shape=(n, n)).tocsr() # Ones in all entries
# Self-tuning weights
E = -4 * sigma * (dist ** 2) * sigma
W = E.expm1()
W = W.multiply(B) + B
return W
# Compute weight matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
# Chooses k neighbors at random from the I.shape[1] nearest neighbors
def weight_matrix_homogenized(I, J, D, k, f=exp_weight):
# I = I[:,:10]
# J = J[:,:10]
# D = D[:,:10]
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = I.shape[0]
for i in range(n):
ind = random.choice(I.shape[1], k, replace=False)
I[i, :k] = I[i, ind]
J[i, :k] = J[i, ind]
D[i, :k] = 1
n = I.shape[0]
k = I.shape[1]
D = D * D
eps = D[:, k - 1] / 4
D = f(D / eps[:, None])
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
return W
# Compute distance matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
def dist_matrix(I, J, D, k):
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
n = I.shape[0]
k = I.shape[1]
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
return W
# Adds weights to an adjacency matrix W using similarity in data X
def add_weights(W, X, labels):
n = W.shape[0]
# pca = PCA(n_components=20)
# X = pca.fit_transform(X)
# print(X.shape)
I, J, V = sparse.find(W)
# Dot products
Y = X[I, :] - X[J, :]
Y = np.sum(Y * Y, axis=1)
W = sparse.coo_matrix((Y, (I, J)), shape=(n, n)).tocsr()
max_dist = np.reshape(np.max(W, axis=1).todense().tolist(), (n,))
D = sparse.spdiags((max_dist + 1e-10) ** (-1), 0, n, n).tocsr()
W = D * W
I, J, V = sparse.find(W)
V = np.exp(-2 * V)
W = sparse.coo_matrix((V, (I, J)), shape=(n, n)).tocsr()
return W
# Finds largest connected component of the graph represented by adjacency matrix W
# Returns the weighted adjacency matrix, along with a boolean mask indicating the
# vertices from the input matrix that were selected
def largest_conn_component(W):
ncomp, labels = csgraph.connected_components(W, directed=False)
num_verts = np.zeros((ncomp,))
for i in range(ncomp):
num_verts[i] = np.sum(labels == i)
i_max = np.argmax(num_verts)
ind = labels == i_max
A = W[ind, :]
A = A[:, ind]
print("Found %d" % ncomp + " connected components.")
print("Returning component with %d" % num_verts[i_max] + " vertices out of %d" % W.shape[0] + " total vertices.")
return A, ind
# Compute weight matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
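# With the default f(x) = exp(-x) this gives Gaussian weights
# w_ij = exp(-4*|x_i - x_j|^2 / d_k(x_i)^2), where d_k(x_i) is the distance from
# x_i to its k-th nearest neighbor; symmetrize=True averages W with its transpose.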
def weight_matrix(I, J, D, k, f=exp_weight, symmetrize=True):
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
n = I.shape[0]
k = I.shape[1]
D = D * D
eps = D[:, k - 1] / 4
D = f(D / eps[:, None])
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
if symmetrize:
        W = (W + W.transpose()) / 2
return W
def nnk_weight_matrix(dataset, metric, mask, knn_param, reg=1e-10, symmetrize=True):
# Try to Load data
X = load_dataset(dataset=dataset, metric=metric)
X_normalized = X / np.linalg.norm(X, axis=1, keepdims=True)
num_of_nodes = mask.shape[0]
neighbor_indices = np.zeros((num_of_nodes, knn_param))
weight_values = np.zeros((num_of_nodes, knn_param))
error_values = np.ones((num_of_nodes, knn_param))
for node_i in range(num_of_nodes):
non_zero_index = np.array(mask[node_i, :])
non_zero_index = np.delete(non_zero_index, np.where(non_zero_index == node_i))
if len(non_zero_index) > knn_param:
non_zero_index = non_zero_index[:knn_param]
x_neighbors = X_normalized[non_zero_index]
g_i = 0.5 + np.dot(x_neighbors, X_normalized[node_i]) / 2
G_i = 0.5 + np.dot(x_neighbors, x_neighbors.T) / 2
# x_opt, check = non_negative_qpsolver(G_i, g_i, g_i, reg)
# error_values[node_i, :] = 1 - 2 * np.dot(x_opt, g_i) + np.dot(x_opt, np.dot(G_i, x_opt))
x_opt = g_i
weight_values[node_i, :] = x_opt / np.sum(x_opt)
neighbor_indices[node_i, :] = non_zero_index
row_indices = np.expand_dims(np.arange(0, num_of_nodes), 1)
row_indices = np.tile(row_indices, [1, knn_param])
adjacency = sparse.coo_matrix((weight_values.ravel(), (row_indices.ravel(), neighbor_indices.ravel())),
shape=(num_of_nodes, num_of_nodes))
if symmetrize:
error = sparse.coo_matrix((error_values.ravel(), (row_indices.ravel(), neighbor_indices.ravel())),
shape=(num_of_nodes, num_of_nodes))
        # Alternate way of doing this: error_index = sparse.find(error > error.T); adjacency[error_index[0], error_index[1]] = 0
adjacency = adjacency.multiply(error < error.T)
adjacency = adjacency.maximum(adjacency.T)
adjacency.eliminate_zeros()
error_values = error_values[:, 0]
return adjacency.tocsr(), error_values
# Compute boundary points
# k = number of neighbors to use
def boundary_points_new(X, k, I=None, J=None, D=None, ReturnNormals=False):
if (I is None) or (J is None) or (D is None):
n = X.shape[0]
d = X.shape[1]
if d <= 5:
I, J, D = knnsearch(X, k)
else:
I, J, D = knnsearch_annoy(X, k)
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = X.shape[0]
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
W = weight_matrix(I, J, D, k, f=lambda x: np.ones_like(x), symmetrize=False)
L = graph_laplacian(W)
# Estimates of normal vectors
nu = -L * X
nu = np.transpose(nu)
norms = np.sqrt(np.sum(nu * nu, axis=0))
nu = nu / norms
nu = np.transpose(nu)
print(nu.shape)
# Boundary test
NN = X[J]
NN = np.swapaxes(NN[:, 1:, :], 0, 1) # This is kxnxd
V = NN - X # This is x^i-x^0 kxnxd array
NN_nu = nu[J]
W = (np.swapaxes(NN_nu[:, 1:, :], 0, 1) + nu) / 2
xd = np.sum(V * W, axis=2) # dist to boundary
Y = np.max(-xd, axis=0)
if ReturnNormals:
return Y, nu
else:
return Y
# Compute boundary points
# k = number of neighbors to use
def boundary_points(X, k, I=None, J=None, D=None, ReturnNormals=False, R=np.inf):
if (I is None) or (J is None) or (D is None):
n = X.shape[0]
d = X.shape[1]
if d <= 5:
I, J, D = knnsearch(X, k)
else:
I, J, D = knnsearch_annoy(X, k)
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = X.shape[0]
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
W = weight_matrix(I, J, D, k, f=lambda x: np.ones_like(x), symmetrize=False)
L = graph_laplacian(W)
# Estimates of normal vectors
nu = -L * X
nu = np.transpose(nu)
norms = np.sqrt(np.sum(nu * nu, axis=0))
nu = nu / norms
nu = np.transpose(nu)
# Boundary test
NN = X[J]
NN = np.swapaxes(NN[:, 1:, :], 0, 1) # This is kxnxd
V = NN - X # This is x^i-x^0 kxnxd array
xd = np.sum(V * nu, axis=2) # xd coordinate (kxn)
sqdist = np.sum(V * V, axis=2)
Y = np.max((xd * xd - sqdist) / (2 * R) - xd, axis=0)
if ReturnNormals:
return Y, nu
else:
return Y
# Construct k-nn sparse distance matrix
# Note: Matrix is not symmetric
def knn_weight_matrix(X, k, f=exp_weight):
I, J, D = knnsearch_annoy(X, k)
W = weight_matrix(I, J, D, k, f=f)
return W
# Solves Lx=f subject to Rx=g at ind points
def gmres_bc_solve(L, f, R, g, ind):
# Mix matrices based on boundary points
A = L.copy()
A = A.tolil()
A[ind, :] = R[ind, :]
A = A.tocsr()
# Right hand side
b = f.copy()
b[ind] = g[ind]
# Preconditioner
m = A.shape[0]
M = A.diagonal()
M = sparse.spdiags(1 / M, 0, m, m).tocsr()
# GMRES solver
# start_time = time.time()
u, info = sparse.linalg.gmres(A, b, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
# print('gmres_err = %f'%np.max(np.absolute(A*u-b)))
return u
# Poisson solve
# Solves Lu = f with preconditioned conjugate gradient
def pcg_solve(L, f, x0=None, tol=1e-10):
# start_time = time.time()
L = L.tocsr()
# Conjugate gradient with Jacobi preconditioner
m = L.shape[0]
M = L.diagonal()
M = sparse.spdiags(1 / M, 0, m, m).tocsr()
if x0 is None:
u, i = splinalg.cg(L, f, tol=tol, M=M)
else:
u, i = splinalg.cg(L, f, x0=x0, tol=tol, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
return u
# Finds k Dirichlet eigenvectors
# Solves Lu = lambda u subject to u(I)=0
def dirichlet_eigenvectors(L, I, k):
L = L.tocsr()
n = L.shape[0]
# Locations of labels
idx = np.full((n,), True, dtype=bool)
idx[I] = False
# Left hand side matrix
A = L[idx, :]
A = A[:, idx]
# Eigenvector solver
vals, vec = sparse.linalg.eigs(A, k=k, which='SM')
vec = vec.real
vals = vals.real
# Add labels back into array
u = np.zeros((n, k))
u[idx, :] = vec
if k == 1:
u = u.flatten()
return u, vals
# Constrained linear solve
# Solves Lu = f subject to u(I)=g
def constrained_solve(L, I, g, f=None, x0=None, tol=1e-10):
L = L.tocsr()
n = L.shape[0]
# Locations of labels
idx = np.full((n,), True, dtype=bool)
idx[I] = False
# Right hand side
b = -L[:, I] * g
b = b[idx]
if f is not None:
b = b + f[idx]
# Left hand side matrix
A = L[idx, :]
A = A[:, idx]
# start_time = time.time()
# Conjugate gradient with Jacobi preconditioner
m = A.shape[0]
M = A.diagonal()
M = sparse.spdiags(1 / (M + 1e-10), 0, m, m).tocsr()
if x0 is None:
v, i = splinalg.cg(A, b, tol=tol, M=M)
else:
v, i = splinalg.cg(A, b, x0=x0[idx], tol=tol, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
# Add labels back into array
u = np.ones((n,))
u[idx] = v
u[I] = g
return u
# Returns n random points in R^d
def rand(n, d):
return random.rand(n, d)
# Returns n random points in annulus (r1,r2)
def rand_annulus(n, d, r1, r2):
N = 0
X = np.zeros((1, d))
while X.shape[0] <= n:
Y = r2 * (2 * rand(n, d) - 1)
dist2 = np.sum(Y * Y, axis=1)
I = (dist2 < r2 * r2) & (dist2 > r1 * r1)
Y = Y[I, :]
X = np.vstack((X, Y))
X = X[1:(n + 1)]
return X
# Returns n random points in unit ball in R^d
def rand_ball(n, d):
N = 0
X = np.zeros((1, d))
while X.shape[0] <= n:
Y = 2 * rand(n, d) - 1
I = np.sum(Y * Y, axis=1) < 1
Y = Y[I, :]
X = np.vstack((X, Y))
X = X[1:(n + 1)]
return X
def randn(n, d):
X = np.zeros((n, d))
for i in range(d):
X[:, i] = np.random.normal(0, 1, n)
return X
def bean_data(n, h):
# n = number of points
# h = height of bridge (h=0.2)
a = -1
b = 1
    x = a + (b - a) * random.rand(3 * n)
    c = -0.6
    d = 0.6
    y = c + (d - c) * random.rand(3 * n)
X = np.transpose(np.vstack((x, y)))
dist_from_x_axis = 0.4 * np.sqrt(1 - x ** 2) * (1 + h - np.cos(3 * x))
in_bean = abs(y) <= dist_from_x_axis
X = X[in_bean, :]
if X.shape[0] < n:
        print('Not enough samples')
else:
X = X[:n, :]
return X
def mesh(X):
    T = spatial.Delaunay(X[:, :2])
return T.simplices
def box_mesh(X, u=None):
n = X.shape[0]
d = X.shape[1]
if d > 2:
X = X[:, 0:2]
x1 = X[:, 0].min()
x2 = X[:, 0].max()
y1 = X[:, 1].min()
y2 = X[:, 1].max()
corners = np.array([[x1, y1], [x2, y2], [x1, y2], [x2, y1]])
X = np.append(X, corners, axis=0)
Tri = mesh(X)
if u is not None:
u = np.append(u, [0, 0, 0, 0])
for i in range(n, n + 4):
I = (Tri[:, 0] == i) | (Tri[:, 1] == i) | (Tri[:, 2] == i)
nn_tri = Tri[I, :].flatten()
nn_tri = np.unique(nn_tri[nn_tri < n])
u[i] = np.mean(u[nn_tri])
# u[i] = np.max(u[nn_tri])
return X, Tri, u
else:
return X, Tri
# Triangulation of domain
def improved_mesh(X):
n = X.shape[0]
d = X.shape[1]
if d > 2:
X = X[:, 0:2]
# Normalize data to unit box
x1 = X[:, 0].min()
x2 = X[:, 0].max()
y1 = X[:, 1].min()
y2 = X[:, 1].max()
X = X - [x1, y1]
X[:, 0] = X[:, 0] / (x2 - x1)
X[:, 1] = X[:, 1] / (y2 - y1)
# Add padding data around
pad = 10 / np.sqrt(n)
m = int(pad * n)
Y = rand(m, 2)
Y[:, 0] = Y[:, 0] * pad - pad
Z = np.vstack((X, Y))
Y = rand(m, 2)
Y[:, 0] = Y[:, 0] * pad + 1
Z = np.vstack((Z, Y))
Y = rand(m, 2)
Y[:, 1] = Y[:, 1] * pad - pad
Z = np.vstack((Z, Y))
Y = rand(m, 2)
Y[:, 1] = Y[:, 1] * pad + 1
Z = np.vstack((Z, Y))
# Delaunay triangulation
    T = spatial.Delaunay(Z)
    Tri = T.simplices
    J = np.sum(Tri >= n, axis=1) == 0
Tri = Tri[J, :]
return Tri
def plot(X, u):
Tri = mesh(X)
import mayavi.mlab as mlab
mlab.triangular_mesh(X[:, 0], X[:, 1], u, Tri)
mlab.view(azimuth=-45, elevation=60)
# Laplace learning
# Zhu, Xiaojin, <NAME>, and <NAME>. "Semi-supervised learning using gaussian fields and harmonic functions." Proceedings of the 20th International conference on Machine learning (ICML-03). 2003.
def laplace_solve(W, I, g, norm="none"):
L = graph_laplacian(W, norm=norm)
return constrained_solve(L, I, g)
# Shift trick
# W = Weight matrix
# I = indices of labels
# g = +1/-1 values of labels
def shift_solve(W, I, g):
# Laplace learning
u = laplace_solve(W, I, g)
# Shift solution
s = degrees(W)
c = np.sum(s[I] * g) / sum(s[I])
u = u - c
u = u - np.mean(u)
return u
# Shift trick by mean
# W = Weight matrix
# I = indices of labels
# g = +1/-1 values of labels
def meanshift_solve(W, I, g):
# Laplace learning
u = laplace_solve(W, I, g)
# Center solution
u = u - np.mean(u)
return u
# Reweights the weight matrix for WNLL
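# Edges touching labeled vertices are up-weighted: W <- D*W + W*D with
# D = diag(a), where a_i = n/m for the m labeled vertices and a_i = 1 otherwise.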
def wnll(W, I):
n = W.shape[0]
m = len(I)
a = np.ones((n,))
a[I] = n / m
D = sparse.spdiags(a, 0, n, n).tocsr()
W = D * W + W * D
return W
# Weighted nonlocal Laplacian
# Shi, Zuoqiang, <NAME>, and <NAME>. "Weighted nonlocal laplacian on interpolation from sparse data." Journal of Scientific Computing 73.2-3 (2017): 1164-1177.
def wnll_solve(W, I, g):
n = W.shape[0]
W = wnll(W, I)
L = graph_laplacian(W, norm="none")
return constrained_solve(L, I, g)
# Properly weighted Laplacian
# Calder, Jeff, and <NAME>. "Properly-weighted graph Laplacian for semi-supervised learning." arXiv preprint arXiv:1810.04351 (2018).
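# Vertices are re-weighted by gamma(x) = 1 + (r / dist(x, nearest label))^alpha,
# with the distance floored at r/(zeta - 1)^(1/alpha) so gamma never exceeds zeta.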
def properlyweighted_solve(W, I, g, X, alpha, zeta, r):
n = W.shape[0]
rzeta = r / (zeta - 1) ** (1 / alpha)
Xtree = spatial.cKDTree(X[I, :])
D, J = Xtree.query(X)
D[D < rzeta] = rzeta
gamma = 1 + (r / D) ** alpha
D = sparse.spdiags(gamma, 0, n, n).tocsr()
L = graph_laplacian(D * W + W * D, norm="none")
return constrained_solve(L, I, g)
# Game theoretic p-Laplace learning
# Rios, <NAME>, <NAME>, and <NAME>. "Algorithms for $\ell_p$-based semi-supervised learning on graphs." arXiv preprint arXiv:1901.05031 (2019).
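# The game-theoretic p-Laplacian mixes a degree-normalized 2-Laplacian term
# (weight alpha = 1/p) with a graph infinity-Laplacian term (weight delta = 1 - 2/p);
# for p = inf only the infinity-Laplacian term remains.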
def plaplace_solve(W, I, g, p, sol_method="SemiImplicit", norm="none"):
# start_time = time.time()
n = W.shape[0]
W = W / W.max()
if p == float("inf"):
alpha = 0
delta = 1
else:
alpha = 1 / p
delta = 1 - 2 / p
dx = degrees(W)
theta = 1.2 * (2 * alpha + np.max(dx) * delta)
if p == float("inf"):
beta = 1
gamma = 1 / theta
else:
beta = (theta * p - 2) / (theta * p)
gamma = (p - 2) / (theta * p - 2)
if norm == "normalized":
deg = dx[I] ** (1 / 2)
g = g / deg
L = graph_laplacian(W)
u = constrained_solve(L, I, g)
uu = np.max(g) * np.ones((n,))
ul = np.min(g) * np.ones((n,))
WI, WJ, WV = sparse.find(W)
# Set labels
u[I] = g
uu[I] = g
ul[I] = g
# Time step for gradient descent
dt = 0.9 / (alpha + 2 * delta)
if sol_method == "GradientDescentCcode":
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
# Type casting and memory blocking
uu = np.ascontiguousarray(uu, dtype=np.float64)
ul = np.ascontiguousarray(ul, dtype=np.float64)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.float64)
cgp.lp_iterate(uu, ul, WI, WJ, WV, I, g, p, 1e6, 1e-1, 0.0)
u = (uu + ul) / 2
# Check residual
L2uu = -L * uu
LIuu = graph_infinity_laplacian(W, uu, I=WI, J=WJ, V=WV)
resu = alpha * L2uu / dx + delta * LIuu
resu[I] = 0
L2ul = -L * ul
LIul = graph_infinity_laplacian(W, ul, I=WI, J=WJ, V=WV)
resl = alpha * L2ul / dx + delta * LIul
resl[I] = 0
# print('Upper residual = %f' % np.max(np.absolute(resu)))
# print('Lower residual = %f' % np.max(np.absolute(resl)))
else:
err = 1e6
i = 0
while err > 1e-1:
i += 1
# Graph laplacians
L2u = -L * u
LIu = graph_infinity_laplacian(W, u, I=WI, J=WJ, V=WV)
# Residual error
res = alpha * L2u / dx + delta * LIu
res[I] = 0
# err = np.max(np.absolute(res))
# print("Residual error = "+str(err))
# Update
if sol_method == "GradientDescent":
L2uu = -L * uu
LIuu = graph_infinity_laplacian(W, uu, I=WI, J=WJ, V=WV)
res = alpha * L2uu / dx + delta * LIuu
res[I] = 0
uu = uu + dt * res
err = np.max(np.absolute(res))
# print("Upper residual = "+str(err))
L2ul = -L * ul
LIul = graph_infinity_laplacian(W, ul, I=WI, J=WJ, V=WV)
res = alpha * L2ul / dx + delta * LIul
res[I] = 0
ul = ul + dt * res
err = np.max(np.absolute(res))
# print("Lower residual = "+str(err))
err1 = np.max(uu - ul)
err2 = np.min(uu - ul)
# print("Residual error = "+str(err1)+","+str(err2))
err = err1
u = (uu + ul) / 2
elif sol_method == "SemiImplicit":
rhs = beta * (2 * gamma * dx * LIu - L2u)
u = constrained_solve(L, I, g, f=rhs, x0=u, tol=err / 100)
else:
print("Invalid p-Laplace solution method.")
sys.exit()
if norm == "normalized":
deg = dx ** (1 / 2)
u = u * deg
# print("--- %s seconds ---" % (time.time() - start_time))
return u
# Gradient of function on graph
# W = sparse weight matrix
# u = function on graph
def graph_gradient(W, u, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
G = sparse.coo_matrix((V * (u[J] - u[I]), (I, J)), shape=(n, n)).tocsr()
return G
# Divergence of vector field F (F should be skew-symmetric)
# F = sparse matrix representing vector field
def graph_divergence(F, W):
F = F.multiply(W)
return 2 * np.squeeze(np.array(np.sum(F, axis=1)))
# Random-walk SSL
# Zhou, Dengyong, et al. "Learning with local and global consistency." Advances in neural information processing systems. 2004.
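# Solves (I - (1 - epsilon) * D^{-1/2} W D^{-1/2}) u = b, where b carries the
# label values g at the labeled indices and is zero elsewhere.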
def randomwalk_solve(W, I, g, epsilon):
n = W.shape[0]
# Zero diagonals
W = W - sparse.spdiags(W.diagonal(), 0, n, n)
# Construct Laplacian matrix
Dinv2 = degree_matrix(W, p=-1 / 2)
L = sparse.identity(n) - (1 - epsilon) * Dinv2 * W * Dinv2;
# Format right hand side
b = np.zeros((n,))
b[I] = g
return pcg_solve(L, b)
# Computes accuracy of labeling
# m = number of labeled points used
def accuracy(L, L_true, m):
# Remove unlabeled nodes
I = L_true >= 0
L = L[I]
L_true = L_true[I]
# Compute accuracy
return 100 * np.maximum(np.sum(L == L_true) - m, 0) / (len(L) - m)
# Projects all columns of (kxn) matrix X onto k-simplex
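# Uses the standard sort-and-threshold projection onto the probability simplex
# {x : x >= 0, sum_i x_i = 1}, applied independently to each column of X.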
def ProjectToSimplex(X):
n = X.shape[1]
k = X.shape[0]
Xs = -np.sort(-X, axis=0) # Sort descending
A = np.tril(np.ones((k, k)))
Sum = A @ Xs
Max = np.transpose((np.transpose(Sum) - 1) / (np.arange(k) + 1))
Xs[:-1, :] = Xs[1:, :]
Xs[-1, :] = (Sum[k - 1, :] - 1) / k
I = np.argmax(Max >= Xs, axis=0)
X = np.maximum(X - Max[I, range(n)], 0)
return X
# Takes list of labels and converts to vertices of simplex format
def LabelsToVec(L):
n = L.shape[0]
labels = np.unique(L)
k = len(labels)
for i in range(k):
L[L == labels[i]] = i
L = L.astype(int)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X, labels
# Projects all rows of (nxk) matrix X to closest vertex of the simplex
# Assume X already lives in the simplex, e.g., is the output of ProjectToSimplex
def ClosestVertex(X):
n = X.shape[1]
k = X.shape[0]
L = np.argmax(X, axis=0)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X
# Threshold with temperature to closest vertex
def ClosestVertexTemp(X, T=0.01):
n = X.shape[1]
k = X.shape[0]
beta = 1 / T
Y = np.exp(beta * X)
Ysum = np.sum(Y, axis=0)
Y = Y / Ysum
X[0, :] = Y[0, :]
for i in range(1, k):
X[i, :] = X[i - 1, :] + Y[i, :]
R = random.rand(n, 1)
L = np.sum(R.flatten() > X, axis=0)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X
# Volume MBO, initialized with Poisson
def poisson_volumeMBO(W, I, g, dataset, beta, T, volume_mult):
# Set diagonal entries to zero
W = diag_multiply(W, 0)
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
# Solve Poisson problem and compute labels
u, _ = poisson(W, I, g)
max_locations = np.argmax(u, axis=0)
u = (np.unique(g))[max_locations]
n = W.shape[0]
k = len(np.unique(g))
WI, WJ, WV = sparse.find(W)
# Class counts
ClassCounts = (n * beta).astype(int)
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
ClassCounts = np.ascontiguousarray(ClassCounts, dtype=np.int32)
cgp.volume_mbo(u, WI, WJ, WV, I, g, ClassCounts, k, 0.0, T, volume_mult)
# Set given labels and convert to vector format
u[I] = g
u, _ = LabelsToVec(u)
return u
# Volume MBO (Jacobs, et al.)
def volumeMBO(W, I, g, dataset, beta, T, volume_mult):
# Set diagonal entries to zero
W = diag_multiply(W, 0)
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
n = W.shape[0]
k = len(np.unique(g))
u = np.zeros((n,))
WI, WJ, WV = sparse.find(W)
# Class counts
ClassCounts = (n * beta).astype(int)
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
ClassCounts = np.ascontiguousarray(ClassCounts, dtype=np.int32)
cgp.volume_mbo(u, WI, WJ, WV, I, g, ClassCounts, k, 1.0, T, volume_mult)
# Set given labels and convert to vector format
u[I] = g
u, _ = LabelsToVec(u)
return u
# Multiclass MBO
# Garcia-Cardona, Cristina, et al. "Multiclass data segmentation using diffuse interface methods on graphs." IEEE transactions on pattern analysis and machine intelligence 36.8 (2014): 1600-1613.
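# MBO scheme: a few semi-implicit diffusion steps computed in the truncated
# Laplacian eigenbasis, with a fidelity term mu*(u - Kg) on labeled points,
# alternating with projection to the simplex and thresholding to the closest vertex.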
def multiclassMBO(W, I, g, eigvals, eigvecs, dataset, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
Ns = 6
if dataset == 'MNIST' or dataset == 'FashionMNIST' or dataset == 'cifar':
dt = 0.15
mu = 50
elif dataset == 'WEBKB':
dt = 1
mu = 4
else:
print('Dataset not supported by MBO...')
sys.exit(2)
# Load eigenvalues and eigenvectors
X = eigvecs
num_eig = len(eigvals)
# Form matrices
V = np.diag(1 / (1 + (dt / Ns) * eigvals))
Y = X @ V
Xt = np.transpose(X)
# Random initial labeling
u = random.rand(k, n)
u = ProjectToSimplex(u)
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
u = Kg + (1 - J) * u
# Maximum number of iterations
T = 10
for i in range(T):
for s in range(Ns):
Z = (u - (dt / Ns) * mu * J * (u - Kg)) @ Y
u = Z @ Xt
# Projection step
u = ProjectToSimplex(u)
u = ClosestVertex(u)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Poisson MBO
def poissonMBO(W, I, g, dataset, beta, true_labels=None, temp=0, use_cuda=False, Ns=40, mu=1, T=50):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
num_labels = np.zeros((k,))
for i in range(k):
num_labels[i] = np.sum(g == unique_labels[i])
W = diag_multiply(W, 0)
if dataset == 'WEBKB':
mu = 1000
Ns = 8
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = np.transpose(b)
L = graph_laplacian(W, norm='none')
# Initialize u via Poisson learning
# u = np.zeros((k,n))
# for j in range(k):
# u[j,:] = pcg_solve(L,b[j,:])
# u = mu*u
# u = np.transpose(np.transpose(u) - np.mean(u,axis=1))
u, mix_time = poisson(W, I, g, use_cuda=use_cuda, beta=beta)
# Ns = int(mix_time/4)
u = ProjectToSimplex(u)
u = ClosestVertex(u)
# Time step for stability
dt = 1 / np.max(degrees(W))
P = sparse.identity(n) - dt * L
Db = mu * dt * b
if use_cuda:
Pt = torch_sparse(P).cuda()
Dbt = torch.from_numpy(np.transpose(Db)).float().cuda()
for i in range(T):
if use_cuda:
# Put on GPU and run heat equation
ut = torch.from_numpy(np.transpose(u)).float().cuda()
for s in range(Ns):
# u = u*P + Db
ut = torch.sparse.addmm(Dbt, Pt, ut)
# Put back on CPU
u = np.transpose(ut.cpu().numpy())
else: # Use CPU
for s in range(Ns):
# u = u + dt*(mu*b - u*L)
u = u * P + Db
# Projection step
# u = np.diag(beta/num_labels)@u
u = ProjectToSimplex(u)
u = ClosestVertex(u)
u = np.transpose(np.transpose(u) - np.mean(u, axis=1) + beta)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
def torch_sparse(A):
A = A.tocoo()
values = A.data
indices = np.vstack((A.row, A.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = A.shape
return torch.sparse.FloatTensor(i, v, torch.Size(shape))
# Sparse Label Propagation
def SparseLabelPropagation(W, I, g, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
WI, WJ, WV = sparse.find(W)
B = sparse.coo_matrix((np.ones(len(WV), ), (WI, WJ)), shape=(n, n)).tocsr() # Ones in all entries
# Construct matrix 1/2W and 1/deg
lam = 2 * W - (1 - 1e-10) * B
lam = -lam.log1p()
lam = lam.expm1() + B
Id = sparse.identity(n)
gamma = degree_matrix(W + 1e-10 * Id, p=-1)
# Random initial labeling
# u = random.rand(k,n)
u = np.zeros((k, n))
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Initialization
Y = list()
for j in range(k):
Gu = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
Y.append(Gu)
# Main loop for sparse label propagation
T = 100
for i in range(T):
u_prev = np.copy(u)
# Compute div
for j in range(k):
div = graph_divergence(Y[j], W)
u[j, :] = u_prev[j, :] - gamma * div
u[j, I] = Kg[j, I] # Set labels
u_tilde = 2 * u[j, :] - u_prev[j, :]
Gu = -graph_gradient(W, u_tilde, I=WI, J=WJ, V=WV)
Y[j] = Y[j] + Gu.multiply(lam)
ind1 = B.multiply(abs(Y[j]) > 1)
ind2 = B - ind1
Y[j] = ind1.multiply(Y[j].sign()) + ind2.multiply(Y[j])
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Dynamic Label Propagation
def DynamicLabelPropagation(W, I, g, alpha=0.05, lam=0.1, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
u, _ = LabelsToVec(K)
u = u * J
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = np.transpose(Kg * J)
u = np.copy(Kg)
if n > 5000:
print("Cannot use Dynamic Label Propagation on large datasets.")
else:
# Setup matrices
Id = sparse.identity(n)
D = degree_matrix(W, p=-1)
P = D * W
P = np.array(P.todense())
Pt = np.copy(P)
T = 2
for i in range(T):
v = P @ u
u = Pt @ u
u[I, :] = Kg[I, :]
Pt = P @ Pt @ np.transpose(P) + alpha * v @ np.transpose(v) + lam * Id
# Compute accuracy if all labels are provided
if true_labels is not None:
u = np.array(u)
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('i:%d' % i + ',Accuracy = %.2f' % acc)
u = np.transpose(np.array(u))
return u
# Centered and Iterated Centered Kernel of Mai/Couillet 2018
def CenteredKernel(W, I, g, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = np.transpose(Kg * J)
# Center labels
c = np.sum(Kg, axis=0) / len(I)
Kg[I, :] = Kg[I, :] - c
u = np.copy(Kg)
v = np.ones((n, 1))
vt = np.ones((1, n))
e = np.random.rand(n, 1)
for i in range(100):
y = W * (e - (1 / n) * v @ (vt @ e))
w = y - (1 / n) * v @ (vt @ y) # =Ae
l = abs(np.transpose(e) @ w / (np.transpose(e) @ e))
e = w / np.linalg.norm(w)
# Number of iterations
# alpha = 5*l/4
alpha = 105 * l / 100
T = 1000
err = 1
while err > 1e-10:
y = W * (u - (1 / n) * v @ (vt @ u))
w = (1 / alpha) * (y - (1 / n) * v @ (vt @ y)) - u # Laplacian
w[I, :] = 0
err = np.max(np.absolute(w))
u = u + w
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return np.transpose(u)
def vec_acc(u, I, g, true_labels):
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
return acc
# def volume_label_projection(u,beta,s=None):
#
# k = u.shape[0]
# n = u.shape[1]
# if s is None:
# s = np.ones((k,))
# for i in range(100):
# grad = beta - np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n
# err0 = np.max(np.absolute(grad))
#
# dt = 1
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# err = err0
# newerr = np.max(np.absolute(gradnew))
# while newerr < err:
# print(dt)
# dt = 2*dt
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# err = newerr
# newerr = np.max(np.absolute(gradnew))
# dt = dt/2
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# newerr = np.max(np.absolute(gradnew))
# while newerr >= err:
# print(dt)
# dt = dt/2
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# newerr = np.max(np.absolute(gradnew))
# if dt < 1:
# dt = dt/2
#
# s = s + dt*grad
#
# print(err)
# if err == 0:
# print(i)
# break
#
# #s = s + dt*(beta - beta_u)
#
# return ClosestVertex(np.diag(s)@u),s
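# Adjusts per-class scaling factors s by projected gradient ascent so that the
# thresholded labeling ClosestVertex(diag(s) @ u) has class proportions close to beta.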
def volume_label_projection(u, beta, s=None, dt=None):
k = u.shape[0]
n = u.shape[1]
if s is None:
s = np.ones((k,))
if dt is None:
dt = 10
# print(np.around(100*beta,decimals=1))
# print(np.around(100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=1))
for i in range(100):
class_size = np.sum(ClosestVertex(np.diag(s) @ u), axis=1) / n
grad = beta - class_size
# print(np.around(100*class_size,decimals=1))
# err = np.max(np.absolute(grad))
# if err == 0:
# break
s = np.clip(s + dt * grad, 0.5, 2)
# print(np.around(100*beta,decimals=1))
# print(np.around(100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=1))
# print(np.around(100*beta - 100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=4))
return ClosestVertex(np.diag(s) @ u), s
# Poisson MBO with volume constraints
def poissonMBO_volume(W, I, g, dataset, beta, true_labels=None, temp=0, use_cuda=False, Ns=40, mu=1, T=20):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
if dataset == 'WEBKB':
mu = 1000
Ns = 8
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = np.transpose(b)
D = degree_matrix(W)
# L = graph_laplacian(W,norm='none')
L = D - W.transpose()
# Initialize u via Poisson learning
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda, beta=beta)
u = mu * u
# Time step for stability
dt = 1 / np.max(degrees(W))
P = sparse.identity(n) - dt * L
Db = mu * dt * b
if use_cuda:
Pt = torch_sparse(P).cuda()
Dbt = torch.from_numpy(np.transpose(Db)).float().cuda()
for i in range(T):
# Heat equation step
if use_cuda:
# Put on GPU and run heat equation
ut = torch.from_numpy(np.transpose(u)).float().cuda()
for j in range(Ns):
ut = torch.sparse.addmm(Dbt, Pt, ut)
# Put back on CPU
u = np.transpose(ut.cpu().numpy())
else: # Use CPU
for j in range(Ns):
u = u * P + Db
# Projection step
u, s = volume_label_projection(u, beta)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Poisson Volume
def PoissonVolume(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50):
# Run Poisson learning
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda, training_balance=training_balance, beta=beta)
# Volume constraints
_, s = volume_label_projection(u, beta)
return np.diag(s) @ u
def original_poisson(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
# Zero out diagonal for faster convergence
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
# Setup matrices
D = degree_matrix(W + 1e-10 * sparse.identity(n), p=-1)
# L = graph_laplacian(W,norm='none')
# P = sparse.identity(n) - D*L #Line below is equivalent when W symmetric
P = D * W.transpose()
Db = D * b
v = np.max(Kg, axis=0)
v = v / np.sum(v)
vinf = degrees(W) / np.sum(degrees(W))
RW = W.transpose() * D
u = np.zeros((n, k))
# vals, vec = sparse.linalg.eigs(RW,k=1,which='LM')
# vinf = np.absolute(vec.flatten())
# vinf = vinf/np.sum(vinf)
# Number of iterations
T = 0
if use_cuda:
Pt = torch_sparse(P).cuda()
ut = torch.from_numpy(u).float().cuda()
Dbt = torch.from_numpy(Db).float().cuda()
# start_time = time.time()
while (T < min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (T < 1000):
ut = torch.sparse.addmm(Dbt, Pt, ut)
v = RW * v
T = T + 1
# print("--- %s seconds ---" % (time.time() - start_time))
# Transfer to CPU and convert to numpy
u = ut.cpu().numpy()
else: # Use CPU
# start_time = time.time()
while (T < min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (T < 1000):
uold = u.copy()
u = Db + P * u
v = RW * v
T = T + 1
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('%d,Accuracy = %.2f' % (T, acc))
# print("--- %s seconds ---" % (time.time() - start_time))
# Balancing for training data/class size discrepancy
if training_balance:
if beta is None:
u = u @ np.diag(1 / c)
else:
u = u @ np.diag(beta / c)
return np.transpose(u), T
# Poisson learning
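# Labeled rows of the source term b are set to +1 for the true class and -1 for
# the others; u is built up by the fixed-point iteration u <- b + W^T u (clipped
# to [-1, 1] on the CPU path), stopped once the auxiliary random-walk vector
# v <- W^T v stops changing (or after 1000 iterations).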
def poisson(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50, error=None):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
if error is None:
error = np.ones(n, dtype=np.float32)
else:
error = error.reshape((n,)) / np.max(error)
# Zero out diagonal for faster convergence
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = 2 * b[I, :] - 1
# Setup matrices
# D = degree_matrix(W + 1e-10 * sparse.identity(n), p=-1)
# L = graph_laplacian(W,norm='none')
# P = sparse.identity(n) - D*L #Line below is equivalent when W symmetric
v_prev = np.random.random(size=(n, 1))
residue_energy = 1
u = np.zeros((n, k))
confidence_gain = W.transpose() #* sparse.spdiags(np.power(1 + error, -1), 0, n, n)
# vals, vec = sparse.linalg.eigs(RW,k=1,which='LM')
# vinf = np.absolute(vec.flatten())
# vinf = vinf/np.sum(vinf)
# Number of iterations
T = 0
if use_cuda:
Wt = torch_sparse(confidence_gain).cuda()
ut = torch.from_numpy(u).float().cuda()
bt = torch.from_numpy(b).float().cuda()
# start_time = time.time()
while (T < min_iter or residue_energy > 1e-10) and (T < 1000):
ut = torch.sparse.addmm(bt, Wt, ut)
v = W.transpose() * v_prev
residue_energy = np.linalg.norm(v - v_prev)
v_prev = v
T = T + 1
# print("--- %s seconds ---" % (time.time() - start_time))
# Transfer to CPU and convert to numpy
u = ut.cpu().numpy()
else: # Use CPU
# start_time = time.time()
while (T < min_iter or residue_energy > 1e-6) and (T < 1000):
u = np.clip(b + confidence_gain * u, a_min=-1, a_max=1)
v = W.transpose() * v_prev
residue_energy = np.linalg.norm(v - v_prev)
v_prev = v
T = T + 1
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('%d,Accuracy = %.2f' % (T, acc))
# print("--- %s seconds ---" % (time.time() - start_time))
print(f"T: {T}, residue: {residue_energy}")
# Balancing for training data/class size discrepancy
if training_balance:
if beta is None:
u = u @ np.diag(1 / c)
else:
u = u @ np.diag(beta / c)
return np.transpose(u), T
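# Illustrative usage sketch (comments only, not executed): driving poisson() directly.
# Assumes W (sparse weight matrix), I (labeled indices) and g (their labels) were built
# with the utilities earlier in this file; variable names are placeholders.
#
#   u, num_iter = poisson(W, I, g, training_balance=True)    # u has shape (k, n)
#   pred = np.unique(g)[np.argmax(u, axis=0)]                 # decode a class per vertex
#   pred[I] = g                                               # keep the given labels fixed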
# Poisson L1 based on Split Bregman Method
# Does not work as well as PoissonMBO
def poissonL1(W, I, g, dataset, norm="none", lam=100, mu=1000, Nouter=30, Ninner=6, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
# mu = mu*W.count_nonzero()/len(g) #Normalize constants
gamma = 1 / lam
WI, WJ, WV = sparse.find(W)
B = sparse.coo_matrix((np.ones(len(WV), ), (WI, WJ)), shape=(n, n)).tocsr() # Ones in all entries
L = graph_laplacian(2 * W.multiply(W), norm=norm)
deg = degrees(W)
dt = 1 / np.max(deg)
# Random initial labeling
# u = random.rand(k,n)
# u = ProjectToSimplex(u)
u = np.zeros((k, n))
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson parameters
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = (mu / lam) * np.transpose(b)
# Initialize u via Poisson learning
u = np.zeros((k, n))
L = graph_laplacian(W, norm='none')
for j in range(k):
u[j, :] = pcg_solve(L, b[j, :])
u = np.transpose(np.transpose(u) - np.mean(u, axis=1))
# Initialization
V = list()
R = list()
gradu = list()
for j in range(k):
Gu = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
gradu.append(Gu)
V.append(Gu)
R.append(Gu)
# Main loop for Split Bregman iteration
for i in range(Nouter):
print('Outer:%d' % i)
for s in range(Ninner):
normV = 0 * W
for j in range(k):
divVR = graph_divergence(R[j] - V[j], W)
u[j, :] = pcg_solve(L, b[j, :] + divVR, x0=u[j, :], tol=1e-10)
# for s in range(100):
# u[j,:] = u[j,:] + dt*(b[j,:] + divVR - u[j,:]*L)
gradu[j] = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
V[j] = gradu[j] + R[j]
normV = normV + V[j].multiply(V[j])
normV = normV.sqrt()
# Shrinkage operation
# normV^{-1} for nonzero entries (tricky to do in sparse format)
# normV.eliminate_zeros(X)
normVinv = normV - (1 - 1e-10) * B
normVinv = -normVinv.log1p()
normVinv = normVinv.expm1() + B
C = normV.multiply(normVinv)
# print(np.sum(C>0))
# print(np.sum(C>0.9999))
# Compute shrinkage factor
# print(np.sum(normV>0))
shrink = normV - gamma * B
shrink = shrink.maximum(0)
# print(np.sum(shrink>0))
shrink = shrink.multiply(normVinv)
# Apply shrinkage
for j in range(k):
V[j] = V[j].multiply(shrink)
for j in range(k):
R[j] = R[j] + gradu[j] - V[j]
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Heap functions
# d = values in heap (indexed by graph vertex)
# h = heap (contains indices of graph elements in heap)
# p = pointers from graph back to heap (are updated with heap operations)
# s = number of elements in heap
# Sift up
# i = heap index of element to be sifted up
def SiftUp(d, h, s, p, i):
pi = int(i / 2) # Parent index in heap
while pi != 0:
if d[h[pi]] > d[h[i]]: # If parent larger, then swap
# Swap in heap
tmp = h[pi]
h[pi] = h[i]
h[i] = tmp
# Update pointers to heap
p[h[i]] = i
p[h[pi]] = pi
# Update parent/child indices
i = pi
pi = int(i / 2)
else:
pi = 0
# Sift down
# i = heap index of element to be sifted down
def SiftDown(d, h, s, p, i):
ci = 2 * i # child index in heap
while ci <= s:
        if ci + 1 <= s and d[h[ci + 1]] < d[h[ci]]:  # Choose smallest child (check bounds before indexing)
ci = ci + 1
if d[h[ci]] < d[h[i]]: # If child smaller, then swap
# Swap in heap
tmp = h[ci]
h[ci] = h[i]
h[i] = tmp
# Update pointers to heap
p[h[i]] = i
p[h[ci]] = ci
# Update parent/child indices
i = ci
ci = 2 * i
else:
ci = s + 1
# Pop smallest off of heap
# Returns index of smallest and size of new heap
def PopHeap(d, h, s, p):
# Index of smallest in heap
i = h[1]
# Put last element on top of heap
h[1] = h[s]
# Update pointer
p[h[1]] = 1
# Sift down the heap
SiftDown(d, h, s - 1, p, 1)
return i, s - 1
# Push element onto heap
# i = Graph index to add to heap
def PushHeap(d, h, s, p, i):
h[s + 1] = i # add to heap at end
p[i] = s + 1 # Update pointer to heap
SiftUp(d, h, s + 1, p, s + 1)
return s + 1
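# Small worked example of the heap convention described above (comments only, not executed).
# The heap h is 1-indexed (slot 0 is unused), d holds values indexed by graph vertex,
# p maps each vertex back to its slot in h, and s is the current heap size.
#
#   d = np.array([0.7, 0.2, 0.5])        # values for vertices 0, 1, 2
#   h = -np.ones(4, dtype=int)           # heap storage of length n + 1
#   p = -np.ones(3, dtype=int)           # vertex -> heap slot
#   s = 0
#   for v in range(3):
#       s = PushHeap(d, h, s, p, v)
#   i, s = PopHeap(d, h, s, p)           # i == 1, the vertex with the smallest value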
def stencil_solver(ui, u, w=None):
if w is None:
w = np.ones((len(u),))
m = len(u)
# Sort neighbors
I = np.argsort(u)
u = u[I]
w = w[I]
f = np.zeros((m + 1,))
for i in range(m):
f[i] = np.sum(np.maximum(u[i] - u, 0) ** 2)
f[m] = np.maximum(1, f[m - 1])
k = np.argmin(f < 1)
b = np.sum(u[:k])
c = np.sum(u[:k] ** 2)
t = (b + np.sqrt(b * b - k * c + k)) / k
check = np.sum(np.maximum(t - u, 0) ** 2)
if (abs(check - 1) > 1e-5):
print("Error")
return t
# return np.min(u) + 1
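# Derivation behind stencil_solver, for reference: with the k smallest neighbor values
# u_0 <= ... <= u_{k-1}, it solves sum_i (t - u_i)^2 = 1 for the largest root t.
# Expanding gives k*t^2 - 2*b*t + (c - 1) = 0 with b = sum(u[:k]) and c = sum(u[:k]**2),
# hence t = (b + sqrt(b*b - k*c + k)) / k, which is the formula used above.
# Note that the neighbor weights w are sorted but not otherwise used by this stencil.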
# C code version of dijkstra
def cDijkstra(W, I, g, WI=None, WJ=None, WV=None, K=None):
n = W.shape[0]
k = len(I)
u = np.ones((n,)) * 1e10 # HJ Solver
l = -np.ones((n,), dtype=int) # Index of closest label
    if (WI is None) or (WV is None) or (K is None):
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.float64)
cgp.dijkstra(u, l, WI, K, WV, I, g, 1.0)
except:
print("You need to compile the cmodules!")
sys.exit(2)
return u
# Solve a general HJ equation with fast marching
def HJsolver(W, I, g, WI=None, WJ=None, WV=None, K=None, p=1):
n = W.shape[0]
k = len(I)
u = np.ones((n,)) * 1e10 # HJ Solver
l = -np.ones((n,), dtype=int) # Index of closest label
    if (WI is None) or (WV is None) or (K is None):
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
cgp.HJsolver(u, l, WI, K, WV, I, g, 1.0, p, 1.0)
except:
# Initialization
s = 0 # Size of heap
h = -np.ones((n + 1,), dtype=int) # Active points heap (indices of active points)
A = np.zeros((n,), dtype=bool) # Active flag
p = -np.ones((n,), dtype=int) # Pointer back to heap
V = np.zeros((n,), dtype=bool) # Finalized flag
l = -np.ones((n,), dtype=int) # Index of closest label
# Build active points heap and set distance = 0 for initial points
for i in range(k):
s = PushHeap(u, h, s, p, I[i])
u[I[i]] = g[i] # Initialize distance to zero
A[I[i]] = True # Set active flag to true
l[I[i]] = I[i] # Set index of closest label
# Dijkstra's algorithm
while s > 0:
i, s = PopHeap(u, h, s, p) # Pop smallest element off of heap
# Finalize this point
V[i] = True # Mark as finalized
A[i] = False # Set active flag to false
            # Update neighbors (note: this Python fallback is unverified; prefer the C implementation when available)
for j in WI[K[i]:K[i + 1]]:
if j != i and V[j] == False:
nn_ind = WI[K[j]:K[j + 1]]
w_vals = WV[K[j]:K[j + 1]]
u_vals = u[nn_ind]
u_tmp = stencil_solver(u[j], u_vals, w=w_vals)
if A[j]: # If j is already active
if u_tmp < u[j]: # Need to update heap
u[j] = u_tmp
SiftUp(u, h, s, p, p[j])
l[j] = l[i]
else: # If j is not active
# Add to heap and initialize distance, active flag, and label index
s = PushHeap(u, h, s, p, j)
u[j] = u_tmp
A[j] = True
l[j] = l[i]
return u
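# Illustrative usage sketch (comments only, not executed): values propagated from a set
# of seed vertices I with prescribed boundary values g.
#
#   vals = HJsolver(W, I, np.zeros(len(I), dtype=int), p=1)
#
# Passing precomputed WI, WV and K (extracted once with sparse.find, as eikonalSSL does
# below) avoids rebuilding the sparsity pattern when solving for many seed sets.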
# eikonal classifier
def eikonalSSL(W, I, g, p=2, beta=None):
k = len(I) # Number of labels
n = W.shape[0] # Number of datapoints
d = np.zeros((n,)) # Distance function
l = -np.ones((n,), dtype=int) # Index of closest label
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
c_code = False
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
d = np.ascontiguousarray(d, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
c_code = True
except:
c_code = False
labels = np.unique(g)
numl = len(labels)
u = np.zeros((numl, n))
for i in range(numl):
ind = I[g == labels[i]]
lab = np.zeros((len(ind),))
if c_code:
ind = np.ascontiguousarray(ind, dtype=np.int32)
lab = np.ascontiguousarray(lab, dtype=np.int32)
cgp.HJsolver(d, l, WI, K, WV, ind, lab, 1.0, p, 0.0)
u[i, :] = -d
else:
u[i, :] = -HJsolver(W, ind, lab, WI=WI, WV=WV, K=K, p=p)
if beta is not None:
_, s = volume_label_projection(u, beta, dt=-0.5)
u = np.diag(s) @ u
return u
# Nearest neighbor classifier (graph geodesic distance)
def nearestneighbor(W, I, g):
k = len(I) # Number of labels
n = W.shape[0] # Number of datapoints
d = np.ones((n,)) * 1e10 # Distance function
l = -np.ones((n,), dtype=int) # Index of closest label
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version of dijkstra, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
d = np.ascontiguousarray(d, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
init = np.ascontiguousarray(np.zeros_like(I), dtype=np.float64)
cgp.dijkstra(d, l, WI, K, WV, I, init, 1.0)
except: # Use python version, which is slower
# Initialization
s = 0 # Size of heap
h = -np.ones((n + 1,), dtype=int) # Active points heap (indices of active points)
A = np.zeros((n,), dtype=bool) # Active flag
p = -np.ones((n,), dtype=int) # Pointer back to heap
V = np.zeros((n,), dtype=bool) # Finalized flag
# Build active points heap and set distance = 0 for initial points
for i in range(k):
d[I[i]] = 0 # Initialize distance to zero
A[I[i]] = True # Set active flag to true
l[I[i]] = I[i] # Set index of closest label
s = PushHeap(d, h, s, p, I[i])
# Dijkstra's algorithm
while s > 0:
i, s = PopHeap(d, h, s, p) # Pop smallest element off of heap
# Finalize this point
V[i] = True # Mark as finalized
A[i] = False # Set active flag to false
# Update neighbors
# for j in WI[K[i]:K[i+1]]:
for jj in range(K[i], K[i + 1]):
j = WI[jj]
if j != i and V[j] == False:
if A[j]: # If j is already active
tmp_dist = d[i] + WV[jj]
if tmp_dist < d[j]: # Need to update heap
d[j] = tmp_dist
SiftUp(d, h, s, p, p[j])
l[j] = l[i]
else: # If j is not active
# Add to heap and initialize distance, active flag, and label index
d[j] = d[i] + WV[jj]
A[j] = True
l[j] = l[i]
s = PushHeap(d, h, s, p, j)
# Set labels based on nearest neighbor
u = np.zeros((n,))
u[I] = g
u, _ = LabelsToVec(u[l])
return u
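# Illustrative usage sketch (comments only, not executed): 1-nearest-label classification
# by graph geodesic distance. The returned u is a one-hot (k x n) matrix, so predictions
# are decoded the same way as for the other one-shot methods in graph_ssl below:
#
#   u = nearestneighbor(W, I, g)
#   pred = np.unique(g)[np.argmax(u, axis=0)]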
# Computes accuracy of clustering
def clustering_accuracy(L, L_true):
unique_classes = np.unique(L_true)
num_classes = len(unique_classes)
C = np.zeros((num_classes, num_classes), dtype=float)
for i in range(num_classes):
for j in range(num_classes):
C[i][j] = np.sum((L == i) & (L_true != j))
row_ind, col_ind = opt.linear_sum_assignment(C)
return 100 * (1 - C[row_ind, col_ind].sum() / len(L))
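# Note on the construction above, for reference: C[i][j] counts points assigned to
# cluster i whose true class is not j, so linear_sum_assignment finds the cluster-to-class
# matching that minimizes the number of misassigned points, and the returned score is the
# corresponding accuracy in percent. This implicitly assumes the true labels are coded
# 0, ..., num_classes - 1, matching the cluster indices produced by the methods below.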
# Spectral embedding
# Projects the graph to R^k via spectral projection
# Method can be 'unnormalized', 'ShiMalik', or 'NgJordanWeiss'
def spectral_embedding(W, k, method='NgJordanWeiss'):
n = W.shape[0]
if method == 'unnormalized':
L = graph_laplacian(W, norm='none')
vals, vec = sparse.linalg.eigs(L, k=k, which='SM')
vec = vec.real
vals = vals.real
elif method == 'ShiMalik':
D = degree_matrix(W)
L = graph_laplacian(W, norm='none')
vals, vec = sparse.linalg.eigs(L, M=D, k=k, which='SM')
vec = vec.real
vals = vals.real
elif method == 'NgJordanWeiss':
L = graph_laplacian(W, norm='normalized')
vals, vec = sparse.linalg.eigs(L, k=k, which='SM')
vec = vec.real
vals = vals.real
norms = np.sum(vec * vec, axis=1)
T = sparse.spdiags(norms ** (-1 / 2), 0, n, n)
vec = T @ vec # Normalize rows
return vec
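# Illustrative usage sketch (comments only, not executed): embed the graph in R^k.
#
#   V = spectral_embedding(W, 10, method='NgJordanWeiss')    # V has shape (n, 10)
#
# Only the 'NgJordanWeiss' branch row-normalizes the eigenvectors; the other two methods
# return the (real parts of the) raw eigenvectors.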
def kmeans(X, k):
KM = cluster.KMeans(n_clusters=k).fit(X)
return KM.labels_
# Spectral Clustering
def spectral_cluster(W, k, method='NgJordanWeiss', extra_dim=0):
V = spectral_embedding(W, k + extra_dim, method=method)
kmeans = cluster.KMeans(n_clusters=k).fit(V)
# V = spectral_embedding(W,k,method=method)
# kmeans = cluster.KMeans(n_clusters=k).fit(V)
return kmeans.labels_
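# Illustrative usage sketch (comments only, not executed): spectral clustering scored
# against ground-truth labels with clustering_accuracy defined above.
#
#   pred = spectral_cluster(W, 10, method='NgJordanWeiss', extra_dim=2)
#   acc = clustering_accuracy(pred, labels)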
# INCRES clustering
# Bresson, Xavier, et al. "An incremental reseeding strategy for clustering." International Conference on Imaging, Vision and Learning based on Optimization and PDEs. Springer, 2016.
# W = weight matrix
def incres_cluster(W, k, speed, T, labels):
n = W.shape[0]
# Increment
Dm = np.maximum(int(speed * 1e-4 * n / k), 1)
# Random initial labeling
u = random.randint(0, k, size=n)
# Initialization
F = np.zeros((n, k))
J = np.arange(n).astype(int)
# Random walk transition
D = degree_matrix(W, p=-1)
P = W * D
m = int(1)
for i in range(T):
# Plant
F.fill(0)
for r in range(k):
I = u == r
ind = J[I]
F[ind[random.choice(np.sum(I), m)], r] = 1
# Grow
while np.min(F) == 0:
F = P * F
# Harvest
u = np.argmax(F, axis=1)
# Increment
m = m + Dm
# Compute accuracy
if labels is not None:
acc = clustering_accuracy(u, labels)
print("Iteration " + str(i) + ": Accuracy = %.2f" % acc + "%%, #seeds= %d" % m)
return u
# Check if graph is connected
def isconnected(W):
num_comp, comp = csgraph.connected_components(W)
if num_comp == 1:
return True
else:
return False
# Graph-based clustering
# W = sparse weight matrix describing graph
# method = SSL method
# Options: incres
def graph_clustering(W, k, true_labels=None, method="incres", speed=5, T=100, extra_dim=0):
n = W.shape[0]
# Symmetrize W, if not already symmetric
W = (W + W.transpose()) / 2
# Check if connected
if not isconnected(W):
print('Warning: Graph is not connected!')
# Clustering
if method == "incres":
labels = incres_cluster(W, k, speed, T, true_labels)
elif method == "spectral":
labels = spectral_cluster(W, k, method="unnormalized", extra_dim=extra_dim)
elif method == "spectralshimalik":
labels = spectral_cluster(W, k, method="ShiMalik", extra_dim=extra_dim)
elif method == "spectralngjordanweiss":
labels = spectral_cluster(W, k, method="NgJordanWeiss", extra_dim=extra_dim)
else:
print("Invalid choice of clustering method.")
sys.exit()
return labels
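# Illustrative usage sketch (comments only, not executed): the clustering entry point
# that main() calls below; true_labels are optional and only used to report accuracy.
#
#   pred = graph_clustering(W, 10, true_labels=labels, method="incres", T=100, speed=2)
#   print("Accuracy: %.2f" % clustering_accuracy(pred, labels))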
# Graph-based semi-supervised learning
# W = sparse weight matrix describing graph
# I = indices of labeled datapoints
# g = values of labels
# method = SSL method
# Options: laplace, poisson, poisson_nodeg, wnll, properlyweighted, plaplace, randomwalk
def graph_ssl(W, I, g, D=None, Ns=40, mu=1, numT=50, beta=None, method="laplace", p=3, volume_mult=0.5, alpha=2,
zeta=1e7, r=0.1, epsilon=0.05, X=None, plaplace_solver="GradientDescentCcode", norm="none",
true_labels=None, eigvals=None, eigvecs=None, dataset=None, T=0, use_cuda=False, return_vector=False,
poisson_training_balance=True, symmetrize=True, error=None):
one_shot_methods = ["mbo", "poisson", "poissonbalanced", "poissonvolume", "poissonmbo_volume", "poissonmbo",
"poissonl1", "nearestneighbor", "poissonmbobalanced", "volumembo", "poissonvolumembo",
"dynamiclabelpropagation", "sparselabelpropagation", "centeredkernel", "eikonal"]
n = W.shape[0]
method = method.lower()
if beta is None:
beta = np.ones((len(np.unique(g)),))
# Symmetrize D,W, if not already symmetric
if symmetrize:
W = (W + W.transpose()) / 2
if D is not None:
D = sparse_max(D, D.transpose())
if not isconnected(W):
print('Warning: Graph is not connected!')
# One shot methods
if method in one_shot_methods:
if method == "mbo":
u = multiclassMBO(W, I, g, eigvals, eigvecs, dataset, true_labels=true_labels)
elif method == "volumembo":
u = volumeMBO(W, I, g, dataset, beta, T, volume_mult)
elif method == "poissonvolumembo":
u = poisson_volumeMBO(W, I, g, dataset, beta, T, volume_mult)
elif method == "poissonmbo_old":
u = poissonMBO(W, I, g, dataset, np.ones_like(beta), true_labels=true_labels, temp=T, use_cuda=use_cuda,
Ns=Ns, mu=mu, T=numT)
elif method == "poissonmbobalanced":
u = poissonMBO(W, I, g, dataset, beta, true_labels=true_labels, temp=T, use_cuda=use_cuda, Ns=Ns, mu=mu,
T=numT)
elif method == "poissonl1":
u = poissonL1(W, I, g, dataset, true_labels=true_labels)
elif method == "poisson":
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, error=error)
elif method == "poissonbalanced":
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, beta=beta)
elif method == "poissonvolume":
u = PoissonVolume(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, beta=beta)
elif method == "poissonmbo":
u = poissonMBO_volume(W, I, g, dataset, beta, true_labels=true_labels, temp=T, use_cuda=use_cuda, Ns=Ns,
mu=mu)
elif method == "dynamiclabelpropagation":
u = DynamicLabelPropagation(W, I, g, true_labels=true_labels)
elif method == "sparselabelpropagation":
u = SparseLabelPropagation(W, I, g, true_labels=true_labels)
elif method == "centeredkernel":
u = CenteredKernel(W, I, g, true_labels=true_labels)
elif method == "nearestneighbor":
# Use distance matrix if provided, instead of weight matrix
if D is None:
u = nearestneighbor(W, I, g)
else:
u = nearestneighbor(D, I, g)
elif method == "eikonal":
# Use distance matrix if provided, instead of weight matrix
if D is None:
u = eikonalSSL(W, I, g, p=p, beta=beta)
else:
                u = eikonalSSL(D, I, g, p=p, beta=beta)
else: # One vs rest methods
k = len(np.unique(g)) # Number of labels
u = np.zeros((k, n))
i = 0
for l in np.unique(g):
h = g == l
# Solve binary classification problem
if method == "laplace":
v = laplace_solve(W, I, h, norm=norm)
elif method == "shift":
v = shift_solve(W, I, h)
elif method == "meanshift":
v = meanshift_solve(W, I, h)
elif method == "wnll":
v = wnll_solve(W, I, h)
elif method == "properlyweighted":
if X is None:
print("Must supply raw data points for properly weighted Laplacian.")
sys.exit()
v = properlyweighted_solve(W, I, h, X, alpha, zeta, r)
elif method == "plaplace":
v = plaplace_solve(W, I, h, p, sol_method=plaplace_solver, norm=norm)
elif method == "randomwalk":
v = randomwalk_solve(W, I, h, epsilon)
else:
print("Invalid choice of SSL method.")
sys.exit()
# Update labels
u[i, :] = v
i = i + 1
if return_vector:
labels = np.transpose(u)
else:
# Select labels
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
# Make sure to set labels at labeled points
labels[I] = g
return labels
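# Illustrative usage sketch (comments only, not executed): the semi-supervised entry point
# used by main() below. Here train_ind is a placeholder for the array of labeled indices;
# only labels[train_ind] is revealed to the solver.
#
#   pred = graph_ssl(W, train_ind, labels[train_ind], method="poisson")
#   acc = accuracy(pred, labels, len(train_ind))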
# Read numerical data from csv file
def csvread(filename):
X = []
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
n = 0
for row in csv_reader:
if not row[0] == 'Date/Time':
X += [float(i) for i in row]
m = len(row)
n += 1
return np.array(X).reshape((n, m))
# Compute average and standard deviation of accuracy over many trials
# Reads data from csv file filename
# Returns accuracy (acc), standard deviation (stddev) and number of labeled points (N)
def accuracy_statistics(filename):
X = csvread(filename)
N = np.unique(X[:, 0])
acc = []
stddev = []
quant = []
for n in N:
Y = X[X[:, 0] == n, 1]
Y = np.sort(Y)
acc += [np.mean(Y)]
quant += [Y[int(3 * len(Y) / 4)]]
stddev += [np.std(Y)]
# print("%.1f (%.1f)"%(np.mean(Y),np.std(Y)), end="&")
num_trials = len(X[:, 0]) / len(N)
    return np.array(acc), np.array(stddev), N, np.array(quant), num_trials
# Makes an accuracy table to be included in LaTeX documents
# dataset = name of dataset
# ssl_methods = list of names of methods to compare
def accuracy_table_icml(dataset, ssl_method_list, legend_list, num_of_classes, testerror=False, savefile='tables.tex',
title='', quantile=False, append=False, directory='Results', fontsize='small', small_caps=True,
two_column=True):
# Retrieve number of different label rates m
accfile = directory + "/" + dataset + "_" + ssl_method_list[0] + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
m = len(N)
# Determine best algorithm at each label rate
best = [None] * m
best_score = [0] * m
i = 0
for ssl_method in ssl_method_list:
accfile = directory + "/" + dataset + "_" + ssl_method + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
if quantile:
acc = quant
for j in range(m):
if acc[j] > best_score[j]:
best_score[j] = acc[j]
best[j] = i
i += 1
if append:
f = open(savefile, "r")
lines = f.readlines()
f.close()
f = open(savefile, "w")
f.writelines([item for item in lines[:-1]])
else:
f = open(savefile, "w")
f.write("\\documentclass{article}\n")
f.write("\\usepackage[T1]{fontenc}\n")
f.write("\\usepackage{booktabs}\n")
f.write("\\usepackage[margin=1in]{geometry}\n")
f.write("\\begin{document}\n")
f.write("\n\n\n")
if two_column:
f.write("\\begin{table*}[t!]\n")
else:
f.write("\\begin{table}[t!]\n")
f.write("\\vspace{-3mm}\n")
f.write(
"\\caption{" + title + ": Average (standard deviation) classification accuracy over %d trials.}\n" % num_trials)
f.write("\\vspace{-3mm}\n")
f.write("\\label{tab:" + title + "}\n")
f.write("\\vskip 0.15in\n")
f.write("\\begin{center}\n")
f.write("\\begin{" + fontsize + "}\n")
if small_caps:
f.write("\\begin{sc}\n")
f.write("\\begin{tabular}{l")
for i in range(m):
f.write("l")
f.write("}\n")
f.write("\\toprule\n")
f.write("\\# Labels per class")
for i in range(m):
f.write("&\\textbf{%d}" % int(N[i] / num_of_classes))
f.write("\\\\\n")
f.write("\\midrule\n")
i = 0
for ssl_method in ssl_method_list:
f.write(legend_list[i].ljust(15))
accfile = directory + "/" + dataset + "_" + ssl_method + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
for j in range(m):
if best[j] == i:
f.write("&{\\bf %.1f" % acc[j] + " (%.1f)}" % stddev[j])
# f.write("&${\\bf %.1f"%acc[j]+"\\pm %.1f}$"%stddev[j])
else:
f.write("&%.1f" % acc[j] + " (%.1f) " % stddev[j])
# f.write("&$%.1f"%acc[j]+"\\pm %.1f$ "%stddev[j])
f.write("\\\\\n")
i += 1
f.write("\\bottomrule\n")
f.write("\\end{tabular}\n")
if small_caps:
f.write("\\end{sc}\n")
f.write("\\end{" + fontsize + "}\n")
f.write("\\end{center}\n")
f.write("\\vskip -0.1in\n")
if two_column:
f.write("\\end{table*}")
else:
f.write("\\end{table}")
f.write("\n\n\n")
f.write("\\end{document}\n")
f.close()
def plot_graph(X, W, l=None):
# Other colormaps, coolwarm, winter, Set3, tab20b, rainbow
# plt.ion()
colors = np.array([[1.0, 0, 0], [0, 0.9, 0]])
plt.rcParams['figure.facecolor'] = 'navy'
n = W.shape[0]
I, J, V = sparse.find(W)
for i in range(len(I)):
xval = [X[I[i], 0], X[J[i], 0]]
yval = [X[I[i], 1], X[J[i], 1]]
# plt.plot(xval,yval, color='black', linewidth=0.15, markersize=0)
plt.plot(xval, yval, color=[0.5, 0.5, 0.5], linewidth=0.5, markersize=0)
if l is None:
# plt.scatter(X[:,0],X[:,1], s=30, cmap='Paired')
plt.scatter(X[:, 0], X[:, 1], s=8, zorder=3)
else:
# plt.scatter(X[:,0],X[:,1], s=30, c=l, cmap='Paired')
plt.scatter(X[:, 0], X[:, 1], s=8, c=colors[l, :], zorder=3)
plt.axis("off")
# plot average and standard deviation of accuracy over many trials
# dataset = name of dataset
# ssl_methods = list of names of methods to compare
def accuracy_plot(dataset, ssl_method_list, legend_list, num_of_classes, title=None, errorbars=False, testerror=False,
savefile=None, loglog=False, log_dirs=None, directed_graph=False):
if log_dirs is None:
log_dirs = ["Results/"]
# plt.ion()
plt.figure()
if errorbars:
matplotlib.rcParams.update({'errorbar.capsize': 5})
matplotlib.rcParams.update({'font.size': 16})
styles = ['^b-', 'or-', 'dg-', 'sk-', 'pm-', 'xc-', '*y-']
i = 0
for log in log_dirs:
for ssl_method in ssl_method_list:
accfile = os.path.join(log, dataset + "_" + ssl_method)
if directed_graph:
accfile += "_directed"
accfile += "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
if testerror:
acc = 100 - acc
# z = np.polyfit(np.log(N),np.log(acc),1)
# print(z[0])
if errorbars:
plt.errorbar(N / num_of_classes, acc, fmt=styles[i], yerr=stddev, label=legend_list[i])
else:
if loglog:
plt.loglog(N / num_of_classes, acc, styles[i], label=legend_list[i])
else:
plt.plot(N / num_of_classes, acc, styles[i], label=legend_list[i])
i += 1
plt.xlabel('Number of labels per class')
if testerror:
plt.ylabel('Test error (%)')
plt.legend(loc='upper right')
else:
plt.ylabel('Accuracy (%)')
plt.legend(loc='lower right')
if title is not None:
plt.title(title)
plt.tight_layout()
plt.grid(True)
if savefile is not None:
plt.savefig(savefile)
else:
plt.show()
# Select labels based on a ranking
# Produces a label permutation (one trial) with the same number of labels per class as the input permutation perm
def SelectLabels(labels, permold, rank):
perm = permold
# Number of classes
L = np.unique(labels)
k = len(L)
n = len(labels)
m = len(permold)
num = np.zeros((m,))
for i in range(m):
num[i] = len(permold[i])
num, unique_perm = np.unique(num, return_index=True)
perm = list()
for i in unique_perm:
p = permold[i]
pl = labels[p]
ind = []
for l in L:
numl = np.sum(pl == l)
K = labels == l
c = np.argsort(-rank[K])
j = np.arange(0, n)[K]
ind = ind + j[c[:numl]].tolist()
ind = np.array(ind)
perm.append(ind)
return perm
# PageRank algorithm
def PageRank(W, alpha):
n = W.shape[0]
u = np.ones((n,))
v = np.ones((n,))
D = degree_matrix(W, p=-1)
P = np.transpose(D * W)
err = 1
while err > 1e-10:
w = alpha * P * u + (1 - alpha) * v
err = np.max(np.absolute(w - u))
u = w
return u
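# Note on the iteration above, for reference: it is power iteration for the PageRank
# fixed point u = alpha*P*u + (1 - alpha)*v with v = 1. Since P is column-stochastic
# (the transpose of D^{-1} W, assuming no isolated vertices), the entries of u sum to n
# rather than 1; divide by u.sum() if a probability vector is needed, e.g.
#
#   r = PageRank(W, 0.85)
#   r = r / r.sum()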
# Print help
def print_help():
print('========================================================')
print('GraphLearning: Python package for graph-based learning. ')
print('========================================================')
print('========================================================')
print('Graph-based Clustering & Semi-Supervised Learning')
print('========================================================')
print(' ')
print('Options:')
print(' -d (--dataset=): MNIST, FashionMNIST, WEBKB, cifar (default=MNIST)')
print(' -m (--metric=): Metric for computing similarities (default=L2)')
print(' Choices: vae, scatter, L2, aet')
print(' -a (--algorithm=): Learning algorithm (default=Laplace)')
print(' -k (--knn=): Number of nearest neighbors (default=10)')
print(' -t (--num_trials=): Number of trial permutations to run (default=all)')
print(
' -l (--label_perm=): Choice of label permutation file (format=dataset<label_perm>_permutations.npz). (default is empty).')
print(' -p (--p=): Value of p for plaplace method (default=3)')
print(' -n (--normalization=): Laplacian normalization (default=none)')
print(' Choices: none, normalized')
print(' -N (--num_classes): Number of clusters if choosing clustering algorithm (default=10)')
print(' -s (--speed=): Speed in INCRES method (1--10) (default=2)')
print(' -i (--num_iter=): Number of iterations for iterative methods (default=1000)')
print(' -x (--extra_dim=): Number of extra dimensions in spectral clustering (default=0)')
print(' -c (--cuda): Use GPU acceleration (when available)')
print(' -T (--temperature): Temperature for volume constrained MBO (default=0)')
print(' -v (--volume_constraint=): Volume constraint for MBO (default=0.5)')
print(' -j (--num_cores=): Number of cores to use in parallel processing (default=1)')
print(' -r (--results): Turns off automatic saving of results to .csv file')
print(' -b (--verbose): Turns on verbose mode (displaying more intermediate steps).')
# Default settings
def default_dataset(): return 'MNIST'
def default_metric(): return 'L2'
def default_algorithm(): return 'laplace'
def default_k(): return 10
def default_t(): return '-1'
def default_label_perm(): return ''
def default_p(): return 3
def default_norm(): return "none"
def default_use_cuda(): return False
def default_T(): return 0
def default_num_cores(): return 1
def default_results(): return True
def default_num_classes(): return 10
def default_speed(): return 2
def default_num_iter(): return 1000
def default_extra_dim(): return 0
def default_volume_constraint(): return 0.5
def default_verbose(): return False
def default_poisson_training_balance(): return True
def default_directed_graph(): return False
# Main subroutine. Callable from other scripts as graphlearning.main(...)
def main(dataset=default_dataset(), metric=default_metric(), algorithm=default_algorithm(), k=default_k(),
t=default_t(), label_perm=default_label_perm(), p=default_p(), norm=default_norm(),
use_cuda=default_use_cuda(), T=default_T(), num_cores=default_num_cores(), results=default_results(),
num_classes=default_num_classes(), speed=default_speed(), num_iter=default_num_iter(),
extra_dim=default_extra_dim(), volume_constraint=default_volume_constraint(), verbose=default_verbose(),
poisson_training_balance=default_poisson_training_balance(), directed_graph=default_directed_graph()):
# Load labels
labels = load_labels(dataset)
# Load nearest neighbor data
I, J, D = load_kNN_data(dataset, metric=metric)
    # Construct weight matrix and distance matrix
W, error = nnk_weight_matrix(dataset, metric, mask=J, knn_param=k, symmetrize=not directed_graph)
Wdist = None # dist_matrix(I, J, D, k)
# Load label permutation (including restrictions in t)
perm = load_label_permutation(dataset, label_perm=label_perm, t=t)
# Load eigenvector data if MBO selected
if algorithm == 'mbo':
eigvals, eigvecs = load_mbo_eig(dataset, metric, k)
else:
eigvals = None
eigvecs = None
# Output file
outfile = "Results/" + dataset + label_perm + "_" + metric + "_k%d" % k
if algorithm == 'plaplace':
outfile = outfile + "_p%.1f" % p + algorithm[1:] + "_" + norm
elif algorithm == 'eikonal':
outfile = outfile + "_p%.1f" % p + algorithm
else:
outfile = outfile + "_" + algorithm
if algorithm == 'volumembo' or algorithm == 'poissonvolumembo':
outfile = outfile + "_T%.3f" % T
outfile = outfile + "_V%.3f" % volume_constraint
if algorithm == 'poisson' and poisson_training_balance == False:
outfile = outfile + "_NoBal"
if directed_graph:
outfile += "_directed"
outfile = outfile + "_accuracy.csv"
# Print basic info
print('========================================================')
print('GraphLearning: Python package for graph-based learning. ')
print('========================================================')
print('========================================================')
print('Graph-based Clustering & Semi-Supervised Learning')
print('========================================================')
print(' ')
print('Dataset: ' + dataset)
print('Metric: ' + metric)
print('Number of neighbors: %d' % k)
print('Learning algorithm: ' + algorithm)
print('Laplacian normalization: ' + norm)
if algorithm == 'plaplace' or algorithm == 'eikonal':
print("p-Laplace/eikonal value p=%.2f" % p)
if algorithm in clustering_algorithms:
print('Number of clusters: %d' % num_classes)
if algorithm == 'INCRES':
print('INCRES speed: %.2f' % speed)
print('Number of iterations: %d' % num_iter)
if algorithm[:8] == 'Spectral':
print('Number of extra dimensions: %d' % extra_dim)
else:
print('Number of trial permutations: %d' % len(perm))
print('Permutations file: LabelPermutations/' + dataset + label_perm + '_permutations.npz')
if algorithm == 'volumembo' or algorithm == 'poissonvolumembo':
print("Using temperature=%.3f" % T)
print("Volume constraints = [%.3f,%.3f]" % (volume_constraint, 2 - volume_constraint))
# If output file selected
if results:
print('Output file: ' + outfile)
print(' ')
print('========================================================')
print(' ')
true_labels = None
if verbose:
true_labels = labels
# If clustering algorithm was chosen
if algorithm in clustering_algorithms:
# Clustering
u = graph_clustering(W, num_classes, labels, method=algorithm, T=num_iter, speed=speed, extra_dim=extra_dim)
# Compute accuracy
acc = clustering_accuracy(u, labels)
# Print to terminal
print("Accuracy: %.2f" % acc + "%")
# If semi-supervised algorithms chosen
else:
# If output file selected
if results:
# Check if Results directory exists
if not os.path.exists('Results'):
os.makedirs('Results')
now = datetime.datetime.now()
# Add time stamp to output file
f = open(outfile, "a+")
f.write("Date/Time, " + now.strftime("%Y-%m-%d_%H:%M") + "\n")
f.close()
# Loop over label permutations
print("Number of labels, Accuracy")
def one_trial(label_ind):
# Number of labels
m = len(label_ind)
            # Label proportions (used by some algorithms)
beta = label_proportions(labels)
start_time = time.time()
# Graph-based semi-supervised learning
u = graph_ssl(W, label_ind, labels[label_ind], D=Wdist, beta=beta, method=algorithm, epsilon=0.3, p=p,
norm=norm, eigvals=eigvals, eigvecs=eigvecs, dataset=dataset, T=T, use_cuda=use_cuda,
volume_mult=volume_constraint, true_labels=true_labels,
poisson_training_balance=poisson_training_balance, symmetrize=not directed_graph, error=error)
print("--- %s seconds ---" % (time.time() - start_time))
# Compute accuracy
acc = accuracy(u, labels, m)
# Print to terminal
print("%d" % m + ",%.2f" % acc)
# Write to file
if results:
f = open(outfile, "a+")
f.write("%d" % m + ",%.2f\n" % acc)
f.close()
# Number of cores for parallel processing
num_cores = min(multiprocessing.cpu_count(), num_cores)
Parallel(n_jobs=num_cores)(delayed(one_trial)(label_ind) for label_ind in perm)
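# Illustrative usage sketch (comments only, not executed): calling the pipeline from
# another script, as noted above; assumes this file is importable as `graphlearning` and
# that the corresponding files exist under Data/, kNNData/ and LabelPermutations/.
#
#   import graphlearning as gl
#   gl.main(dataset='MNIST', metric='vae', algorithm='poisson', k=10, t='10')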
if __name__ == '__main__':
# Default settings
dataset = default_dataset()
metric = default_metric()
algorithm = default_algorithm()
k = default_k()
t = default_t()
label_perm = default_label_perm()
p = default_p()
norm = default_norm()
use_cuda = default_use_cuda()
T = default_T()
num_cores = default_num_cores()
results = default_results()
num_classes = default_num_classes()
speed = default_speed()
num_iter = default_num_iter()
extra_dim = default_extra_dim()
volume_constraint = default_volume_constraint()
verbose = default_verbose()
poisson_training_balance = default_poisson_training_balance()
directed_graph = default_directed_graph()
# Read command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:m:k:a:p:n:v:N:s:i:x:t:cl:T:j:rboz",
["dataset=", "metric=", "knn=", "algorithm=", "p=", "normalization=",
"volume_constraint=", "num_classes=", "speed=", "num_iter=", "extra_dim=",
"num_trials=", "cuda", "label_perm=", "temperature=", "num_cores=", "results",
"verbose", "poisson_training_balance", "directed"])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-d", "--dataset"):
dataset = arg
elif opt in ("-m", "--metric"):
metric = arg
elif opt in ("-k", "--knn"):
k = int(arg)
elif opt in ("-a", "--algorithm"):
algorithm = arg.lower()
elif opt in ("-p", "--p"):
p = float(arg)
elif opt in ("-n", "--normalization"):
norm = arg
elif opt in ("-v", "--volume_constraint"):
volume_constraint = float(arg)
elif opt in ("-N", "--num_classes"):
num_classes = int(arg)
elif opt in ("-s", "--speed"):
speed = float(arg)
elif opt in ("-i", "--num_iter"):
num_iter = int(arg)
elif opt in ("-x", "--extra_dim"):
extra_dim = int(arg)
elif opt in ("-t", "--num_trials"):
t = arg
elif opt in ("-c", "--cuda"):
use_cuda = True
elif opt in ("-l", "--label_perm"):
label_perm = arg
elif opt in ("-T", "--temperature"):
T = float(arg)
elif opt in ("-j", "--num_cores"):
num_cores = int(arg)
elif opt in ("-r", "--results"):
results = False
elif opt in ("-b", "--verbose"):
verbose = True
elif opt in ("-o", "--poisson_training_balance"):
poisson_training_balance = False
elif opt in ("-z", "--directed"):
directed_graph = True
# Call main subroutine
main(dataset=dataset, metric=metric, algorithm=algorithm, k=k, t=t, label_perm=label_perm, p=p, norm=norm,
use_cuda=use_cuda, T=T, num_cores=num_cores, results=results, num_classes=num_classes, speed=speed,
num_iter=num_iter, extra_dim=extra_dim, volume_constraint=volume_constraint, verbose=verbose,
poisson_training_balance=poisson_training_balance, directed_graph=directed_graph)
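# Example command-line invocation (illustrative; the script name is a placeholder):
#
#   python graphlearning.py -d MNIST -m vae -a poisson -k 10 -t 10
#
# See print_help() above for the full list of options.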
'csgraph.connected_components', (['W'], {}), '(W)\n', (75201, 75204), True, 'import scipy.sparse.csgraph as csgraph\n'), ((82388, 82406), 'numpy.unique', 'np.unique', (['X[:, 0]'], {}), '(X[:, 0])\n', (82397, 82406), True, 'import numpy as np\n'), ((86343, 86379), 'numpy.array', 'np.array', (['[[1.0, 0, 0], [0, 0.9, 0]]'], {}), '([[1.0, 0, 0], [0, 0.9, 0]])\n', (86351, 86379), True, 'import numpy as np\n'), ((86460, 86474), 'scipy.sparse.find', 'sparse.find', (['W'], {}), '(W)\n', (86471, 86474), True, 'import scipy.sparse as sparse\n'), ((87017, 87032), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (87025, 87032), True, 'import matplotlib.pyplot as plt\n'), ((87463, 87475), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (87473, 87475), True, 'import matplotlib.pyplot as plt\n'), ((87558, 87603), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (87584, 87603), False, 'import matplotlib\n'), ((88541, 88581), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of labels per class"""'], {}), "('Number of labels per class')\n", (88551, 88581), True, 'import matplotlib.pyplot as plt\n'), ((88813, 88831), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (88829, 88831), True, 'import matplotlib.pyplot as plt\n'), ((88836, 88850), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (88844, 88850), True, 'import matplotlib.pyplot as plt\n'), ((89202, 89219), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (89211, 89219), True, 'import numpy as np\n'), ((89287, 89301), 'numpy.zeros', 'np.zeros', (['(m,)'], {}), '((m,))\n', (89295, 89301), True, 'import numpy as np\n'), ((89382, 89415), 'numpy.unique', 'np.unique', (['num'], {'return_index': '(True)'}), '(num, return_index=True)\n', (89391, 89415), True, 'import numpy as np\n'), ((89869, 89882), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (89876, 89882), True, 'import numpy as np\n'), ((89891, 89904), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (89898, 89904), True, 'import numpy as np\n'), ((89945, 89964), 'numpy.transpose', 'np.transpose', (['(D * W)'], {}), '(D * W)\n', (89957, 89964), True, 'import numpy as np\n'), ((2074, 2117), 'os.path.join', 'os.path.join', (['location', '"""MBOdata"""', 'dataFile'], {}), "(location, 'MBOdata', dataFile)\n", (2086, 2117), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((2130, 2171), 'numpy.load', 'np.load', (['dataFile_path'], {'allow_pickle': '(True)'}), '(dataFile_path, allow_pickle=True)\n', (2137, 2171), True, 'import numpy as np\n'), ((2559, 2585), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2575, 2585), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((2770, 2811), 'numpy.load', 'np.load', (['dataFile_path'], {'allow_pickle': '(True)'}), '(dataFile_path, allow_pickle=True)\n', (2777, 2811), True, 'import numpy as np\n'), ((3497, 3523), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3513, 3523), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((3630, 3671), 'numpy.load', 'np.load', (['dataFile_path'], {'allow_pickle': '(True)'}), '(dataFile_path, allow_pickle=True)\n', (3637, 3671), True, 'import numpy as np\n'), ((3852, 3878), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3868, 3878), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((4020, 4061), 
'numpy.load', 'np.load', (['dataFile_path'], {'allow_pickle': '(True)'}), '(dataFile_path, allow_pickle=True)\n', (4027, 4061), True, 'import numpy as np\n'), ((4270, 4296), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4286, 4296), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((4451, 4492), 'numpy.load', 'np.load', (['dataFile_path'], {'allow_pickle': '(True)'}), '(dataFile_path, allow_pickle=True)\n', (4458, 4492), True, 'import numpy as np\n'), ((5301, 5337), 'scipy.sparse.spdiags', 'sparse.spdiags', (['(d ** -alpha)', '(0)', 'n', 'n'], {}), '(d ** -alpha, 0, n, n)\n', (5315, 5337), True, 'import scipy.sparse as sparse\n'), ((5595, 5604), 'numpy.max', 'np.max', (['V'], {}), '(V)\n', (5601, 5604), True, 'import numpy as np\n'), ((6608, 6638), 'numpy.sum', 'np.sum', (['(labels == labelvals[i])'], {}), '(labels == labelvals[i])\n', (6614, 6638), True, 'import numpy as np\n'), ((9215, 9228), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (9222, 9228), True, 'import numpy as np\n'), ((10314, 10328), 'scipy.sparse.find', 'sparse.find', (['W'], {}), '(W)\n', (10325, 10328), True, 'import scipy.sparse as sparse\n'), ((10866, 10880), 'scipy.sparse.find', 'sparse.find', (['W'], {}), '(W)\n', (10877, 10880), True, 'import scipy.sparse as sparse\n'), ((12012, 12038), 'numpy.ones', 'np.ones', (['(n, k)'], {'dtype': 'int'}), '((n, k), dtype=int)\n', (12019, 12038), True, 'import numpy as np\n'), ((14479, 14522), 'numpy.random.choice', 'random.choice', (['I.shape[1]', 'k'], {'replace': '(False)'}), '(I.shape[1], k, replace=False)\n', (14492, 14522), True, 'import numpy.random as random\n'), ((16521, 16540), 'numpy.sum', 'np.sum', (['(labels == i)'], {}), '(labels == i)\n', (16527, 16540), True, 'import numpy as np\n'), ((17700, 17740), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)', 'keepdims': '(True)'}), '(X, axis=1, keepdims=True)\n', (17714, 17740), True, 'import numpy as np\n'), ((18009, 18034), 'numpy.array', 'np.array', (['mask[node_i, :]'], {}), '(mask[node_i, :])\n', (18017, 18034), True, 'import numpy as np\n'), ((18729, 18755), 'numpy.arange', 'np.arange', (['(0)', 'num_of_nodes'], {}), '(0, num_of_nodes)\n', (18738, 18755), True, 'import numpy as np\n'), ((20240, 20263), 'numpy.sum', 'np.sum', (['(nu * nu)'], {'axis': '(0)'}), '(nu * nu, axis=0)\n', (20246, 20263), True, 'import numpy as np\n'), ((21394, 21417), 'numpy.sum', 'np.sum', (['(nu * nu)'], {'axis': '(0)'}), '(nu * nu, axis=0)\n', (21400, 21417), True, 'import numpy as np\n'), ((22972, 23003), 'scipy.sparse.linalg.cg', 'splinalg.cg', (['L', 'f'], {'tol': 'tol', 'M': 'M'}), '(L, f, tol=tol, M=M)\n', (22983, 23003), True, 'import scipy.sparse.linalg as splinalg\n'), ((23029, 23067), 'scipy.sparse.linalg.cg', 'splinalg.cg', (['L', 'f'], {'x0': 'x0', 'tol': 'tol', 'M': 'M'}), '(L, f, x0=x0, tol=tol, M=M)\n', (23040, 23067), True, 'import scipy.sparse.linalg as splinalg\n'), ((24343, 24374), 'scipy.sparse.linalg.cg', 'splinalg.cg', (['A', 'b'], {'tol': 'tol', 'M': 'M'}), '(A, b, tol=tol, M=M)\n', (24354, 24374), True, 'import scipy.sparse.linalg as splinalg\n'), ((24400, 24443), 'scipy.sparse.linalg.cg', 'splinalg.cg', (['A', 'b'], {'x0': 'x0[idx]', 'tol': 'tol', 'M': 'M'}), '(A, b, x0=x0[idx], tol=tol, M=M)\n', (24411, 24443), True, 'import scipy.sparse.linalg as splinalg\n'), ((24880, 24901), 'numpy.sum', 'np.sum', (['(Y * Y)'], {'axis': '(1)'}), '(Y * Y, axis=1)\n', (24886, 24901), True, 'import numpy as np\n'), ((24984, 25001), 'numpy.vstack', 
'np.vstack', (['(X, Y)'], {}), '((X, Y))\n', (24993, 25001), True, 'import numpy as np\n'), ((25269, 25286), 'numpy.vstack', 'np.vstack', (['(X, Y)'], {}), '((X, Y))\n', (25278, 25286), True, 'import numpy as np\n'), ((25407, 25432), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (25423, 25432), True, 'import numpy as np\n'), ((25686, 25703), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (25695, 25703), True, 'import numpy as np\n'), ((26374, 26400), 'numpy.append', 'np.append', (['u', '[0, 0, 0, 0]'], {}), '(u, [0, 0, 0, 0])\n', (26383, 26400), True, 'import numpy as np\n'), ((27118, 27128), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (27125, 27128), True, 'import numpy as np\n'), ((27551, 27575), 'numpy.sum', 'np.sum', (['(Tri >= n)'], {'axis': '(1)'}), '(Tri >= n, axis=1)\n', (27557, 27575), True, 'import numpy as np\n'), ((28329, 28345), 'numpy.sum', 'np.sum', (['(s[I] * g)'], {}), '(s[I] * g)\n', (28335, 28345), True, 'import numpy as np\n'), ((28385, 28395), 'numpy.mean', 'np.mean', (['u'], {}), '(u)\n', (28392, 28395), True, 'import numpy as np\n'), ((28626, 28636), 'numpy.mean', 'np.mean', (['u'], {}), '(u)\n', (28633, 28636), True, 'import numpy as np\n'), ((30578, 30587), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (30584, 30587), True, 'import numpy as np\n'), ((30590, 30603), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (30597, 30603), True, 'import numpy as np\n'), ((30613, 30622), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (30619, 30622), True, 'import numpy as np\n'), ((30625, 30638), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (30632, 30638), True, 'import numpy as np\n'), ((31085, 31127), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['uu'], {'dtype': 'np.float64'}), '(uu, dtype=np.float64)\n', (31105, 31127), True, 'import numpy as np\n'), ((31141, 31183), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ul'], {'dtype': 'np.float64'}), '(ul, dtype=np.float64)\n', (31161, 31183), True, 'import numpy as np\n'), ((31197, 31237), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WI'], {'dtype': 'np.int32'}), '(WI, dtype=np.int32)\n', (31217, 31237), True, 'import numpy as np\n'), ((31251, 31291), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WJ'], {'dtype': 'np.int32'}), '(WJ, dtype=np.int32)\n', (31271, 31291), True, 'import numpy as np\n'), ((31305, 31347), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WV'], {'dtype': 'np.float64'}), '(WV, dtype=np.float64)\n', (31325, 31347), True, 'import numpy as np\n'), ((31360, 31399), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['I'], {'dtype': 'np.int32'}), '(I, dtype=np.int32)\n', (31380, 31399), True, 'import numpy as np\n'), ((31412, 31453), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['g'], {'dtype': 'np.float64'}), '(g, dtype=np.float64)\n', (31432, 31453), True, 'import numpy as np\n'), ((31463, 31527), 'cmodules.cgraphpy.lp_iterate', 'cgp.lp_iterate', (['uu', 'ul', 'WI', 'WJ', 'WV', 'I', 'g', 'p', '(1000000.0)', '(0.1)', '(0.0)'], {}), '(uu, ul, WI, WJ, WV, I, g, p, 1000000.0, 0.1, 0.0)\n', (31477, 31527), True, 'import cmodules.cgraphpy as cgp\n'), ((34001, 34015), 'scipy.sparse.find', 'sparse.find', (['W'], {}), '(W)\n', (34012, 34015), True, 'import scipy.sparse as sparse\n'), ((34681, 34699), 'scipy.sparse.identity', 'sparse.identity', (['n'], {}), '(n)\n', (34696, 34699), True, 'import scipy.sparse as sparse\n'), ((35241, 35260), 'numpy.sort', 'np.sort', (['(-X)'], {'axis': '(0)'}), '(-X, axis=0)\n', 
(35248, 35260), True, 'import numpy as np\n'), ((35296, 35311), 'numpy.ones', 'np.ones', (['(k, k)'], {}), '((k, k))\n', (35303, 35311), True, 'import numpy as np\n'), ((37044, 37056), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (37053, 37056), True, 'import numpy as np\n'), ((37105, 37117), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (37114, 37117), True, 'import numpy as np\n'), ((38140, 38152), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (38149, 38152), True, 'import numpy as np\n'), ((39176, 39188), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (39185, 39188), True, 'import numpy as np\n'), ((39790, 39800), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (39797, 39800), True, 'import numpy as np\n'), ((40775, 40804), 'numpy.sum', 'np.sum', (['(g == unique_labels[i])'], {}), '(g == unique_labels[i])\n', (40781, 40804), True, 'import numpy as np\n'), ((40969, 40979), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (40976, 40979), True, 'import numpy as np\n'), ((41093, 41111), 'numpy.sum', 'np.sum', (['Kg'], {'axis': '(1)'}), '(Kg, axis=1)\n', (41099, 41111), True, 'import numpy as np\n'), ((41659, 41677), 'scipy.sparse.identity', 'sparse.identity', (['n'], {}), '(n)\n', (41674, 41677), True, 'import scipy.sparse as sparse\n'), ((43073, 43090), 'torch.Size', 'torch.Size', (['shape'], {}), '(shape)\n', (43083, 43090), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((43207, 43219), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (43216, 43219), True, 'import numpy as np\n'), ((43700, 43710), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (43707, 43710), True, 'import numpy as np\n'), ((44027, 44037), 'numpy.copy', 'np.copy', (['u'], {}), '(u)\n', (44034, 44037), True, 'import numpy as np\n'), ((44990, 45002), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (44999, 45002), True, 'import numpy as np\n'), ((45108, 45118), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (45115, 45118), True, 'import numpy as np\n'), ((45256, 45266), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (45263, 45266), True, 'import numpy as np\n'), ((45518, 45536), 'scipy.sparse.identity', 'sparse.identity', (['n'], {}), '(n)\n', (45533, 45536), True, 'import scipy.sparse as sparse\n'), ((45637, 45647), 'numpy.copy', 'np.copy', (['P'], {}), '(P)\n', (45644, 45647), True, 'import numpy as np\n'), ((46433, 46445), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (46442, 46445), True, 'import numpy as np\n'), ((46551, 46561), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (46558, 46561), True, 'import numpy as np\n'), ((46683, 46701), 'numpy.sum', 'np.sum', (['Kg'], {'axis': '(0)'}), '(Kg, axis=0)\n', (46689, 46701), True, 'import numpy as np\n'), ((47800, 47812), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (47809, 47812), True, 'import numpy as np\n'), ((49411, 49424), 'numpy.ones', 'np.ones', (['(k,)'], {}), '((k,))\n', (49418, 49424), True, 'import numpy as np\n'), ((49868, 49898), 'numpy.clip', 'np.clip', (['(s + dt * grad)', '(0.5)', '(2)'], {}), '(s + dt * grad, 0.5, 2)\n', (49875, 49898), True, 'import numpy as np\n'), ((50346, 50358), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (50355, 50358), True, 'import numpy as np\n'), ((50524, 50534), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (50531, 50534), True, 'import numpy as np\n'), ((50648, 50666), 'numpy.sum', 'np.sum', (['Kg'], {'axis': '(1)'}), '(Kg, axis=1)\n', (50654, 50666), True, 'import numpy as np\n'), ((51055, 51073), 'scipy.sparse.identity', 'sparse.identity', (['n'], {}), '(n)\n', 
(51070, 51073), True, 'import scipy.sparse as sparse\n'), ((52422, 52432), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (52429, 52432), True, 'import numpy as np\n'), ((52781, 52791), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (52788, 52791), True, 'import numpy as np\n'), ((52905, 52923), 'numpy.sum', 'np.sum', (['Kg'], {'axis': '(1)'}), '(Kg, axis=1)\n', (52911, 52923), True, 'import numpy as np\n'), ((53266, 53275), 'numpy.sum', 'np.sum', (['v'], {}), '(v)\n', (53272, 53275), True, 'import numpy as np\n'), ((54920, 54935), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (54932, 54935), True, 'import numpy as np\n'), ((55192, 55220), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'np.float32'}), '(n, dtype=np.float32)\n', (55199, 55220), True, 'import numpy as np\n'), ((55434, 55444), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (55441, 55444), True, 'import numpy as np\n'), ((55558, 55576), 'numpy.sum', 'np.sum', (['Kg'], {'axis': '(1)'}), '(Kg, axis=1)\n', (55564, 55576), True, 'import numpy as np\n'), ((57803, 57818), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (57815, 57818), True, 'import numpy as np\n'), ((58039, 58051), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (58048, 58051), True, 'import numpy as np\n'), ((58359, 58370), 'numpy.max', 'np.max', (['deg'], {}), '(deg)\n', (58365, 58370), True, 'import numpy as np\n'), ((58546, 58556), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (58553, 58556), True, 'import numpy as np\n'), ((58669, 58687), 'numpy.sum', 'np.sum', (['Kg'], {'axis': '(1)'}), '(Kg, axis=1)\n', (58675, 58687), True, 'import numpy as np\n'), ((58769, 58784), 'numpy.transpose', 'np.transpose', (['b'], {}), '(b)\n', (58781, 58784), True, 'import numpy as np\n'), ((63617, 63630), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (63624, 63630), True, 'import numpy as np\n'), ((63660, 63684), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (63667, 63684), True, 'import numpy as np\n'), ((63855, 63869), 'scipy.sparse.find', 'sparse.find', (['W'], {}), '(W)\n', (63866, 63869), True, 'import scipy.sparse as sparse\n'), ((64122, 64163), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['u'], {'dtype': 'np.float64'}), '(u, dtype=np.float64)\n', (64142, 64163), True, 'import numpy as np\n'), ((64176, 64215), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['l'], {'dtype': 'np.int32'}), '(l, dtype=np.int32)\n', (64196, 64215), True, 'import numpy as np\n'), ((64229, 64269), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WI'], {'dtype': 'np.int32'}), '(WI, dtype=np.int32)\n', (64249, 64269), True, 'import numpy as np\n'), ((64283, 64325), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WV'], {'dtype': 'np.float64'}), '(WV, dtype=np.float64)\n', (64303, 64325), True, 'import numpy as np\n'), ((64338, 64377), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['K'], {'dtype': 'np.int32'}), '(K, dtype=np.int32)\n', (64358, 64377), True, 'import numpy as np\n'), ((64390, 64429), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['I'], {'dtype': 'np.int32'}), '(I, dtype=np.int32)\n', (64410, 64429), True, 'import numpy as np\n'), ((64442, 64483), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['g'], {'dtype': 'np.float64'}), '(g, dtype=np.float64)\n', (64462, 64483), True, 'import numpy as np\n'), ((64493, 64533), 'cmodules.cgraphpy.dijkstra', 'cgp.dijkstra', (['u', 'l', 'WI', 'K', 'WV', 'I', 'g', '(1.0)'], {}), '(u, l, WI, K, WV, I, g, 1.0)\n', (64505, 64533), True, 'import 
cmodules.cgraphpy as cgp\n'), ((64778, 64791), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (64785, 64791), True, 'import numpy as np\n'), ((64821, 64845), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (64828, 64845), True, 'import numpy as np\n'), ((65016, 65030), 'scipy.sparse.find', 'sparse.find', (['W'], {}), '(W)\n', (65027, 65030), True, 'import scipy.sparse as sparse\n'), ((65283, 65324), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['u'], {'dtype': 'np.float64'}), '(u, dtype=np.float64)\n', (65303, 65324), True, 'import numpy as np\n'), ((65337, 65376), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['l'], {'dtype': 'np.int32'}), '(l, dtype=np.int32)\n', (65357, 65376), True, 'import numpy as np\n'), ((65390, 65430), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WI'], {'dtype': 'np.int32'}), '(WI, dtype=np.int32)\n', (65410, 65430), True, 'import numpy as np\n'), ((65444, 65486), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WV'], {'dtype': 'np.float64'}), '(WV, dtype=np.float64)\n', (65464, 65486), True, 'import numpy as np\n'), ((65499, 65538), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['K'], {'dtype': 'np.int32'}), '(K, dtype=np.int32)\n', (65519, 65538), True, 'import numpy as np\n'), ((65551, 65590), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['I'], {'dtype': 'np.int32'}), '(I, dtype=np.int32)\n', (65571, 65590), True, 'import numpy as np\n'), ((65603, 65642), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['g'], {'dtype': 'np.int32'}), '(g, dtype=np.int32)\n', (65623, 65642), True, 'import numpy as np\n'), ((65652, 65700), 'cmodules.cgraphpy.HJsolver', 'cgp.HJsolver', (['u', 'l', 'WI', 'K', 'WV', 'I', 'g', '(1.0)', 'p', '(1.0)'], {}), '(u, l, WI, K, WV, I, g, 1.0, p, 1.0)\n', (65664, 65700), True, 'import cmodules.cgraphpy as cgp\n'), ((67808, 67832), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (67815, 67832), True, 'import numpy as np\n'), ((68221, 68262), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (68241, 68262), True, 'import numpy as np\n'), ((68275, 68314), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['l'], {'dtype': 'np.int32'}), '(l, dtype=np.int32)\n', (68295, 68314), True, 'import numpy as np\n'), ((68328, 68368), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WI'], {'dtype': 'np.int32'}), '(WI, dtype=np.int32)\n', (68348, 68368), True, 'import numpy as np\n'), ((68382, 68424), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WV'], {'dtype': 'np.float64'}), '(WV, dtype=np.float64)\n', (68402, 68424), True, 'import numpy as np\n'), ((68437, 68476), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['K'], {'dtype': 'np.int32'}), '(K, dtype=np.int32)\n', (68457, 68476), True, 'import numpy as np\n'), ((68489, 68528), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['I'], {'dtype': 'np.int32'}), '(I, dtype=np.int32)\n', (68509, 68528), True, 'import numpy as np\n'), ((69370, 69383), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (69377, 69383), True, 'import numpy as np\n'), ((69421, 69445), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (69428, 69445), True, 'import numpy as np\n'), ((69827, 69868), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (69847, 69868), True, 'import numpy as np\n'), ((69881, 69920), 'numpy.ascontiguousarray', 
'np.ascontiguousarray', (['l'], {'dtype': 'np.int32'}), '(l, dtype=np.int32)\n', (69901, 69920), True, 'import numpy as np\n'), ((69934, 69974), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WI'], {'dtype': 'np.int32'}), '(WI, dtype=np.int32)\n', (69954, 69974), True, 'import numpy as np\n'), ((69988, 70030), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['WV'], {'dtype': 'np.float64'}), '(WV, dtype=np.float64)\n', (70008, 70030), True, 'import numpy as np\n'), ((70043, 70082), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['K'], {'dtype': 'np.int32'}), '(K, dtype=np.int32)\n', (70063, 70082), True, 'import numpy as np\n'), ((70095, 70134), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['I'], {'dtype': 'np.int32'}), '(I, dtype=np.int32)\n', (70115, 70134), True, 'import numpy as np\n'), ((70216, 70259), 'cmodules.cgraphpy.dijkstra', 'cgp.dijkstra', (['d', 'l', 'WI', 'K', 'WV', 'I', 'init', '(1.0)'], {}), '(d, l, WI, K, WV, I, init, 1.0)\n', (70228, 70259), True, 'import cmodules.cgraphpy as cgp\n'), ((72887, 72925), 'scipy.sparse.linalg.eigs', 'sparse.linalg.eigs', (['L'], {'k': 'k', 'which': '"""SM"""'}), "(L, k=k, which='SM')\n", (72905, 72925), True, 'import scipy.sparse as sparse\n'), ((74825, 74845), 'numpy.argmax', 'np.argmax', (['F'], {'axis': '(1)'}), '(F, axis=1)\n', (74834, 74845), True, 'import numpy as np\n'), ((80287, 80303), 'numpy.zeros', 'np.zeros', (['(k, n)'], {}), '((k, n))\n', (80295, 80303), True, 'import numpy as np\n'), ((80335, 80347), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (80344, 80347), True, 'import numpy as np\n'), ((81455, 81470), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (81467, 81470), True, 'import numpy as np\n'), ((81529, 81549), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (81538, 81549), True, 'import numpy as np\n'), ((81864, 81899), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (81874, 81899), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((82511, 82521), 'numpy.sort', 'np.sort', (['Y'], {}), '(Y)\n', (82518, 82521), True, 'import numpy as np\n'), ((86667, 86739), 'matplotlib.pyplot.plot', 'plt.plot', (['xval', 'yval'], {'color': '[0.5, 0.5, 0.5]', 'linewidth': '(0.5)', 'markersize': '(0)'}), '(xval, yval, color=[0.5, 0.5, 0.5], linewidth=0.5, markersize=0)\n', (86675, 86739), True, 'import matplotlib.pyplot as plt\n'), ((86825, 86869), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(8)', 'zorder': '(3)'}), '(X[:, 0], X[:, 1], s=8, zorder=3)\n', (86836, 86869), True, 'import matplotlib.pyplot as plt\n'), ((86951, 87011), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(8)', 'c': 'colors[l, :]', 'zorder': '(3)'}), '(X[:, 0], X[:, 1], s=8, c=colors[l, :], zorder=3)\n', (86962, 87011), True, 'import matplotlib.pyplot as plt\n'), ((87502, 87553), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'errorbar.capsize': 5}"], {}), "({'errorbar.capsize': 5})\n", (87528, 87553), False, 'import matplotlib\n'), ((88608, 88636), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test error (%)"""'], {}), "('Test error (%)')\n", (88618, 88636), True, 'import matplotlib.pyplot as plt\n'), ((88645, 88674), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (88655, 88674), True, 'import matplotlib.pyplot as plt\n'), ((88693, 88719), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy 
(%)"""'], {}), "('Accuracy (%)')\n", (88703, 88719), True, 'import matplotlib.pyplot as plt\n'), ((88728, 88757), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (88738, 88757), True, 'import matplotlib.pyplot as plt\n'), ((88792, 88808), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (88801, 88808), True, 'import matplotlib.pyplot as plt\n'), ((88888, 88909), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savefile'], {}), '(savefile)\n', (88899, 88909), True, 'import matplotlib.pyplot as plt\n'), ((88928, 88938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (88936, 88938), True, 'import matplotlib.pyplot as plt\n'), ((89738, 89751), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (89746, 89751), True, 'import numpy as np\n'), ((100108, 100473), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hd:m:k:a:p:n:v:N:s:i:x:t:cl:T:j:rboz"""', "['dataset=', 'metric=', 'knn=', 'algorithm=', 'p=', 'normalization=',\n 'volume_constraint=', 'num_classes=', 'speed=', 'num_iter=',\n 'extra_dim=', 'num_trials=', 'cuda', 'label_perm=', 'temperature=',\n 'num_cores=', 'results', 'verbose', 'poisson_training_balance', 'directed']"], {}), "(sys.argv[1:], 'hd:m:k:a:p:n:v:N:s:i:x:t:cl:T:j:rboz', [\n 'dataset=', 'metric=', 'knn=', 'algorithm=', 'p=', 'normalization=',\n 'volume_constraint=', 'num_classes=', 'speed=', 'num_iter=',\n 'extra_dim=', 'num_trials=', 'cuda', 'label_perm=', 'temperature=',\n 'num_cores=', 'results', 'verbose', 'poisson_training_balance', 'directed']\n )\n", (100121, 100473), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((1949, 1975), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1965, 1975), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((2426, 2437), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2434, 2437), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((2964, 2975), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2972, 2975), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((3764, 3775), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3772, 3775), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((4165, 4176), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4173, 4176), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((4659, 4670), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4667, 4670), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((4931, 4953), 'numpy.sum', 'np.sum', (['(labels == L[i])'], {}), '(labels == L[i])\n', (4937, 4953), True, 'import numpy as np\n'), ((5614, 5658), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(V, (I, J))'], {'shape': '(n, n)'}), '((V, (I, J)), shape=(n, n))\n', (5631, 5658), True, 'import scipy.sparse as sparse\n'), ((6648, 6660), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6657, 6660), True, 'import numpy as np\n'), ((7010, 7021), 'numpy.array', 'np.array', (['L'], {}), '(L)\n', (7018, 7021), True, 'import numpy as np\n'), ((7450, 7462), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (7459, 7462), True, 'import numpy as np\n'), ((7974, 7985), 'numpy.array', 'np.array', (['L'], {}), '(L)\n', (7982, 7985), True, 'import numpy as np\n'), ((8488, 8505), 'numpy.sum', 'np.sum', (['W'], {'axis': '(1)'}), '(W, axis=1)\n', (8494, 8505), True, 'import numpy as np\n'), ((10422, 10485), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(V * 
pa / (a + 1e-13), (I, J))'], {'shape': '(n, n)'}), '((V * pa / (a + 1e-13), (I, J)), shape=(n, n))\n', (10439, 10485), True, 'import scipy.sparse as sparse\n'), ((10621, 10638), 'numpy.sum', 'np.sum', (['M'], {'axis': '(1)'}), '(M, axis=1)\n', (10627, 10638), True, 'import numpy as np\n'), ((10889, 10949), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(V * (u[J] - u[I]), (I, J))'], {'shape': '(n, n)'}), '((V * (u[J] - u[I]), (I, J)), shape=(n, n))\n', (10906, 10949), True, 'import scipy.sparse as sparse\n'), ((11606, 11621), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (11615, 11621), True, 'import numpy as np\n'), ((11667, 11682), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (11676, 11682), True, 'import numpy as np\n'), ((13791, 13835), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(D, (I, J))'], {'shape': '(n, n)'}), '((D, (I, J)), shape=(n, n))\n', (13808, 13835), True, 'import scipy.sparse as sparse\n'), ((14874, 14918), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(D, (I, J))'], {'shape': '(n, n)'}), '((D, (I, J)), shape=(n, n))\n', (14891, 14918), True, 'import scipy.sparse as sparse\n'), ((15398, 15442), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(D, (I, J))'], {'shape': '(n, n)'}), '((D, (I, J)), shape=(n, n))\n', (15415, 15442), True, 'import scipy.sparse as sparse\n'), ((15785, 15829), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(Y, (I, J))'], {'shape': '(n, n)'}), '((Y, (I, J)), shape=(n, n))\n', (15802, 15829), True, 'import scipy.sparse as sparse\n'), ((15916, 15965), 'scipy.sparse.spdiags', 'sparse.spdiags', (['((max_dist + 1e-10) ** -1)', '(0)', 'n', 'n'], {}), '((max_dist + 1e-10) ** -1, 0, n, n)\n', (15930, 15965), True, 'import scipy.sparse as sparse\n'), ((16051, 16095), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(V, (I, J))'], {'shape': '(n, n)'}), '((V, (I, J)), shape=(n, n))\n', (16068, 16095), True, 'import scipy.sparse as sparse\n'), ((17390, 17434), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(D, (I, J))'], {'shape': '(n, n)'}), '((D, (I, J)), shape=(n, n))\n', (17407, 17434), True, 'import scipy.sparse as sparse\n'), ((18086, 18120), 'numpy.where', 'np.where', (['(non_zero_index == node_i)'], {}), '(non_zero_index == node_i)\n', (18094, 18120), True, 'import numpy as np\n'), ((18628, 18641), 'numpy.sum', 'np.sum', (['x_opt'], {}), '(x_opt)\n', (18634, 18641), True, 'import numpy as np\n'), ((20498, 20532), 'numpy.swapaxes', 'np.swapaxes', (['NN_nu[:, 1:, :]', '(0)', '(1)'], {}), '(NN_nu[:, 1:, :], 0, 1)\n', (20509, 20532), True, 'import numpy as np\n'), ((22365, 22395), 'scipy.sparse.spdiags', 'sparse.spdiags', (['(1 / M)', '(0)', 'm', 'm'], {}), '(1 / M, 0, m, m)\n', (22379, 22395), True, 'import scipy.sparse as sparse\n'), ((22899, 22929), 'scipy.sparse.spdiags', 'sparse.spdiags', (['(1 / M)', '(0)', 'm', 'm'], {}), '(1 / M, 0, m, m)\n', (22913, 22929), True, 'import scipy.sparse as sparse\n'), ((24259, 24299), 'scipy.sparse.spdiags', 'sparse.spdiags', (['(1 / (M + 1e-10))', '(0)', 'm', 'm'], {}), '(1 / (M + 1e-10), 0, m, m)\n', (24273, 24299), True, 'import scipy.sparse as sparse\n'), ((25211, 25232), 'numpy.sum', 'np.sum', (['(Y * Y)'], {'axis': '(1)'}), '(Y * Y, axis=1)\n', (25217, 25232), True, 'import numpy as np\n'), ((25576, 25594), 'numpy.random.rand', 'random.rand', (['(3 * n)'], {}), '(3 * n)\n', (25587, 25594), True, 'import numpy.random as random\n'), ((25644, 25662), 'numpy.random.rand', 'random.rand', (['(3 * n)'], {}), '(3 * n)\n', (25655, 25662), True, 'import 
numpy.random as random\n'), ((25735, 25754), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (25742, 25754), True, 'import numpy as np\n'), ((25766, 25779), 'numpy.cos', 'np.cos', (['(3 * x)'], {}), '(3 * x)\n', (25772, 25779), True, 'import numpy as np\n'), ((26568, 26597), 'numpy.unique', 'np.unique', (['nn_tri[nn_tri < n]'], {}), '(nn_tri[nn_tri < n])\n', (26577, 26597), True, 'import numpy as np\n'), ((26617, 26635), 'numpy.mean', 'np.mean', (['u[nn_tri]'], {}), '(u[nn_tri])\n', (26624, 26635), True, 'import numpy as np\n'), ((28791, 28817), 'scipy.sparse.spdiags', 'sparse.spdiags', (['a', '(0)', 'n', 'n'], {}), '(a, 0, n, n)\n', (28805, 28817), True, 'import scipy.sparse as sparse\n'), ((29609, 29639), 'scipy.sparse.spdiags', 'sparse.spdiags', (['gamma', '(0)', 'n', 'n'], {}), '(gamma, 0, n, n)\n', (29623, 29639), True, 'import scipy.sparse as sparse\n'), ((34025, 34085), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(V * (u[J] - u[I]), (I, J))'], {'shape': '(n, n)'}), '((V * (u[J] - u[I]), (I, J)), shape=(n, n))\n', (34042, 34085), True, 'import scipy.sparse as sparse\n'), ((36907, 36917), 'sys.exit', 'sys.exit', ([], {}), '()\n', (36915, 36917), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((38097, 38107), 'sys.exit', 'sys.exit', ([], {}), '()\n', (38105, 38107), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((39440, 39451), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (39448, 39451), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((40288, 40308), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (40297, 40308), True, 'import numpy as np\n'), ((42629, 42649), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (42638, 42649), True, 'import numpy as np\n'), ((44641, 44661), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (44650, 44661), True, 'import numpy as np\n'), ((46265, 46276), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (46273, 46276), True, 'import numpy as np\n'), ((47032, 47049), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (47046, 47049), True, 'import numpy as np\n'), ((47330, 47344), 'numpy.absolute', 'np.absolute', (['w'], {}), '(w)\n', (47341, 47344), True, 'import numpy as np\n'), ((47483, 47503), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (47492, 47503), True, 'import numpy as np\n'), ((51857, 51877), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (51866, 51877), True, 'import numpy as np\n'), ((53828, 53859), 'torch.sparse.addmm', 'torch.sparse.addmm', (['Dbt', 'Pt', 'ut'], {}), '(Dbt, Pt, ut)\n', (53846, 53859), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((55269, 55282), 'numpy.max', 'np.max', (['error'], {}), '(error)\n', (55275, 55282), True, 'import numpy as np\n'), ((56478, 56508), 'torch.sparse.addmm', 'torch.sparse.addmm', (['bt', 'Wt', 'ut'], {}), '(bt, Wt, ut)\n', (56496, 56508), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((56577, 56603), 'numpy.linalg.norm', 'np.linalg.norm', (['(v - v_prev)'], {}), '(v - v_prev)\n', (56591, 56603), True, 'import numpy as np\n'), ((56937, 56988), 'numpy.clip', 'np.clip', (['(b + confidence_gain * u)'], {'a_min': '(-1)', 'a_max': '(1)'}), '(b + confidence_gain * u, a_min=-1, a_max=1)\n', (56944, 56988), True, 'import numpy as np\n'), ((57057, 57083), 'numpy.linalg.norm', 'np.linalg.norm', (['(v - v_prev)'], {}), '(v - v_prev)\n', 
(57071, 57083), True, 'import numpy as np\n'), ((58975, 58990), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (58987, 58990), True, 'import numpy as np\n'), ((58993, 59011), 'numpy.mean', 'np.mean', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (59000, 59011), True, 'import numpy as np\n'), ((60798, 60818), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(0)'}), '(u, axis=0)\n', (60807, 60818), True, 'import numpy as np\n'), ((63318, 63344), 'numpy.sqrt', 'np.sqrt', (['(b * b - k * c + k)'], {}), '(b * b - k * c + k)\n', (63325, 63344), True, 'import numpy as np\n'), ((63370, 63390), 'numpy.maximum', 'np.maximum', (['(t - u)', '(0)'], {}), '(t - u, 0)\n', (63380, 63390), True, 'import numpy as np\n'), ((64605, 64616), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (64613, 64616), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((65873, 65899), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (65881, 65899), True, 'import numpy as np\n'), ((65989, 66015), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (65997, 66015), True, 'import numpy as np\n'), ((68798, 68839), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ind'], {'dtype': 'np.int32'}), '(ind, dtype=np.int32)\n', (68818, 68839), True, 'import numpy as np\n'), ((68858, 68899), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['lab'], {'dtype': 'np.int32'}), '(lab, dtype=np.int32)\n', (68878, 68899), True, 'import numpy as np\n'), ((68912, 68964), 'cmodules.cgraphpy.HJsolver', 'cgp.HJsolver', (['d', 'l', 'WI', 'K', 'WV', 'ind', 'lab', '(1.0)', 'p', '(0.0)'], {}), '(d, l, WI, K, WV, ind, lab, 1.0, p, 0.0)\n', (68924, 68964), True, 'import cmodules.cgraphpy as cgp\n'), ((69168, 69178), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (69175, 69178), True, 'import numpy as np\n'), ((70171, 70187), 'numpy.zeros_like', 'np.zeros_like', (['I'], {}), '(I)\n', (70184, 70187), True, 'import numpy as np\n'), ((70471, 70497), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (70479, 70497), True, 'import numpy as np\n'), ((70587, 70613), 'numpy.zeros', 'np.zeros', (['(n,)'], {'dtype': 'bool'}), '((n,), dtype=bool)\n', (70595, 70613), True, 'import numpy as np\n'), ((72434, 72466), 'numpy.sum', 'np.sum', (['((L == i) & (L_true != j))'], {}), '((L == i) & (L_true != j))\n', (72440, 72466), True, 'import numpy as np\n'), ((73098, 73141), 'scipy.sparse.linalg.eigs', 'sparse.linalg.eigs', (['L'], {'M': 'D', 'k': 'k', 'which': '"""SM"""'}), "(L, M=D, k=k, which='SM')\n", (73116, 73141), True, 'import scipy.sparse as sparse\n'), ((73565, 73593), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (73579, 73593), True, 'import sklearn.cluster as cluster\n'), ((73785, 73813), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (73799, 73813), True, 'import sklearn.cluster as cluster\n'), ((74425, 74437), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (74434, 74437), True, 'import numpy as np\n'), ((74756, 74765), 'numpy.min', 'np.min', (['F'], {}), '(F)\n', (74762, 74765), True, 'import numpy as np\n'), ((80241, 80253), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (80250, 80253), True, 'import numpy as np\n'), ((81568, 81580), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (81577, 81580), True, 'import numpy as np\n'), ((82096, 82107), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (82104, 82107), True, 'import numpy as np\n'), 
((82538, 82548), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (82545, 82548), True, 'import numpy as np\n'), ((82611, 82620), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (82617, 82620), True, 'import numpy as np\n'), ((87767, 87812), 'os.path.join', 'os.path.join', (['log', "(dataset + '_' + ssl_method)"], {}), "(log, dataset + '_' + ssl_method)\n", (87779, 87812), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((89563, 89578), 'numpy.sum', 'np.sum', (['(pl == l)'], {}), '(pl == l)\n', (89569, 89578), True, 'import numpy as np\n'), ((89623, 89643), 'numpy.argsort', 'np.argsort', (['(-rank[K])'], {}), '(-rank[K])\n', (89633, 89643), True, 'import numpy as np\n'), ((90066, 90084), 'numpy.absolute', 'np.absolute', (['(w - u)'], {}), '(w - u)\n', (90077, 90084), True, 'import numpy as np\n'), ((97712, 97735), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (97733, 97735), False, 'import datetime\n'), ((98226, 98237), 'time.time', 'time.time', ([], {}), '()\n', (98235, 98237), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((99184, 99211), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (99209, 99211), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((99232, 99258), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (99240, 99258), False, 'from joblib import Parallel, delayed\n'), ((100655, 100666), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (100663, 100666), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((100754, 100764), 'sys.exit', 'sys.exit', ([], {}), '()\n', (100762, 100764), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((11548, 11558), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (11555, 11558), True, 'import numpy as np\n'), ((18294, 18335), 'numpy.dot', 'np.dot', (['x_neighbors', 'X_normalized[node_i]'], {}), '(x_neighbors, X_normalized[node_i])\n', (18300, 18335), True, 'import numpy as np\n'), ((18360, 18394), 'numpy.dot', 'np.dot', (['x_neighbors', 'x_neighbors.T'], {}), '(x_neighbors, x_neighbors.T)\n', (18366, 18394), True, 'import numpy as np\n'), ((20081, 20096), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (20093, 20096), True, 'import numpy as np\n'), ((21235, 21250), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (21247, 21250), True, 'import numpy as np\n'), ((30238, 30248), 'numpy.max', 'np.max', (['dx'], {}), '(dx)\n', (30244, 30248), True, 'import numpy as np\n'), ((31017, 31027), 'sys.exit', 'sys.exit', ([], {}), '()\n', (31025, 31027), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((33171, 33186), 'numpy.max', 'np.max', (['(uu - ul)'], {}), '(uu - ul)\n', (33177, 33186), True, 'import numpy as np\n'), ((33210, 33225), 'numpy.min', 'np.min', (['(uu - ul)'], {}), '(uu - ul)\n', (33216, 33225), True, 'import numpy as np\n'), ((34301, 34318), 'numpy.sum', 'np.sum', (['F'], {'axis': '(1)'}), '(F, axis=1)\n', (34307, 34318), True, 'import numpy as np\n'), ((35354, 35371), 'numpy.transpose', 'np.transpose', (['Sum'], {}), '(Sum)\n', (35366, 35371), True, 'import numpy as np\n'), ((35380, 35392), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (35389, 35392), True, 'import numpy as np\n'), ((40331, 40343), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (40340, 40343), True, 'import numpy as np\n'), ((42070, 42101), 'torch.sparse.addmm', 'torch.sparse.addmm', (['Dbt', 'Pt', 'ut'], {}), '(Dbt, Pt, ut)\n', (42088, 
42101), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((42672, 42684), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (42681, 42684), True, 'import numpy as np\n'), ((44684, 44696), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (44693, 44696), True, 'import numpy as np\n'), ((45968, 45979), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (45976, 45979), True, 'import numpy as np\n'), ((46012, 46032), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (46021, 46032), True, 'import numpy as np\n'), ((47526, 47538), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (47535, 47538), True, 'import numpy as np\n'), ((50148, 50158), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (50155, 50158), True, 'import numpy as np\n'), ((51464, 51495), 'torch.sparse.addmm', 'torch.sparse.addmm', (['Dbt', 'Pt', 'ut'], {}), '(Dbt, Pt, ut)\n', (51482, 51495), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((51900, 51912), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (51909, 51912), True, 'import numpy as np\n'), ((53040, 53058), 'scipy.sparse.identity', 'sparse.identity', (['n'], {}), '(n)\n', (53055, 53058), True, 'import scipy.sparse as sparse\n'), ((54424, 54444), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (54433, 54444), True, 'import numpy as np\n'), ((54841, 54855), 'numpy.diag', 'np.diag', (['(1 / c)'], {}), '(1 / c)\n', (54848, 54855), True, 'import numpy as np\n'), ((54890, 54907), 'numpy.diag', 'np.diag', (['(beta / c)'], {}), '(beta / c)\n', (54897, 54907), True, 'import numpy as np\n'), ((57260, 57280), 'numpy.argmax', 'np.argmax', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (57269, 57280), True, 'import numpy as np\n'), ((57724, 57738), 'numpy.diag', 'np.diag', (['(1 / c)'], {}), '(1 / c)\n', (57731, 57738), True, 'import numpy as np\n'), ((57773, 57790), 'numpy.diag', 'np.diag', (['(beta / c)'], {}), '(beta / c)\n', (57780, 57790), True, 'import numpy as np\n'), ((60841, 60853), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (60850, 60853), True, 'import numpy as np\n'), ((63164, 63187), 'numpy.maximum', 'np.maximum', (['(u[i] - u)', '(0)'], {}), '(u[i] - u, 0)\n', (63174, 63187), True, 'import numpy as np\n'), ((65783, 65811), 'numpy.ones', 'np.ones', (['(n + 1,)'], {'dtype': 'int'}), '((n + 1,), dtype=int)\n', (65790, 65811), True, 'import numpy as np\n'), ((65928, 65952), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (65935, 65952), True, 'import numpy as np\n'), ((66047, 66071), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (66054, 66071), True, 'import numpy as np\n'), ((70381, 70409), 'numpy.ones', 'np.ones', (['(n + 1,)'], {'dtype': 'int'}), '((n + 1,), dtype=int)\n', (70388, 70409), True, 'import numpy as np\n'), ((70526, 70550), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'int'}), '((n,), dtype=int)\n', (70533, 70550), True, 'import numpy as np\n'), ((73296, 73334), 'scipy.sparse.linalg.eigs', 'sparse.linalg.eigs', (['L'], {'k': 'k', 'which': '"""SM"""'}), "(L, k=k, which='SM')\n", (73314, 73334), True, 'import scipy.sparse as sparse\n'), ((73399, 73424), 'numpy.sum', 'np.sum', (['(vec * vec)'], {'axis': '(1)'}), '(vec * vec, axis=1)\n', (73405, 73424), True, 'import numpy as np\n'), ((73437, 73479), 'scipy.sparse.spdiags', 'sparse.spdiags', (['(norms ** (-1 / 2))', '(0)', 'n', 'n'], {}), '(norms ** (-1 / 2), 0, n, n)\n', (73451, 73479), True, 'import scipy.sparse as sparse\n'), ((88187, 88279), 
'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(N / num_of_classes)', 'acc'], {'fmt': 'styles[i]', 'yerr': 'stddev', 'label': 'legend_list[i]'}), '(N / num_of_classes, acc, fmt=styles[i], yerr=stddev, label=\n legend_list[i])\n', (88199, 88279), True, 'import matplotlib.pyplot as plt\n'), ((89660, 89675), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (89669, 89675), True, 'import numpy as np\n'), ((97627, 97652), 'os.path.exists', 'os.path.exists', (['"""Results"""'], {}), "('Results')\n", (97641, 97652), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((97670, 97692), 'os.makedirs', 'os.makedirs', (['"""Results"""'], {}), "('Results')\n", (97681, 97692), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((32753, 32769), 'numpy.absolute', 'np.absolute', (['res'], {}), '(res)\n', (32764, 32769), True, 'import numpy as np\n'), ((33076, 33092), 'numpy.absolute', 'np.absolute', (['res'], {}), '(res)\n', (33087, 33092), True, 'import numpy as np\n'), ((33632, 33642), 'sys.exit', 'sys.exit', ([], {}), '()\n', (33640, 33642), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((35066, 35085), 'numpy.sum', 'np.sum', (['(L == L_true)'], {}), '(L == L_true)\n', (35072, 35085), True, 'import numpy as np\n'), ((42465, 42480), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (42477, 42480), True, 'import numpy as np\n'), ((42483, 42501), 'numpy.mean', 'np.mean', (['u'], {'axis': '(1)'}), '(u, axis=1)\n', (42490, 42501), True, 'import numpy as np\n'), ((46059, 46071), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (46068, 46071), True, 'import numpy as np\n'), ((46971, 46986), 'numpy.transpose', 'np.transpose', (['e'], {}), '(e)\n', (46983, 46986), True, 'import numpy as np\n'), ((46994, 47009), 'numpy.transpose', 'np.transpose', (['e'], {}), '(e)\n', (47006, 47009), True, 'import numpy as np\n'), ((54471, 54483), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (54480, 54483), True, 'import numpy as np\n'), ((57307, 57319), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (57316, 57319), True, 'import numpy as np\n'), ((76224, 76234), 'sys.exit', 'sys.exit', ([], {}), '()\n', (76232, 76234), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((77350, 77362), 'numpy.unique', 'np.unique', (['g'], {}), '(g)\n', (77359, 77362), True, 'import numpy as np\n'), ((88340, 88408), 'matplotlib.pyplot.loglog', 'plt.loglog', (['(N / num_of_classes)', 'acc', 'styles[i]'], {'label': 'legend_list[i]'}), '(N / num_of_classes, acc, styles[i], label=legend_list[i])\n', (88350, 88408), True, 'import matplotlib.pyplot as plt\n'), ((88451, 88517), 'matplotlib.pyplot.plot', 'plt.plot', (['(N / num_of_classes)', 'acc', 'styles[i]'], {'label': 'legend_list[i]'}), '(N / num_of_classes, acc, styles[i], label=legend_list[i])\n', (88459, 88517), True, 'import matplotlib.pyplot as plt\n'), ((99259, 99277), 'joblib.delayed', 'delayed', (['one_trial'], {}), '(one_trial)\n', (99266, 99277), False, 'from joblib import Parallel, delayed\n'), ((7818, 7847), 'numpy.round', 'np.round', (['(i * multiplier[ind])'], {}), '(i * multiplier[ind])\n', (7826, 7847), True, 'import numpy as np\n'), ((10550, 10560), 'numpy.sign', 'np.sign', (['w'], {}), '(w)\n', (10557, 10560), True, 'import numpy as np\n'), ((15864, 15881), 'numpy.max', 'np.max', (['W'], {'axis': '(1)'}), '(W, axis=1)\n', (15870, 15881), True, 'import numpy as np\n'), ((45792, 45807), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (45804, 45807), True, 
'import numpy as np\n'), ((45822, 45837), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (45834, 45837), True, 'import numpy as np\n'), ((49655, 49665), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (49662, 49665), True, 'import numpy as np\n'), ((53604, 53623), 'torch.from_numpy', 'torch.from_numpy', (['u'], {}), '(u)\n', (53620, 53623), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((53653, 53673), 'torch.from_numpy', 'torch.from_numpy', (['Db'], {}), '(Db)\n', (53669, 53673), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((53763, 53784), 'numpy.absolute', 'np.absolute', (['(v - vinf)'], {}), '(v - vinf)\n', (53774, 53784), True, 'import numpy as np\n'), ((54145, 54166), 'numpy.absolute', 'np.absolute', (['(v - vinf)'], {}), '(v - vinf)\n', (54156, 54166), True, 'import numpy as np\n'), ((56271, 56290), 'torch.from_numpy', 'torch.from_numpy', (['u'], {}), '(u)\n', (56287, 56290), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((56319, 56338), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (56335, 56338), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((98761, 98772), 'time.time', 'time.time', ([], {}), '()\n', (98770, 98772), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((41793, 41809), 'numpy.transpose', 'np.transpose', (['Db'], {}), '(Db)\n', (41805, 41809), True, 'import numpy as np\n'), ((51189, 51205), 'numpy.transpose', 'np.transpose', (['Db'], {}), '(Db)\n', (51201, 51205), True, 'import numpy as np\n'), ((74703, 74712), 'numpy.sum', 'np.sum', (['I'], {}), '(I)\n', (74709, 74712), True, 'import numpy as np\n'), ((78103, 78121), 'numpy.ones_like', 'np.ones_like', (['beta'], {}), '(beta)\n', (78115, 78121), True, 'import numpy as np\n'), ((7717, 7756), 'numpy.random.choice', 'random.choice', (['K'], {'size': 'i', 'replace': '(False)'}), '(K, size=i, replace=False)\n', (7730, 7756), True, 'import numpy.random as random\n'), ((7877, 7918), 'numpy.random.choice', 'random.choice', (['K'], {'size': 'sze', 'replace': '(False)'}), '(K, size=sze, replace=False)\n', (7890, 7918), True, 'import numpy.random as random\n'), ((41954, 41969), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (41966, 41969), True, 'import numpy as np\n'), ((51379, 51394), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (51391, 51394), True, 'import numpy as np\n'), ((80937, 80947), 'sys.exit', 'sys.exit', ([], {}), '()\n', (80945, 80947), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n'), ((81329, 81339), 'sys.exit', 'sys.exit', ([], {}), '()\n', (81337, 81339), False, 'import sys, getopt, time, csv, torch, os, multiprocessing\n')]
|
import datetime
import numpy as np
import garminmanager.RawDataC
import garminmanager.utils.JsonEncDecC
import garminmanager.utils.FileWriterC
from garminmanager.enumerators.EnumHealthTypeC import EnumHealtTypeC
def test_encode_decode():
raw_data = garminmanager.RawDataC.RawDataC()
my_dates1 = {
datetime.datetime(2019,4,11,1,00) : 100,
datetime.datetime(2019,4,11,2,00) : np.nan,
datetime.datetime(2019,4,11,3,00) : 100
}
for key, value in my_dates1.items():
raw_data.add_x(key)
raw_data.add_y(value)
raw_data.set_data_type(EnumHealtTypeC.heartrate)
json_enc_dec = garminmanager.utils.JsonEncDecC.JsonEncDecC()
json_enc_dec.set_input_data(raw_data)
json_string = json_enc_dec.encode()
json_enc_dec.set_input_json(json_string)
file_writer = garminmanager.utils.FileWriterC.FileWriterC()
file_writer.set_filename('test.json')
file_writer.set_text(json_string)
file_writer.write_text_to_file()
d = file_writer.read_json()
json_enc_dec.set_input_json(d)
json_enc_dec.decode()
raw_data_output = json_enc_dec.get_data()
x = raw_data_output.get_x()
y = raw_data_output.get_y()
org_x = raw_data.get_x()
org_y = raw_data.get_y()
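    # NaN never compares equal to NaN, so replace NaNs with a sentinel value before comparing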
y[np.isnan(y)] = -100
org_y[np.isnan(org_y)] = -100
y[np.isnan(y)] = -100
org_y[np.isnan(org_y)] = -100
raw_data.set_y(y)
raw_data_output.set_y(org_y)
assert raw_data == raw_data_output
|
[
"numpy.isnan",
"datetime.datetime"
] |
[((289, 325), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(11)', '(1)', '(0)'], {}), '(2019, 4, 11, 1, 0)\n', (306, 325), False, 'import datetime\n'), ((338, 374), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(11)', '(2)', '(0)'], {}), '(2019, 4, 11, 2, 0)\n', (355, 374), False, 'import datetime\n'), ((390, 426), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(11)', '(3)', '(0)'], {}), '(2019, 4, 11, 3, 0)\n', (407, 426), False, 'import datetime\n'), ((1240, 1251), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1248, 1251), True, 'import numpy as np\n'), ((1270, 1285), 'numpy.isnan', 'np.isnan', (['org_y'], {}), '(org_y)\n', (1278, 1285), True, 'import numpy as np\n'), ((1301, 1312), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1309, 1312), True, 'import numpy as np\n'), ((1331, 1346), 'numpy.isnan', 'np.isnan', (['org_y'], {}), '(org_y)\n', (1339, 1346), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
@date: 2020/2/29 下午7:31
@file: util.py
@author: zj
@description:
"""
import numpy as np
import torch
import sys
def error(msg):
print(msg)
sys.exit(0)
def get_device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def iou(pred_box, target_box):
"""
    Compute the IoU between a candidate proposal and the annotated bounding boxes
    :param pred_box: size [4]
    :param target_box: size [N, 4]
:return: [N]
"""
if len(target_box.shape) == 1:
target_box = target_box[np.newaxis, :]
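    # Corners of the intersection rectangle, broadcast against all target boxes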
xA = np.maximum(pred_box[0], target_box[:, 0])
yA = np.maximum(pred_box[1], target_box[:, 1])
xB = np.minimum(pred_box[2], target_box[:, 2])
yB = np.minimum(pred_box[3], target_box[:, 3])
    # Intersection area
intersection = np.maximum(0.0, xB - xA + 1) * np.maximum(0.0, yB - yA + 1)
    # Areas of the two bounding boxes
boxAArea = (pred_box[2] - pred_box[0] + 1) * (pred_box[3] - pred_box[1] + 1)
boxBArea = (target_box[:, 2] - target_box[:, 0] + 1) * (target_box[:, 3] - target_box[:, 1] + 1)
scores = intersection / (boxAArea + boxBArea - intersection)
return scores
def compute_ious(rects, bndboxs):
iou_list = list()
for rect in rects:
scores = iou(rect, bndboxs)
iou_list.append(max(scores))
return iou_list
def parse_output(outputs, S, B, C):
"""
    For each grid cell, keep the detection bounding box with the highest confidence
:param outputs: (N, S*S, B*5+C)
:return: cates, probs, bboxs
cates: (N, S*S)
probs: (N, S*S)
bboxs: (N, S*S, 4)
"""
N = outputs.shape[0]
# (N*S*S, C)
probs = outputs[:, :, :C].reshape(-1, C)
# (N*S*S, B)
confidences = outputs[:, :, C:(C + B)].reshape(-1, B)
# (N*S*S, 4*B)
bboxs = outputs[:, :, (C + B):].reshape(-1, 4 * B)
    # Predicted class for each grid cell (N*S*S)
    cates = torch.argmax(probs, dim=1)
    # Index of the highest-confidence box for each grid cell (N*S*S)
    idxs = torch.argmax(confidences, dim=1)
    # Classification probability for each grid cell (N*S*S)
    cate_probs = probs[range(len(cates)), cates] * confidences[range(len(idxs)), idxs]
    # Coordinates of the selected box for each grid cell (N*S*S, 4):
    # gather the four columns belonging to the chosen box index
    box_cols = idxs.unsqueeze(1) * 4 + torch.arange(4, device=idxs.device)
    obj_boxs = torch.gather(bboxs, 1, box_cols)
    return cates.reshape(N, S * S), cate_probs.reshape(N, S * S), obj_boxs.reshape(N, S * S, 4)
def bbox_corner_to_center(bboxs):
"""
[xmin, ymin, xmax, ymax] -> [x_center, y_center, w, h]
:param bboxs: [N, 4]
"""
assert len(bboxs.shape) == 2
tmp = np.zeros(bboxs.shape)
# w
tmp[:, 2] = bboxs[:, 2] - bboxs[:, 0] + 1
# h
tmp[:, 3] = bboxs[:, 3] - bboxs[:, 1] + 1
# x_center
tmp[:, 0] = bboxs[:, 0] + tmp[:, 2] / 2
# y_center
tmp[:, 1] = bboxs[:, 1] + tmp[:, 3] / 2
return tmp
def bbox_center_to_corner(bboxs):
"""
[x_center, y_center, w, h] -> [xmin, ymin, xmax, ymax]
:param bboxs: [N, 4]
"""
assert len(bboxs.shape) == 2
tmp = np.zeros(bboxs.shape)
# xmin
tmp[:, 0] = bboxs[:, 0] - bboxs[:, 2] / 2
# ymin
tmp[:, 1] = bboxs[:, 1] - bboxs[:, 3] / 2
# xmax
tmp[:, 2] = bboxs[:, 0] + bboxs[:, 2] / 2
# ymax
tmp[:, 3] = bboxs[:, 1] + bboxs[:, 3] / 2
return tmp
def deform_bboxs(pred_bboxs, data_dict, S):
"""
:param pred_bboxs: [S*S, 4]
:return:
"""
scale_h, scale_w = data_dict['scale_size']
grid_w = scale_w / S
grid_h = scale_h / S
bboxs = np.zeros(pred_bboxs.shape)
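    # Convert each cell's grid-relative (x_center, y_center, w, h) prediction to absolute coordinates on the scaled image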
for i in range(S * S):
row = int(i / S)
col = int(i % S)
x_center, y_center, box_w, box_h = pred_bboxs[i]
bboxs[i, 0] = (col + x_center) * grid_w
bboxs[i, 1] = (row + y_center) * grid_h
bboxs[i, 2] = box_w * scale_w
bboxs[i, 3] = box_h * scale_h
# (x_center, y_center, w, h) -> (xmin, ymin, xmax, ymax)
bboxs = bbox_center_to_corner(bboxs)
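    # Divide out the resize ratio to map the boxes back onto the original image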
ratio_h, ratio_w = data_dict['ratio']
bboxs[:, 0] /= ratio_w
bboxs[:, 1] /= ratio_h
bboxs[:, 2] /= ratio_w
bboxs[:, 3] /= ratio_h
    # Clamp boxes to the bounds of the original image
h, w = data_dict['src_size']
bboxs[:, 0] = np.maximum(bboxs[:, 0], 0)
bboxs[:, 1] = np.maximum(bboxs[:, 1], 0)
bboxs[:, 2] = np.minimum(bboxs[:, 2], w)
bboxs[:, 3] = np.minimum(bboxs[:, 3], h)
return bboxs.astype(int)
def nms(rect_list, score_list, cate_list, thresh=0.3):
"""
    Non-maximum suppression
    :param rect_list: list of size [N, 4]
    :param score_list: list of size [N]
    :param cate_list: list of size [N]
"""
nms_rects = list()
nms_scores = list()
nms_cates = list()
rect_array = np.array(rect_list)
score_array = np.array(score_list)
cate_array = np.array(cate_list)
    # A single pass of sorting up front is sufficient
    # Sort by classification probability in descending order
idxs = np.argsort(score_array)[::-1]
rect_array = rect_array[idxs]
score_array = score_array[idxs]
cate_array = cate_array[idxs]
while len(score_array) > 0:
        # Append the box with the highest classification probability
nms_rects.append(rect_array[0])
nms_scores.append(score_array[0])
nms_cates.append(cate_array[0])
rect_array = rect_array[1:]
score_array = score_array[1:]
cate_array = cate_array[1:]
length = len(score_array)
if length <= 0:
break
        # Compute IoU with the remaining boxes
iou_scores = iou(np.array(nms_rects[len(nms_rects) - 1]), rect_array)
# print(iou_scores)
        # Remove boxes whose overlap is greater than or equal to thresh
idxs = np.where(iou_scores < thresh)[0]
rect_array = rect_array[idxs]
score_array = score_array[idxs]
cate_array = cate_array[idxs]
return nms_rects, nms_scores, nms_cates
|
[
"numpy.minimum",
"numpy.maximum",
"torch.argmax",
"numpy.zeros",
"numpy.argsort",
"numpy.where",
"numpy.array",
"torch.cuda.is_available",
"sys.exit"
] |
[((180, 191), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (188, 191), False, 'import sys\n'), ((526, 567), 'numpy.maximum', 'np.maximum', (['pred_box[0]', 'target_box[:, 0]'], {}), '(pred_box[0], target_box[:, 0])\n', (536, 567), True, 'import numpy as np\n'), ((577, 618), 'numpy.maximum', 'np.maximum', (['pred_box[1]', 'target_box[:, 1]'], {}), '(pred_box[1], target_box[:, 1])\n', (587, 618), True, 'import numpy as np\n'), ((628, 669), 'numpy.minimum', 'np.minimum', (['pred_box[2]', 'target_box[:, 2]'], {}), '(pred_box[2], target_box[:, 2])\n', (638, 669), True, 'import numpy as np\n'), ((679, 720), 'numpy.minimum', 'np.minimum', (['pred_box[3]', 'target_box[:, 3]'], {}), '(pred_box[3], target_box[:, 3])\n', (689, 720), True, 'import numpy as np\n'), ((1752, 1778), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (1764, 1778), False, 'import torch\n'), ((1816, 1848), 'torch.argmax', 'torch.argmax', (['confidences'], {'dim': '(1)'}), '(confidences, dim=1)\n', (1828, 1848), False, 'import torch\n'), ((2310, 2331), 'numpy.zeros', 'np.zeros', (['bboxs.shape'], {}), '(bboxs.shape)\n', (2318, 2331), True, 'import numpy as np\n'), ((2754, 2775), 'numpy.zeros', 'np.zeros', (['bboxs.shape'], {}), '(bboxs.shape)\n', (2762, 2775), True, 'import numpy as np\n'), ((3238, 3264), 'numpy.zeros', 'np.zeros', (['pred_bboxs.shape'], {}), '(pred_bboxs.shape)\n', (3246, 3264), True, 'import numpy as np\n'), ((3889, 3915), 'numpy.maximum', 'np.maximum', (['bboxs[:, 0]', '(0)'], {}), '(bboxs[:, 0], 0)\n', (3899, 3915), True, 'import numpy as np\n'), ((3934, 3960), 'numpy.maximum', 'np.maximum', (['bboxs[:, 1]', '(0)'], {}), '(bboxs[:, 1], 0)\n', (3944, 3960), True, 'import numpy as np\n'), ((3979, 4005), 'numpy.minimum', 'np.minimum', (['bboxs[:, 2]', 'w'], {}), '(bboxs[:, 2], w)\n', (3989, 4005), True, 'import numpy as np\n'), ((4024, 4050), 'numpy.minimum', 'np.minimum', (['bboxs[:, 3]', 'h'], {}), '(bboxs[:, 3], h)\n', (4034, 4050), True, 'import numpy as np\n'), ((4359, 4378), 'numpy.array', 'np.array', (['rect_list'], {}), '(rect_list)\n', (4367, 4378), True, 'import numpy as np\n'), ((4397, 4417), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (4405, 4417), True, 'import numpy as np\n'), ((4435, 4454), 'numpy.array', 'np.array', (['cate_list'], {}), '(cate_list)\n', (4443, 4454), True, 'import numpy as np\n'), ((753, 781), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xB - xA + 1)'], {}), '(0.0, xB - xA + 1)\n', (763, 781), True, 'import numpy as np\n'), ((784, 812), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yB - yA + 1)'], {}), '(0.0, yB - yA + 1)\n', (794, 812), True, 'import numpy as np\n'), ((4499, 4522), 'numpy.argsort', 'np.argsort', (['score_array'], {}), '(score_array)\n', (4509, 4522), True, 'import numpy as np\n'), ((248, 273), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (271, 273), False, 'import torch\n'), ((5166, 5195), 'numpy.where', 'np.where', (['(iou_scores < thresh)'], {}), '(iou_scores < thresh)\n', (5174, 5195), True, 'import numpy as np\n')]
|
'''
Module to preprocess filckr8k image data
'''
import cv2
import numpy as np
import os
from _pickle import dump, load
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Flatten
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.models import Model
from PIL import Image
def load_images_as_arrays(directory):
img_array_dict = {}
for img_file in os.listdir(directory):
img_path = directory + '/' + img_file
img = Image.open(img_path)
x = np.array(img)
img_array_dict[os.path.splitext(img_file)[0]] = x
return img_array_dict
def extract_features(directory):
# base_model = InceptionV3(weights='imagenet')
# model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
#model = load_model('./preprocessing/CNN_encoder_100epoch.h5')
#top = Flatten()(model.output)
#model = Model(inputs=model.input, outputs=top)
#print(model.summary())
img_id = []
img_matrices = []
i = 0
for img_file in os.listdir(directory):
print(i, ":", i > 1999 and i < 8000 or i > 8999)
'''if (i > 1999 and i < 8000 or i > 8999):
i += 1
continue'''
img_path = directory + '/' + img_file
resizeDim = (256, 512)
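        # Note: cv2.resize expects dsize as (width, height)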
img = cv2.imread(img_path)
img = cv2.resize(img, resizeDim, interpolation=cv2.INTER_AREA)
img = img.astype('float16') / 255
#x = img.reshape(img.shape + (1,))
img_id.append(os.path.splitext(img_file)[0])
img_matrices.append(img)
i += 1
img_matrices = np.array(img_matrices)
#img_features = model.predict(img_matrices, verbose=1)
return {'ids': img_id, 'features': img_matrices}
def extract_feature_from_image(file_dir):
img = image.load_img(file_dir, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# base_model = InceptionV3(weights='imagenet')
# model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
model = load_model('CNN_encoder_100epoch.h5')
return model.predict(x)
def load_features(dict_dir, dataset_dir, repeat_times=1):
assert (repeat_times >= 1)
img_ids = []
with open(dataset_dir, 'r') as f:
for line in f.readlines():
img_ids.append(os.path.splitext(line)[0])
features_dict = load(open(dict_dir, 'rb'))
#features_dict = extract_features('./datasets/Flickr8k_Dataset')
dataset_features = []
for img_id in img_ids:
fidx = features_dict['ids'].index(img_id)
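        # Stack repeat_times copies of this image's feature vector (presumably one per caption)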
dataset_features.append(np.vstack([features_dict['features'][fidx, :]] * repeat_times))
#dataset_features = np.vstack(dataset_features)
return np.array(dataset_features)
if __name__ == "__main__":
# pre-extract image features from Inception Net
image_directory = './datasets/Flickr8k_Dataset'
features_dict = extract_features(image_directory)
dump(features_dict, open('./datasets/features_dict2.pkl', 'wb'),protocol=4)
|
[
"keras.models.load_model",
"numpy.expand_dims",
"PIL.Image.open",
"keras.applications.inception_v3.preprocess_input",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"cv2.imread",
"numpy.array",
"os.path.splitext",
"numpy.vstack",
"os.listdir",
"cv2.resize"
] |
[((483, 504), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (493, 504), False, 'import os\n'), ((1130, 1151), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1140, 1151), False, 'import os\n'), ((1693, 1715), 'numpy.array', 'np.array', (['img_matrices'], {}), '(img_matrices)\n', (1701, 1715), True, 'import numpy as np\n'), ((1884, 1932), 'keras.preprocessing.image.load_img', 'image.load_img', (['file_dir'], {'target_size': '(299, 299)'}), '(file_dir, target_size=(299, 299))\n', (1898, 1932), False, 'from keras.preprocessing import image\n'), ((1941, 1964), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1959, 1964), False, 'from keras.preprocessing import image\n'), ((1973, 1998), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1987, 1998), True, 'import numpy as np\n'), ((2007, 2026), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (2023, 2026), False, 'from keras.applications.inception_v3 import preprocess_input\n'), ((2185, 2222), 'keras.models.load_model', 'load_model', (['"""CNN_encoder_100epoch.h5"""'], {}), "('CNN_encoder_100epoch.h5')\n", (2195, 2222), False, 'from keras.models import load_model\n'), ((2869, 2895), 'numpy.array', 'np.array', (['dataset_features'], {}), '(dataset_features)\n', (2877, 2895), True, 'import numpy as np\n'), ((567, 587), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (577, 587), False, 'from PIL import Image\n'), ((600, 613), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (608, 613), True, 'import numpy as np\n'), ((1395, 1415), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1405, 1415), False, 'import cv2\n'), ((1430, 1486), 'cv2.resize', 'cv2.resize', (['img', 'resizeDim'], {'interpolation': 'cv2.INTER_AREA'}), '(img, resizeDim, interpolation=cv2.INTER_AREA)\n', (1440, 1486), False, 'import cv2\n'), ((2740, 2802), 'numpy.vstack', 'np.vstack', (["([features_dict['features'][fidx, :]] * repeat_times)"], {}), "([features_dict['features'][fidx, :]] * repeat_times)\n", (2749, 2802), True, 'import numpy as np\n'), ((638, 664), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (654, 664), False, 'import os\n'), ((1594, 1620), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (1610, 1620), False, 'import os\n'), ((2461, 2483), 'os.path.splitext', 'os.path.splitext', (['line'], {}), '(line)\n', (2477, 2483), False, 'import os\n')]
|
# Omid55
# Test module for network_utils.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import networkx as nx
import pandas as pd
import numpy as np
import unittest
import datetime
import re
from parameterized import parameterized
import utils
import network_utils
class MyTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.triad_map, cls.triad_list = (
network_utils.generate_all_possible_sparse_triads())
@classmethod
def tearDownClass(cls):
del cls.triad_map
del cls.triad_list
# =========================================================================
# ==================== extract_graph ======================================
# =========================================================================
@parameterized.expand([
["latest_multiple_edge_weight", False],
["sum_of_multiple_edge_weights", True]])
def test_extract_graph(self, name, sum_multiple_edge):
matrix_edges = [
[1, 2, +1, datetime.datetime(2017, 1, 1)],
[1, 2, +5, datetime.datetime(2017, 1, 2)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[2, 3, -2, datetime.datetime(2017, 1, 6)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 22)],
[4, 3, -5, datetime.datetime(2017, 2, 24)]]
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
expected = nx.DiGraph()
expected.add_nodes_from([1, 2, 3, 4])
if sum_multiple_edge:
expected.add_edge(1, 2, weight=6)
else:
expected.add_edge(1, 2, weight=5)
if sum_multiple_edge:
expected.add_edge(2, 3, weight=1)
else:
expected.add_edge(2, 3, weight=-2)
expected.add_edge(3, 1, weight=1)
expected.add_edge(1, 4, weight=-1)
if sum_multiple_edge:
expected.add_edge(4, 3, weight=-10)
else:
expected.add_edge(4, 3, weight=-5)
computed = network_utils.extract_graph(
sample_edge_list, sum_multiple_edge=sum_multiple_edge)
self.assertTrue(
utils.graph_equals(
expected,
computed,
weight_column_name='weight'))
# =========================================================================
# ==================== extract_graphs =====================================
# =========================================================================
def test_extract_graphs_raises_with_missing_columns(self):
sample_edge_list = pd.DataFrame({'source': [1, 2], 'target': [5, 6]})
with self.assertRaises(ValueError):
network_utils.extract_graphs(edge_list=sample_edge_list)
@parameterized.expand(
[["seperated graphs", False],
["accumulative graphs", True]])
def test_extract_graphs(self, name, accumulative):
# source, target, weight, edge_date
matrix_edges = [[1, 2, +1, datetime.datetime(2017, 1, 1)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 24)],
[-1, -1, -1, datetime.datetime(2017, 2, 28)]]
# The last one is going to be ignored because fall into another period
# which is neglected.
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
g1 = nx.DiGraph()
g1.add_nodes_from([1, 2, 3])
g1.add_edge(1, 2, weight=1)
g1.add_edge(2, 3, weight=3)
g2 = nx.DiGraph()
g2.add_nodes_from([1, 3, 4])
g2.add_edge(3, 1, weight=1)
g2.add_edge(1, 4, weight=-1)
g2.add_edge(4, 3, weight=-5)
g3 = nx.DiGraph()
g3.add_nodes_from([1, 2, 3, 4])
g3.add_edge(1, 2, weight=1)
g3.add_edge(2, 3, weight=3)
g3.add_edge(3, 1, weight=1)
g3.add_edge(1, 4, weight=-1)
g3.add_edge(4, 3, weight=-5)
if not accumulative:
expected = [g1, g2]
else:
expected = [g1, g3]
computed = network_utils.extract_graphs(
edge_list=sample_edge_list, weeks=4, accumulative=accumulative)
for expected_graph, computed_graph in zip(expected, computed):
self.assertTrue(
utils.graph_equals(
expected_graph,
computed_graph,
weight_column_name='weight'))
# =========================================================================
# ====================== get_all_degrees ==================================
# =========================================================================
def test_get_all_degrees(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 1, weight=6)
dg.add_edge(1, 2, weight=1)
dg.add_edge(1, 4, weight=-5)
dg.add_edge(2, 2, weight=-1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-4)
dg.add_edge(3, 2, weight=4)
dg.add_edge(4, 4, weight=-10)
computed = network_utils.get_all_degrees(dg)
expected = (
{1: {'self': 6, 'out': -4, 'in': -4},
2: {'self': -1, 'out': 1, 'in': 5},
3: {'self': 0, 'out': 0, 'in': 1},
4: {'self': -10, 'out': 0, 'in': -5},
5: {'self': 0, 'out': 0, 'in': 0}})
self.assertDictEqual(computed, expected)
# =========================================================================
# ===================== get_just_periods ==================================
# =========================================================================
def test_get_just_periods(self):
matrix_edges = [[1, 2, +1, datetime.datetime(2017, 1, 1)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 24)],
[-1, -1, -1, datetime.datetime(2017, 2, 28)]]
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
expected = [['2017-01-01', '2017-01-29'], ['2017-01-29', '2017-02-26']]
computed = network_utils.get_just_periods(
sample_edge_list, weeks=4, accumulative=False)
self.assertEqual(expected, computed)
# =========================================================================
# ==================== get_metrics_for_network ============================
# =========================================================================
def test_get_metrics_for_network(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(1, 3, weight=-1)
computed = network_utils.get_metrics_for_network(dg)
expected = {
'#edges': 4,
'#edges/#nodes': 1,
'#gcc edges': 3,
'#gcc neg edges': 1,
'#gcc nodes': 3,
'#gcc pos edges': 2,
'#neg edges': 2,
'#nodes': 4,
'#pos edges': 2,
'algebraic connectivity': 0,
'average (und) clustering coefficient': 0.75,
'average betweenness': 0.0833,
'average closeness': 0.3888,
'average degree': 2,
'average eigenvector': 0.4222,
'average harmonic': 1.25,
'average in degree': 1,
'average w in degree': 0,
'average w out degree': 0,
'average load': 0.0833,
'average out degree': 1,
'gcc algebraic connectivity': 2.9999,
'gcc diameter': 1,
'unbalanced cycles 3 ratio': 1,
'weights max': 1,
'weights average': 0,
'weights min': -1,
'weights std': 1
}
# utils.print_dict_pretty(computed)
# self.assertDictEqual(computed, expected)
for key, value in expected.items():
self.assertAlmostEqual(value, computed[key], places=3)
# =========================================================================
# ====================== cartwright_harary_balance_ratio ==================
# =========================================================================
def test_cartwright_harary_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 0)
def test_cartwright_harary_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 0)
def test_cartwright_harary_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 1)
def test_cartwright_harary_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
self.assertEqual(
network_utils.cartwright_harary_balance_ratio(dg), 0.5)
# =========================================================================
# ========================= sprase_balance_ratio ==========================
# =========================================================================
def test_sparse_balance_ratio_raises_when_incorrect_balance_type(self):
with self.assertRaises(ValueError):
network_utils.sprase_balance_ratio(
dgraph=nx.DiGraph(),
balance_type=0)
@parameterized.expand([
['CartwrightHarary', 1, [0.3, 3, 7]],
['Clustering', 2, [0.5, 5, 5]],
['Transitivity', 3, [0.9, 9, 1]]])
def test_sprase_balance_ratio(
self,
name,
balance_type,
expected_values):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=5)
dg.add_edge(2, 3, weight=-4)
dg.add_edge(3, 1, weight=-7)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-2)
dg.add_edge(1, 5, weight=9)
dg.add_edge(5, 1, weight=-11)
dg.add_edge(2, 1, weight=100)
computed = network_utils.sprase_balance_ratio(
dgraph=dg,
balance_type=balance_type)
np.testing.assert_array_almost_equal(
computed, expected_values, decimal=2)
# =========================================================================
# ======================= fullyconnected_balance_ratio ====================
# =========================================================================
def test_fullyconnected_balance_ratio_raises_when_incorrect_balance_type(
self):
with self.assertRaises(ValueError):
network_utils.fullyconnected_balance_ratio(
dgraph=nx.DiGraph(),
balance_type=0)
def test_fullyconnected_balance_ratio_raises_when_negative_in_dgraph(self):
with self.assertRaises(ValueError):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=-1)
network_utils.fullyconnected_balance_ratio(
dgraph=dg,
balance_type=1)
@parameterized.expand([
['Classical', 1, [0.4, 4, 6]],
['Clustering', 2, [0.7, 7, 3]],
['Transitivity', 3, [0.8, 8, 2]]])
def test_fullyconnected_balance_ratio(
self,
name,
balance_type,
expected_values):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(5, 1, weight=1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 2, weight=1)
dg.add_edge(2, 5, weight=1)
dg.add_edge(5, 3, weight=1)
dg.add_edge(2, 3, weight=1)
computed = network_utils.fullyconnected_balance_ratio(
dgraph=dg,
balance_type=balance_type)
np.testing.assert_array_almost_equal(
computed, expected_values, decimal=2)
# =========================================================================
# ====================== count_different_signed_edges =====================
# =========================================================================
def test_count_different_signed_edges(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 1, weight=-5)
dg.add_edge(1, 3, weight=-2)
self.assertEqual(network_utils.count_different_signed_edges(dg), 0)
def test_count_different_signed_edges1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=3)
dg.add_edge(2, 1, weight=4)
dg.add_edge(3, 1, weight=1)
dg.add_edge(1, 3, weight=-1)
self.assertEqual(network_utils.count_different_signed_edges(dg), 1)
def test_count_different_signed_edges2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 1, weight=9)
dg.add_edge(1, 3, weight=-2)
self.assertEqual(network_utils.count_different_signed_edges(dg), 2)
# =========================================================================
# ==================== terzi_sprase_balance_ratio =========================
# =========================================================================
def test_terzi_sprase_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 1
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
expected = 0.5
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
# =========================================================================
# ================= kunegis_sprase_balance_ratio ==========================
# =========================================================================
def test_kunegis_sprase_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 1
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
expected = 0.6
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected, decimal=1)
# =========================================================================
# ====================== compute_vanderijt_edge_balance ===================
# =========================================================================
def test_compute_vanderijt_edge_balance_small_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=-5)
dg.add_edge(3, 1, weight=-2)
expected = {(2, 1): {'#nodes3': 1, '#balanced_node3': 1}}
computed = network_utils.compute_vanderijt_edge_balance(dg)
self.assertDictEqual(computed, expected)
def test_compute_vanderijt_edge_balance_allnegative_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(1, 3, weight=-1)
dg.add_edge(2, 4, weight=-1)
dg.add_edge(4, 2, weight=-1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 2, weight=1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(4, 1, weight=-5)
dg.add_edge(1, 4, weight=-5)
dg.add_edge(4, 3, weight=-2)
dg.add_edge(3, 4, weight=1)
expected = {
(1, 2): {'#balanced_node3': 1, '#nodes3': 2},
(3, 2): {'#balanced_node3': 1, '#nodes3': 2},
(1, 3): {'#balanced_node3': 0, '#nodes3': 2},
(3, 4): {'#balanced_node3': 1, '#nodes3': 2},
(3, 1): {'#balanced_node3': 1, '#nodes3': 2},
(1, 4): {'#balanced_node3': 1, '#nodes3': 2},
(2, 3): {'#balanced_node3': 1, '#nodes3': 2},
(2, 1): {'#balanced_node3': 2, '#nodes3': 2},
(4, 3): {'#balanced_node3': 0, '#nodes3': 2},
(4, 2): {'#balanced_node3': 1, '#nodes3': 2},
(4, 1): {'#balanced_node3': 1, '#nodes3': 2},
(2, 4): {'#balanced_node3': 2, '#nodes3': 2}}
computed = network_utils.compute_vanderijt_edge_balance(dg)
self.assertDictEqual(computed, expected)
# @parameterized.expand(
# [["no_isomorph_cycles", False], ["no_isomorph_cycles", True]])
# def test_compute_vanderijt_edge_balance_small_graph(
# self, name, no_isomorph_cycles):
# dg = nx.DiGraph()
# dg.add_nodes_from([1, 2, 3])
# dg.add_edge(1, 2, weight=1)
# dg.add_edge(2, 1, weight=1)
# dg.add_edge(2, 3, weight=-5)
# dg.add_edge(3, 1, weight=-2)
# if no_isomorph_cycles:
# expected = {
# (1, 2): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 9,
# 'as_expected_sign': True}}
# else:
# expected = {
# (1, 2): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 9,
# 'as_expected_sign': True},
# (3, 1): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 3,
# 'as_expected_sign': True},
# (2, 3): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 3,
# 'as_expected_sign': True}}
# computed = network_utils.compute_vanderijt_edge_balance(
# dg, no_isomorph_cycles=no_isomorph_cycles)
# self.assertDictEqual(computed, expected)
# @parameterized.expand(
# [["no_isomorph_cycles", False],
# ["no_isomorph_cycles", True]])
# def test_compute_vanderijt_edge_balance_allnegative_graph(
# self, name, no_isomorph_cycles):
# dg = nx.DiGraph()
# dg.add_nodes_from([1, 2, 3, 4])
# dg.add_edge(1, 2, weight=-1)
# dg.add_edge(2, 3, weight=-1)
# dg.add_edge(3, 1, weight=-1)
# dg.add_edge(1, 4, weight=-5)
# dg.add_edge(4, 3, weight=-2)
# if no_isomorph_cycles:
# expected = {
# (1, 2): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (1, 4): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False}}
# else:
# expected = {
# (1, 2): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (1, 4): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False},
# (2, 3): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (3, 1): {
# '#balanced': 0,
# '#cycle3': 2,
# 'weight_distance': 13,
# 'as_expected_sign': False},
# (4, 3): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False}}
# computed = network_utils.compute_vanderijt_edge_balance(
# dg, no_isomorph_cycles=no_isomorph_cycles)
# self.assertDictEqual(computed, expected)
# =========================================================================
# ====================== compute_fairness_goodness ========================
# =========================================================================
def test_compute_fairness_goodness(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1.0)
dg.add_edge(2, 3, weight=1.0)
dg.add_edge(3, 1, weight=1.0)
dg.add_edge(1, 4, weight=2.0)
dg.add_edge(4, 3, weight=-1.0)
expected = {'fairness': {1: 1.0, 2: 0.95, 3: 1.0, 4: 0.95},
'goodness': {1: 1.0, 2: 1.0, 3: 0.0, 4: 2.0}}
computed = network_utils.compute_fairness_goodness(dg, verbose=False)
self.assertDictEqual(computed, expected)
# =========================================================================
# ====================== is_sparsely_transitive_balanced ==================
# =========================================================================
def test_is_sparsely_transitive_balanced_raises_when_self_loops(self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_transitive_balanced(triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), True],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), True],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), True],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), True],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True]]
)
def test_is_sparsely_transitive_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_transitive_balanced(triad),
expected_balance)
# =========================================================================
# ====================== is_sparsely_cartwright_harary_balanced ===========
# =========================================================================
def test_is_sparsely_cartwright_harary_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_cartwright_harary_balanced(
triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), False],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), False],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), False],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), False],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), False]]
)
def test_is_sparsely_cartwright_harary_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_cartwright_harary_balanced(triad),
expected_balance)
# =========================================================================
# ====================== is_sparsely_clustering_balanced ==================
# =========================================================================
def test_is_sparsely_clustering_balanced_raises_when_self_loops(self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_clustering_balanced(
triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), False],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), False],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), True],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True]]
)
def test_is_sparsely_clustering_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_clustering_balanced(triad),
expected_balance)
# =========================================================================
# ========= is_fullyconnected_cartwright_harary_balance ===================
# =========================================================================
def test_is_fullyconnected_cartwright_harary_balance_raises_when_selfloops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_cartwright_harary_balance(
triad_with_self_loop)
def test_is_fullyconnected_cartwright_harary_balance_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_cartwright_harary_balance(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), False],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), False],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), False],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), False],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), False],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_cartwright_harary_balance(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_cartwright_harary_balance(triad),
expected_balance)
# =========================================================================
# =============== is_fullyconnected_clustering_balanced ===================
# =========================================================================
def test_is_fullyconnected_clustering_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_clustering_balanced(
triad_with_self_loop)
def test_is_fullyconnected_clustering_balanced_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_clustering_balanced(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), False],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), False],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), False],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_clustering_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_clustering_balanced(triad),
expected_balance)
# =========================================================================
# ============= is_fullyconnected_transitivity_balanced ===================
# =========================================================================
def test_is_fullyconnected_transitivity_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_transitivity_balanced(
triad_with_self_loop)
def test_is_fullyconnected_transitivity_balanced_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_transitivity_balanced(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), True],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), True],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_transitivity_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_transitivity_balanced(triad),
expected_balance)
# # =======================================================================
# # =================== is_sparsely_ranked_clustering_balanced ============
# # =======================================================================
# def test_is_sparsely_ranked_clustering_balanced_raises_when_self_loops(
# self):
# with self.assertRaises(ValueError):
# triad_with_self_loop = np.array(
# [[0, 1, 0],
# [0, 1, 1],
# [0, 0, 0]])
# network_utils.is_sparsely_ranked_clustering_balanced(
# triad_with_self_loop)
# @parameterized.expand([
# ["120U", np.array(
# [[0, 1, 1],
# [1, 0, 1],
# [-1, -1, 0]]), True],
# ["120D", np.array(
# [[0, 1, -1],
# [1, 0, -1],
# [1, 1, 0]]), True],
# ["0122Z", np.array(
# [[0, 0, -1],
# [-1, 0, 0],
# [1, -1, 0]]), True],
# ["030TZ", np.array(
# [[0, 1, 1],
# [0, 0, 1],
# [0, 0, 0]]), True],
# ["003", np.array(
# [[0, -1, -1],
# [-1, 0, -1],
# [-1, -1, 0]]), True],
# ["0032Z", np.array(
# [[0, 0, -1],
# [-1, 0, 0],
# [-1, -1, 0]]), True],
# ["030T", np.array(
# [[0, 1, 1],
# [-1, 0, 1],
# [-1, -1, 0]]), False],
# ["021C", np.array(
# [[0, 1, -1],
# [-1, 0, 1],
# [-1, -1, 0]]), False],
# ["030T2negZ", np.array(
# [[0, 1, -1],
# [0, 0, -1],
# [0, 0, 0]]), True],
# ["021UnegZ", np.array(
# [[0, 1, 0],
# [0, 0, 0],
# [0, -1, 0]]), True],
# ["021DZ", np.array(
# [[0, 0, 0],
# [1, 0, 1],
# [0, 0, 0]]), True],
# ["210", np.array(
# [[0, 1, -1],
# [1, 0, 1],
# [1, 1, 0]]), False],
# ["210Z", np.array(
# [[0, 1, 0],
# [1, 0, 1],
# [1, 1, 0]]), False],
# ["003Z", np.array(
# [[0, 0, 0],
# [0, 0, 0],
# [0, 0, 0]]), True],
# ["102Z", np.array(
# [[0, 1, 0],
# [1, 0, 0],
# [0, 0, 0]]), True],
# ["102negZ", np.array(
# [[0, -1, 0],
# [-1, 0, 0],
# [0, 0, 0]]), True],
# ["102posnegZ", np.array(
# [[0, 1, 0],
# [-1, 0, 0],
# [0, 0, 0]]), True],
# ["012Z", np.array(
# [[0, 1, 0],
# [0, 0, 0],
# [0, 0, 0]]), True],
# ["012", np.array(
# [[0, 1, -1],
# [-1, 0, -1],
# [-1, -1, 0]]), True]]
# )
# def test_is_sparsely_ranked_clustering_balanced(
# self, name, triad, expected_balance):
# self.assertEqual(
# network_utils.is_sparsely_ranked_clustering_balanced(triad),
# expected_balance)
# =========================================================================
# ====================== get_all_triad_permutations =======================
# =========================================================================
def test_get_all_triad_permutations(self):
triad_adj_matrix = np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]])
expected = set([
'[[0 1 1]\n [1 0 1]\n [1 0 0]]',
'[[0 0 1]\n [1 0 1]\n [1 1 0]]',
'[[0 1 1]\n [0 0 1]\n [1 1 0]]',
'[[0 1 1]\n [1 0 1]\n [0 1 0]]',
'[[0 1 1]\n [1 0 0]\n [1 1 0]]',
'[[0 1 0]\n [1 0 1]\n [1 1 0]]'])
computed = network_utils._get_all_triad_permutations(triad_adj_matrix)
self.assertEqual(expected, computed)
# =========================================================================
# ====================== generate_all_possible_sparse_triads ==============
# =========================================================================
def test_generate_all_possible_sparse_triads(self):
computed_triad_map, computed_triad_list = (
network_utils.generate_all_possible_sparse_triads())
# Testing triad_list
self.assertTrue(
len(computed_triad_list) == 138,
'Length of triad_list is not correct.')
np.testing.assert_array_equal(
computed_triad_list[0], np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), 'First triad_list is incorrect.')
np.testing.assert_array_equal(
computed_triad_list[-1], np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), 'Last triad_list is incorrect.')
np.testing.assert_array_equal(
computed_triad_list[69], np.array(
[[0, 0, 1],
[1, 0, -1],
[1, 0, 0]]), 'Middle triad_list is incorrect.')
# Testing triad_map.
expected_key1 = '[[0 0 0]\n [1 0 0]\n [0 0 0]]'
expected_value1 = 1
expected_key2 = '[[ 0 1 1]\n [-1 0 1]\n [-1 -1 0]]'
expected_value2 = 129
self.assertTrue(
expected_key1 in computed_triad_map,
'First key was not found in computed_triad_map.')
self.assertTrue(
expected_key2 in computed_triad_map,
'Second key was not found in computed_triad_map.')
self.assertEqual(
computed_triad_map[expected_key1], expected_value1,
'First value was not found in computed_triad_map.')
self.assertEqual(
computed_triad_map[expected_key2], expected_value2,
'Second value was not found in computed_triad_map.')
# =========================================================================
# ====================== detect_triad_type_for_all_subgraph3 ==============
# =========================================================================
def test_detect_triad_type_for_all_subgraph3(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=1)
dg.add_edge(1, 4, weight=2)
dg.add_edge(4, 3, weight=-5)
expected = {
'(1, 2, 3)': 55,
# [[0, 0, 1],
# [1, 0, 0],
# [0, 1, 0]]
'(1, 2, 4)': 3,
# [[0, 0, 0],
# [0, 0, 0],
# [1, 1, 0]]
'(1, 3, 4)': 56,
# [[0, 0, 1],
# [1, 0, 0],
# [0,-1, 0]]
'(2, 3, 4)': 24
# [[0, 0, 0],
# [1, 0, 0],
# [-1, 0, 0]]
}
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
self.assertDictEqual(expected, computed)
def test_detect_triad_type_for_all_subgraph3_nodes_with_str_name(self):
dg = nx.DiGraph()
dg.add_nodes_from(['b', 'c', 'a', 'd'])
dg.add_edge('b', 'c', weight=1)
dg.add_edge('c', 'a', weight=1)
dg.add_edge('a', 'b', weight=1)
dg.add_edge('b', 'd', weight=2)
dg.add_edge('d', 'a', weight=-5)
expected = {
"('a', 'b', 'c')": 55,
"('a', 'b', 'd')": 56,
"('a', 'c', 'd')": 24,
"('b', 'c', 'd')": 3
}
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
self.assertDictEqual(expected, computed)
def test_detect_triad_type_for_all_subgraph3_has_unique_keys(self):
dg = nx.DiGraph()
dg.add_nodes_from(['b', 'c', 'a', 'd'])
dg.add_edge('b', 'c', weight=1)
dg.add_edge('c', 'a', weight=1)
dg.add_edge('a', 'b', weight=1)
dg.add_edge('b', 'd', weight=2)
dg.add_edge('d', 'a', weight=-5)
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
truncated_keys = []
for key in list(computed.keys()):
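            # Reduce each key to its sorted node characters so permuted triads collapse to the same string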
key = re.sub(r'[^\w]', ' ', key)
key = key.replace(" ", "")
truncated_keys.append(''.join(sorted(key)))
self.assertEqual(len(truncated_keys), len(np.unique(truncated_keys)))
# =========================================================================
# ====================== compute_transition_matrix ========================
# =========================================================================
def test_compute_transition_matrix(self):
dg1 = nx.DiGraph()
dg1.add_nodes_from([1, 2, 3, 4])
dg1.add_edge(1, 2, weight=1)
dg1.add_edge(2, 1, weight=1)
dg1.add_edge(2, 3, weight=1)
dg1.add_edge(3, 1, weight=-1)
dg1.add_edge(3, 4, weight=1)
dg2 = nx.DiGraph()
dg2.add_nodes_from([1, 2, 3, 4])
dg2.add_edge(1, 2, weight=1)
dg2.add_edge(1, 3, weight=1)
dg2.add_edge(2, 1, weight=1)
dg2.add_edge(2, 3, weight=1)
dg2.add_edge(2, 4, weight=1)
dg2.add_edge(3, 1, weight=1)
dg2.add_edge(3, 4, weight=1)
dg2.add_edge(4, 1, weight=1)
dgraphs = [dg1, dg2]
triads_types = [
{'(1, 2, 3)': 76,
'(1, 2, 4)': 6,
'(1, 3, 4)': 4,
'(2, 3, 4)': 8},
{'(1, 2, 3)': 63,
'(1, 2, 4)': 57,
'(1, 3, 4)': 57,
'(2, 3, 4)': 22}]
n = len(self.triad_list)
transition_matrix = np.zeros((n, n))
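        # One count per triad: its type in the first snapshot transitions to its type in the second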
transition_matrix[76, 63] = 1
transition_matrix[6, 57] = 1
transition_matrix[4, 57] = 1
transition_matrix[8, 22] = 1
computed = network_utils.compute_transition_matrix(
dgraphs=dgraphs,
unique_triad_num=n,
triad_map=self.triad_map)
# self.assertDictEqual(expected, computed)
self.assertTrue(
'triads_types' in computed,
'triads_types was not found in computed transition matrix.')
self.assertTrue(
'transition_matrices' in computed,
'transition_matrices was not found in computed transition matrix.')
self.assertEqual(
triads_types,
computed['triads_types'],
'Triad types were different.')
np.testing.assert_array_equal(
transition_matrix,
computed['transition_matrices'][0],
'Transition matrices were different.')
# =========================================================================
# ====================== get_stationary_distribution ======================
# =========================================================================
def test_get_stationary_distribution_simple(self):
transition_matrix = np.array(
[[0, 0, 1],
[0, 0, 1],
[0, 0, 1]], dtype=float)
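        # Every state moves to the last state with probability 1, so all stationary mass ends up there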
expected = np.array([0, 0, 1])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution_full_matrix(self):
transition_matrix = np.array(
[[0.6, 0.1, 0.3],
[0.1, 0.7, 0.2],
[0.2, 0.2, 0.6]], dtype=float)
expected = np.array([0.2759, 0.3448, 0.3793])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution_not_row_stochastic(self):
transition_matrix = np.array(
[[0, 0, 0],
[9, 0, 1],
[1, 0, 3]], dtype=float)
expected = np.array([0.3571, 0.1191, 0.5238])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0001)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution(self):
transition_matrix = np.array(
[[0, 0, 0],
[0.9, 0, 0.1],
[0.25, 0, 0.75]], dtype=float)
expected = np.array([0.3571, 0.1191, 0.5238])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0001)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
# =========================================================================
# ====================== get_mixing_time_range ============================
# =========================================================================
def test_get_mixing_time_range(self):
transition_matrix = np.array(
[[0, 0, 0],
[0.9, 0, 0.1],
[0.25, 0, 0.75]], dtype=float)
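        # Mixing time: how many steps the chain needs to get within
        # distance_from_stationary_eps of its stationary distribution.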
expected = 13.7081
computed = network_utils.get_mixing_time_range(
transition_matrix,
aperiodic_irreducible_eps=0.0001,
distance_from_stationary_eps=0.01)
self.assertEqual(np.round(expected, 4), np.round(computed, 4))
# =========================================================================
# ====================== _randomize_network ===============================
# =========================================================================
def test_randomize_network_with_unweighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2)
dg.add_edge(2, 1)
dg.add_edge(2, 3)
dg.add_edge(3, 1)
dg.add_edge(3, 4)
dg.add_edge(4, 5)
dg.add_edge(5, 4)
dg.add_edge(1, 6)
dg.add_edge(6, 1)
dg.add_edge(6, 5)
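        # Degree-preserving edge switching: the randomized graph must keep
        # the original node set and degree sequence (checked below).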
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
def test_randomize_network_with_all_positive_weighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=2)
dg.add_edge(3, 4, weight=5)
dg.add_edge(4, 5, weight=9)
dg.add_edge(5, 4, weight=6)
dg.add_edge(1, 6, weight=9)
dg.add_edge(6, 1, weight=1)
dg.add_edge(6, 5, weight=16)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
def test_randomize_network_with_signed_weighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-2)
dg.add_edge(3, 4, weight=5)
dg.add_edge(4, 5, weight=9)
dg.add_edge(5, 4, weight=-6)
dg.add_edge(1, 6, weight=-9)
dg.add_edge(6, 1, weight=1)
dg.add_edge(6, 5, weight=-16)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
# =========================================================================
# ================== get_robustness_of_transitions ========================
# =========================================================================
def test_get_robustness_of_transitions(self):
transition_matrices = [
np.array(
[[0.9, 0.1, 0],
[0.6, 0.2, 0.2],
[0.7, 0.1, 0.2]]),
np.array(
[[0.1, 0.8, 0.1],
[0, 0.9, 0.1],
[0.1, 0.1, 0.8]])
]
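        # Two consecutive transition matrices, i.e. the transitions from
        # period 1 to period 2 and from period 2 to period 3.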
# Expected dataframe.
columns = [
'Transitions',
'Matrix L2-Norm Dist. from Average',
'Matrix Pearson r-value',
'Matrix Pearson p-value',
'Stationary Dist. L2-Norm Dist. from Average',
'Stationary Dist. Pearson r-value',
'Stationary Dist. Pearson p-value']
expected_df = pd.DataFrame({
columns[0]: ['Period 1 to Period 2', 'Period 2 to Period 3'],
columns[1]: [0.8444, 0.8083],
columns[2]: [0.4256, 0.6522],
columns[3]: [0.2534, 0.0569],
columns[4]: [0.5833, 0.4404],
columns[5]: [0.4637, 0.1319],
columns[6]: [0.6930, 0.9156],
},
columns=columns)
expected_df = pd.DataFrame(
expected_df, columns=columns)
# Computed dataframe.
computed_df = network_utils.get_robustness_of_transitions(
transition_matrices, lnorm=2)
# Comparing computed with expected.
pd.testing.assert_frame_equal(
expected_df, computed_df, check_less_precise=2)
# =========================================================================
# ================== generate_converted_graphs ============================
# =========================================================================
def test_generate_converted_graphs_raises_when_wrong_percentage(self):
with self.assertRaises(ValueError):
network_utils.generate_converted_graphs(
dgraph=nx.DiGraph(),
percentage=-1)
def test_generate_converted_graphs_when_it_adds_edges(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(1, 3, weight=2)
dg.add_edge(2, 3, weight=5)
dg.add_edge(3, 1, weight=1)
percentage = 25
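        # 4 of the 4 * 3 possible directed edges already exist, so 25% of the
        # remaining 8 non-edges (2 edges) should be converted from 0 to 1.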
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=5)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain all dg's edges.
self.assertEqual(len(nx.difference(dg, computed).edges()), 0)
# It should contain percentage% more edges.
remaining_edges_count = 4 * 3 - 4
self.assertEqual(
len(nx.difference(computed, dg).edges()),
int(percentage*remaining_edges_count/100))
def test_generate_converted_graphs_when_all_edges_exist(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=2)
dg.add_edge(1, 3, weight=-5)
dg.add_edge(2, 3, weight=-2)
dg.add_edge(3, 1, weight=2)
dg.add_edge(4, 1, weight=2)
dg.add_edge(4, 3, weight=2)
percentage = 25
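        # Four edges carry weight 2, so 25% of them (exactly one edge) should
        # be converted to weight 3; the loop below counts the conversions.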
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=2,
convert_to=3,
percentage=percentage,
how_many_to_generate=2)
for computed in computed_graphs:
converted_cnt = 0
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain all dg's edges.
self.assertEqual(dg.edges(), computed.edges())
# Checking every edge weight.
for edge in dg.edges():
w1 = dg.get_edge_data(edge[0], edge[1])['weight']
w2 = computed.get_edge_data(edge[0], edge[1])['weight']
if w1 == w2:
continue
if w1 != w2 and w1 == 2 and w2 == 3 and converted_cnt == 0:
converted_cnt += 1
else:
self.assertTrue(
                        False, 'Found more converted edges than expected.')
def test_generate_converted_graphs(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
percentage = 10
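        # Starting from an empty graph, 10% of all 4 * 3 possible directed
        # edges (one edge after integer truncation) should be added.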
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=2)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain percentage extra edges.
self.assertEqual(
len(computed.edges()), int(4 * 3 * percentage / 100))
def test_generate_converted_graphs_for_large_networks(self):
n = 100
m = 300
dgraph = nx.gnm_random_graph(n=n, m=m, directed=True)
percentage = 5
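        # 5% of the n * (n - 1) - m currently absent edges should be added on
        # top of the original m edges.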
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dgraph,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=6)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dgraph.nodes(), computed.nodes())
# It should contain percentage extra edges.
self.assertEqual(
len(computed.edges()), m + int(
(n * (n-1) - m) * percentage / 100))
if __name__ == '__main__':
unittest.main()
|
[
"network_utils.generate_converted_graphs",
"network_utils.is_fullyconnected_cartwright_harary_balance",
"numpy.testing.assert_array_almost_equal",
"numpy.round",
"network_utils.compute_vanderijt_edge_balance",
"network_utils.get_just_periods",
"numpy.unique",
"unittest.main",
"pandas.DataFrame",
"network_utils.generate_all_possible_sparse_triads",
"network_utils.is_sparsely_cartwright_harary_balanced",
"network_utils.extract_graph",
"network_utils.kunegis_sprase_balance_ratio",
"network_utils.get_robustness_of_transitions",
"numpy.testing.assert_almost_equal",
"network_utils.get_all_degrees",
"network_utils.count_different_signed_edges",
"re.sub",
"networkx.gnm_random_graph",
"pandas.testing.assert_frame_equal",
"network_utils.is_sparsely_transitive_balanced",
"network_utils.get_metrics_for_network",
"network_utils._detect_triad_type_for_all_subgraph3",
"numpy.testing.assert_array_equal",
"utils.graph_equals",
"network_utils.cartwright_harary_balance_ratio",
"datetime.datetime",
"parameterized.parameterized.expand",
"network_utils._get_all_triad_permutations",
"network_utils.is_sparsely_clustering_balanced",
"network_utils.is_fullyconnected_clustering_balanced",
"network_utils.compute_transition_matrix",
"network_utils.terzi_sprase_balance_ratio",
"networkx.DiGraph",
"network_utils.sprase_balance_ratio",
"network_utils.get_stationary_distribution",
"network_utils.compute_fairness_goodness",
"network_utils.is_fullyconnected_transitivity_balanced",
"network_utils.extract_graphs",
"numpy.zeros",
"network_utils.fullyconnected_balance_ratio",
"networkx.difference",
"numpy.array",
"network_utils._randomize_network",
"network_utils.get_mixing_time_range"
] |
[((865, 972), 'parameterized.parameterized.expand', 'parameterized.expand', (["[['latest_multiple_edge_weight', False], ['sum_of_multiple_edge_weights', True]\n ]"], {}), "([['latest_multiple_edge_weight', False], [\n 'sum_of_multiple_edge_weights', True]])\n", (885, 972), False, 'from parameterized import parameterized\n'), ((2978, 3065), 'parameterized.parameterized.expand', 'parameterized.expand', (["[['seperated graphs', False], ['accumulative graphs', True]]"], {}), "([['seperated graphs', False], ['accumulative graphs', \n True]])\n", (2998, 3065), False, 'from parameterized import parameterized\n'), ((10923, 11054), 'parameterized.parameterized.expand', 'parameterized.expand', (["[['CartwrightHarary', 1, [0.3, 3, 7]], ['Clustering', 2, [0.5, 5, 5]], [\n 'Transitivity', 3, [0.9, 9, 1]]]"], {}), "([['CartwrightHarary', 1, [0.3, 3, 7]], ['Clustering', \n 2, [0.5, 5, 5]], ['Transitivity', 3, [0.9, 9, 1]]])\n", (10943, 11054), False, 'from parameterized import parameterized\n'), ((12682, 12805), 'parameterized.parameterized.expand', 'parameterized.expand', (["[['Classical', 1, [0.4, 4, 6]], ['Clustering', 2, [0.7, 7, 3]], [\n 'Transitivity', 3, [0.8, 8, 2]]]"], {}), "([['Classical', 1, [0.4, 4, 6]], ['Clustering', 2, [0.7,\n 7, 3]], ['Transitivity', 3, [0.8, 8, 2]]])\n", (12702, 12805), False, 'from parameterized import parameterized\n'), ((65583, 65598), 'unittest.main', 'unittest.main', ([], {}), '()\n', (65596, 65598), False, 'import unittest\n'), ((467, 518), 'network_utils.generate_all_possible_sparse_triads', 'network_utils.generate_all_possible_sparse_triads', ([], {}), '()\n', (516, 518), False, 'import network_utils\n'), ((1539, 1618), 'pandas.DataFrame', 'pd.DataFrame', (['matrix_edges'], {'columns': "['source', 'target', 'weight', 'edge_date']"}), "(matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])\n", (1551, 1618), True, 'import pandas as pd\n'), ((1651, 1663), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1661, 1663), True, 'import networkx as nx\n'), ((2226, 2313), 'network_utils.extract_graph', 'network_utils.extract_graph', (['sample_edge_list'], {'sum_multiple_edge': 'sum_multiple_edge'}), '(sample_edge_list, sum_multiple_edge=\n sum_multiple_edge)\n', (2253, 2313), False, 'import network_utils\n'), ((2808, 2858), 'pandas.DataFrame', 'pd.DataFrame', (["{'source': [1, 2], 'target': [5, 6]}"], {}), "({'source': [1, 2], 'target': [5, 6]})\n", (2820, 2858), True, 'import pandas as pd\n'), ((3724, 3803), 'pandas.DataFrame', 'pd.DataFrame', (['matrix_edges'], {'columns': "['source', 'target', 'weight', 'edge_date']"}), "(matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])\n", (3736, 3803), True, 'import pandas as pd\n'), ((3830, 3842), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (3840, 3842), True, 'import networkx as nx\n'), ((3965, 3977), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (3975, 3977), True, 'import networkx as nx\n'), ((4138, 4150), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4148, 4150), True, 'import networkx as nx\n'), ((4499, 4595), 'network_utils.extract_graphs', 'network_utils.extract_graphs', ([], {'edge_list': 'sample_edge_list', 'weeks': '(4)', 'accumulative': 'accumulative'}), '(edge_list=sample_edge_list, weeks=4,\n accumulative=accumulative)\n', (4527, 4595), False, 'import network_utils\n'), ((5153, 5165), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (5163, 5165), True, 'import networkx as nx\n'), ((5521, 5554), 'network_utils.get_all_degrees', 'network_utils.get_all_degrees', 
(['dg'], {}), '(dg)\n', (5550, 5554), False, 'import network_utils\n'), ((6584, 6663), 'pandas.DataFrame', 'pd.DataFrame', (['matrix_edges'], {'columns': "['source', 'target', 'weight', 'edge_date']"}), "(matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])\n", (6596, 6663), True, 'import pandas as pd\n'), ((6776, 6853), 'network_utils.get_just_periods', 'network_utils.get_just_periods', (['sample_edge_list'], {'weeks': '(4)', 'accumulative': '(False)'}), '(sample_edge_list, weeks=4, accumulative=False)\n', (6806, 6853), False, 'import network_utils\n'), ((7210, 7222), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (7220, 7222), True, 'import networkx as nx\n'), ((7428, 7469), 'network_utils.get_metrics_for_network', 'network_utils.get_metrics_for_network', (['dg'], {}), '(dg)\n', (7465, 7469), False, 'import network_utils\n'), ((9028, 9040), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (9038, 9040), True, 'import networkx as nx\n'), ((9351, 9363), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (9361, 9363), True, 'import networkx as nx\n'), ((9672, 9684), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (9682, 9684), True, 'import networkx as nx\n'), ((9996, 10008), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10006, 10008), True, 'import networkx as nx\n'), ((11215, 11227), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (11225, 11227), True, 'import networkx as nx\n'), ((11586, 11658), 'network_utils.sprase_balance_ratio', 'network_utils.sprase_balance_ratio', ([], {'dgraph': 'dg', 'balance_type': 'balance_type'}), '(dgraph=dg, balance_type=balance_type)\n', (11620, 11658), False, 'import network_utils\n'), ((11692, 11766), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['computed', 'expected_values'], {'decimal': '(2)'}), '(computed, expected_values, decimal=2)\n', (11728, 11766), True, 'import numpy as np\n'), ((12975, 12987), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (12985, 12987), True, 'import networkx as nx\n'), ((13338, 13423), 'network_utils.fullyconnected_balance_ratio', 'network_utils.fullyconnected_balance_ratio', ([], {'dgraph': 'dg', 'balance_type': 'balance_type'}), '(dgraph=dg, balance_type=balance_type\n )\n', (13380, 13423), False, 'import network_utils\n'), ((13452, 13526), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['computed', 'expected_values'], {'decimal': '(2)'}), '(computed, expected_values, decimal=2)\n', (13488, 13526), True, 'import numpy as np\n'), ((13843, 13855), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (13853, 13855), True, 'import networkx as nx\n'), ((14179, 14191), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (14189, 14191), True, 'import networkx as nx\n'), ((14514, 14526), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (14524, 14526), True, 'import networkx as nx\n'), ((15106, 15118), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (15116, 15118), True, 'import networkx as nx\n'), ((15305, 15366), 'network_utils.terzi_sprase_balance_ratio', 'network_utils.terzi_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (15345, 15366), False, 'import network_utils\n'), ((15388, 15438), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (15418, 15438), True, 'import numpy as np\n'), ((15519, 15531), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (15529, 15531), True, 'import networkx as 
nx\n'), ((15720, 15781), 'network_utils.terzi_sprase_balance_ratio', 'network_utils.terzi_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (15760, 15781), False, 'import network_utils\n'), ((15803, 15853), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (15833, 15853), True, 'import numpy as np\n'), ((15930, 15942), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (15940, 15942), True, 'import networkx as nx\n'), ((16130, 16191), 'network_utils.terzi_sprase_balance_ratio', 'network_utils.terzi_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (16170, 16191), False, 'import network_utils\n'), ((16213, 16263), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (16243, 16263), True, 'import numpy as np\n'), ((16344, 16356), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (16354, 16356), True, 'import networkx as nx\n'), ((16735, 16796), 'network_utils.terzi_sprase_balance_ratio', 'network_utils.terzi_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (16775, 16796), False, 'import network_utils\n'), ((16818, 16868), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (16848, 16868), True, 'import numpy as np\n'), ((17191, 17203), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (17201, 17203), True, 'import networkx as nx\n'), ((17390, 17453), 'network_utils.kunegis_sprase_balance_ratio', 'network_utils.kunegis_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (17432, 17453), False, 'import network_utils\n'), ((17475, 17525), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (17505, 17525), True, 'import numpy as np\n'), ((17608, 17620), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (17618, 17620), True, 'import networkx as nx\n'), ((17809, 17872), 'network_utils.kunegis_sprase_balance_ratio', 'network_utils.kunegis_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (17851, 17872), False, 'import network_utils\n'), ((17894, 17944), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (17924, 17944), True, 'import numpy as np\n'), ((18023, 18035), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (18033, 18035), True, 'import networkx as nx\n'), ((18223, 18286), 'network_utils.kunegis_sprase_balance_ratio', 'network_utils.kunegis_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (18265, 18286), False, 'import network_utils\n'), ((18308, 18358), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {}), '(computed, expected)\n', (18338, 18358), True, 'import numpy as np\n'), ((18441, 18453), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (18451, 18453), True, 'import networkx as nx\n'), ((18832, 18895), 'network_utils.kunegis_sprase_balance_ratio', 'network_utils.kunegis_sprase_balance_ratio', (['dg'], {'undirected': '(True)'}), '(dg, undirected=True)\n', (18874, 18895), False, 'import network_utils\n'), ((18917, 18978), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed', 'expected'], {'decimal': 
'(1)'}), '(computed, expected, decimal=1)\n', (18947, 18978), True, 'import numpy as np\n'), ((19296, 19308), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (19306, 19308), True, 'import networkx as nx\n'), ((19577, 19625), 'network_utils.compute_vanderijt_edge_balance', 'network_utils.compute_vanderijt_edge_balance', (['dg'], {}), '(dg)\n', (19621, 19625), False, 'import network_utils\n'), ((19758, 19770), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (19768, 19770), True, 'import networkx as nx\n'), ((20988, 21036), 'network_utils.compute_vanderijt_edge_balance', 'network_utils.compute_vanderijt_edge_balance', (['dg'], {}), '(dg)\n', (21032, 21036), False, 'import network_utils\n'), ((25020, 25032), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (25030, 25032), True, 'import networkx as nx\n'), ((25417, 25475), 'network_utils.compute_fairness_goodness', 'network_utils.compute_fairness_goodness', (['dg'], {'verbose': '(False)'}), '(dg, verbose=False)\n', (25456, 25475), False, 'import network_utils\n'), ((46930, 46973), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (46938, 46973), True, 'import numpy as np\n'), ((47328, 47387), 'network_utils._get_all_triad_permutations', 'network_utils._get_all_triad_permutations', (['triad_adj_matrix'], {}), '(triad_adj_matrix)\n', (47369, 47387), False, 'import network_utils\n'), ((47794, 47845), 'network_utils.generate_all_possible_sparse_triads', 'network_utils.generate_all_possible_sparse_triads', ([], {}), '()\n', (47843, 47845), False, 'import network_utils\n'), ((49732, 49744), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (49742, 49744), True, 'import networkx as nx\n'), ((50443, 50535), 'network_utils._detect_triad_type_for_all_subgraph3', 'network_utils._detect_triad_type_for_all_subgraph3', ([], {'dgraph': 'dg', 'triad_map': 'self.triad_map'}), '(dgraph=dg, triad_map=\n self.triad_map)\n', (50493, 50535), False, 'import network_utils\n'), ((50683, 50695), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (50693, 50695), True, 'import networkx as nx\n'), ((51133, 51225), 'network_utils._detect_triad_type_for_all_subgraph3', 'network_utils._detect_triad_type_for_all_subgraph3', ([], {'dgraph': 'dg', 'triad_map': 'self.triad_map'}), '(dgraph=dg, triad_map=\n self.triad_map)\n', (51183, 51225), False, 'import network_utils\n'), ((51369, 51381), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (51379, 51381), True, 'import networkx as nx\n'), ((51650, 51742), 'network_utils._detect_triad_type_for_all_subgraph3', 'network_utils._detect_triad_type_for_all_subgraph3', ([], {'dgraph': 'dg', 'triad_map': 'self.triad_map'}), '(dgraph=dg, triad_map=\n self.triad_map)\n', (51700, 51742), False, 'import network_utils\n'), ((52340, 52352), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (52350, 52352), True, 'import networkx as nx\n'), ((52594, 52606), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (52604, 52606), True, 'import networkx as nx\n'), ((53330, 53346), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (53338, 53346), True, 'import numpy as np\n'), ((53515, 53621), 'network_utils.compute_transition_matrix', 'network_utils.compute_transition_matrix', ([], {'dgraphs': 'dgraphs', 'unique_triad_num': 'n', 'triad_map': 'self.triad_map'}), '(dgraphs=dgraphs, unique_triad_num=n,\n triad_map=self.triad_map)\n', (53554, 53621), False, 'import network_utils\n'), ((54137, 54265), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', 
(['transition_matrix', "computed['transition_matrices'][0]", '"""Transition matrices were different."""'], {}), "(transition_matrix, computed[\n 'transition_matrices'][0], 'Transition matrices were different.')\n", (54166, 54265), True, 'import numpy as np\n'), ((54622, 54678), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 0, 1], [0, 0, 1]]'], {'dtype': 'float'}), '([[0, 0, 1], [0, 0, 1], [0, 0, 1]], dtype=float)\n', (54630, 54678), True, 'import numpy as np\n'), ((54737, 54756), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (54745, 54756), True, 'import numpy as np\n'), ((54776, 54871), 'network_utils.get_stationary_distribution', 'network_utils.get_stationary_distribution', (['transition_matrix'], {'aperiodic_irreducible_eps': '(0.0)'}), '(transition_matrix,\n aperiodic_irreducible_eps=0.0)\n', (54817, 54871), False, 'import network_utils\n'), ((54889, 54956), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'computed'], {'decimal': '(4)'}), '(expected, computed, decimal=4)\n', (54925, 54956), True, 'import numpy as np\n'), ((55046, 55120), 'numpy.array', 'np.array', (['[[0.6, 0.1, 0.3], [0.1, 0.7, 0.2], [0.2, 0.2, 0.6]]'], {'dtype': 'float'}), '([[0.6, 0.1, 0.3], [0.1, 0.7, 0.2], [0.2, 0.2, 0.6]], dtype=float)\n', (55054, 55120), True, 'import numpy as np\n'), ((55179, 55213), 'numpy.array', 'np.array', (['[0.2759, 0.3448, 0.3793]'], {}), '([0.2759, 0.3448, 0.3793])\n', (55187, 55213), True, 'import numpy as np\n'), ((55233, 55328), 'network_utils.get_stationary_distribution', 'network_utils.get_stationary_distribution', (['transition_matrix'], {'aperiodic_irreducible_eps': '(0.0)'}), '(transition_matrix,\n aperiodic_irreducible_eps=0.0)\n', (55274, 55328), False, 'import network_utils\n'), ((55346, 55413), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'computed'], {'decimal': '(4)'}), '(expected, computed, decimal=4)\n', (55382, 55413), True, 'import numpy as np\n'), ((55510, 55566), 'numpy.array', 'np.array', (['[[0, 0, 0], [9, 0, 1], [1, 0, 3]]'], {'dtype': 'float'}), '([[0, 0, 0], [9, 0, 1], [1, 0, 3]], dtype=float)\n', (55518, 55566), True, 'import numpy as np\n'), ((55625, 55659), 'numpy.array', 'np.array', (['[0.3571, 0.1191, 0.5238]'], {}), '([0.3571, 0.1191, 0.5238])\n', (55633, 55659), True, 'import numpy as np\n'), ((55679, 55777), 'network_utils.get_stationary_distribution', 'network_utils.get_stationary_distribution', (['transition_matrix'], {'aperiodic_irreducible_eps': '(0.0001)'}), '(transition_matrix,\n aperiodic_irreducible_eps=0.0001)\n', (55720, 55777), False, 'import network_utils\n'), ((55795, 55862), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'computed'], {'decimal': '(4)'}), '(expected, computed, decimal=4)\n', (55831, 55862), True, 'import numpy as np\n'), ((55940, 56006), 'numpy.array', 'np.array', (['[[0, 0, 0], [0.9, 0, 0.1], [0.25, 0, 0.75]]'], {'dtype': 'float'}), '([[0, 0, 0], [0.9, 0, 0.1], [0.25, 0, 0.75]], dtype=float)\n', (55948, 56006), True, 'import numpy as np\n'), ((56065, 56099), 'numpy.array', 'np.array', (['[0.3571, 0.1191, 0.5238]'], {}), '([0.3571, 0.1191, 0.5238])\n', (56073, 56099), True, 'import numpy as np\n'), ((56119, 56217), 'network_utils.get_stationary_distribution', 'network_utils.get_stationary_distribution', (['transition_matrix'], {'aperiodic_irreducible_eps': '(0.0001)'}), '(transition_matrix,\n aperiodic_irreducible_eps=0.0001)\n', (56160, 56217), False, 
'import network_utils\n'), ((56235, 56302), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'computed'], {'decimal': '(4)'}), '(expected, computed, decimal=4)\n', (56271, 56302), True, 'import numpy as np\n'), ((56614, 56680), 'numpy.array', 'np.array', (['[[0, 0, 0], [0.9, 0, 0.1], [0.25, 0, 0.75]]'], {'dtype': 'float'}), '([[0, 0, 0], [0.9, 0, 0.1], [0.25, 0, 0.75]], dtype=float)\n', (56622, 56680), True, 'import numpy as np\n'), ((56766, 56893), 'network_utils.get_mixing_time_range', 'network_utils.get_mixing_time_range', (['transition_matrix'], {'aperiodic_irreducible_eps': '(0.0001)', 'distance_from_stationary_eps': '(0.01)'}), '(transition_matrix,\n aperiodic_irreducible_eps=0.0001, distance_from_stationary_eps=0.01)\n', (56801, 56893), False, 'import network_utils\n'), ((57312, 57324), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (57322, 57324), True, 'import networkx as nx\n'), ((57650, 57710), 'network_utils._randomize_network', 'network_utils._randomize_network', (['dg'], {'switching_count_coef': '(2)'}), '(dg, switching_count_coef=2)\n', (57682, 57710), False, 'import network_utils\n'), ((58020, 58032), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (58030, 58032), True, 'import networkx as nx\n'), ((58459, 58519), 'network_utils._randomize_network', 'network_utils._randomize_network', (['dg'], {'switching_count_coef': '(2)'}), '(dg, switching_count_coef=2)\n', (58491, 58519), False, 'import network_utils\n'), ((58823, 58835), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (58833, 58835), True, 'import networkx as nx\n'), ((59266, 59326), 'network_utils._randomize_network', 'network_utils._randomize_network', (['dg'], {'switching_count_coef': '(2)'}), '(dg, switching_count_coef=2)\n', (59298, 59326), False, 'import network_utils\n'), ((60510, 60795), 'pandas.DataFrame', 'pd.DataFrame', (["{columns[0]: ['Period 1 to Period 2', 'Period 2 to Period 3'], columns[1]:\n [0.8444, 0.8083], columns[2]: [0.4256, 0.6522], columns[3]: [0.2534, \n 0.0569], columns[4]: [0.5833, 0.4404], columns[5]: [0.4637, 0.1319],\n columns[6]: [0.693, 0.9156]}"], {'columns': 'columns'}), "({columns[0]: ['Period 1 to Period 2', 'Period 2 to Period 3'],\n columns[1]: [0.8444, 0.8083], columns[2]: [0.4256, 0.6522], columns[3]:\n [0.2534, 0.0569], columns[4]: [0.5833, 0.4404], columns[5]: [0.4637, \n 0.1319], columns[6]: [0.693, 0.9156]}, columns=columns)\n", (60522, 60795), True, 'import pandas as pd\n'), ((60917, 60959), 'pandas.DataFrame', 'pd.DataFrame', (['expected_df'], {'columns': 'columns'}), '(expected_df, columns=columns)\n', (60929, 60959), True, 'import pandas as pd\n'), ((61025, 61098), 'network_utils.get_robustness_of_transitions', 'network_utils.get_robustness_of_transitions', (['transition_matrices'], {'lnorm': '(2)'}), '(transition_matrices, lnorm=2)\n', (61068, 61098), False, 'import network_utils\n'), ((61164, 61241), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['expected_df', 'computed_df'], {'check_less_precise': '(2)'}), '(expected_df, computed_df, check_less_precise=2)\n', (61193, 61241), True, 'import pandas as pd\n'), ((61815, 61827), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (61825, 61827), True, 'import networkx as nx\n'), ((62062, 62193), 'network_utils.generate_converted_graphs', 'network_utils.generate_converted_graphs', ([], {'dgraph': 'dg', 'convert_from': '(0)', 'convert_to': '(1)', 'percentage': 'percentage', 'how_many_to_generate': '(5)'}), '(dgraph=dg, convert_from=0,\n 
convert_to=1, percentage=percentage, how_many_to_generate=5)\n', (62101, 62193), False, 'import network_utils\n'), ((62846, 62858), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (62856, 62858), True, 'import networkx as nx\n'), ((63167, 63298), 'network_utils.generate_converted_graphs', 'network_utils.generate_converted_graphs', ([], {'dgraph': 'dg', 'convert_from': '(2)', 'convert_to': '(3)', 'percentage': 'percentage', 'how_many_to_generate': '(2)'}), '(dgraph=dg, convert_from=2,\n convert_to=3, percentage=percentage, how_many_to_generate=2)\n', (63206, 63298), False, 'import network_utils\n'), ((64219, 64231), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (64229, 64231), True, 'import networkx as nx\n'), ((64322, 64453), 'network_utils.generate_converted_graphs', 'network_utils.generate_converted_graphs', ([], {'dgraph': 'dg', 'convert_from': '(0)', 'convert_to': '(1)', 'percentage': 'percentage', 'how_many_to_generate': '(2)'}), '(dgraph=dg, convert_from=0,\n convert_to=1, percentage=percentage, how_many_to_generate=2)\n', (64361, 64453), False, 'import network_utils\n'), ((64925, 64969), 'networkx.gnm_random_graph', 'nx.gnm_random_graph', ([], {'n': 'n', 'm': 'm', 'directed': '(True)'}), '(n=n, m=m, directed=True)\n', (64944, 64969), True, 'import networkx as nx\n'), ((65019, 65154), 'network_utils.generate_converted_graphs', 'network_utils.generate_converted_graphs', ([], {'dgraph': 'dgraph', 'convert_from': '(0)', 'convert_to': '(1)', 'percentage': 'percentage', 'how_many_to_generate': '(6)'}), '(dgraph=dgraph, convert_from=0,\n convert_to=1, percentage=percentage, how_many_to_generate=6)\n', (65058, 65154), False, 'import network_utils\n'), ((2359, 2426), 'utils.graph_equals', 'utils.graph_equals', (['expected', 'computed'], {'weight_column_name': '"""weight"""'}), "(expected, computed, weight_column_name='weight')\n", (2377, 2426), False, 'import utils\n'), ((2915, 2971), 'network_utils.extract_graphs', 'network_utils.extract_graphs', ([], {'edge_list': 'sample_edge_list'}), '(edge_list=sample_edge_list)\n', (2943, 2971), False, 'import network_utils\n'), ((9212, 9261), 'network_utils.cartwright_harary_balance_ratio', 'network_utils.cartwright_harary_balance_ratio', (['dg'], {}), '(dg)\n', (9257, 9261), False, 'import network_utils\n'), ((9537, 9586), 'network_utils.cartwright_harary_balance_ratio', 'network_utils.cartwright_harary_balance_ratio', (['dg'], {}), '(dg)\n', (9582, 9586), False, 'import network_utils\n'), ((9857, 9906), 'network_utils.cartwright_harary_balance_ratio', 'network_utils.cartwright_harary_balance_ratio', (['dg'], {}), '(dg)\n', (9902, 9906), False, 'import network_utils\n'), ((10383, 10432), 'network_utils.cartwright_harary_balance_ratio', 'network_utils.cartwright_harary_balance_ratio', (['dg'], {}), '(dg)\n', (10428, 10432), False, 'import network_utils\n'), ((12429, 12441), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (12439, 12441), True, 'import networkx as nx\n'), ((12573, 12642), 'network_utils.fullyconnected_balance_ratio', 'network_utils.fullyconnected_balance_ratio', ([], {'dgraph': 'dg', 'balance_type': '(1)'}), '(dgraph=dg, balance_type=1)\n', (12615, 12642), False, 'import network_utils\n'), ((14064, 14110), 'network_utils.count_different_signed_edges', 'network_utils.count_different_signed_edges', (['dg'], {}), '(dg)\n', (14106, 14110), False, 'import network_utils\n'), ((14399, 14445), 'network_utils.count_different_signed_edges', 'network_utils.count_different_signed_edges', (['dg'], {}), '(dg)\n', (14441, 14445), False, 
'import network_utils\n'), ((14735, 14781), 'network_utils.count_different_signed_edges', 'network_utils.count_different_signed_edges', (['dg'], {}), '(dg)\n', (14777, 14781), False, 'import network_utils\n'), ((25920, 25963), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [0, 0, 0]])\n', (25928, 25963), True, 'import numpy as np\n'), ((26027, 26094), 'network_utils.is_sparsely_transitive_balanced', 'network_utils.is_sparsely_transitive_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (26072, 26094), False, 'import network_utils\n'), ((28378, 28430), 'network_utils.is_sparsely_transitive_balanced', 'network_utils.is_sparsely_transitive_balanced', (['triad'], {}), '(triad)\n', (28423, 28430), False, 'import network_utils\n'), ((28877, 28920), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [0, 0, 0]])\n', (28885, 28920), True, 'import numpy as np\n'), ((28984, 29058), 'network_utils.is_sparsely_cartwright_harary_balanced', 'network_utils.is_sparsely_cartwright_harary_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (29036, 29058), False, 'import network_utils\n'), ((31373, 31432), 'network_utils.is_sparsely_cartwright_harary_balanced', 'network_utils.is_sparsely_cartwright_harary_balanced', (['triad'], {}), '(triad)\n', (31425, 31432), False, 'import network_utils\n'), ((31859, 31902), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [0, 0, 0]])\n', (31867, 31902), True, 'import numpy as np\n'), ((31966, 32033), 'network_utils.is_sparsely_clustering_balanced', 'network_utils.is_sparsely_clustering_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (32011, 32033), False, 'import network_utils\n'), ((34337, 34389), 'network_utils.is_sparsely_clustering_balanced', 'network_utils.is_sparsely_clustering_balanced', (['triad'], {}), '(triad)\n', (34382, 34389), False, 'import network_utils\n'), ((34840, 34883), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [0, 0, 0]])\n', (34848, 34883), True, 'import numpy as np\n'), ((34947, 35026), 'network_utils.is_fullyconnected_cartwright_harary_balance', 'network_utils.is_fullyconnected_cartwright_harary_balance', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (35004, 35026), False, 'import network_utils\n'), ((35222, 35266), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [-1, 0, 0]])\n', (35230, 35266), True, 'import numpy as np\n'), ((35330, 35409), 'network_utils.is_fullyconnected_cartwright_harary_balance', 'network_utils.is_fullyconnected_cartwright_harary_balance', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (35387, 35409), False, 'import network_utils\n'), ((37352, 37416), 'network_utils.is_fullyconnected_cartwright_harary_balance', 'network_utils.is_fullyconnected_cartwright_harary_balance', (['triad'], {}), '(triad)\n', (37409, 37416), False, 'import network_utils\n'), ((37862, 37905), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [0, 0, 0]])\n', (37870, 37905), True, 'import numpy as np\n'), ((37969, 38042), 'network_utils.is_fullyconnected_clustering_balanced', 'network_utils.is_fullyconnected_clustering_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (38020, 38042), False, 'import network_utils\n'), ((38232, 38276), 'numpy.array', 
'np.array', (['[[0, 1, 0], [0, 1, 1], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [-1, 0, 0]])\n', (38240, 38276), True, 'import numpy as np\n'), ((38340, 38413), 'network_utils.is_fullyconnected_clustering_balanced', 'network_utils.is_fullyconnected_clustering_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (38391, 38413), False, 'import network_utils\n'), ((40348, 40406), 'network_utils.is_fullyconnected_clustering_balanced', 'network_utils.is_fullyconnected_clustering_balanced', (['triad'], {}), '(triad)\n', (40399, 40406), False, 'import network_utils\n'), ((40854, 40897), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [0, 0, 0]])\n', (40862, 40897), True, 'import numpy as np\n'), ((40961, 41036), 'network_utils.is_fullyconnected_transitivity_balanced', 'network_utils.is_fullyconnected_transitivity_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (41014, 41036), False, 'import network_utils\n'), ((41228, 41272), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 1], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 1], [-1, 0, 0]])\n', (41236, 41272), True, 'import numpy as np\n'), ((41336, 41411), 'network_utils.is_fullyconnected_transitivity_balanced', 'network_utils.is_fullyconnected_transitivity_balanced', (['triad_with_self_loop'], {}), '(triad_with_self_loop)\n', (41389, 41411), False, 'import network_utils\n'), ((43343, 43403), 'network_utils.is_fullyconnected_transitivity_balanced', 'network_utils.is_fullyconnected_transitivity_balanced', (['triad'], {}), '(triad)\n', (43396, 43403), False, 'import network_utils\n'), ((48074, 48117), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (48082, 48117), True, 'import numpy as np\n'), ((48280, 48329), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (48288, 48329), True, 'import numpy as np\n'), ((48493, 48537), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, -1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, -1], [1, 0, 0]])\n', (48501, 48537), True, 'import numpy as np\n'), ((51839, 51865), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'key'], {}), "('[^\\\\w]', ' ', key)\n", (51845, 51865), False, 'import re\n'), ((56952, 56973), 'numpy.round', 'np.round', (['expected', '(4)'], {}), '(expected, 4)\n', (56960, 56973), True, 'import numpy as np\n'), ((56975, 56996), 'numpy.round', 'np.round', (['computed', '(4)'], {}), '(computed, 4)\n', (56983, 56996), True, 'import numpy as np\n'), ((59886, 59945), 'numpy.array', 'np.array', (['[[0.9, 0.1, 0], [0.6, 0.2, 0.2], [0.7, 0.1, 0.2]]'], {}), '([[0.9, 0.1, 0], [0.6, 0.2, 0.2], [0.7, 0.1, 0.2]])\n', (59894, 59945), True, 'import numpy as np\n'), ((60010, 60069), 'numpy.array', 'np.array', (['[[0.1, 0.8, 0.1], [0, 0.9, 0.1], [0.1, 0.1, 0.8]]'], {}), '([[0.1, 0.8, 0.1], [0, 0.9, 0.1], [0.1, 0.1, 0.8]])\n', (60018, 60069), True, 'import numpy as np\n'), ((1092, 1121), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (1109, 1121), False, 'import datetime\n'), ((1147, 1176), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(2)'], {}), '(2017, 1, 2)\n', (1164, 1176), False, 'import datetime\n'), ((1202, 1231), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(4)'], {}), '(2017, 1, 4)\n', (1219, 1231), False, 'import datetime\n'), ((1257, 1286), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', 
'(5)'], {}), '(2017, 2, 5)\n', (1274, 1286), False, 'import datetime\n'), ((1312, 1341), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(6)'], {}), '(2017, 1, 6)\n', (1329, 1341), False, 'import datetime\n'), ((1367, 1397), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(13)'], {}), '(2017, 2, 13)\n', (1384, 1397), False, 'import datetime\n'), ((1423, 1453), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(22)'], {}), '(2017, 2, 22)\n', (1440, 1453), False, 'import datetime\n'), ((1479, 1509), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(24)'], {}), '(2017, 2, 24)\n', (1496, 1509), False, 'import datetime\n'), ((3213, 3242), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (3230, 3242), False, 'import datetime\n'), ((3280, 3309), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(4)'], {}), '(2017, 1, 4)\n', (3297, 3309), False, 'import datetime\n'), ((3347, 3376), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(5)'], {}), '(2017, 2, 5)\n', (3364, 3376), False, 'import datetime\n'), ((3414, 3444), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(13)'], {}), '(2017, 2, 13)\n', (3431, 3444), False, 'import datetime\n'), ((3482, 3512), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(24)'], {}), '(2017, 2, 24)\n', (3499, 3512), False, 'import datetime\n'), ((3552, 3582), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(28)'], {}), '(2017, 2, 28)\n', (3569, 3582), False, 'import datetime\n'), ((4721, 4800), 'utils.graph_equals', 'utils.graph_equals', (['expected_graph', 'computed_graph'], {'weight_column_name': '"""weight"""'}), "(expected_graph, computed_graph, weight_column_name='weight')\n", (4739, 4800), False, 'import utils\n'), ((6185, 6214), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (6202, 6214), False, 'import datetime\n'), ((6252, 6281), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(4)'], {}), '(2017, 1, 4)\n', (6269, 6281), False, 'import datetime\n'), ((6319, 6348), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(5)'], {}), '(2017, 2, 5)\n', (6336, 6348), False, 'import datetime\n'), ((6386, 6416), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(13)'], {}), '(2017, 2, 13)\n', (6403, 6416), False, 'import datetime\n'), ((6454, 6484), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(24)'], {}), '(2017, 2, 24)\n', (6471, 6484), False, 'import datetime\n'), ((6524, 6554), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(2)', '(28)'], {}), '(2017, 2, 28)\n', (6541, 6554), False, 'import datetime\n'), ((26141, 26186), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [-1, -1, 0]])\n', (26149, 26186), True, 'import numpy as np\n'), ((26251, 26296), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, -1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, -1], [1, 1, 0]])\n', (26259, 26296), True, 'import numpy as np\n'), ((26362, 26408), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [1, -1, 0]])\n', (26370, 26408), True, 'import numpy as np\n'), ((26474, 26517), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (26482, 26517), True, 'import numpy as np\n'), ((26581, 26630), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), 
'([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (26589, 26630), True, 'import numpy as np\n'), ((26696, 26743), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [-1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [-1, -1, 0]])\n', (26704, 26743), True, 'import numpy as np\n'), ((26808, 26854), 'numpy.array', 'np.array', (['[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]])\n', (26816, 26854), True, 'import numpy as np\n'), ((26919, 26966), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, 1], [-1, -1, 0]])\n', (26927, 26966), True, 'import numpy as np\n'), ((27037, 27082), 'numpy.array', 'np.array', (['[[0, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[0, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (27045, 27082), True, 'import numpy as np\n'), ((27151, 27195), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (27159, 27195), True, 'import numpy as np\n'), ((27261, 27304), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (27269, 27304), True, 'import numpy as np\n'), ((27368, 27412), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, 1], [1, 1, 0]])\n', (27376, 27412), True, 'import numpy as np\n'), ((27478, 27521), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (27486, 27521), True, 'import numpy as np\n'), ((27587, 27630), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (27595, 27630), True, 'import numpy as np\n'), ((27695, 27738), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (27703, 27738), True, 'import numpy as np\n'), ((27806, 27851), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, -1, 0], [-1, 0, 0], [0, 0, 0]])\n', (27814, 27851), True, 'import numpy as np\n'), ((27922, 27966), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n', (27930, 27966), True, 'import numpy as np\n'), ((28031, 28074), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (28039, 28074), True, 'import numpy as np\n'), ((28138, 28186), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (28146, 28186), True, 'import numpy as np\n'), ((29122, 29167), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [-1, -1, 0]])\n', (29130, 29167), True, 'import numpy as np\n'), ((29233, 29278), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, -1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, -1], [1, 1, 0]])\n', (29241, 29278), True, 'import numpy as np\n'), ((29345, 29391), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [1, -1, 0]])\n', (29353, 29391), True, 'import numpy as np\n'), ((29458, 29501), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (29466, 29501), True, 'import numpy as np\n'), ((29565, 29614), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (29573, 29614), True, 'import numpy as np\n'), ((29681, 29728), 
'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [-1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [-1, -1, 0]])\n', (29689, 29728), True, 'import numpy as np\n'), ((29794, 29840), 'numpy.array', 'np.array', (['[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]])\n', (29802, 29840), True, 'import numpy as np\n'), ((29906, 29953), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, 1], [-1, -1, 0]])\n', (29914, 29953), True, 'import numpy as np\n'), ((30024, 30069), 'numpy.array', 'np.array', (['[[0, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[0, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (30032, 30069), True, 'import numpy as np\n'), ((30138, 30182), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (30146, 30182), True, 'import numpy as np\n'), ((30248, 30291), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (30256, 30291), True, 'import numpy as np\n'), ((30355, 30399), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, 1], [1, 1, 0]])\n', (30363, 30399), True, 'import numpy as np\n'), ((30465, 30508), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (30473, 30508), True, 'import numpy as np\n'), ((30574, 30617), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (30582, 30617), True, 'import numpy as np\n'), ((30682, 30725), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (30690, 30725), True, 'import numpy as np\n'), ((30793, 30838), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, -1, 0], [-1, 0, 0], [0, 0, 0]])\n', (30801, 30838), True, 'import numpy as np\n'), ((30909, 30953), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n', (30917, 30953), True, 'import numpy as np\n'), ((31018, 31061), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (31026, 31061), True, 'import numpy as np\n'), ((31125, 31173), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (31133, 31173), True, 'import numpy as np\n'), ((32097, 32142), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [-1, -1, 0]])\n', (32105, 32142), True, 'import numpy as np\n'), ((32208, 32253), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, -1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, -1], [1, 1, 0]])\n', (32216, 32253), True, 'import numpy as np\n'), ((32320, 32366), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [1, -1, 0]])\n', (32328, 32366), True, 'import numpy as np\n'), ((32432, 32475), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (32440, 32475), True, 'import numpy as np\n'), ((32539, 32588), 'numpy.array', 'np.array', (['[[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (32547, 32588), True, 'import numpy as np\n'), ((32654, 32701), 'numpy.array', 'np.array', (['[[0, 0, -1], [-1, 0, 0], [-1, -1, 0]]'], {}), '([[0, 0, -1], [-1, 0, 0], [-1, -1, 
0]])\n', (32662, 32701), True, 'import numpy as np\n'), ((32766, 32812), 'numpy.array', 'np.array', (['[[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]])\n', (32774, 32812), True, 'import numpy as np\n'), ((32878, 32925), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, 1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, 1], [-1, -1, 0]])\n', (32886, 32925), True, 'import numpy as np\n'), ((32996, 33041), 'numpy.array', 'np.array', (['[[0, 1, -1], [0, 0, -1], [0, 0, 0]]'], {}), '([[0, 1, -1], [0, 0, -1], [0, 0, 0]])\n', (33004, 33041), True, 'import numpy as np\n'), ((33110, 33154), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (33118, 33154), True, 'import numpy as np\n'), ((33220, 33263), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (33228, 33263), True, 'import numpy as np\n'), ((33327, 33371), 'numpy.array', 'np.array', (['[[0, 1, -1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, -1], [1, 0, 1], [1, 1, 0]])\n', (33335, 33371), True, 'import numpy as np\n'), ((33437, 33480), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (33445, 33480), True, 'import numpy as np\n'), ((33546, 33589), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (33554, 33589), True, 'import numpy as np\n'), ((33654, 33697), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (33662, 33697), True, 'import numpy as np\n'), ((33765, 33810), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, -1, 0], [-1, 0, 0], [0, 0, 0]])\n', (33773, 33810), True, 'import numpy as np\n'), ((33881, 33925), 'numpy.array', 'np.array', (['[[0, 1, 0], [-1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n', (33889, 33925), True, 'import numpy as np\n'), ((33990, 34033), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (33998, 34033), True, 'import numpy as np\n'), ((34097, 34145), 'numpy.array', 'np.array', (['[[0, 1, -1], [-1, 0, -1], [-1, -1, 0]]'], {}), '([[0, 1, -1], [-1, 0, -1], [-1, -1, 0]])\n', (34105, 34145), True, 'import numpy as np\n'), ((35472, 35515), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (35480, 35515), True, 'import numpy as np\n'), ((35579, 35622), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (35587, 35622), True, 'import numpy as np\n'), ((35686, 35729), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (35694, 35729), True, 'import numpy as np\n'), ((35795, 35838), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (35803, 35838), True, 'import numpy as np\n'), ((35904, 35947), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (35912, 35947), True, 'import numpy as np\n'), ((36013, 36056), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (36021, 36056), True, 'import numpy as np\n'), ((36122, 36165), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 
0, 0], [1, 0, 1], [0, 0, 0]])\n', (36130, 36165), True, 'import numpy as np\n'), ((36231, 36274), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (36239, 36274), True, 'import numpy as np\n'), ((36339, 36382), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (36347, 36382), True, 'import numpy as np\n'), ((36448, 36491), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (36456, 36491), True, 'import numpy as np\n'), ((36557, 36600), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [0, 0, 0]])\n', (36565, 36600), True, 'import numpy as np\n'), ((36666, 36709), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (36674, 36709), True, 'import numpy as np\n'), ((36775, 36818), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (36783, 36818), True, 'import numpy as np\n'), ((36883, 36926), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (36891, 36926), True, 'import numpy as np\n'), ((36992, 37035), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 0]])\n', (37000, 37035), True, 'import numpy as np\n'), ((37100, 37143), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (37108, 37143), True, 'import numpy as np\n'), ((38476, 38519), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (38484, 38519), True, 'import numpy as np\n'), ((38583, 38626), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (38591, 38626), True, 'import numpy as np\n'), ((38690, 38733), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (38698, 38733), True, 'import numpy as np\n'), ((38798, 38841), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (38806, 38841), True, 'import numpy as np\n'), ((38907, 38950), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (38915, 38950), True, 'import numpy as np\n'), ((39016, 39059), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (39024, 39059), True, 'import numpy as np\n'), ((39125, 39168), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (39133, 39168), True, 'import numpy as np\n'), ((39234, 39277), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (39242, 39277), True, 'import numpy as np\n'), ((39342, 39385), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (39350, 39385), True, 'import numpy as np\n'), ((39450, 39493), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (39458, 39493), True, 'import numpy as np\n'), ((39559, 39602), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 
1], [0, 0, 0]])\n', (39567, 39602), True, 'import numpy as np\n'), ((39668, 39711), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (39676, 39711), True, 'import numpy as np\n'), ((39777, 39820), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (39785, 39820), True, 'import numpy as np\n'), ((39885, 39928), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (39893, 39928), True, 'import numpy as np\n'), ((39994, 40037), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 0]])\n', (40002, 40037), True, 'import numpy as np\n'), ((40102, 40145), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (40110, 40145), True, 'import numpy as np\n'), ((41474, 41517), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (41482, 41517), True, 'import numpy as np\n'), ((41581, 41624), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (41589, 41624), True, 'import numpy as np\n'), ((41688, 41731), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (41696, 41731), True, 'import numpy as np\n'), ((41796, 41839), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (41804, 41839), True, 'import numpy as np\n'), ((41904, 41947), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (41912, 41947), True, 'import numpy as np\n'), ((42012, 42055), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (42020, 42055), True, 'import numpy as np\n'), ((42120, 42163), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (42128, 42163), True, 'import numpy as np\n'), ((42228, 42271), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (42236, 42271), True, 'import numpy as np\n'), ((42335, 42378), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (42343, 42378), True, 'import numpy as np\n'), ((42443, 42486), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (42451, 42486), True, 'import numpy as np\n'), ((42552, 42595), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [0, 0, 0]])\n', (42560, 42595), True, 'import numpy as np\n'), ((42661, 42704), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (42669, 42704), True, 'import numpy as np\n'), ((42770, 42813), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (42778, 42813), True, 'import numpy as np\n'), ((42878, 42921), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (42886, 42921), True, 'import numpy as np\n'), ((42987, 43030), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 
0]])\n', (42995, 43030), True, 'import numpy as np\n'), ((43095, 43138), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (43103, 43138), True, 'import numpy as np\n'), ((52011, 52036), 'numpy.unique', 'np.unique', (['truncated_keys'], {}), '(truncated_keys)\n', (52020, 52036), True, 'import numpy as np\n'), ((10871, 10883), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10881, 10883), True, 'import networkx as nx\n'), ((12241, 12253), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (12251, 12253), True, 'import networkx as nx\n'), ((61691, 61703), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (61701, 61703), True, 'import networkx as nx\n'), ((62475, 62502), 'networkx.difference', 'nx.difference', (['dg', 'computed'], {}), '(dg, computed)\n', (62488, 62502), True, 'import networkx as nx\n'), ((62668, 62695), 'networkx.difference', 'nx.difference', (['computed', 'dg'], {}), '(computed, dg)\n', (62681, 62695), True, 'import networkx as nx\n')]
|
import numpy as np
n = 300
serial = int(input())
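# Power level of cell (x, y): the rack ID is x+10; take the hundreds digit of
# rack_id*(rack_id*y + serial) (str(...)[-3] picks that digit) and subtract 5.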
grid = np.array([[int(str(((x+10)*y+serial)*(x+10))[-3])-5 for y in range(1, n+1)] for x in range(1, n+1)])
coord = (0, 0)
mVal, dim = 0, 0
for d in range(4, 2, -1):
squares = sum(grid[x:x-d+1 or None, y:y-d+1 or None] for x in range(d) for y in range(d))
val = int(squares.max())
if mVal < val:
coord = np.where(squares == val)
mVal = val
dim = d
x,y = coord[0][0], coord[1][0]
print(f'({x+1}, {y+1}) X {dim} = {mVal}')
|
[
"numpy.where"
] |
[((374, 398), 'numpy.where', 'np.where', (['(squares == val)'], {}), '(squares == val)\n', (382, 398), True, 'import numpy as np\n')]
|
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
def rmovie_basicvar(cdf,
var = 'tg1',
Mm = False,
km = False,
savefig = False,
figname = 'radynvar.html',
color = 'steelblue'):
'''
A function to produce an animated figure of RADYN variables.
This version is pre-constructed and lets you just input the
variable you want to plot. Other variables (such as populations)
will require more input, and are separate functions.
Turns the output into a pandas dataframe, which is then passed to
plotly express to create the animated figure
Parameters
__________
cdf : The radyn cdf object
var : str
The variable to plot (default = 'tg1')
Mm : Boolean
Plot height in Mm (default = False)
km : Boolean
Plot height in km (default = False)
savefig : Boolean
Save the figure (html file)
figname : str
Filename, if saving the output
NOTES :
So far, allowed variables are
tg1 - temperature
ne1 - electron density
bheat1 - beam heating rate
d1 - mass density
vz1 - velocity
np - proton density
<NAME>, March 2021
'''
########################################################################
# Some preliminary set up
########################################################################
if Mm == True:
xtitle = 'Height [Mm]'
height = cdf.z1/1e8
elif km == True:
xtitle = 'Height [km]'
height = cdf.z1/1e5
else:
xtitle = 'Height [cm]'
height = cdf.z1
if var == 'tg1':
rvar = cdf.tg1
ytitle = 'Temperature [K]'
ylog = True
xlog = False
elif var == 'ne1':
rvar = cdf.ne1
ytitle = 'Electron Density [cm<sup>-3</sup>]'
ylog = True
xlog = False
elif var == 'bheat1':
rvar = cdf.bheat1
ytitle = 'Q<sub>beam</sub> [erg cm<sup>-3</sup> s<sup>-1</sup>]'
ylog = False
xlog = False
elif var == 'd1':
rvar = cdf.d1
ytitle = 'Mass Density [g cm<sup>-3</sup>]'
ylog = True
xlog = False
elif var == 'vz1':
rvar = cdf.vz1/1e5
ytitle = 'Velocity [km s<sup>-1</sup>]'
ylog = False
xlog = False
elif var == 'np':
rvar = cdf.n1[:,:,5,0]
ytitle = 'Proton Density [cm<sup>-3</sup>]'
ylog = True
xlog = False
template = dict(
layout = go.Layout(font = dict(family = "Rockwell", size = 16),
title_font = dict(family = "Rockwell", size = 20),
plot_bgcolor = 'white',
paper_bgcolor = 'white',
xaxis = dict(
showexponent = 'all',
exponentformat = 'e',
tickangle = 0,
linewidth = 3,
showgrid = True,
),
yaxis = dict(
showexponent = 'all',
exponentformat = 'e',
linewidth = 3,
showgrid = True,
anchor = 'free',
position = 0,
domain = [0.0,1]
),
coloraxis_colorbar = dict(
thickness = 15,
tickformat = '0.2f',
ticks = 'outside',
titleside = 'right'
)
))
########################################################################
# Build the dataframe
########################################################################
col1 = ytitle
col2 = xtitle
time = 'Time [s]'
timeind = 'Time index'
df_list = []
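    # Build one long-format frame: each time step contributes its own block of rows,
    # so plotly express can animate over the 'Time [s]' column.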
for i in range(len(cdf.time)):
data = {col1:rvar[i,:],
col2:height[i,:],
time: cdf.time[i],
timeind: i
}
df_list.append(pd.DataFrame(data))
df = pd.concat(df_list)
########################################################################
# Plot the variable
########################################################################
h1 = 700
w1 = 700
fig1 = px.line(df,
x = df.columns[1], y = df.columns[0],
# animation_group = 'Time [s]',
animation_frame = 'Time [s]',
log_x = xlog,
log_y = ylog,
template = template,
color_discrete_sequence = [color])
fig1.show()
if savefig == True:
fig1.write_html(figname)
return df
def rmovie(var1, var2,
time = [-10.0],
savefig = False,
figname = 'radynvar.html',
xtitle = 'Var 1',
ytitle = 'Var 2',
title = ' ',
color = 'steelblue',
xlog = False, ylog = False):
'''
A function to produce an animated figure of RADYN variables.
This version is 'dumb' and just plots col1 vs col2 without any
    axis labels, unless passed through the function call.
Variables must be input as [time, dim1]
Turns the output into a pandas dataframe, which is then passed to
plotly express to create the animated figure
Parameters
__________
var1 : float
The variable to plot on the x-axis [time, dim1]
var2 : float
The variable to plot on the y-axis [time, dim1]
xtitle : str
The xaxis label (default "Var 1")
ytitle : str
The xaxis label (default "Var 2")
title : str
A plot title (default " ")
savefig : Boolean
Save the figure (html file)
figname : str
Filename, if saving the output
xlog : boolean
Default is false. Set to True to have log x-axis
ylog : boolean
Default is false. Set to True to have log y-axis
NOTES :
<NAME>, March 2021
'''
########################################################################
# Some preliminary set up
########################################################################
if time[0] == -10:
time = np.arange(0,var1.shape[0])
col3 = 'Time [index]'
else:
col3 = 'Time [s]'
template = dict(
layout = go.Layout(font = dict(family = "Rockwell", size = 16),
title_font = dict(family = "Rockwell", size = 20),
plot_bgcolor = 'white',
paper_bgcolor = 'white',
xaxis = dict(
showexponent = 'all',
exponentformat = 'e',
tickangle = 0,
linewidth = 3,
showgrid = True,
),
yaxis = dict(
showexponent = 'all',
exponentformat = 'e',
linewidth = 3,
showgrid = True,
anchor = 'free',
position = 0,
domain = [0.0,1]
),
coloraxis_colorbar = dict(
thickness = 15,
tickformat = '0.2f',
ticks = 'outside',
titleside = 'right'
)
))
########################################################################
# Build the dataframe
########################################################################
col1 = xtitle
col2 = ytitle
df_list = []
for i in range(len(time)):
data = {col1:var1[i,:],
col2:var2[i,:],
col3: time[i],
}
df_list.append(pd.DataFrame(data))
df = pd.concat(df_list)
########################################################################
# Plot the variable
########################################################################
h1 = 700
w1 = 700
fig1 = px.line(df,
x = df.columns[0], y = df.columns[1],
# animation_group = 'Time [s]',
animation_frame = df.columns[2],
log_x = xlog,
log_y = ylog,
title = title,
color_discrete_sequence = [color],
template = template)
fig1.show()
if savefig == True:
fig1.write_html(figname)
return df
|
[
"pandas.DataFrame",
"pandas.concat",
"numpy.arange",
"plotly.express.line"
] |
[((4737, 4890), 'plotly.express.line', 'px.line', (['df'], {'x': 'df.columns[1]', 'y': 'df.columns[0]', 'animation_frame': '"""Time [s]"""', 'log_x': 'xlog', 'log_y': 'ylog', 'template': 'template', 'color_discrete_sequence': '[color]'}), "(df, x=df.columns[1], y=df.columns[0], animation_frame='Time [s]',\n log_x=xlog, log_y=ylog, template=template, color_discrete_sequence=[color])\n", (4744, 4890), True, 'import plotly.express as px\n'), ((8830, 9003), 'plotly.express.line', 'px.line', (['df'], {'x': 'df.columns[0]', 'y': 'df.columns[1]', 'animation_frame': 'df.columns[2]', 'log_x': 'xlog', 'log_y': 'ylog', 'title': 'title', 'color_discrete_sequence': '[color]', 'template': 'template'}), '(df, x=df.columns[0], y=df.columns[1], animation_frame=df.columns[2],\n log_x=xlog, log_y=ylog, title=title, color_discrete_sequence=[color],\n template=template)\n', (8837, 9003), True, 'import plotly.express as px\n'), ((4498, 4516), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (4507, 4516), True, 'import pandas as pd\n'), ((6793, 6820), 'numpy.arange', 'np.arange', (['(0)', 'var1.shape[0]'], {}), '(0, var1.shape[0])\n', (6802, 6820), True, 'import numpy as np\n'), ((8590, 8608), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (8599, 8608), True, 'import pandas as pd\n'), ((4460, 4478), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4472, 4478), True, 'import pandas as pd\n'), ((8552, 8570), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (8564, 8570), True, 'import pandas as pd\n')]
|
import numpy as np
from numba import njit
from snapshot_functions import read_particles_filter
from scipy.linalg import eigh
def run(argv):
if len(argv) < 5:
print('python script.py <IC-file> <preIC-file> <ID> <radius>')
return 1
ID = int(argv[3])
r = float(argv[4])
print('getting IDs of nearby particles')
pos, header = read_particles_filter(argv[2],ID_list=[ID],opts={'pos':True})
IDs, header = read_particles_filter(argv[2],center=pos[0],radius=r,opts={'ID':True})
print('reading positions of %d particles'%len(IDs))
pos0, ID0, header = read_particles_filter(argv[2],ID_list=IDs,opts={'pos':True,'ID':True})
sort0 = np.argsort(ID0)
ID0 = ID0[sort0]
pos0 = pos0[sort0] - pos
pos1, ID1, header = read_particles_filter(argv[1],ID_list=IDs,opts={'pos':True,'ID':True})
sort1 = np.argsort(ID1)
ID1 = ID1[sort1]
pos1 = pos1[sort1] - pos
if not np.array_equal(ID0,ID1):
print('Error')
print(np.stack((ID0,ID1)).T.tolist())
return
rot = np.diag((1,1,1))
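  # Two passes: the first estimates the tensor in the original frame; the second
  # re-estimates it after rotating into the eigenframe of that first estimate.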
for i in range(2):
if i > 0:
eigval, eigvec = eigh(e)
rot1 = eigvec.T
print('rotate by %.0f degrees'%(np.arccos((np.trace(rot1)-1)/2)*180./np.pi))
pos = (rot1 @ (pos.T)).T
pos0 = (rot1 @ (pos0.T)).T
pos1 = (rot1 @ (pos1.T)).T
rot = rot1 @ rot
disp = pos1 - pos0
e = np.zeros((3,3))
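    # For each axis c, take the 32 particles closest to that axis and fit each
    # displacement component linearly against position along c; the slopes give
    # the deformation tensor, which is symmetrized below.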
for c in range(3):
dist2 = np.zeros(pos0.shape[0])
for d in range(3):
if d != c: dist2 += pos0[:,d]**2
idx = np.argsort(dist2)[:32]
for d in range(3):
e[c,d] = np.polyfit(pos0[idx,c],disp[idx,d],1)[0]
e = .5*(e + e.T)
with np.printoptions(precision=5, suppress=True):
print('Tidal tensor:')
print(e)
with np.printoptions(precision=5, suppress=True):
print('rotation matrix (%.0f degrees)'%(np.arccos((np.trace(rot)-1)/2)*180./np.pi))
print(rot)
np.savetxt('rotation_%d.txt'%ID,rot)
if __name__ == '__main__':
from sys import argv
run(argv)
|
[
"numpy.stack",
"numpy.trace",
"numpy.polyfit",
"numpy.savetxt",
"numpy.zeros",
"numpy.argsort",
"scipy.linalg.eigh",
"snapshot_functions.read_particles_filter",
"numpy.array_equal",
"numpy.diag",
"numpy.printoptions"
] |
[((361, 425), 'snapshot_functions.read_particles_filter', 'read_particles_filter', (['argv[2]'], {'ID_list': '[ID]', 'opts': "{'pos': True}"}), "(argv[2], ID_list=[ID], opts={'pos': True})\n", (382, 425), False, 'from snapshot_functions import read_particles_filter\n'), ((440, 514), 'snapshot_functions.read_particles_filter', 'read_particles_filter', (['argv[2]'], {'center': 'pos[0]', 'radius': 'r', 'opts': "{'ID': True}"}), "(argv[2], center=pos[0], radius=r, opts={'ID': True})\n", (461, 514), False, 'from snapshot_functions import read_particles_filter\n'), ((591, 666), 'snapshot_functions.read_particles_filter', 'read_particles_filter', (['argv[2]'], {'ID_list': 'IDs', 'opts': "{'pos': True, 'ID': True}"}), "(argv[2], ID_list=IDs, opts={'pos': True, 'ID': True})\n", (612, 666), False, 'from snapshot_functions import read_particles_filter\n'), ((673, 688), 'numpy.argsort', 'np.argsort', (['ID0'], {}), '(ID0)\n', (683, 688), True, 'import numpy as np\n'), ((762, 837), 'snapshot_functions.read_particles_filter', 'read_particles_filter', (['argv[1]'], {'ID_list': 'IDs', 'opts': "{'pos': True, 'ID': True}"}), "(argv[1], ID_list=IDs, opts={'pos': True, 'ID': True})\n", (783, 837), False, 'from snapshot_functions import read_particles_filter\n'), ((844, 859), 'numpy.argsort', 'np.argsort', (['ID1'], {}), '(ID1)\n', (854, 859), True, 'import numpy as np\n'), ((1031, 1049), 'numpy.diag', 'np.diag', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (1038, 1049), True, 'import numpy as np\n'), ((1955, 1994), 'numpy.savetxt', 'np.savetxt', (["('rotation_%d.txt' % ID)", 'rot'], {}), "('rotation_%d.txt' % ID, rot)\n", (1965, 1994), True, 'import numpy as np\n'), ((920, 944), 'numpy.array_equal', 'np.array_equal', (['ID0', 'ID1'], {}), '(ID0, ID1)\n', (934, 944), True, 'import numpy as np\n'), ((1397, 1413), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1405, 1413), True, 'import numpy as np\n'), ((1800, 1843), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(5)', 'suppress': '(True)'}), '(precision=5, suppress=True)\n', (1815, 1843), True, 'import numpy as np\n'), ((1111, 1118), 'scipy.linalg.eigh', 'eigh', (['e'], {}), '(e)\n', (1115, 1118), False, 'from scipy.linalg import eigh\n'), ((1452, 1475), 'numpy.zeros', 'np.zeros', (['pos0.shape[0]'], {}), '(pos0.shape[0])\n', (1460, 1475), True, 'import numpy as np\n'), ((1699, 1742), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(5)', 'suppress': '(True)'}), '(precision=5, suppress=True)\n', (1714, 1742), True, 'import numpy as np\n'), ((1557, 1574), 'numpy.argsort', 'np.argsort', (['dist2'], {}), '(dist2)\n', (1567, 1574), True, 'import numpy as np\n'), ((1624, 1665), 'numpy.polyfit', 'np.polyfit', (['pos0[idx, c]', 'disp[idx, d]', '(1)'], {}), '(pos0[idx, c], disp[idx, d], 1)\n', (1634, 1665), True, 'import numpy as np\n'), ((976, 996), 'numpy.stack', 'np.stack', (['(ID0, ID1)'], {}), '((ID0, ID1))\n', (984, 996), True, 'import numpy as np\n'), ((1901, 1914), 'numpy.trace', 'np.trace', (['rot'], {}), '(rot)\n', (1909, 1914), True, 'import numpy as np\n'), ((1194, 1208), 'numpy.trace', 'np.trace', (['rot1'], {}), '(rot1)\n', (1202, 1208), True, 'import numpy as np\n')]
|
from collections import deque
import numpy as np
import cv2
import chainer
from chainer import links as L
import chainerrl
from chainerrl import agents
from chainerrl.action_value import DiscreteActionValue
from chainerrl import explorers
from chainerrl import links
from chainerrl import replay_buffer
def infer(agent, state):
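    # Preprocess each frame: grayscale, then resize to 84x84 (the standard DQN
    # input size) before handing the stacked frames to the agent for an action.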
gray_state = [cv2.cvtColor(s, cv2.COLOR_RGB2GRAY) for s in state]
dqn_state = [cv2.resize(s, (84, 84), interpolation=cv2.INTER_AREA) \
for s in gray_state]
input_tensor = np.array(dqn_state).astype(np.float32)
return agent.act(input_tensor)
class Agent(object):
def __init__(self,
modelpath,
n_actions=4,
n_stack_frames=4):
# Predefined parameters.
replay_start_size = 5 * 10 ** 4
# Load the model.
q_func = links.Sequence(
links.NatureDQNHead(),
L.Linear(512, n_actions),
DiscreteActionValue)
opt = chainer.optimizers.RMSpropGraves(
lr=2.5e-4, alpha=0.95, momentum=0.0, eps=1e-2)
opt.setup(q_func)
rbuf = replay_buffer.ReplayBuffer(10 ** 6)
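        # The replay buffer and explorer are only needed to satisfy the DQN
        # constructor; this agent is loaded from disk and used purely for
        # inference via act().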
explorer = explorers.LinearDecayEpsilonGreedy(
start_epsilon=1.0, end_epsilon=0.1,
decay_steps=10 ** 6,
random_action_func=lambda: np.random.randint(n_actions))
def phi(x):
# Feature extractor
return np.asarray(x, dtype=np.float32) / 255
Agent = agents.DQN
self._agent = Agent(q_func, opt, rbuf, gpu=-1, gamma=0.99,
explorer=explorer, replay_start_size=replay_start_size,
target_update_interval=10 ** 4,
clip_delta=True,
update_interval=4,
batch_accumulator='sum',
phi=phi)
self._agent.load(modelpath)
self._state = deque(
[], maxlen=n_stack_frames)
self._action = 0
def get_action(self):
return self._action
def put_state(self, state):
        # Note: should divide this code into 2 parts:
        #       a put-state part and a do-inference part...
self._state.append(state)
if len(self._state) < self._state.maxlen:
# Need to wait.
return
state = list(self._state)
self._action = infer(self._agent, state)
|
[
"cv2.resize",
"chainerrl.links.NatureDQNHead",
"chainerrl.replay_buffer.ReplayBuffer",
"chainer.optimizers.RMSpropGraves",
"cv2.cvtColor",
"numpy.asarray",
"numpy.random.randint",
"numpy.array",
"collections.deque",
"chainer.links.Linear"
] |
[((353, 388), 'cv2.cvtColor', 'cv2.cvtColor', (['s', 'cv2.COLOR_RGB2GRAY'], {}), '(s, cv2.COLOR_RGB2GRAY)\n', (365, 388), False, 'import cv2\n'), ((422, 475), 'cv2.resize', 'cv2.resize', (['s', '(84, 84)'], {'interpolation': 'cv2.INTER_AREA'}), '(s, (84, 84), interpolation=cv2.INTER_AREA)\n', (432, 475), False, 'import cv2\n'), ((1002, 1087), 'chainer.optimizers.RMSpropGraves', 'chainer.optimizers.RMSpropGraves', ([], {'lr': '(0.00025)', 'alpha': '(0.95)', 'momentum': '(0.0)', 'eps': '(0.01)'}), '(lr=0.00025, alpha=0.95, momentum=0.0, eps=0.01\n )\n', (1034, 1087), False, 'import chainer\n'), ((1136, 1171), 'chainerrl.replay_buffer.ReplayBuffer', 'replay_buffer.ReplayBuffer', (['(10 ** 6)'], {}), '(10 ** 6)\n', (1162, 1171), False, 'from chainerrl import replay_buffer\n'), ((1930, 1962), 'collections.deque', 'deque', (['[]'], {'maxlen': 'n_stack_frames'}), '([], maxlen=n_stack_frames)\n', (1935, 1962), False, 'from collections import deque\n'), ((535, 554), 'numpy.array', 'np.array', (['dqn_state'], {}), '(dqn_state)\n', (543, 554), True, 'import numpy as np\n'), ((894, 915), 'chainerrl.links.NatureDQNHead', 'links.NatureDQNHead', ([], {}), '()\n', (913, 915), False, 'from chainerrl import links\n'), ((929, 953), 'chainer.links.Linear', 'L.Linear', (['(512)', 'n_actions'], {}), '(512, n_actions)\n', (937, 953), True, 'from chainer import links as L\n'), ((1448, 1479), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1458, 1479), True, 'import numpy as np\n'), ((1347, 1375), 'numpy.random.randint', 'np.random.randint', (['n_actions'], {}), '(n_actions)\n', (1364, 1375), True, 'import numpy as np\n')]
|
import pytest
import time
import numpy as np
from spotify_confidence.analysis.frequentist.confidence_computers.z_test_computer import sequential_bounds
@pytest.mark.skip(reason="Skipping because this test is very slow")
def test_many_days():
"""
    This input (based on a real experiment) is very long, which can make the bounds calculation slow.
"""
t = [
0.0016169976338740648,
0.0057857955498163615,
0.012200379088315757,
0.020199591701142824,
0.02956441064038571,
0.04047102718841871,
0.052929825413405296,
0.06580092295219643,
0.07878439818310792,
0.09148496950057272,
0.1028893343050959,
0.1128434997940756,
0.12298934256730025,
0.13280979910049193,
0.14267997977787195,
0.15281963941289514,
0.16293176212095561,
0.17198778455162406,
0.17996747917082068,
0.18786110540725684,
0.1955669737257397,
0.20335013690301407,
0.21277055903588274,
0.22148328777708232,
0.2295912740670489,
0.23640586948077766,
0.2431234831038822,
0.24987292468428604,
0.2568336065927525,
0.2649271880853427,
0.27282722271091664,
0.2799894816822785,
0.2862801096305317,
0.2925685639072496,
0.2988294699944579,
0.3051314956400879,
0.3118994077972684,
0.31887303037202536,
0.32523581745772245,
0.3307398353487736,
0.33616198578702633,
0.34151324975562525,
0.3478405485563082,
0.3546238566149848,
0.36130761502236336,
0.36751189302418574,
0.3730571543616735,
0.37865278180851814,
0.38428987795273567,
0.3900127609160433,
0.3964718089893684,
0.40306122104207753,
0.40914555292031984,
0.41449831480764515,
0.4198849769608837,
0.4256404199470336,
0.4315384355133149,
0.43801594290086987,
0.4444516211895538,
0.45034373518130405,
0.4556807858158224,
0.4610488197166289,
0.46633036852044285,
0.4717294082126311,
0.47769497653470894,
0.48369759863580825,
0.4892945325380834,
0.49431792124380325,
0.49935417177798586,
0.5043009639028166,
0.5093262559789482,
0.5149098888134348,
0.5205835093969735,
0.5261172491490695,
0.5310141031413226,
0.5359027242118537,
0.540068909216935,
0.5451620919252675,
0.5506752550043325,
0.5562355968920056,
0.5614758121490083,
0.5660462437469214,
0.5706616804819072,
0.5750453002157994,
0.5795939049979849,
0.5861802311128667,
0.5913273051077091,
0.5958976691303413,
0.6001503392324151,
0.6042404457337608,
0.6082963816680697,
0.6124734913435614,
0.6174918231657613,
0.6223867287374153,
0.6268875352709179,
0.6308341907134806,
0.6348490070893678,
0.6388763812049537,
0.6430405276890614,
0.6476616520101889,
0.6525750168960728,
0.6570689758011117,
0.6610427627189518,
0.6649727383296814,
0.6689671694958335,
0.673019050913289,
0.6776959248411508,
0.6825336054124376,
0.6869984168463193,
0.6908780826604262,
0.6949984065748767,
0.6991746490342636,
0.7033415661048878,
0.7082721626873987,
0.7131064081819068,
0.7176506656210218,
0.7216193168175142,
0.7256178250256133,
0.7296113326629264,
0.733677461202103,
0.7383860054116087,
0.7431864069529378,
0.7475115177561259,
0.7513220765829758,
0.7551652404828552,
0.7591154774153049,
0.7635879699061145,
0.76888963361854,
0.7740750002725536,
0.7788235152607059,
0.7829338267710377,
0.7870690059847372,
0.7912444713283939,
0.7954864645360872,
0.8002680350991415,
0.8051864906561857,
0.8097254772233912,
0.8137210008565843,
0.8175460095309978,
0.8214444612731922,
0.8256005212486867,
0.8302889054993935,
0.8351108860804202,
0.839542135124793,
0.8433705788759852,
0.8472835029908369,
0.8513248314019267,
0.8556693700983707,
0.8606610209471658,
0.865499591259651,
0.8699232042972833,
0.8737653545679493,
0.8776996212090155,
0.8816179062961511,
0.8856027192473231,
0.8900849425785808,
0.8947120585746139,
0.8993599427069738,
0.9035026227768521,
0.9075820073336299,
0.9115699850604569,
0.9158137239629064,
0.9207252417911126,
0.925749689176233,
0.9303560370359392,
0.9343408161994707,
0.9384800274049299,
0.9426168396879175,
0.9475247422385961,
0.9523909621035122,
0.9573336433987555,
0.9618665256655873,
0.9657568345864344,
0.9697355995499667,
0.973736889607129,
0.9778353641807583,
0.9828378833872299,
0.987703190985854,
0.9921586319807856,
0.9960384779956415,
1.0,
]
start_time = time.time()
results = sequential_bounds(np.array(t), alpha=0.003333333, sides=2)
my_bounds = results.bounds
expected = np.array(
[
5.75400023,
8.0,
5.14701605,
4.91478643,
4.80691346,
4.69004328,
4.57921075,
4.49683943,
4.44452939,
4.38899083,
4.35683792,
4.33289847,
4.301461,
4.27383028,
4.24513591,
4.21444005,
4.18809224,
4.17037988,
4.15702106,
4.13796352,
4.12345883,
4.10808648,
4.07898394,
4.06169498,
4.04985422,
4.04453139,
4.03288177,
4.02205301,
4.00664024,
3.98770613,
3.97358123,
3.96589571,
3.95946059,
3.94995533,
3.94128534,
3.93114789,
3.91870273,
3.90749163,
3.90064315,
3.8958719,
3.88847126,
3.88184277,
3.86841705,
3.85642932,
3.84721152,
3.84099201,
3.83689676,
3.8295672,
3.82234648,
3.81501541,
3.80286989,
3.79370807,
3.78728177,
3.78449351,
3.77865864,
3.76988501,
3.76230126,
3.75251025,
3.74474277,
3.73953663,
3.73534961,
3.72974059,
3.72466752,
3.71785112,
3.70903202,
3.70176221,
3.6976847,
3.6944938,
3.68996741,
3.68449851,
3.67888767,
3.67142884,
3.66522708,
3.65968721,
3.65649679,
3.65207508,
3.65156885,
3.643952,
3.63644572,
3.63029181,
3.62665696,
3.62527741,
3.62117738,
3.61789837,
3.6128686,
3.59904477,
3.5976517,
3.59678297,
3.59434356,
3.59116304,
3.58814574,
3.5835558,
3.57659985,
3.5726481,
3.56990393,
3.56879169,
3.56501955,
3.56127173,
3.55720436,
3.55194666,
3.54597713,
3.5436994,
3.54287161,
3.53974477,
3.53649679,
3.53314876,
3.52700997,
3.52175088,
3.51873367,
3.51846468,
3.51401711,
3.5106822,
3.50742162,
3.50113309,
3.49658758,
3.49376264,
3.49238249,
3.48979047,
3.48725107,
3.48341163,
3.47810608,
3.47381485,
3.47184685,
3.47110719,
3.46801712,
3.46472076,
3.45913659,
3.45209404,
3.4484684,
3.44587153,
3.44472549,
3.44242755,
3.43895355,
3.43549018,
3.43080058,
3.42621252,
3.42437516,
3.42371762,
3.42122891,
3.41861765,
3.41451447,
3.40936002,
3.4051931,
3.40307035,
3.40295986,
3.40052495,
3.39688763,
3.39279348,
3.38725208,
3.38421998,
3.38214471,
3.38133324,
3.37908335,
3.37689107,
3.37364203,
3.36937673,
3.36593888,
3.36250238,
3.36109704,
3.35878324,
3.35666501,
3.35305866,
3.34754255,
3.34364255,
3.34157534,
3.34085629,
3.33864193,
3.33563376,
3.33016843,
3.32687574,
3.32338656,
3.32166421,
3.32107266,
3.31861916,
3.31615129,
3.31334059,
3.30792367,
3.30479742,
3.30339238,
3.30296421,
3.30041534,
]
)
assert np.allclose(my_bounds, expected)
    # if the calculation with max_nints takes longer than 15 seconds, something is most likely broken
assert (time.time() - start_time) < 15
# Run a second time but with initial state from last run.
start_time = time.time()
results = sequential_bounds(np.array(t), alpha=0.003333333, sides=2, state=results.state)
my_bounds = results.bounds
assert np.allclose(my_bounds, expected)
    # with the precomputed state from the first run, the second calculation should be near-instant
print(f"Time passed second round: {time.time() - start_time}")
assert (time.time() - start_time) < 0.01
@pytest.mark.skip(reason="Skipping because this test is very slow")
def test_many_days_fast_and_no_crash():
"""
This is based on experiment 1735 on 26.11.2020. The calculation of the corresponding bounds takes many minutes
    without the performance tweak. Therefore, this test only checks for absence of crashes and for timing constraints, but
    does not compare against the baseline without the performance tweak. There is a Jupyter notebook making that comparison.
"""
t = [
0.011404679673257933,
0.02292450819418779,
0.0356455988484443,
0.04835740420885424,
0.05971666577058213,
0.06976017458481187,
0.07984165086754545,
0.09002459314412276,
0.10026356929804565,
0.11129746744100509,
0.1222487922920801,
0.13250332796555583,
0.1418309168157694,
0.15072692856918676,
0.15940425274581055,
0.16819162796171988,
0.17766544268380677,
0.18725283769713902,
0.19600162922594835,
0.20386600701959812,
0.21159934032678884,
0.21916233120704773,
0.22688560894714668,
0.23509036348536208,
0.24366994698965522,
0.2515994198750076,
0.25875219123481424,
0.2659624389836802,
0.2731790169781248,
0.28051081384508175,
0.28822790138928306,
0.2962915558739476,
0.3037246366701631,
0.31063411372423433,
0.31767205835063517,
0.32464032826076655,
0.3318100596369355,
0.3397812253123048,
0.3476375502493003,
0.3550356746451523,
0.3616457394863339,
0.3683042335071859,
0.375005792804928,
0.38175551518794676,
0.3891222824602354,
0.39652683513644266,
0.40347332732118724,
0.4098512458112366,
0.4163205187081655,
0.42263992444151655,
0.42899148558161226,
0.43464157988476515,
0.43858871208254674,
0.44192382717460427,
0.44482627278235426,
0.4474605932759375,
0.44957511937869815,
0.4509048070694502,
0.45222422911858906,
0.45333747002744257,
0.45426598540713137,
0.4551955091445229,
0.45605329943533507,
0.456895460181754,
0.4578387508027823,
0.45881449093488524,
0.45965707183034693,
0.4603621239391219,
0.4610501740166303,
0.46173166976907054,
0.4624475477181825,
0.4632872155802805,
0.4641010162663083,
0.46481571779810027,
0.4654194019478082,
0.4660207332628762,
0.4666458170038323,
0.4672646265190821,
0.46791675385342846,
0.4685898046101078,
0.46918687841487516,
0.46969451649339183,
0.47019581032136176,
0.4706811945055765,
0.47116992587716583,
0.47170379526092326,
0.47227291514937425,
0.4727852448922026,
0.47322669549150526,
0.4736554715946826,
0.47408022827201673,
0.47450655350577753,
0.4749737592414058,
0.47545756086422586,
0.4759381553493523,
0.47630259262910407,
0.4766609657576709,
0.47699441004302984,
0.4773518028238301,
0.477775327063972,
0.4781977729215707,
0.47856485714029223,
0.47888037506649034,
0.47919262983512245,
0.47949520717080135,
0.47980748994936967,
0.4801789017032324,
0.4805627078538587,
0.48090167009664675,
0.4811904245288165,
0.48149113920373887,
0.4817901452725537,
0.4820966860142033,
0.48243977972257923,
0.4827841618880198,
0.48309197708176604,
0.4833586316742829,
0.4836129058750043,
0.4838654994795544,
0.4841171547512422,
0.48439948090305657,
0.48470691796266424,
0.4849764575786085,
0.4852081697757299,
0.48545255646897667,
0.4856974893559792,
0.48595208567096676,
0.48624575584693763,
0.4865416528128355,
0.4867930840050338,
0.4870117575768593,
0.4872274340855126,
0.4874240218226533,
0.4876215198827202,
0.4878617751103791,
0.488108108494191,
0.48831807097586183,
0.4884937072807334,
0.48866595438332605,
0.488852192449045,
0.48903411698459087,
0.4892522303576926,
0.4894829201921431,
0.4896802221826566,
0.4898457609055321,
0.49001188783706756,
0.4901847091433521,
0.4903469286887892,
0.4905345812562857,
0.49073597269748276,
0.49091467609036693,
0.4910691508884479,
0.4912115954189357,
0.49135658885361677,
0.49150574176382184,
0.49167835299558493,
0.49186735004001847,
0.49203167033066975,
0.49216849886895175,
0.4923075682021289,
0.4924506289512129,
0.49259525825672346,
0.49276396210238826,
0.49294465420074185,
0.4931019580023778,
0.49330306934421303,
0.4935200763248353,
0.49373208353184794,
0.4939721566949216,
0.4942334053697541,
0.4944958444668745,
0.4947262121870588,
0.49492469059489225,
0.4951192336066912,
0.495294323717807,
0.4954780829041733,
0.4956838158854796,
0.49592192835302007,
0.49614550366367866,
0.49633301618149417,
0.49652995404283723,
0.4967104500716375,
0.4969174855149766,
0.49712443692850716,
0.4973541744251272,
0.49756258235533957,
0.49772464784612763,
0.4978989396740621,
0.4980669292663541,
0.4982378038820735,
0.49843929335804726,
0.4986487236509305,
0.49883442952786183,
0.49899118713574214,
0.49915640374435144,
0.49932506557511197,
]
alpha = 0.0033333333333333335
sides = 2
start_time = time.time()
my_bounds = sequential_bounds(np.array(t), alpha=alpha, sides=sides).bounds
expected = np.array(
[
5.0536015,
4.819334,
4.70702194,
4.60970036,
4.55329219,
4.5118919,
4.465161,
4.42168832,
4.37932413,
4.33343066,
4.29780246,
4.26550766,
4.2476601,
4.22343408,
4.20455427,
4.1834642,
4.15580542,
4.13352266,
4.1170148,
4.10326736,
4.08845795,
4.07496919,
4.05959646,
4.0417501,
4.02262887,
4.01056674,
4.00192679,
3.98996708,
3.97709149,
3.96442225,
3.95010566,
3.93456306,
3.92603865,
3.91801377,
3.90630556,
3.8975012,
3.88641115,
3.87143326,
3.85966246,
3.85112482,
3.84569926,
3.83714224,
3.82719647,
3.81910741,
3.80682977,
3.79652758,
3.78889289,
3.78428912,
3.77646938,
3.76966463,
3.76150223,
3.75820905,
3.76088934,
3.76171382,
3.76141619,
3.76079216,
3.76237742,
3.76725034,
3.76769877,
3.7690107,
3.7710916,
3.77168583,
3.76813708,
3.7705804,
3.76669411,
3.76711572,
3.76808636,
3.76962133,
3.76680748,
3.76844159,
3.76552364,
3.76210975,
3.76321355,
3.76471956,
3.76227721,
3.76424368,
3.76172169,
3.75923,
3.76099518,
3.75829319,
3.76028082,
3.75824824,
3.7562443,
3.76013739,
3.75818674,
3.7560594,
3.75379557,
3.75757852,
3.75582548,
3.75412511,
3.75244297,
3.75075688,
3.74891172,
3.75280489,
3.75090966,
3.7494744,
3.74806463,
3.75254602,
3.75114099,
3.74947802,
3.74782149,
3.74638383,
3.75092969,
3.74970739,
3.7485241,
3.74730404,
3.74585452,
3.74435839,
3.74303855,
3.74191532,
3.74074663,
3.73958567,
3.74415751,
3.74282592,
3.74149075,
3.74029857,
3.73926672,
3.73828357,
3.73730769,
3.7363362,
3.7352472,
3.73406243,
3.74020438,
3.7393112,
3.73836986,
3.73742713,
3.73644796,
3.73531947,
3.73418345,
3.73321896,
3.73238074,
3.73155456,
3.73080198,
3.73004637,
3.7291278,
3.72818669,
3.7273851,
3.72671496,
3.72605809,
3.72534827,
3.72465527,
3.72382494,
3.72294733,
3.73077145,
3.73014101,
3.72950865,
3.72885115,
3.7282343,
3.72752112,
3.72675617,
3.7260778,
3.7254917,
3.72495149,
3.72440186,
3.72383671,
3.723183,
3.72246763,
3.72184599,
3.7213286,
3.72080295,
3.72026245,
3.71971626,
3.71907946,
3.71839777,
3.71780463,
3.71704671,
3.7162294,
3.71543144,
3.71452847,
3.72065881,
3.71967136,
3.71880523,
3.71805949,
3.71732896,
3.71667185,
3.71598258,
3.71521135,
3.71431933,
3.71348235,
3.71278081,
3.71204444,
3.71136994,
3.7105967,
3.70982427,
3.70896735,
3.71527887,
3.71467395,
3.71402372,
3.71339733,
3.71276051,
3.71201001,
3.71123041,
3.71053954,
3.70995666,
3.70934263,
3.70871611,
]
)
assert np.allclose(my_bounds, expected)
# if the calculation with max_nints takes longer than 30 seconds, something is most likely broken
assert (time.time() - start_time) < 30
|
[
"numpy.allclose",
"pytest.mark.skip",
"numpy.array",
"time.time"
] |
[((155, 221), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skipping because this test is very slow"""'}), "(reason='Skipping because this test is very slow')\n", (171, 221), False, 'import pytest\n'), ((10635, 10701), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Skipping because this test is very slow"""'}), "(reason='Skipping because this test is very slow')\n", (10651, 10701), False, 'import pytest\n'), ((5478, 5489), 'time.time', 'time.time', ([], {}), '()\n', (5487, 5489), False, 'import time\n'), ((5610, 7901), 'numpy.array', 'np.array', (['[5.75400023, 8.0, 5.14701605, 4.91478643, 4.80691346, 4.69004328, \n 4.57921075, 4.49683943, 4.44452939, 4.38899083, 4.35683792, 4.33289847,\n 4.301461, 4.27383028, 4.24513591, 4.21444005, 4.18809224, 4.17037988, \n 4.15702106, 4.13796352, 4.12345883, 4.10808648, 4.07898394, 4.06169498,\n 4.04985422, 4.04453139, 4.03288177, 4.02205301, 4.00664024, 3.98770613,\n 3.97358123, 3.96589571, 3.95946059, 3.94995533, 3.94128534, 3.93114789,\n 3.91870273, 3.90749163, 3.90064315, 3.8958719, 3.88847126, 3.88184277, \n 3.86841705, 3.85642932, 3.84721152, 3.84099201, 3.83689676, 3.8295672, \n 3.82234648, 3.81501541, 3.80286989, 3.79370807, 3.78728177, 3.78449351,\n 3.77865864, 3.76988501, 3.76230126, 3.75251025, 3.74474277, 3.73953663,\n 3.73534961, 3.72974059, 3.72466752, 3.71785112, 3.70903202, 3.70176221,\n 3.6976847, 3.6944938, 3.68996741, 3.68449851, 3.67888767, 3.67142884, \n 3.66522708, 3.65968721, 3.65649679, 3.65207508, 3.65156885, 3.643952, \n 3.63644572, 3.63029181, 3.62665696, 3.62527741, 3.62117738, 3.61789837,\n 3.6128686, 3.59904477, 3.5976517, 3.59678297, 3.59434356, 3.59116304, \n 3.58814574, 3.5835558, 3.57659985, 3.5726481, 3.56990393, 3.56879169, \n 3.56501955, 3.56127173, 3.55720436, 3.55194666, 3.54597713, 3.5436994, \n 3.54287161, 3.53974477, 3.53649679, 3.53314876, 3.52700997, 3.52175088,\n 3.51873367, 3.51846468, 3.51401711, 3.5106822, 3.50742162, 3.50113309, \n 3.49658758, 3.49376264, 3.49238249, 3.48979047, 3.48725107, 3.48341163,\n 3.47810608, 3.47381485, 3.47184685, 3.47110719, 3.46801712, 3.46472076,\n 3.45913659, 3.45209404, 3.4484684, 3.44587153, 3.44472549, 3.44242755, \n 3.43895355, 3.43549018, 3.43080058, 3.42621252, 3.42437516, 3.42371762,\n 3.42122891, 3.41861765, 3.41451447, 3.40936002, 3.4051931, 3.40307035, \n 3.40295986, 3.40052495, 3.39688763, 3.39279348, 3.38725208, 3.38421998,\n 3.38214471, 3.38133324, 3.37908335, 3.37689107, 3.37364203, 3.36937673,\n 3.36593888, 3.36250238, 3.36109704, 3.35878324, 3.35666501, 3.35305866,\n 3.34754255, 3.34364255, 3.34157534, 3.34085629, 3.33864193, 3.33563376,\n 3.33016843, 3.32687574, 3.32338656, 3.32166421, 3.32107266, 3.31861916,\n 3.31615129, 3.31334059, 3.30792367, 3.30479742, 3.30339238, 3.30296421,\n 3.30041534]'], {}), '([5.75400023, 8.0, 5.14701605, 4.91478643, 4.80691346, 4.69004328, \n 4.57921075, 4.49683943, 4.44452939, 4.38899083, 4.35683792, 4.33289847,\n 4.301461, 4.27383028, 4.24513591, 4.21444005, 4.18809224, 4.17037988, \n 4.15702106, 4.13796352, 4.12345883, 4.10808648, 4.07898394, 4.06169498,\n 4.04985422, 4.04453139, 4.03288177, 4.02205301, 4.00664024, 3.98770613,\n 3.97358123, 3.96589571, 3.95946059, 3.94995533, 3.94128534, 3.93114789,\n 3.91870273, 3.90749163, 3.90064315, 3.8958719, 3.88847126, 3.88184277, \n 3.86841705, 3.85642932, 3.84721152, 3.84099201, 3.83689676, 3.8295672, \n 3.82234648, 3.81501541, 3.80286989, 3.79370807, 3.78728177, 3.78449351,\n 3.77865864, 3.76988501, 3.76230126, 3.75251025, 3.74474277, 3.73953663,\n 
3.73534961, 3.72974059, 3.72466752, 3.71785112, 3.70903202, 3.70176221,\n 3.6976847, 3.6944938, 3.68996741, 3.68449851, 3.67888767, 3.67142884, \n 3.66522708, 3.65968721, 3.65649679, 3.65207508, 3.65156885, 3.643952, \n 3.63644572, 3.63029181, 3.62665696, 3.62527741, 3.62117738, 3.61789837,\n 3.6128686, 3.59904477, 3.5976517, 3.59678297, 3.59434356, 3.59116304, \n 3.58814574, 3.5835558, 3.57659985, 3.5726481, 3.56990393, 3.56879169, \n 3.56501955, 3.56127173, 3.55720436, 3.55194666, 3.54597713, 3.5436994, \n 3.54287161, 3.53974477, 3.53649679, 3.53314876, 3.52700997, 3.52175088,\n 3.51873367, 3.51846468, 3.51401711, 3.5106822, 3.50742162, 3.50113309, \n 3.49658758, 3.49376264, 3.49238249, 3.48979047, 3.48725107, 3.48341163,\n 3.47810608, 3.47381485, 3.47184685, 3.47110719, 3.46801712, 3.46472076,\n 3.45913659, 3.45209404, 3.4484684, 3.44587153, 3.44472549, 3.44242755, \n 3.43895355, 3.43549018, 3.43080058, 3.42621252, 3.42437516, 3.42371762,\n 3.42122891, 3.41861765, 3.41451447, 3.40936002, 3.4051931, 3.40307035, \n 3.40295986, 3.40052495, 3.39688763, 3.39279348, 3.38725208, 3.38421998,\n 3.38214471, 3.38133324, 3.37908335, 3.37689107, 3.37364203, 3.36937673,\n 3.36593888, 3.36250238, 3.36109704, 3.35878324, 3.35666501, 3.35305866,\n 3.34754255, 3.34364255, 3.34157534, 3.34085629, 3.33864193, 3.33563376,\n 3.33016843, 3.32687574, 3.32338656, 3.32166421, 3.32107266, 3.31861916,\n 3.31615129, 3.31334059, 3.30792367, 3.30479742, 3.30339238, 3.30296421,\n 3.30041534])\n', (5618, 7901), True, 'import numpy as np\n'), ((9979, 10011), 'numpy.allclose', 'np.allclose', (['my_bounds', 'expected'], {}), '(my_bounds, expected)\n', (9990, 10011), True, 'import numpy as np\n'), ((10237, 10248), 'time.time', 'time.time', ([], {}), '()\n', (10246, 10248), False, 'import time\n'), ((10385, 10417), 'numpy.allclose', 'np.allclose', (['my_bounds', 'expected'], {}), '(my_bounds, expected)\n', (10396, 10417), True, 'import numpy as np\n'), ((16776, 16787), 'time.time', 'time.time', ([], {}), '()\n', (16785, 16787), False, 'import time\n'), ((16884, 19358), 'numpy.array', 'np.array', (['[5.0536015, 4.819334, 4.70702194, 4.60970036, 4.55329219, 4.5118919, \n 4.465161, 4.42168832, 4.37932413, 4.33343066, 4.29780246, 4.26550766, \n 4.2476601, 4.22343408, 4.20455427, 4.1834642, 4.15580542, 4.13352266, \n 4.1170148, 4.10326736, 4.08845795, 4.07496919, 4.05959646, 4.0417501, \n 4.02262887, 4.01056674, 4.00192679, 3.98996708, 3.97709149, 3.96442225,\n 3.95010566, 3.93456306, 3.92603865, 3.91801377, 3.90630556, 3.8975012, \n 3.88641115, 3.87143326, 3.85966246, 3.85112482, 3.84569926, 3.83714224,\n 3.82719647, 3.81910741, 3.80682977, 3.79652758, 3.78889289, 3.78428912,\n 3.77646938, 3.76966463, 3.76150223, 3.75820905, 3.76088934, 3.76171382,\n 3.76141619, 3.76079216, 3.76237742, 3.76725034, 3.76769877, 3.7690107, \n 3.7710916, 3.77168583, 3.76813708, 3.7705804, 3.76669411, 3.76711572, \n 3.76808636, 3.76962133, 3.76680748, 3.76844159, 3.76552364, 3.76210975,\n 3.76321355, 3.76471956, 3.76227721, 3.76424368, 3.76172169, 3.75923, \n 3.76099518, 3.75829319, 3.76028082, 3.75824824, 3.7562443, 3.76013739, \n 3.75818674, 3.7560594, 3.75379557, 3.75757852, 3.75582548, 3.75412511, \n 3.75244297, 3.75075688, 3.74891172, 3.75280489, 3.75090966, 3.7494744, \n 3.74806463, 3.75254602, 3.75114099, 3.74947802, 3.74782149, 3.74638383,\n 3.75092969, 3.74970739, 3.7485241, 3.74730404, 3.74585452, 3.74435839, \n 3.74303855, 3.74191532, 3.74074663, 3.73958567, 3.74415751, 3.74282592,\n 3.74149075, 3.74029857, 3.73926672, 3.73828357, 
3.73730769, 3.7363362, \n 3.7352472, 3.73406243, 3.74020438, 3.7393112, 3.73836986, 3.73742713, \n 3.73644796, 3.73531947, 3.73418345, 3.73321896, 3.73238074, 3.73155456,\n 3.73080198, 3.73004637, 3.7291278, 3.72818669, 3.7273851, 3.72671496, \n 3.72605809, 3.72534827, 3.72465527, 3.72382494, 3.72294733, 3.73077145,\n 3.73014101, 3.72950865, 3.72885115, 3.7282343, 3.72752112, 3.72675617, \n 3.7260778, 3.7254917, 3.72495149, 3.72440186, 3.72383671, 3.723183, \n 3.72246763, 3.72184599, 3.7213286, 3.72080295, 3.72026245, 3.71971626, \n 3.71907946, 3.71839777, 3.71780463, 3.71704671, 3.7162294, 3.71543144, \n 3.71452847, 3.72065881, 3.71967136, 3.71880523, 3.71805949, 3.71732896,\n 3.71667185, 3.71598258, 3.71521135, 3.71431933, 3.71348235, 3.71278081,\n 3.71204444, 3.71136994, 3.7105967, 3.70982427, 3.70896735, 3.71527887, \n 3.71467395, 3.71402372, 3.71339733, 3.71276051, 3.71201001, 3.71123041,\n 3.71053954, 3.70995666, 3.70934263, 3.70871611]'], {}), '([5.0536015, 4.819334, 4.70702194, 4.60970036, 4.55329219, \n 4.5118919, 4.465161, 4.42168832, 4.37932413, 4.33343066, 4.29780246, \n 4.26550766, 4.2476601, 4.22343408, 4.20455427, 4.1834642, 4.15580542, \n 4.13352266, 4.1170148, 4.10326736, 4.08845795, 4.07496919, 4.05959646, \n 4.0417501, 4.02262887, 4.01056674, 4.00192679, 3.98996708, 3.97709149, \n 3.96442225, 3.95010566, 3.93456306, 3.92603865, 3.91801377, 3.90630556,\n 3.8975012, 3.88641115, 3.87143326, 3.85966246, 3.85112482, 3.84569926, \n 3.83714224, 3.82719647, 3.81910741, 3.80682977, 3.79652758, 3.78889289,\n 3.78428912, 3.77646938, 3.76966463, 3.76150223, 3.75820905, 3.76088934,\n 3.76171382, 3.76141619, 3.76079216, 3.76237742, 3.76725034, 3.76769877,\n 3.7690107, 3.7710916, 3.77168583, 3.76813708, 3.7705804, 3.76669411, \n 3.76711572, 3.76808636, 3.76962133, 3.76680748, 3.76844159, 3.76552364,\n 3.76210975, 3.76321355, 3.76471956, 3.76227721, 3.76424368, 3.76172169,\n 3.75923, 3.76099518, 3.75829319, 3.76028082, 3.75824824, 3.7562443, \n 3.76013739, 3.75818674, 3.7560594, 3.75379557, 3.75757852, 3.75582548, \n 3.75412511, 3.75244297, 3.75075688, 3.74891172, 3.75280489, 3.75090966,\n 3.7494744, 3.74806463, 3.75254602, 3.75114099, 3.74947802, 3.74782149, \n 3.74638383, 3.75092969, 3.74970739, 3.7485241, 3.74730404, 3.74585452, \n 3.74435839, 3.74303855, 3.74191532, 3.74074663, 3.73958567, 3.74415751,\n 3.74282592, 3.74149075, 3.74029857, 3.73926672, 3.73828357, 3.73730769,\n 3.7363362, 3.7352472, 3.73406243, 3.74020438, 3.7393112, 3.73836986, \n 3.73742713, 3.73644796, 3.73531947, 3.73418345, 3.73321896, 3.73238074,\n 3.73155456, 3.73080198, 3.73004637, 3.7291278, 3.72818669, 3.7273851, \n 3.72671496, 3.72605809, 3.72534827, 3.72465527, 3.72382494, 3.72294733,\n 3.73077145, 3.73014101, 3.72950865, 3.72885115, 3.7282343, 3.72752112, \n 3.72675617, 3.7260778, 3.7254917, 3.72495149, 3.72440186, 3.72383671, \n 3.723183, 3.72246763, 3.72184599, 3.7213286, 3.72080295, 3.72026245, \n 3.71971626, 3.71907946, 3.71839777, 3.71780463, 3.71704671, 3.7162294, \n 3.71543144, 3.71452847, 3.72065881, 3.71967136, 3.71880523, 3.71805949,\n 3.71732896, 3.71667185, 3.71598258, 3.71521135, 3.71431933, 3.71348235,\n 3.71278081, 3.71204444, 3.71136994, 3.7105967, 3.70982427, 3.70896735, \n 3.71527887, 3.71467395, 3.71402372, 3.71339733, 3.71276051, 3.71201001,\n 3.71123041, 3.71053954, 3.70995666, 3.70934263, 3.70871611])\n', (16892, 19358), True, 'import numpy as np\n'), ((21602, 21634), 'numpy.allclose', 'np.allclose', (['my_bounds', 'expected'], {}), '(my_bounds, expected)\n', (21613, 21634), True, 
'import numpy as np\n'), ((5522, 5533), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (5530, 5533), True, 'import numpy as np\n'), ((10281, 10292), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (10289, 10292), True, 'import numpy as np\n'), ((10126, 10137), 'time.time', 'time.time', ([], {}), '()\n', (10135, 10137), False, 'import time\n'), ((10599, 10610), 'time.time', 'time.time', ([], {}), '()\n', (10608, 10610), False, 'import time\n'), ((16822, 16833), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (16830, 16833), True, 'import numpy as np\n'), ((21749, 21760), 'time.time', 'time.time', ([], {}), '()\n', (21758, 21760), False, 'import time\n'), ((10559, 10570), 'time.time', 'time.time', ([], {}), '()\n', (10568, 10570), False, 'import time\n')]
|
import networkx as nx
import numpy as np
def gen_graph(graph_type, n, mean_deg):
"""Generates and returns a nx.Digraph and its adjacency matrix. Nodes are randomly permutated.
Arguments:
graph_type (string): type of graph Erdos-Renyi, scale-free, sachs or any graph in BNRepo
n (int): number of nodes
mean_deg (float): average degree of nodes
"""
# beta is the unpermutated adjacency matrix
if graph_type == "erdos-renyi":
beta = gen_random_graph(n, mean_deg)
elif graph_type == "scale-free":
# select
import igraph as ig
G_ig = ig.Graph.Barabasi(n=n, m=int(round(mean_deg / 2)), directed=True)
beta = np.array(G_ig.get_adjacency().data)
else:
raise NotImplementedError
# Randomly permute nodes
perm_mat = np.random.permutation(np.eye(n))
adj_matrix = perm_mat.T @ beta @ perm_mat
# Sanity check, is the graph acyclic?
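    # trace((I + A)^n) == n iff A has no directed cycle: only the identity
    # contributes to the diagonal when no walk returns to its starting node.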
assert np.trace(np.linalg.matrix_power(np.eye(n) + adj_matrix, n)) == n
# Create and return directed graph
graph = nx.from_numpy_array(adj_matrix, create_using=nx.DiGraph)
return graph, adj_matrix
def gen_random_graph(n, mean_deg):
"""Returns the adjacency matrix of an Erdos Renyi DAG
Args:
n (int): number of nodes
mean_deg (float): average degree of a node
"""
assert mean_deg <= n - 1
prob_one_edge = mean_deg / (n - 1)
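    # Keeping only the strict upper triangle (k=1) guarantees acyclicity:
    # edges always point from lower to higher node index.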
beta = np.triu(np.random.random((n, n)) < prob_one_edge, k=1)
return np.float32(beta)
def simulate_parameter(adj_matrix, w_ranges):
"""Simulate SEM parameters for a DAG.
Args:
adj_matrix (np.array): [n, n] binary adj matrix of DAG
w_ranges (tuple): disjoint weight ranges
Returns:
weighted_adj_matrix (np.array): [n, n] weighted adj matrix of DAG
"""
weighted_adj_matrix = np.zeros(adj_matrix.shape)
range_choice = np.random.randint(len(w_ranges), size=adj_matrix.shape) # which range
for i, (low, high) in enumerate(w_ranges):
weights = np.random.uniform(low=low, high=high, size=adj_matrix.shape)
weighted_adj_matrix += adj_matrix * (range_choice == i) * weights
return weighted_adj_matrix
def sample_lin_scms(graph_type, noise_type, adj_matrix, nb_samples=1000,
weighted=False,
w_ranges=((-2.0, -.5), (.5, 2.0))):
""" Given a directed graph and a particular noise type, generates edge weights and samples
Args:
graph_type (string): type of graph
noise_type (string): one of gaussian, exp, gumbel, type of random noise
adj_matrix (np.array): [n, n] binary adjacency matrix
nb_samples (int): number of samples to generate
weighted (bool): whether to use uniformly weighted edges or all edges are
w_ranges (tuple): negative and positive ranges to sample edge weights (if weighted)
Returns:
X (np.array): [nb_samples, n] sample matrix
beta (np.array): [n, n] weighted adjacency matrix
sigma_n (np.array): [n, n] sample covariance matrix
"""
n = adj_matrix.shape[0]
# Sample edge weights
if weighted:
beta = simulate_parameter(adj_matrix, w_ranges)
else:
beta = adj_matrix
aux_inv = np.linalg.inv(np.eye(n) - beta)
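    # Linear SEM: X (I - B) = eps, so X = eps (I - B)^{-1} and each variable is
    # its noise term plus the weighted contribution of its parents.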
# Sample noise
if noise_type == "gaussian":
epsilon = np.random.normal(size=(nb_samples, n))
elif noise_type == "exp":
epsilon = np.random.exponential(size=(nb_samples, n))
elif noise_type == "gumbel":
epsilon = np.random.gumbel(size=(nb_samples, n))
else:
raise NotImplementedError
X = epsilon @ aux_inv
sigma_n = np.cov(X.T, bias=True)
return X, beta, sigma_n
|
[
"numpy.random.gumbel",
"numpy.random.uniform",
"numpy.float32",
"numpy.random.exponential",
"numpy.zeros",
"networkx.from_numpy_array",
"numpy.random.random",
"numpy.random.normal",
"numpy.eye",
"numpy.cov"
] |
[((1068, 1124), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['adj_matrix'], {'create_using': 'nx.DiGraph'}), '(adj_matrix, create_using=nx.DiGraph)\n', (1087, 1124), True, 'import networkx as nx\n'), ((1497, 1513), 'numpy.float32', 'np.float32', (['beta'], {}), '(beta)\n', (1507, 1513), True, 'import numpy as np\n'), ((1852, 1878), 'numpy.zeros', 'np.zeros', (['adj_matrix.shape'], {}), '(adj_matrix.shape)\n', (1860, 1878), True, 'import numpy as np\n'), ((3666, 3688), 'numpy.cov', 'np.cov', (['X.T'], {'bias': '(True)'}), '(X.T, bias=True)\n', (3672, 3688), True, 'import numpy as np\n'), ((840, 849), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (846, 849), True, 'import numpy as np\n'), ((2034, 2094), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': 'adj_matrix.shape'}), '(low=low, high=high, size=adj_matrix.shape)\n', (2051, 2094), True, 'import numpy as np\n'), ((3360, 3398), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(nb_samples, n)'}), '(size=(nb_samples, n))\n', (3376, 3398), True, 'import numpy as np\n'), ((1439, 1463), 'numpy.random.random', 'np.random.random', (['(n, n)'], {}), '((n, n))\n', (1455, 1463), True, 'import numpy as np\n'), ((3271, 3280), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (3277, 3280), True, 'import numpy as np\n'), ((3447, 3490), 'numpy.random.exponential', 'np.random.exponential', ([], {'size': '(nb_samples, n)'}), '(size=(nb_samples, n))\n', (3468, 3490), True, 'import numpy as np\n'), ((3542, 3580), 'numpy.random.gumbel', 'np.random.gumbel', ([], {'size': '(nb_samples, n)'}), '(size=(nb_samples, n))\n', (3558, 3580), True, 'import numpy as np\n'), ((983, 992), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (989, 992), True, 'import numpy as np\n')]
|
#!/bin/env python
"""
Implements Python interface to NRL NAAPS files
"""
import os
import sys
from types import *
from pyhdf import SD
from glob import glob
from numpy import ones, concatenate, array,linspace,arange, transpose
from datetime import date, datetime, timedelta
from .config import strTemplate
MISSING = -9999.99
ALIAS = dict (latitude = 'lat' ,
longitude = 'lon' ,
elevation = 'zs' ,
time = 'Time')
ALIAS['532_attenuated_backscatter'] = 'taback'
ALIAS['532_attenuated_backscatter_error'] = 'taback_err'
ALIAS['532_attenuated_molecular_backscatter'] = 'mol_aback'
SDS = list(ALIAS.keys())
#.........................................................................................
class NAAPS(object):
"""
Base class for NAAPS object.
"""
def __init__ (self,Path,keep=None,Verbose=0,only_good=True):
"""
Creates an NAAPS object defining the attributes corresponding
to the SDS's on input.
The optional parameter *keep* is used to specify the number of scan
lines (from the left of the swath) to keep. This is needed for
coping with the row anomaly problem.
"""
# Initially are lists of numpy arrays for each granule
# ----------------------------------------------------
self.verb = Verbose
self.keep = keep
self.SDS = SDS
# Variable names
# --------------
self.Names = []
for name in SDS:
self.Names.append(name)
self.Names += ['nymd','nhms']
# Create empty lists for SDS to be read from orbit file;
# each element of the list contains data for one orbit
# ------------------------------------------------------
for name in self.Names:
self.__dict__[name] = []
self.time_ = [] # to hold datetime objects
# Read each orbit, appending them to the list
# -------------------------------------------
        if isinstance(Path, list):
if len(Path) == 0:
self.nobs = 0
print("WARNING: Empty NAAPS object created")
return
else:
Path = [Path, ]
self._readList(Path)
# Make each attribute a single numpy array
# ----------------------------------------
for name in self.Names:
# print 'name',name, 'donnees',self.__dict__[name]
try:
self.__dict__[name] = concatenate((self.__dict__[name]))
except:
print("Failed concatenating "+name)
# Make aliases for compatibility with older code
# ----------------------------------------------
# Alias = ALIAS.keys()
for name in self.Names:
if name in SDS:
self.__dict__[ALIAS[name]] = self.__dict__[name]
#---
def _readList(self,List):
"""
Recursively, look for files in list; list items can
be files or directories.
"""
for item in List:
if os.path.isdir(item): self._readDir(item)
elif os.path.isfile(item): self._readOrbit(item)
else:
print("%s is not a valid file or directory, ignoring it"%item)
#---
def _readDir(self,dir):
"""Recursively, look for files in directory."""
for item in os.listdir(dir):
path = dir + os.sep + item
if os.path.isdir(path): self._readDir(path)
elif os.path.isfile(path): self._readOrbit(path)
else:
print("%s is not a valid file or directory, ignoring it"%item)
#---
def _readOrbit(self,filename):
"""Reads one CALIPSO orbit with Level 1.5 data."""
# Reference time
# --------------
REF_DATE = datetime(1993,1,1,0,0,0)
# Open the CALIPSO file and loop over the datasets,
# extracting GEOLOCATION and Data fields
# ----------------------------------------------
if self.verb:
print("[] working on <%s>"%filename)
f = SD.SD(filename)
# for group in self.SDS.keys():
for name in self.SDS:
v = name
print('v', v)
if v == 'time':
sd = f.select(v)
Time = sd.get()
nobs = len(Time)
nymd = ones(nobs).astype('int')
nhms = ones(nobs).astype('int')
self.__dict__[v].append(Time) # time as on file
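                # Time on file is encoded as YYMMDD.fraction_of_day; split it
                # into a 20YYMMDD date and an HHMMSS time below.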
for i in range(nobs):
yymmdd = Time[i]
nymd0 = int(Time[i])
nd = Time[i] - nymd0
nd0 = nd * 24.0
hh = int(nd0)
nd1 = nd0 - hh
nd2 = nd1 * 60
mm = int(nd2)
nd3 = nd2 - mm
nd4 = nd3 * 60
ss = int(nd4)
nymd[i] = 20000000 + nymd0
nhms[i] = ((hh * 100) + mm) * 100 + ss
self.nymd.append(nymd)
self.nhms.append(nhms)
year = int(nymd[i]/10000)
month = int((nymd[i] - 10000*year)/100)
day = nymd[i] - (year*10000 + month * 100)
self.time_.append(datetime(year,month,day,hh,mm,ss))
else:
sd = f.select(v)
data = sd.get() # most of parameter : data = (nobs) or (nobs,km) except L2 feature type(nobs,km,4)
data = transpose(data)
print('data', data.shape)
if self.keep != None:
self.__dict__[v].append(data[0:self.keep,:])
else:
self.__dict__[v].append(data)
#---
def writeg(self,g5,syn_time,nsyn=8,g5_h=None,g5_ab=None,filename=None,dir='.',expid='NAAPS',Verb=1):
"""
Writes gridded CALIPSO measurements to file (same grid as GEOS-5 file).
Verb -- Verbose level:
0 - really quiet (default)
1 - Warns if invalid file is found
2 - Prints out non-zero number of fires in each file.
"""
from gfio import GFIO
from binObs_ import binobs3dh
# Determine synoptic time range
# -----------------------------
dt = timedelta(seconds = 12. * 60. * 60. / nsyn)
t1, t2 = (syn_time-dt,syn_time+dt)
# Lat lon grid from GEOS-5 file
# ------------
im = 360
jm = 181
print('im,jm', im, jm)
glon = linspace(-180.,180.,im,endpoint=False)
glat = linspace(-90.,90.,jm)
print('glon', glon, glat)
dLon = 360. / im
dLat = 180. / ( jm - 1.)
print('dlon', dLon, dLat)
nymd = 10000 * syn_time.year + 100 * syn_time.month + syn_time.day
nhms = 10000 * syn_time.hour + 100 * syn_time.minute + syn_time.second
print('nymd=',nymd, 'nhms=',nhms)
        na_height = arange(0,8100,400) # height above sea level for NAAPS: 100 m bins for night, 400 m bins for day
print('na_height shape', na_height.shape, g5_h.shape)
g5_height = g5_h
km = g5_height.shape[0] # because it is at the edge
print('km', km, g5_height.shape, g5_height[:,0])
nobs = self.lon.shape
vtitle = [ 'taback',
'taback_err',
'mol_aback',
'height' ]
vname = ['taback','taback_err', 'mol_aback']
vunits = [ 'km-1 sr-1','km-1 sr-1', 'km-1 sr-1' ]
kmvar = [km, km, km]
title = 'Gridded NAAPS attenuated backscatter coeff lev Geos5'
source = 'NASA/GSFC/GMAO GEOS-5 Aerosol Group'
contact = 'Virginie'
if filename is None:
filename = '%s/%s.day.calipso_l3a.%d_%02dz.nc4'%(dir,expid,nymd,nhms/10000)
# QA filtering
# ------------
I_bad = ones(self.taback.shape) # bad data
I_bad = False
# Time filter of data
# -------------------
lon = self.lon
lat = self.lat
taback = _timefilter(self.time_,t1,t2,self.taback,I_bad)
taback_err = _timefilter(self.time_,t1,t2,self.taback_err,I_bad)
mol_aback = _timefilter(self.time_,t1,t2,self.mol_aback,I_bad)
# height = _timefilter(self.time_,t1,t2,na_height,I_bad)
print('taback', taback.shape)
# Create the file
# ---------------
f = GFIO()
glevs=arange(km)
f.create(filename, vname, nymd, nhms,
lon=glon, lat=glat, levs=glevs, levunits='m',
vtitle=vtitle, vunits=vunits,kmvar=kmvar,amiss=MISSING,
title=title, source=source, contact=contact)
# gObs=binobs3dh(lon[13:14],lat[13:14],taback[13:14,:],na_height,g5_height[:,13:14],im,jm,MISSING)
print('test', lon[10:11],lat[10:11],taback[10:11,:],na_height,g5_height[:,10:11])
gObs=binobs3dh(lon[10:11],lat[10:11],taback[10:11,:],na_height,g5_height[:,10:11],im,jm,MISSING)
print('gobs', gObs[357:358,101:102,:])
# Grid variable and write to file
# -------------------------------
f.write('taback', nymd, nhms, binobs3dh(lon,lat,taback,na_height,g5_height,im,jm,MISSING) )
f.write('taback_err', nymd, nhms, binobs3dh(lon,lat,taback_err,na_height,g5_height,im,jm,MISSING) )
f.write('mol_aback', nymd, nhms, binobs3dh(lon,lat,mol_aback,na_height,g5_height,im,jm,MISSING) )
# f.write('height', nymd, nhms, g5_height)
if Verb >=1:
print("[w] Wrote file "+filename)
#....................................................................
def _timefilter ( t, t1, t2, a, I_bad ):
filler = MISSING * ones(a.shape[1:])
b = a.copy()
for i in range(len(t)):
if (t[i]<t1) or (t[i]>=t2):
b[i] = filler
if len(b.shape) == 3:
b[I_bad,:] = MISSING
elif len(b.shape) == 2:
b[I_bad] = MISSING
else:
raise IndexError("Invalid rank=%d for time filtering"%len(b.shape))
return b
#---
def orbits (path, syn_time, nsyn=8, period='night', Verbose=0 ):
"""
Returns a list of CALIPSO orbits for a given product at given synoptic time.
On input,
path --- mounting point for the CALIPSO Level 1.5 files
syn_time --- synoptic time (timedate format)
nsyn --- number of synoptic times per day (optional)
"""
# Determine synoptic time range
# -----------------------------
dt = timedelta(seconds = 12. * 60. * 60. / nsyn)
t1, t2 = (syn_time-dt,syn_time+dt)
print("[*] ", t1,"|", t2)
today = syn_time
yesterday = today - timedelta(hours=24)
Files = []
for t in (yesterday,today):
yy, mm, dd = (t.year,t.month,t.day)
dirn = "%s/%02d/%s"%(path,mm,period)
Files += glob("%s/naaps_caliop_assim_*.cdf"%(dirn))
# print 'Files', dirn, Files
Orbits = []
for f in Files:
dirn, filen = os.path.split(f)
tokens = filen.split('_')
beg_yy = int(tokens[3][0:4])
beg_mm = int(tokens[3][4:6])
beg_dd = int(tokens[3][6:8])
beg_h = int(tokens[3][8:10])
beg_m = int(tokens[3][10:12])
t_beg = datetime(beg_yy,beg_mm,beg_dd,beg_h,beg_m,0)
t_end = t_beg + timedelta(minutes=90)
# t_end = datetime(end_yy,end_mm,end_dd,end_h,end_m,0)
# print 'year', beg_yy, 'month', beg_mm, 'day', beg_dd, 'hour', beg_h, 'min', beg_m
if (t_beg>=t1 and t_beg<t2) or (t_end>=t1 and t_end<t2):
print("[x] ", t_beg, '|', t_end)
Orbits += [f,]
if Verbose:
print("[] ", f)
return Orbits
#............................................................................
if __name__ == "__main__":
# syn_time = datetime(2008,6,30,0,0,0)
# Time interval snd time step
# ---------------------------
t_beg = datetime(2007,4,1,0)
t_end = datetime(2007,4,1,21)
dt = timedelta(seconds=3*60*60) # 3-hourly
t = t_beg - dt
while t < t_end:
t += dt
syn_time = t
Files = orbits('/nobackup/2/vbuchard/CALIPSO_L15/NAAPS/',syn_time,period='day',Verbose=1)
print('files',Files)
#def hold():
# NAAPS files
naap = NAAPS(Files,Verbose=1)
# GEOS-5 file
g_template = "/nobackup/2/vbuchard/CALIPSO_L15/GEOS-5/aback_63lay/Y%y4/M%m2/dR_MERRA-AA-r2_ext532nm_Nv_63layers.%y4%m2%d2_%h200z.nc4"
g_fn = strTemplate(g_template,dtime=syn_time)
lon=naap.lon
lat=naap.lat
g = GFIO(g_fn)
g5_height = g.interp('h',lon,lat)
g5_aback = g.read('taback')
naap.writeg(g,syn_time,nsyn=8,g5_h=g5_height,g5_ab=g5_aback,filename=None,dir='/nobackup/2/vbuchard/CALIPSO_L15/',expid='NAAPS',Verb=1)
|
[
"binObs_.binobs3dh",
"os.path.isdir",
"gfio.GFIO",
"numpy.transpose",
"numpy.ones",
"datetime.datetime",
"os.path.isfile",
"datetime.timedelta",
"numpy.arange",
"numpy.linspace",
"pyhdf.SD.SD",
"glob.glob",
"os.path.split",
"os.listdir",
"numpy.concatenate"
] |
[((10722, 10766), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(12.0 * 60.0 * 60.0 / nsyn)'}), '(seconds=12.0 * 60.0 * 60.0 / nsyn)\n', (10731, 10766), False, 'from datetime import date, datetime, timedelta\n'), ((12181, 12204), 'datetime.datetime', 'datetime', (['(2007)', '(4)', '(1)', '(0)'], {}), '(2007, 4, 1, 0)\n', (12189, 12204), False, 'from datetime import date, datetime, timedelta\n'), ((12214, 12238), 'datetime.datetime', 'datetime', (['(2007)', '(4)', '(1)', '(21)'], {}), '(2007, 4, 1, 21)\n', (12222, 12238), False, 'from datetime import date, datetime, timedelta\n'), ((12245, 12275), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3 * 60 * 60)'}), '(seconds=3 * 60 * 60)\n', (12254, 12275), False, 'from datetime import date, datetime, timedelta\n'), ((3457, 3472), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (3467, 3472), False, 'import os\n'), ((3903, 3932), 'datetime.datetime', 'datetime', (['(1993)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1993, 1, 1, 0, 0, 0)\n', (3911, 3932), False, 'from datetime import date, datetime, timedelta\n'), ((4180, 4195), 'pyhdf.SD.SD', 'SD.SD', (['filename'], {}), '(filename)\n', (4185, 4195), False, 'from pyhdf import SD\n'), ((6539, 6583), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(12.0 * 60.0 * 60.0 / nsyn)'}), '(seconds=12.0 * 60.0 * 60.0 / nsyn)\n', (6548, 6583), False, 'from datetime import date, datetime, timedelta\n'), ((6765, 6808), 'numpy.linspace', 'linspace', (['(-180.0)', '(180.0)', 'im'], {'endpoint': '(False)'}), '(-180.0, 180.0, im, endpoint=False)\n', (6773, 6808), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((6818, 6843), 'numpy.linspace', 'linspace', (['(-90.0)', '(90.0)', 'jm'], {}), '(-90.0, 90.0, jm)\n', (6826, 6843), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((7193, 7213), 'numpy.arange', 'arange', (['(0)', '(8100)', '(400)'], {}), '(0, 8100, 400)\n', (7199, 7213), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((8124, 8147), 'numpy.ones', 'ones', (['self.taback.shape'], {}), '(self.taback.shape)\n', (8128, 8147), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((8656, 8662), 'gfio.GFIO', 'GFIO', ([], {}), '()\n', (8660, 8662), False, 'from gfio import GFIO\n'), ((8676, 8686), 'numpy.arange', 'arange', (['km'], {}), '(km)\n', (8682, 8686), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((9143, 9247), 'binObs_.binobs3dh', 'binobs3dh', (['lon[10:11]', 'lat[10:11]', 'taback[10:11, :]', 'na_height', 'g5_height[:, 10:11]', 'im', 'jm', 'MISSING'], {}), '(lon[10:11], lat[10:11], taback[10:11, :], na_height, g5_height[:,\n 10:11], im, jm, MISSING)\n', (9152, 9247), False, 'from binObs_ import binobs3dh\n'), ((9942, 9959), 'numpy.ones', 'ones', (['a.shape[1:]'], {}), '(a.shape[1:])\n', (9946, 9959), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((10882, 10901), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (10891, 10901), False, 'from datetime import date, datetime, timedelta\n'), ((11081, 11123), 'glob.glob', 'glob', (["('%s/naaps_caliop_assim_*.cdf' % dirn)"], {}), "('%s/naaps_caliop_assim_*.cdf' % dirn)\n", (11085, 11123), False, 'from glob import glob\n'), ((11219, 11235), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (11232, 11235), False, 'import os\n'), ((11481, 11530), 'datetime.datetime', 'datetime', (['beg_yy', 
'beg_mm', 'beg_dd', 'beg_h', 'beg_m', '(0)'], {}), '(beg_yy, beg_mm, beg_dd, beg_h, beg_m, 0)\n', (11489, 11530), False, 'from datetime import date, datetime, timedelta\n'), ((12836, 12846), 'gfio.GFIO', 'GFIO', (['g_fn'], {}), '(g_fn)\n', (12840, 12846), False, 'from gfio import GFIO\n'), ((3141, 3160), 'os.path.isdir', 'os.path.isdir', (['item'], {}), '(item)\n', (3154, 3160), False, 'import os\n'), ((3528, 3547), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3541, 3547), False, 'import os\n'), ((9405, 9471), 'binObs_.binobs3dh', 'binobs3dh', (['lon', 'lat', 'taback', 'na_height', 'g5_height', 'im', 'jm', 'MISSING'], {}), '(lon, lat, taback, na_height, g5_height, im, jm, MISSING)\n', (9414, 9471), False, 'from binObs_ import binobs3dh\n'), ((9508, 9578), 'binObs_.binobs3dh', 'binobs3dh', (['lon', 'lat', 'taback_err', 'na_height', 'g5_height', 'im', 'jm', 'MISSING'], {}), '(lon, lat, taback_err, na_height, g5_height, im, jm, MISSING)\n', (9517, 9578), False, 'from binObs_ import binobs3dh\n'), ((9614, 9683), 'binObs_.binobs3dh', 'binobs3dh', (['lon', 'lat', 'mol_aback', 'na_height', 'g5_height', 'im', 'jm', 'MISSING'], {}), '(lon, lat, mol_aback, na_height, g5_height, im, jm, MISSING)\n', (9623, 9683), False, 'from binObs_ import binobs3dh\n'), ((11550, 11571), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(90)'}), '(minutes=90)\n', (11559, 11571), False, 'from datetime import date, datetime, timedelta\n'), ((2543, 2575), 'numpy.concatenate', 'concatenate', (['self.__dict__[name]'], {}), '(self.__dict__[name])\n', (2554, 2575), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((3204, 3224), 'os.path.isfile', 'os.path.isfile', (['item'], {}), '(item)\n', (3218, 3224), False, 'import os\n'), ((3591, 3611), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (3605, 3611), False, 'import os\n'), ((5728, 5743), 'numpy.transpose', 'transpose', (['data'], {}), '(data)\n', (5737, 5743), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((4475, 4485), 'numpy.ones', 'ones', (['nobs'], {}), '(nobs)\n', (4479, 4485), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((4522, 4532), 'numpy.ones', 'ones', (['nobs'], {}), '(nobs)\n', (4526, 4532), False, 'from numpy import ones, concatenate, array, linspace, arange, transpose\n'), ((5478, 5516), 'datetime.datetime', 'datetime', (['year', 'month', 'day', 'hh', 'mm', 'ss'], {}), '(year, month, day, hh, mm, ss)\n', (5486, 5516), False, 'from datetime import date, datetime, timedelta\n')]
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from __future__ import unicode_literals
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('sknano', parent_package, top_path)
config.add_subpackage('apps')
config.add_subpackage('core')
config.add_subpackage('generators')
config.add_subpackage('io')
config.add_subpackage('scripts')
config.add_subpackage('structures')
config.add_subpackage('testing')
config.add_subpackage('utils')
config.add_data_dir('data')
#config.make_config_py()
return config
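# numpy.distutils' setup() below consumes this configuration: it registers the
# sknano sub-packages listed above and bundles the data/ directory with the build.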
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
[
"numpy.distutils.misc_util.Configuration"
] |
[((251, 300), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""sknano"""', 'parent_package', 'top_path'], {}), "('sknano', parent_package, top_path)\n", (264, 300), False, 'from numpy.distutils.misc_util import Configuration\n')]
|
# -*- coding: utf-8 -*-
"""
Transmission Line helper functions
"""
import numpy as np
def ZL_2_Zin(L,Z0,gamma,ZL):
"""
Returns the input impedance seen through a lossy transmission line of
characteristic impedance Z0 and complex wavenumber gamma=alpha+j*beta
Zin = ZL_2_Zin(L,Z0,gamma,ZL)
Args
----
L : length [m] of the transmission line
Z0: characteristic impedance of the transmission line
gamma: complex wavenumber associated to the transmission line
ZL: Load impedance
Returns
-------
Zin: input impedance
"""
assert L > 0
assert Z0 > 0
Zin = Z0*(ZL + Z0*np.tanh(gamma*L))/(Z0 + ZL*np.tanh(gamma*L))
return Zin
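# Illustrative check (not part of the original module): for a matched load,
# ZL == Z0, the tanh terms cancel and the input impedance reduces to Z0 for
# any line length, e.g. ZL_2_Zin(1.0, 50.0, 0.1 + 2j, 50.0) evaluates to
# 50 + 0j (up to floating-point rounding).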
def transfer_matrix(L,V0,I0,Z0,gamma):
"""
Returns the voltage and the current at a distance L from an
initial voltage V0 and current I0 on a transmission line which
propagation constant is gamma.
VL, IL = transfer_matrix(L,V0,I0,Z0,gamma)
L is positive from the load toward the generator
Args
-----
L : transmission line length [m]
V0: initial voltage [V]
I0: initial current [A]
Z0 : characteristic impedance of the transmission line
gamma: =alpha+j*beta propagation constant of the transmission line
Returns
--------
VL: voltage at length L
IL: current at length L
"""
if Z0 <= 0:
raise ValueError
transfer_matrix = np.array([[np.cosh(gamma*L), Z0*np.sinh(gamma*L)],
[np.sinh(gamma*L)/Z0, np.cosh(gamma*L)]])
U = np.array([V0,I0])
A = transfer_matrix @ U
VL = A[0]
IL = A[1]
return VL, IL
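# The 2x2 matrix built above is the standard ABCD (chain) matrix of a lossy
# line: [VL, IL]^T = [[cosh(gL), Z0*sinh(gL)], [sinh(gL)/Z0, cosh(gL)]] @ [V0, I0]^T.
# With L = 0 it reduces to the identity, so transfer_matrix(0, V0, I0, Z0, gamma)
# returns (V0, I0) unchanged.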
def V0f_2_VL(L, V0f, gamma, reflection_coefficient):
"""
Propagation of the voltage at a distance L from the forward
voltage and reflection coefficient
VL = V0f_2_VL(L, V0f, gamma, reflectionCoefficient)
Args
----
L : Transmission Line Length [m]
V0f : forward voltage [V]
    gamma : Transmission Line Complex Propagation Constant [1]
reflectionCoefficient : complex reflection coefficient [1]
Returns
--------
VL : (total) voltage at length L
"""
assert L > 0
    # gamma and reflection_coefficient are complex in general and cannot be
    # ordered against 0, so only the line length is sanity-checked above.
VL = V0f*(np.exp(-gamma*L) + reflection_coefficient*np.exp(+gamma*L))
return VL
|
[
"numpy.tanh",
"numpy.array",
"numpy.exp",
"numpy.cosh",
"numpy.sinh"
] |
[((1595, 1613), 'numpy.array', 'np.array', (['[V0, I0]'], {}), '([V0, I0])\n', (1603, 1613), True, 'import numpy as np\n'), ((2315, 2333), 'numpy.exp', 'np.exp', (['(-gamma * L)'], {}), '(-gamma * L)\n', (2321, 2333), True, 'import numpy as np\n'), ((686, 704), 'numpy.tanh', 'np.tanh', (['(gamma * L)'], {}), '(gamma * L)\n', (693, 704), True, 'import numpy as np\n'), ((1472, 1490), 'numpy.cosh', 'np.cosh', (['(gamma * L)'], {}), '(gamma * L)\n', (1479, 1490), True, 'import numpy as np\n'), ((1567, 1585), 'numpy.cosh', 'np.cosh', (['(gamma * L)'], {}), '(gamma * L)\n', (1574, 1585), True, 'import numpy as np\n'), ((2357, 2375), 'numpy.exp', 'np.exp', (['(+gamma * L)'], {}), '(+gamma * L)\n', (2363, 2375), True, 'import numpy as np\n'), ((659, 677), 'numpy.tanh', 'np.tanh', (['(gamma * L)'], {}), '(gamma * L)\n', (666, 677), True, 'import numpy as np\n'), ((1493, 1511), 'numpy.sinh', 'np.sinh', (['(gamma * L)'], {}), '(gamma * L)\n', (1500, 1511), True, 'import numpy as np\n'), ((1546, 1564), 'numpy.sinh', 'np.sinh', (['(gamma * L)'], {}), '(gamma * L)\n', (1553, 1564), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
from sklearn.preprocessing import Normalizer
# For reproducibility
np.random.seed(1000)
if __name__ == '__main__':
# Create a dummy dataset
data = np.array([1.0, 2.0])
print(data)
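    # For the row vector [1.0, 2.0] the three normalisations below give,
    # rounded: max -> [0.5, 1.0], l1 -> [0.3333, 0.6667], l2 -> [0.4472, 0.8944];
    # each row is rescaled so that its chosen norm equals 1.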
# Max normalization
n_max = Normalizer(norm='max')
nm = n_max.fit_transform(data.reshape(1, -1))
print(nm)
# L1 normalization
n_l1 = Normalizer(norm='l1')
nl1 = n_l1.fit_transform(data.reshape(1, -1))
print(nl1)
# L2 normalization
n_l2 = Normalizer(norm='l2')
nl2 = n_l2.fit_transform(data.reshape(1, -1))
print(nl2)
|
[
"numpy.array",
"sklearn.preprocessing.Normalizer",
"numpy.random.seed"
] |
[((134, 154), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (148, 154), True, 'import numpy as np\n'), ((227, 247), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (235, 247), True, 'import numpy as np\n'), ((305, 327), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""max"""'}), "(norm='max')\n", (315, 327), False, 'from sklearn.preprocessing import Normalizer\n'), ((432, 453), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (442, 453), False, 'from sklearn.preprocessing import Normalizer\n'), ((559, 580), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (569, 580), False, 'from sklearn.preprocessing import Normalizer\n')]
|
# -*- coding: utf-8 -*-
import biapol_utilities as biau
import numpy as np
def test_suppression():
a = np.random.rand(100).reshape(10, -1)
threshold = 0.5
a_sup = biau.label.suppressed_similarity(a, threshold=threshold)
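    # The assertion below verifies that every similarity entry falling below
    # the threshold has been zeroed out by suppressed_similarity.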
assert(all(a_sup[a < threshold].ravel() == 0))
if __name__ == "__main__":
test_suppression()
|
[
"numpy.random.rand",
"biapol_utilities.label.suppressed_similarity"
] |
[((180, 236), 'biapol_utilities.label.suppressed_similarity', 'biau.label.suppressed_similarity', (['a'], {'threshold': 'threshold'}), '(a, threshold=threshold)\n', (212, 236), True, 'import biapol_utilities as biau\n'), ((111, 130), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (125, 130), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 6 09:59:14 2021
@author: ll17354
"""
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model._logistic import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
import sys
import warnings
import math
import statsmodels
import numpy as np
from scipy import stats
import statsmodels.api as smf
def firth_likelihood(beta, logit):
return -(logit.loglike(beta) + 0.5*np.log(np.linalg.det(-logit.hessian(beta))))
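# Note: firth_likelihood returns the *negative* penalised log-likelihood,
# -(l(beta) + 0.5*log|det(-H(beta))|), i.e. Firth's Jeffreys-prior penalty.
# The Newton/step-halving iterations below therefore minimise this value, and
# fitll = -firth_likelihood(...) recovers the penalised log-likelihood itself.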
def null_fit_firth(y, X, start_vec = None, step_limit=1000, convergence_limit=0.0001):
"""
Computes the null model in the likelihood ratio test
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. Make sure X has an intercept
term (column of ones).
y : array-like of shape (n_samples,)
Target vector relative to X. Please note this function only currently works for
binomial regression so output values of {0, 1} will work while
{0, 1, 2} will not.
start_vec : int or None, optional
starting vector The default is None.
step_limit : TYPE, optional
Max number of steps before MLE termination. The default is 1000.
convergence_limit : TYPE, optional
Minimum difference between MLE's. The default is 0.0001.
Returns
-------
return_fit :
        intercept: Intercept coefficient
        beta: list of beta coefficients
        bse: coefficient standard errors
fitll: fit log-likelihood
"""
logit_model = smf.Logit(y, X)
if start_vec is None:
start_vec = np.zeros(X.shape[1])
beta_iterations = []
beta_iterations.append(start_vec)
for i in range(0, step_limit):
pi = logit_model.predict(beta_iterations[i])
W = np.diagflat(np.multiply(pi, 1-pi))
var_covar_mat = np.linalg.pinv(-logit_model.hessian(beta_iterations[i]))
# build hat matrix
rootW = np.sqrt(W)
H = np.dot(np.transpose(X), np.transpose(rootW))
H = np.matmul(var_covar_mat, H)
H = np.matmul(np.dot(rootW, X), H)
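        # At this point H = W^(1/2) X (X' W X)^(-1) X' W^(1/2), the logistic
        # regression hat matrix; its diagonal feeds the Firth bias-correction
        # term in the penalised score U computed next.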
# penalised score
U = np.matmul(np.transpose(X), y - pi + np.multiply(np.diagonal(H), 0.5 - pi))
new_beta = beta_iterations[i] + np.matmul(var_covar_mat, U)
# step halving
j = 0
while firth_likelihood(new_beta, logit_model) > firth_likelihood(beta_iterations[i], logit_model):
new_beta = beta_iterations[i] + 0.5*(new_beta - beta_iterations[i])
j = j + 1
if (j > step_limit):
sys.stderr.write('Firth regression failed. Try increasing step limit.\n')
return None
beta_iterations.append(new_beta)
if i > 0 and (np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) < convergence_limit):
break
return_fit = None
if np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) >= convergence_limit:
sys.stderr.write('Firth regression failed to converge.\n')
else:
# Calculate stats
fitll = -firth_likelihood(beta_iterations[-1], logit_model)
intercept = beta_iterations[-1][0]
beta = beta_iterations[-1][1:].tolist()
bse = np.sqrt(np.diagonal(np.linalg.pinv(-logit_model.hessian(beta_iterations[-1]))))
return_fit = intercept, beta, bse, fitll
return return_fit
class Firth_LogisticRegression(LogisticRegression,
ClassifierMixin,
BaseEstimator):
"""
    This class is a rewrite of the Firth regression originally implemented
    by John Lees (https://gist.github.com/johnlees/3e06380965f367e4894ea20fbae2b90d)
    into a class which can interact with the scikit-learn ecosystem.
To use the fit function make sure X has an intercept term (column of ones).
When using validation functions make sure to not include this 'dummy' column
of ones.
Please note: This estimator class does not currently pass the check_estimator test
in sklearn. This is because it cannot perform the multinomial classification task that
check_estimator attempts to pass it.
Parameters
----------
start_vec : ndarray of shape (n_features, 1). Default set to None in which
case the zero vector is used.
step_limit : int.
convergence_limit : float.
    multi_class : string. Default is set to 'ovr' to let this function integrate
with the logistic_regression parent class and pass the _check_multi_class
function. A bit hacky but works.
    Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
beta_ : list of size n_features. This is used in the wald and likelihood
ratio test functions.
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
"""
def __init__(self, start_vec = None,
step_limit = 1000,
convergence_limit = 0.0001,
multi_class = 'ovr'):
self.start_vec = start_vec
self.step_limit = step_limit
self.convergence_limit = convergence_limit
self.multi_class = multi_class # multiclass should not be changed from 'ovr'
def fit(self, X = None, y = None):
"""
        Fits the model according to the given training data. This fit function
        has been changed to work in accordance with the sklearn estimator
        documentation. The major change is that, rather than returning specific
        variables, fit() returns an instance of itself, allowing other methods
        to be run from it.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. Make sure X has an intercept
term (column of ones).
y : array-like of shape (n_samples,)
Target vector relative to X. Please note this function only currently works for
binomial regression so output values of {0, 1} will work while
{0, 1, 2} will not.
Returns
-------
self
Fitted estimator.
self.fitll_ : fit log-likelihood
self.intercept_ : intercept
        self.coef_ : coefficients not including the intercept (used by the other sklearn routines)
        self.beta_ : coefficients including the intercept (used in the Wald and LR tests)
self.bse_ : standard errors
"""
X, y = check_X_y(X, y)
self.n_features_in = X.shape[1]-1
self.classes_ = np.unique(y)
logit_model = smf.Logit(y, X)
if self.start_vec is None:
start_vec = np.zeros(X.shape[1])
beta_iterations = []
beta_iterations.append(start_vec)
for i in range(0, self.step_limit):
pi = logit_model.predict(beta_iterations[i])
W = np.diagflat(np.multiply(pi, 1-pi))
var_covar_mat = np.linalg.pinv(-logit_model.hessian(beta_iterations[i]))
# build hat matrix
rootW = np.sqrt(W)
H = np.dot(np.transpose(X), np.transpose(rootW))
H = np.matmul(var_covar_mat, H)
H = np.matmul(np.dot(rootW, X), H)
# penalised score
U = np.matmul(np.transpose(X), y - pi + np.multiply(np.diagonal(H), 0.5 - pi))
new_beta = beta_iterations[i] + np.matmul(var_covar_mat, U)
# step halving
j = 0
while firth_likelihood(new_beta, logit_model) > firth_likelihood(beta_iterations[i], logit_model):
new_beta = beta_iterations[i] + 0.5*(new_beta - beta_iterations[i])
j = j + 1
if (j > self.step_limit):
sys.stderr.write('Firth regression failed. Try increasing step limit.\n')
return None
beta_iterations.append(new_beta)
if i > 0 and (np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) < self.convergence_limit):
break
if np.linalg.norm(beta_iterations[i] - beta_iterations[i-1]) >= self.convergence_limit:
sys.stderr.write('Firth regression failed to converge\n')
else:
# Calculate stats
self.fitll_ = -firth_likelihood(beta_iterations[-1], logit_model)
self.intercept_ = beta_iterations[-1][0]
self.coef_ = np.array(beta_iterations[-1][1:].tolist()).reshape((1, self.n_features_in)) #for other sklearn functions
self.beta_ = [self.intercept_] + beta_iterations[-1][1:].tolist() #used by Wald and LR test
self.bse_ = np.sqrt(np.diagonal(np.linalg.pinv(-logit_model.hessian(beta_iterations[-1]))))
return self
def test_wald(self):
'''
        Implementation of the Wald test
Returns
-------
waldp : list
            A list of p-values from the Wald test.
'''
check_is_fitted(self)
waldp = []
for beta_val, bse_val in zip(self.beta_, self.bse_):
waldp.append(2 * (1 - stats.norm.cdf(abs(beta_val/bse_val))))
return waldp
def test_likelihoodratio(self, X, y, start_vec = None, step_limit=1000, convergence_limit=0.0001):
"""
Implementation of the likelihood ratio test. An external function,
null_fit_firth(), is used to refit the null-estimator.
Parameters
----------
X : {array-like} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. Make sure to include the dummy column
of ones.
y : array-like of shape (n_samples,)
Target vector relative to X.
Returns
-------
lrtp : List
List of p-values from the likelihood ratio test.
"""
check_is_fitted(self)
X_np = X.values
lrtp = []
for beta_idx, (beta_val, bse_val) in enumerate(zip(self.beta_, self.bse_)):
null_X = np.delete(X_np, beta_idx, axis=1)
(null_intercept, null_beta, null_bse, null_fitll) = null_fit_firth(y, null_X, start_vec, step_limit, convergence_limit)
lrstat = -2*(null_fitll - self.fitll_)
lrt_pvalue = 1
if lrstat > 0: # non-convergence
lrt_pvalue = stats.chi2.sf(lrstat, 1)
lrtp.append(lrt_pvalue)
return lrtp
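# Minimal usage sketch (illustrative only; the variable names below are
# hypothetical, not part of this module):
#
#   X_df : pandas DataFrame whose first column is a constant column of ones
#   clf = Firth_LogisticRegression().fit(X_df.values, y)
#   wald_p = clf.test_wald()                  # per-coefficient Wald p-values
#   lrt_p = clf.test_likelihoodratio(X_df, y) # likelihood-ratio p-values
#
# As the class docstring notes, the intercept column must be present for fit()
# and test_likelihoodratio(), but should be dropped when calling the generic
# sklearn validation helpers.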
|
[
"scipy.stats.chi2.sf",
"numpy.multiply",
"statsmodels.api.Logit",
"sklearn.utils.validation.check_X_y",
"numpy.zeros",
"numpy.transpose",
"sklearn.utils.validation.check_is_fitted",
"numpy.diagonal",
"numpy.linalg.norm",
"numpy.matmul",
"numpy.dot",
"sys.stderr.write",
"numpy.delete",
"numpy.unique",
"numpy.sqrt"
] |
[((1714, 1729), 'statsmodels.api.Logit', 'smf.Logit', (['y', 'X'], {}), '(y, X)\n', (1723, 1729), True, 'import statsmodels.api as smf\n'), ((1781, 1801), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (1789, 1801), True, 'import numpy as np\n'), ((2130, 2140), 'numpy.sqrt', 'np.sqrt', (['W'], {}), '(W)\n', (2137, 2140), True, 'import numpy as np\n'), ((2210, 2237), 'numpy.matmul', 'np.matmul', (['var_covar_mat', 'H'], {}), '(var_covar_mat, H)\n', (2219, 2237), True, 'import numpy as np\n'), ((3053, 3112), 'numpy.linalg.norm', 'np.linalg.norm', (['(beta_iterations[i] - beta_iterations[i - 1])'], {}), '(beta_iterations[i] - beta_iterations[i - 1])\n', (3067, 3112), True, 'import numpy as np\n'), ((3141, 3199), 'sys.stderr.write', 'sys.stderr.write', (['"""Firth regression failed to converge.\n"""'], {}), "('Firth regression failed to converge.\\n')\n", (3157, 3199), False, 'import sys\n'), ((7328, 7343), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (7337, 7343), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((7410, 7422), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (7419, 7422), True, 'import numpy as np\n'), ((7454, 7469), 'statsmodels.api.Logit', 'smf.Logit', (['y', 'X'], {}), '(y, X)\n', (7463, 7469), True, 'import statsmodels.api as smf\n'), ((9833, 9854), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (9848, 9854), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((10805, 10826), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (10820, 10826), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((1982, 2005), 'numpy.multiply', 'np.multiply', (['pi', '(1 - pi)'], {}), '(pi, 1 - pi)\n', (1993, 2005), True, 'import numpy as np\n'), ((2160, 2175), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (2172, 2175), True, 'import numpy as np\n'), ((2177, 2196), 'numpy.transpose', 'np.transpose', (['rootW'], {}), '(rootW)\n', (2189, 2196), True, 'import numpy as np\n'), ((2260, 2276), 'numpy.dot', 'np.dot', (['rootW', 'X'], {}), '(rootW, X)\n', (2266, 2276), True, 'import numpy as np\n'), ((2330, 2345), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (2342, 2345), True, 'import numpy as np\n'), ((2435, 2462), 'numpy.matmul', 'np.matmul', (['var_covar_mat', 'U'], {}), '(var_covar_mat, U)\n', (2444, 2462), True, 'import numpy as np\n'), ((7538, 7558), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (7546, 7558), True, 'import numpy as np\n'), ((7932, 7942), 'numpy.sqrt', 'np.sqrt', (['W'], {}), '(W)\n', (7939, 7942), True, 'import numpy as np\n'), ((8020, 8047), 'numpy.matmul', 'np.matmul', (['var_covar_mat', 'H'], {}), '(var_covar_mat, H)\n', (8029, 8047), True, 'import numpy as np\n'), ((8931, 8990), 'numpy.linalg.norm', 'np.linalg.norm', (['(beta_iterations[i] - beta_iterations[i - 1])'], {}), '(beta_iterations[i] - beta_iterations[i - 1])\n', (8945, 8990), True, 'import numpy as np\n'), ((9028, 9085), 'sys.stderr.write', 'sys.stderr.write', (['"""Firth regression failed to converge\n"""'], {}), "('Firth regression failed to converge\\n')\n", (9044, 9085), False, 'import sys\n'), ((10974, 11007), 'numpy.delete', 'np.delete', (['X_np', 'beta_idx'], {'axis': '(1)'}), '(X_np, beta_idx, axis=1)\n', (10983, 11007), True, 'import numpy as np\n'), ((2759, 2832), 'sys.stderr.write', 'sys.stderr.write', 
(['"""Firth regression failed. Try increasing step limit.\n"""'], {}), "('Firth regression failed. Try increasing step limit.\\n')\n", (2775, 2832), False, 'import sys\n'), ((2925, 2984), 'numpy.linalg.norm', 'np.linalg.norm', (['(beta_iterations[i] - beta_iterations[i - 1])'], {}), '(beta_iterations[i] - beta_iterations[i - 1])\n', (2939, 2984), True, 'import numpy as np\n'), ((7768, 7791), 'numpy.multiply', 'np.multiply', (['pi', '(1 - pi)'], {}), '(pi, 1 - pi)\n', (7779, 7791), True, 'import numpy as np\n'), ((7966, 7981), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (7978, 7981), True, 'import numpy as np\n'), ((7983, 8002), 'numpy.transpose', 'np.transpose', (['rootW'], {}), '(rootW)\n', (7995, 8002), True, 'import numpy as np\n'), ((8074, 8090), 'numpy.dot', 'np.dot', (['rootW', 'X'], {}), '(rootW, X)\n', (8080, 8090), True, 'import numpy as np\n'), ((8156, 8171), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (8168, 8171), True, 'import numpy as np\n'), ((8265, 8292), 'numpy.matmul', 'np.matmul', (['var_covar_mat', 'U'], {}), '(var_covar_mat, U)\n', (8274, 8292), True, 'import numpy as np\n'), ((11292, 11316), 'scipy.stats.chi2.sf', 'stats.chi2.sf', (['lrstat', '(1)'], {}), '(lrstat, 1)\n', (11305, 11316), False, 'from scipy import stats\n'), ((2368, 2382), 'numpy.diagonal', 'np.diagonal', (['H'], {}), '(H)\n', (2379, 2382), True, 'import numpy as np\n'), ((8626, 8699), 'sys.stderr.write', 'sys.stderr.write', (['"""Firth regression failed. Try increasing step limit.\n"""'], {}), "('Firth regression failed. Try increasing step limit.\\n')\n", (8642, 8699), False, 'import sys\n'), ((8808, 8867), 'numpy.linalg.norm', 'np.linalg.norm', (['(beta_iterations[i] - beta_iterations[i - 1])'], {}), '(beta_iterations[i] - beta_iterations[i - 1])\n', (8822, 8867), True, 'import numpy as np\n'), ((8194, 8208), 'numpy.diagonal', 'np.diagonal', (['H'], {}), '(H)\n', (8205, 8208), True, 'import numpy as np\n')]
|
from avgn.utils.audio import get_samplerate
from avgn.utils.json import NoIndent, NoIndentEncoder
import numpy as np
from avgn.utils.paths import DATA_DIR
import librosa
from datetime import datetime
import pandas as pd
import avgn
import json
DATASET_ID = 'mobysound_humpback_whale'
def load_labs(labels):
all_labels = []
for label_file in labels:
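        # Each annotation file is whitespace-delimited, one detected call per
        # row: start time, end time, low/high frequency and SNR.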
label_df = pd.DataFrame(
[line.split() for line in open(label_file, "r")],
columns=["start_time", "end_time", "low_freq", "high_freq", "SNR"],
)
label_df['file'] = label_file.stem
all_labels.append(label_df)
all_labels = pd.concat(all_labels).reset_index()
for lab in ['start_time', 'end_time', 'low_freq', 'high_freq', 'SNR']:
all_labels[lab] = all_labels[lab].values.astype('float32')
return all_labels
def find_longest_nonvocal_stretch(file_df, wav_duration):
""" An ugly function to find the longest stretch of nonvocal behavior in a syllable dataframe
"""
## find the longest stretch of non-vocal behavior in this wav
max_break = np.argmax(file_df.start_time.values[1:] - file_df.end_time.values[:-1])
noise_end_time = file_df.start_time.values[1:][max_break]
noise_start_time = file_df.end_time.values[:-1][max_break]
start_noise = file_df.start_time.values[0]
end_noise = wav_duration - file_df.end_time.values[-1]
noise_lens = np.array([noise_end_time - noise_start_time, start_noise, end_noise])
noise_start_ends = np.array(
[
[noise_start_time, noise_end_time],
[0, start_noise],
[file_df.end_time.values[-1], wav_duration],
]
)
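    # noise_lens holds three candidate durations: the widest gap between
    # consecutive detections, the silence before the first detection and the
    # silence after the last one; pick the interval with the longest duration.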
noise_start, noise_end = noise_start_ends[np.argmax(noise_lens)]
return noise_start, noise_end
def generate_noise_and_json(bout_number, fn, DT_ID, wavloc, file_df):
# location of wav
#wavloc = np.array(wavs)[np.array([i.stem for i in wavs]) == fn][0]
# wav time
wavdate = datetime.strptime(fn, "%y%m%d-%H%M")
wav_date = wavdate.strftime("%Y-%m-%d_%H-%M-%S")
# wav samplerate and duration
sr = get_samplerate(wavloc.as_posix())
wav_duration = librosa.get_duration(filename=wavloc)
# df of syllables in file
#file_df = label_df[label_df.file == fn].sort_values(by="start_time")
## find the longest stretch of non-vocal behavior in this wav
noise_start, noise_end = find_longest_nonvocal_stretch(file_df, wav_duration)
bout_start_string = avgn.utils.general.seconds_to_str(noise_start)
# determine save locations
noise_out = (
DATA_DIR
/ "processed"
/ DATASET_ID
/ DT_ID
/ "NOISE"
/ (fn + "__" + bout_start_string + ".WAV")
)
json_out = DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (fn + ".JSON")
# wav general information
json_dict = {}
json_dict["bout_number"] = bout_number
json_dict["species"] = "Megaptera novaengliae"
json_dict["common_name"] = "Humpback whale"
json_dict["datetime"] = wav_date
json_dict["samplerate_hz"] = sr
json_dict["length_s"] = wav_duration
json_dict["wav_loc"] = wavloc.as_posix()
json_dict["noise_loc"] = noise_out.as_posix()
json_dict["indvs"] = {
"UNK": {
"syllables": {
"start_times": NoIndent(
list(file_df.start_time.values.astype("float"))
),
"end_times": NoIndent(list(file_df.end_time.astype("float"))),
"high_freq": NoIndent(list(file_df.high_freq.astype("float"))),
"low_freq": NoIndent(list(file_df.low_freq.astype("float"))),
"SNR": NoIndent(list(file_df.SNR.astype("float"))),
}
}
}
json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
# save wav file
noise_wav, sr = librosa.load(
wavloc, sr=None, mono=True, offset=noise_start, duration=noise_end - noise_start
)
avgn.utils.paths.ensure_dir(noise_out)
librosa.output.write_wav(noise_out, y=noise_wav, sr=sr, norm=True)
# save json
avgn.utils.paths.ensure_dir(json_out.as_posix())
print(json_txt, file=open(json_out.as_posix(), "w"))
|
[
"numpy.argmax",
"avgn.utils.paths.ensure_dir",
"json.dumps",
"datetime.datetime.strptime",
"numpy.array",
"avgn.utils.general.seconds_to_str",
"librosa.load",
"librosa.output.write_wav",
"pandas.concat",
"librosa.get_duration"
] |
[((1087, 1158), 'numpy.argmax', 'np.argmax', (['(file_df.start_time.values[1:] - file_df.end_time.values[:-1])'], {}), '(file_df.start_time.values[1:] - file_df.end_time.values[:-1])\n', (1096, 1158), True, 'import numpy as np\n'), ((1407, 1476), 'numpy.array', 'np.array', (['[noise_end_time - noise_start_time, start_noise, end_noise]'], {}), '([noise_end_time - noise_start_time, start_noise, end_noise])\n', (1415, 1476), True, 'import numpy as np\n'), ((1500, 1614), 'numpy.array', 'np.array', (['[[noise_start_time, noise_end_time], [0, start_noise], [file_df.end_time.\n values[-1], wav_duration]]'], {}), '([[noise_start_time, noise_end_time], [0, start_noise], [file_df.\n end_time.values[-1], wav_duration]])\n', (1508, 1614), True, 'import numpy as np\n'), ((1968, 2004), 'datetime.datetime.strptime', 'datetime.strptime', (['fn', '"""%y%m%d-%H%M"""'], {}), "(fn, '%y%m%d-%H%M')\n", (1985, 2004), False, 'from datetime import datetime\n'), ((2154, 2191), 'librosa.get_duration', 'librosa.get_duration', ([], {'filename': 'wavloc'}), '(filename=wavloc)\n', (2174, 2191), False, 'import librosa\n'), ((2469, 2515), 'avgn.utils.general.seconds_to_str', 'avgn.utils.general.seconds_to_str', (['noise_start'], {}), '(noise_start)\n', (2502, 2515), False, 'import avgn\n'), ((3754, 3806), 'json.dumps', 'json.dumps', (['json_dict'], {'cls': 'NoIndentEncoder', 'indent': '(2)'}), '(json_dict, cls=NoIndentEncoder, indent=2)\n', (3764, 3806), False, 'import json\n'), ((3848, 3947), 'librosa.load', 'librosa.load', (['wavloc'], {'sr': 'None', 'mono': '(True)', 'offset': 'noise_start', 'duration': '(noise_end - noise_start)'}), '(wavloc, sr=None, mono=True, offset=noise_start, duration=\n noise_end - noise_start)\n', (3860, 3947), False, 'import librosa\n'), ((3961, 3999), 'avgn.utils.paths.ensure_dir', 'avgn.utils.paths.ensure_dir', (['noise_out'], {}), '(noise_out)\n', (3988, 3999), False, 'import avgn\n'), ((4004, 4070), 'librosa.output.write_wav', 'librosa.output.write_wav', (['noise_out'], {'y': 'noise_wav', 'sr': 'sr', 'norm': '(True)'}), '(noise_out, y=noise_wav, sr=sr, norm=True)\n', (4028, 4070), False, 'import librosa\n'), ((1717, 1738), 'numpy.argmax', 'np.argmax', (['noise_lens'], {}), '(noise_lens)\n', (1726, 1738), True, 'import numpy as np\n'), ((640, 661), 'pandas.concat', 'pd.concat', (['all_labels'], {}), '(all_labels)\n', (649, 661), True, 'import pandas as pd\n')]
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
from federatedml.ftl.plain_ftl import PlainFTLHostModel
from federatedml.ftl.hetero_ftl.hetero_ftl_guest import HeteroPlainFTLGuest, HeteroFTLGuest
from federatedml.ftl.plain_ftl import PlainFTLGuestModel
from federatedml.feature.instance import Instance
from federatedml.ftl.common.data_util import create_table
from federatedml.ftl.test.fake_models import FakeAutoencoder, FakeDiffConverge
from federatedml.param.param import FTLModelParam
from federatedml.util.transfer_variable import HeteroFTLTransferVariable
from arch.api.eggroll import init
class TestHeteroFTLGuest(HeteroPlainFTLGuest):
def __init__(self, guest, model_param, transfer_variable):
super(TestHeteroFTLGuest, self).__init__(guest, model_param, transfer_variable)
U_B = np.array([[4, 2, 3, 1, 2],
[6, 5, 1, 4, 5],
[7, 4, 1, 9, 10],
[6, 5, 1, 4, 5]])
overlap_indexes = [1, 2]
Wh = np.ones((5, U_B.shape[1]))
bh = np.zeros(U_B.shape[1])
autoencoderB = FakeAutoencoder(1)
autoencoderB.build(U_B.shape[1], Wh, bh)
self.host = PlainFTLHostModel(autoencoderB, self.model_param)
self.host.set_batch(U_B, overlap_indexes)
def _do_remote(self, value=None, name=None, tag=None, role=None, idx=None):
print("@_do_remote", value, name, tag, role, idx)
def _do_get(self, name=None, tag=None, idx=None):
print("@_do_get", name, tag, idx)
if tag == "HeteroFTLTransferVariable.host_sample_indexes.0":
return [np.array([1, 2, 4, 5])]
elif tag == "HeteroFTLTransferVariable.host_component_list.0.0":
return self.host.send_components()
return None
class TestCreateGuestHostEggrollTable(unittest.TestCase):
def test_hetero_plain_guest_prepare_table(self):
U_A = np.array([[1, 2, 3, 4, 5],
[4, 5, 6, 7, 8],
[7, 8, 9, 10, 11],
[4, 5, 6, 7, 8]])
y = np.array([[1], [-1], [1], [-1]])
Wh = np.ones((5, U_A.shape[1]))
bh = np.zeros(U_A.shape[1])
model_param = FTLModelParam(alpha=1, max_iteration=1)
autoencoderA = FakeAutoencoder(0)
autoencoderA.build(U_A.shape[1], Wh, bh)
guest = PlainFTLGuestModel(autoencoderA, model_param)
converge_func = FakeDiffConverge(None)
ftl_guest = TestHeteroFTLGuest(guest, model_param, HeteroFTLTransferVariable())
ftl_guest.set_converge_function(converge_func)
guest_sample_indexes = np.array([0, 1, 2, 3])
guest_x_dict = {}
guest_label_dict = {}
instance_dict = {}
instance_list = []
np.random.seed(100)
for i, feature, label, in zip(guest_sample_indexes, U_A, y):
instance = Instance(inst_id=i, features=feature, label=label[0])
guest_x_dict[i] = feature
guest_label_dict[i] = label[0]
instance_dict[i] = instance
instance_list.append(instance)
guest_x = create_table(instance_list, indexes=guest_sample_indexes)
guest_x, overlap_indexes, non_overlap_indexes, guest_y = ftl_guest.prepare_data(guest_x)
print("guest_x", guest_x)
print("overlap_indexes", overlap_indexes)
print("non_overlap_indexes", non_overlap_indexes)
print("guest_y", guest_y)
if __name__ == '__main__':
init()
unittest.main()
|
[
"unittest.main",
"federatedml.ftl.plain_ftl.PlainFTLGuestModel",
"numpy.random.seed",
"federatedml.util.transfer_variable.HeteroFTLTransferVariable",
"federatedml.feature.instance.Instance",
"numpy.zeros",
"numpy.ones",
"federatedml.ftl.test.fake_models.FakeAutoencoder",
"federatedml.ftl.plain_ftl.PlainFTLHostModel",
"numpy.array",
"federatedml.ftl.test.fake_models.FakeDiffConverge",
"federatedml.ftl.common.data_util.create_table",
"arch.api.eggroll.init",
"federatedml.param.param.FTLModelParam"
] |
[((4080, 4086), 'arch.api.eggroll.init', 'init', ([], {}), '()\n', (4084, 4086), False, 'from arch.api.eggroll import init\n'), ((4091, 4106), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4104, 4106), False, 'import unittest\n'), ((1417, 1496), 'numpy.array', 'np.array', (['[[4, 2, 3, 1, 2], [6, 5, 1, 4, 5], [7, 4, 1, 9, 10], [6, 5, 1, 4, 5]]'], {}), '([[4, 2, 3, 1, 2], [6, 5, 1, 4, 5], [7, 4, 1, 9, 10], [6, 5, 1, 4, 5]])\n', (1425, 1496), True, 'import numpy as np\n'), ((1617, 1643), 'numpy.ones', 'np.ones', (['(5, U_B.shape[1])'], {}), '((5, U_B.shape[1]))\n', (1624, 1643), True, 'import numpy as np\n'), ((1657, 1679), 'numpy.zeros', 'np.zeros', (['U_B.shape[1]'], {}), '(U_B.shape[1])\n', (1665, 1679), True, 'import numpy as np\n'), ((1704, 1722), 'federatedml.ftl.test.fake_models.FakeAutoencoder', 'FakeAutoencoder', (['(1)'], {}), '(1)\n', (1719, 1722), False, 'from federatedml.ftl.test.fake_models import FakeAutoencoder, FakeDiffConverge\n'), ((1793, 1842), 'federatedml.ftl.plain_ftl.PlainFTLHostModel', 'PlainFTLHostModel', (['autoencoderB', 'self.model_param'], {}), '(autoencoderB, self.model_param)\n', (1810, 1842), False, 'from federatedml.ftl.plain_ftl import PlainFTLHostModel\n'), ((2510, 2595), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [4, 5, 6, 7, 8], [7, 8, 9, 10, 11], [4, 5, 6, 7, 8]]'], {}), '([[1, 2, 3, 4, 5], [4, 5, 6, 7, 8], [7, 8, 9, 10, 11], [4, 5, 6, 7, 8]]\n )\n', (2518, 2595), True, 'import numpy as np\n'), ((2675, 2707), 'numpy.array', 'np.array', (['[[1], [-1], [1], [-1]]'], {}), '([[1], [-1], [1], [-1]])\n', (2683, 2707), True, 'import numpy as np\n'), ((2722, 2748), 'numpy.ones', 'np.ones', (['(5, U_A.shape[1])'], {}), '((5, U_A.shape[1]))\n', (2729, 2748), True, 'import numpy as np\n'), ((2762, 2784), 'numpy.zeros', 'np.zeros', (['U_A.shape[1]'], {}), '(U_A.shape[1])\n', (2770, 2784), True, 'import numpy as np\n'), ((2808, 2847), 'federatedml.param.param.FTLModelParam', 'FTLModelParam', ([], {'alpha': '(1)', 'max_iteration': '(1)'}), '(alpha=1, max_iteration=1)\n', (2821, 2847), False, 'from federatedml.param.param import FTLModelParam\n'), ((2872, 2890), 'federatedml.ftl.test.fake_models.FakeAutoencoder', 'FakeAutoencoder', (['(0)'], {}), '(0)\n', (2887, 2890), False, 'from federatedml.ftl.test.fake_models import FakeAutoencoder, FakeDiffConverge\n'), ((2956, 3001), 'federatedml.ftl.plain_ftl.PlainFTLGuestModel', 'PlainFTLGuestModel', (['autoencoderA', 'model_param'], {}), '(autoencoderA, model_param)\n', (2974, 3001), False, 'from federatedml.ftl.plain_ftl import PlainFTLGuestModel\n'), ((3027, 3049), 'federatedml.ftl.test.fake_models.FakeDiffConverge', 'FakeDiffConverge', (['None'], {}), '(None)\n', (3043, 3049), False, 'from federatedml.ftl.test.fake_models import FakeAutoencoder, FakeDiffConverge\n'), ((3225, 3247), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (3233, 3247), True, 'import numpy as np\n'), ((3366, 3385), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (3380, 3385), True, 'import numpy as np\n'), ((3715, 3772), 'federatedml.ftl.common.data_util.create_table', 'create_table', (['instance_list'], {'indexes': 'guest_sample_indexes'}), '(instance_list, indexes=guest_sample_indexes)\n', (3727, 3772), False, 'from federatedml.ftl.common.data_util import create_table\n'), ((3109, 3136), 'federatedml.util.transfer_variable.HeteroFTLTransferVariable', 'HeteroFTLTransferVariable', ([], {}), '()\n', (3134, 3136), False, 'from federatedml.util.transfer_variable import HeteroFTLTransferVariable\n'), 
((3478, 3531), 'federatedml.feature.instance.Instance', 'Instance', ([], {'inst_id': 'i', 'features': 'feature', 'label': 'label[0]'}), '(inst_id=i, features=feature, label=label[0])\n', (3486, 3531), False, 'from federatedml.feature.instance import Instance\n'), ((2218, 2240), 'numpy.array', 'np.array', (['[1, 2, 4, 5]'], {}), '([1, 2, 4, 5])\n', (2226, 2240), True, 'import numpy as np\n')]
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__= '1.8'
__status__ = "Research"
__date__ = "2/1/2020"
__license__= "MIT License"
import os
import sys
import numpy as np
import time
import glob
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
import imageio
import skimage
from parameters import Params
from sys_utils import tohms
from image_utils import save_image
#========================================================================================
class TeePipe(object):
#source: https://stackoverflow.com/q/616645
def __init__(self, filename="Red.Wood", mode="a", buff=0):
self.stdout = sys.stdout
# self.file = open(filename, mode, buff)
self.file = open(filename, mode)
sys.stdout = self
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, message):
self.stdout.write(message)
self.file.write(message)
def flush(self):
self.stdout.flush()
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
if self.stdout != None:
sys.stdout = self.stdout
self.stdout = None
if self.file != None:
self.file.close()
self.file = None
# ========================================================================================
class MLLogger():
def __init__(self, hps):
self.hps = hps
self.logf = None
self.im_size = hps.img_size
self.epoch_num = hps.epochs_max # Total number of epochs
self.iter_num = {} # Iterations per epoch
# self.iter_epoch = 0
self.batch_size = hps.batch_size
self.data = []
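        # Each log record is a flat list [timestamp, epoch, global_iter,
        # stage_name, loss_1, loss_2, ...]; newly seen loss keys extend this layout.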
self.dkeys_id=['ts', 'epoch', 'iter', 'stage'] # Key lookup by ID
self.dkeys = {} # ID lookup by key
self.m_first = {} # stage_name -> position of first record
self.m_last = {} # stage_name -> position of last record
        self.start_time = None # global start timestamp
        self.iter_global = 0 # Total iterations since the beginning of the training
self.print_header = True
self.data_format_changed = True
self.last_report_pos={} # stage_name -> Position in self.data of the last report
# Tensorboard
self.writer = None
self.log_id = None
return
def load_config(self):
logdir = self.exp_path+'/log/'
cfg_filename = os.path.join(logdir, 'cfg-'+str(self.log_id-1)+'-*.json')
cfg_files = glob.glob(cfg_filename)
cfg_files.sort(reverse=True)
if len(cfg_files) == 0 or not os.path.isfile(cfg_files[0]):
return None
p = Params()
if not p.load(cfg_files[0]):
return None
return p
def save_config(self, epoch=None):
logdir = self.exp_path+'/log/'
cfg_filename = os.path.join(logdir, 'cfg-'+str(self.log_id)+'-'+str(epoch)+'.json')
self.hps.save(cfg_filename)
return
def open_experiment(self, experiment_name='m1'):
"""
Creates sub-directory structure
- Creates new log file
"""
self.experiment_name = experiment_name
self.exp_path = os.path.join(experiment_name)
os.makedirs(self.exp_path, exist_ok=True)
if not self.hps.eval and self.experiment_name != '.':
# Backup source code & configs
os.system('cp *.py ' + self.exp_path+ '/')
logdir = self.exp_path+'/tblog/'
os.makedirs(logdir, exist_ok=True)
self.writer = SummaryWriter(logdir)
logdir = self.exp_path+'/log/'
os.makedirs(logdir, exist_ok=True)
self.model_path =os.path.join(self.exp_path, 'models')
os.makedirs(self.model_path, exist_ok=True)
# Create new log files
prefix = 'eval-' if self.hps.eval else 'train-'
log_id = 0
while True:
log_filename = prefix+'log-'+str(log_id)+'.txt'
log_path = os.path.join(logdir, log_filename)
if not os.path.isfile(log_path):
break
log_id += 1
if self.hps.log_stdout:
stdout_log_filename = prefix+'stdout-'+str(log_id)+'.txt'
stdout_log_filename = os.path.join(logdir, stdout_log_filename)
self.stdout_logger = TeePipe(stdout_log_filename)
print("Creating new log file:",log_path)
self.logf = open(log_path, 'wt')
self.log_id = log_id
return
def set_samples_num(self, stage_name, samples_num):
self.iter_num[stage_name] = self.hps.batch_size * int(np.floor(samples_num / self.hps.batch_size))
def start_epoch(self, stage_name, epoch):
"""
Creates a null record with a current timestamp
"""
if self.start_time is None:
self.start_time = time.time()
# Stored the position of the first epoch record
# There can be one start per stage
self.m_first[stage_name] = len(self.data)
self.m_last[stage_name] = len(self.data)
self.last_report_pos[stage_name] = len(self.data)
rec = [0]*len(self.dkeys_id)
rec[0] = time.time() - self.start_time
rec[1] = epoch
rec[2] = self.iter_global
rec[3] = stage_name
self.data.append(rec)
self.print_header = True
return
def log_loss(self, epoch, iter, losses, stage_name):
"""
Args:
epoch (int): current epoch starting from 0
iter (int): sample iteration within the epoch
stage_name (str): 'train', 'val', 'test'
losses (dict): dictionary of loss_name->loss_val
"""
if iter is not None:
self.iter_global = iter
# Collect new value keys
for key, val in losses.items():
if key not in self.dkeys_id:
# Add new key=val
self.dkeys_id.append(key)
self.data_format_changed = True
# Update the key-index lookup table
if self.data_format_changed:
self.dkeys = {}
for i, key in enumerate(self.dkeys_id):
self.dkeys[key] = i
# Store new data
rec = [0]*len(self.dkeys_id)
rec[0] = time.time() - self.start_time
rec[1] = epoch
rec[2] = self.iter_global # Global iteration
rec[3] = stage_name
        # Generate tensorboard record
tboard_losses = {}
for key, val in losses.items():
id = self.dkeys[key]
rec[id] = val
key = stage_name+'_'+key
tboard_losses[key] = val
self.data.append(rec)
# Append log to the file
if self.logf is not None:
if self.data_format_changed:
# Insert data format header
header_str = [str(v) for v in self.dkeys_id]
self.logf.write('\n'+' '.join(header_str)+'\n')
line = [str(v) for v in rec]
self.logf.write(' '.join(line)+'\n')
self.logf.flush()
# Update tensorboard
# {'d_loss': d_loss, 'grad_penalty': grad_penalty}
self.writer.add_scalars('losses', tboard_losses, self.iter_global)
self.m_last[stage_name] = len(self.data)-1
self.data_format_changed= False
return
def print_table(self, name, data, header=None):
"""
max_iter = self.iter_num*self.epoch_num
epoch_str = str(rec[-1][1])+" ("+str(int(done))+"%)"
header = ['T', 'e('+str(self.epoch_num)+')', 'iter('+str(max_iter//1000)+'k)', 'batch (ms)']
data = [[rec[-1][3], epoch_str, str(last_iter), batch_took_avg*1000.0]]
"""
# Print table
table_width = 0
if header is not None:
self.col_width = []
line = ""
for i, hv in enumerate(header):
line += '{:<{c0}}'.format(hv, c0=len(hv))
self.col_width.append(len(hv))
print('')
if name is not None:
print(name)
print(line)
head_len = len(line)
print('-'*head_len )
table_width = head_len
# Print data
for r, rv in enumerate(data):
line = ""
for c, cv in enumerate(rv):
line += '{:<{c0}}'.format(cv, c0=self.col_width[c])
print(line, flush=True)
if len(line) > table_width:
table_width = len(line)
return table_width
def get_avg(self, begin, end, cols=[]):
rec = self.data[begin:end]
# Get the max number of stored value in this run
mx = 0
for val in rec:
if len(val)>mx: mx = len(val)
# Create numpy vector for the averages
rec = np.asarray([x+[0]*(mx-len(x)) for x in rec], dtype=np.object )
# Get only the records with loss values
rec_avg = rec.copy()
rec_avg[:,:4] = 0
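        # The first four columns (timestamp, epoch, iter, stage name) are zeroed
        # so the whole array can be cast to float; only the loss columns are
        # meaningful in the returned averages.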
rec_avg = rec_avg.astype(np.float)
rec_avg = rec_avg.mean(0)
return rec_avg
def print_batch_stat(self, stage_name='t'):
last_epoch_pos = self.m_last.get(stage_name, 0)
last_report_pos = self.last_report_pos.get(stage_name, 0)
if last_report_pos == last_epoch_pos:
# Already reported
return
# Get averages since the last report
rec_avg = self.get_avg(last_report_pos+1, last_epoch_pos+1)
rec_last = self.data[last_epoch_pos]
time_now, last_epoch, last_iter, last_stage_name = rec_last[:4]
iter = last_iter - self.data[self.m_first[stage_name]][2]
done = round(100*iter/self.iter_num.get(stage_name), 2) if stage_name in self.iter_num else 0
batch_took_avg = float(time_now) - float(self.data[last_report_pos+1][0])
if self.batch_size is not None:
batch_took_avg /= self.batch_size
self.last_report_pos[stage_name] = last_epoch_pos
# Print table
header = None
if self.print_header:
max_iter = self.iter_num.get(stage_name, 0)*self.epoch_num
header = ['Time ',
'E('+str(self.epoch_num)+') ',
'Iter('+str(max_iter//1000)+'k) ',
'Batch (ms) ']
for key in self.dkeys_id[4:]:
header.append(key+' '*(15-len(key)))
self.print_header=False
data = [tohms(time_now), str(last_epoch)+' ('+str(done)+'%)', str(last_iter), round(batch_took_avg*1000.0, 3)]
for key in self.dkeys_id[4:]:
data.append(round(rec_avg[self.dkeys[key]], 4))
table_width = self.print_table(last_stage_name, [data], header)
return
def print_epoch_stat(self, stage_name, **kwargs):
"""
Batch train log format
Epoch train log format
Test log format
"""
first_epoch_pos = self.m_first.get(stage_name, 0)
last_epoch_pos = self.m_last.get(stage_name, 0)
rec_avg = self.get_avg(first_epoch_pos+1, last_epoch_pos+1)
rec_last = self.data[last_epoch_pos]
time_now, last_epoch, last_iter, last_stage_name = rec_last[:4]
epoch_took = tohms(time_now - self.data[first_epoch_pos][0])
# Print table
        max_iter = self.iter_num.get(stage_name, 0)*self.epoch_num
header = ['Time ',
'E('+str(self.epoch_num)+') ',
'Iter('+str(max_iter//1000)+'k) ',
'Epoch (H:M:S) ']
for key in self.dkeys_id[4:]:
header.append(key)
data = [tohms(time_now), str(last_epoch), str(last_iter), epoch_took]
for key in self.dkeys_id[4:]:
data.append(round(rec_avg[self.dkeys[key]], 4))
table_width = self.print_table(last_stage_name, [data], header)
print("-"*table_width)
return
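    # log_images: write a grid of sample images to <experiment_name>/<name>/ and
    # mirror the same grid to TensorBoard under `name`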
def log_images(self, x, epoch, name_suffix, name, channels=3, nrow=8):
img_path = os.path.join(self.experiment_name, name)
os.makedirs(img_path, exist_ok=True)
img_size = self.im_size
if img_size < 1:
img_size2 = x.nelement() / x.size(0) / channels
img_size = int(np.sqrt(img_size2))
x = x.view(-1, channels, img_size, img_size) # * 0.5 + 0.5
grid = save_image(x,
img_path+'/sample_' + str(epoch) + "_" + str(name_suffix) + '.jpg',
nrow = nrow, normalize=True, scale_each=True)
img_grid = make_grid(x, normalize=True, scale_each=True, nrow=nrow)
self.writer.add_image(name, img_grid , self.iter_global)
return
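    # _merge: tile a batch of single-channel images into a size[0] x size[1] grid;
    # `labels` marks a tile with a short bar along its top edge, `strike` with a
    # one-pixel line through its vertical middle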
def _merge(self, images, size, labels=[], strike=[]):
h, w = images.shape[1], images.shape[2]
resize_factor=1.0
h_ = int(h * resize_factor)
w_ = int(w * resize_factor)
img = np.zeros((h_ * size[0], w_ * size[1]))
for idx, image in enumerate(images):
i = int(idx % size[1])
j = int(idx / size[1])
image_ = skimage.transform.resize(image, output_shape=(w_, h_))
img[j * h_:j * h_ + h_, i * w_:i * w_ + w_] = image_
if len(labels) == len(images):
if labels[idx] == 1:
img[j * h_:j * h_ + 2, i * w_:i * w_ + w_-4] = np.ones((2, w_-4))
if len(strike) == len(images):
if strike[idx] == 1:
img[j * h_+h_//2:j * h_ + h_//2+1, i * w_:i * w_ + w_-4] = np.ones((1, w_-4))
return img
def save_images(self, images, img_size=(28,28), labels=[], strike=[], name='result.jpg'):
n_img_y = 16
n_img_x = 32
images = images.reshape(n_img_x * n_img_y, img_size[0], img_size[1])
imageio.imsave(name, self._merge(images, [n_img_y, n_img_x], labels, strike))
#=================================================================================
if __name__ == "__main__":
print("NOT AN EXECUTABLE!")
|
[
"tensorboardX.SummaryWriter",
"os.makedirs",
"sys_utils.tohms",
"numpy.floor",
"parameters.Params",
"numpy.zeros",
"os.system",
"numpy.ones",
"time.time",
"torchvision.utils.make_grid",
"os.path.isfile",
"skimage.transform.resize",
"glob.glob",
"os.path.join",
"numpy.sqrt"
] |
[((2599, 2622), 'glob.glob', 'glob.glob', (['cfg_filename'], {}), '(cfg_filename)\n', (2608, 2622), False, 'import glob\n'), ((2765, 2773), 'parameters.Params', 'Params', ([], {}), '()\n', (2771, 2773), False, 'from parameters import Params\n'), ((3307, 3336), 'os.path.join', 'os.path.join', (['experiment_name'], {}), '(experiment_name)\n', (3319, 3336), False, 'import os\n'), ((3345, 3386), 'os.makedirs', 'os.makedirs', (['self.exp_path'], {'exist_ok': '(True)'}), '(self.exp_path, exist_ok=True)\n', (3356, 3386), False, 'import os\n'), ((3606, 3640), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (3617, 3640), False, 'import os\n'), ((3663, 3684), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['logdir'], {}), '(logdir)\n', (3676, 3684), False, 'from tensorboardX import SummaryWriter\n'), ((3733, 3767), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (3744, 3767), False, 'import os\n'), ((3795, 3832), 'os.path.join', 'os.path.join', (['self.exp_path', '"""models"""'], {}), "(self.exp_path, 'models')\n", (3807, 3832), False, 'import os\n'), ((3842, 3885), 'os.makedirs', 'os.makedirs', (['self.model_path'], {'exist_ok': '(True)'}), '(self.model_path, exist_ok=True)\n', (3853, 3885), False, 'import os\n'), ((11366, 11413), 'sys_utils.tohms', 'tohms', (['(time_now - self.data[first_epoch_pos][0])'], {}), '(time_now - self.data[first_epoch_pos][0])\n', (11371, 11413), False, 'from sys_utils import tohms\n'), ((12150, 12190), 'os.path.join', 'os.path.join', (['self.experiment_name', 'name'], {}), '(self.experiment_name, name)\n', (12162, 12190), False, 'import os\n'), ((12199, 12235), 'os.makedirs', 'os.makedirs', (['img_path'], {'exist_ok': '(True)'}), '(img_path, exist_ok=True)\n', (12210, 12235), False, 'import os\n'), ((12690, 12746), 'torchvision.utils.make_grid', 'make_grid', (['x'], {'normalize': '(True)', 'scale_each': '(True)', 'nrow': 'nrow'}), '(x, normalize=True, scale_each=True, nrow=nrow)\n', (12699, 12746), False, 'from torchvision.utils import make_grid\n'), ((13049, 13087), 'numpy.zeros', 'np.zeros', (['(h_ * size[0], w_ * size[1])'], {}), '((h_ * size[0], w_ * size[1]))\n', (13057, 13087), True, 'import numpy as np\n'), ((3513, 3556), 'os.system', 'os.system', (["('cp *.py ' + self.exp_path + '/')"], {}), "('cp *.py ' + self.exp_path + '/')\n", (3522, 3556), False, 'import os\n'), ((4097, 4131), 'os.path.join', 'os.path.join', (['logdir', 'log_filename'], {}), '(logdir, log_filename)\n', (4109, 4131), False, 'import os\n'), ((4360, 4401), 'os.path.join', 'os.path.join', (['logdir', 'stdout_log_filename'], {}), '(logdir, stdout_log_filename)\n', (4372, 4401), False, 'import os\n'), ((4957, 4968), 'time.time', 'time.time', ([], {}), '()\n', (4966, 4968), False, 'import time\n'), ((5281, 5292), 'time.time', 'time.time', ([], {}), '()\n', (5290, 5292), False, 'import time\n'), ((6377, 6388), 'time.time', 'time.time', ([], {}), '()\n', (6386, 6388), False, 'import time\n'), ((10593, 10608), 'sys_utils.tohms', 'tohms', (['time_now'], {}), '(time_now)\n', (10598, 10608), False, 'from sys_utils import tohms\n'), ((11771, 11786), 'sys_utils.tohms', 'tohms', (['time_now'], {}), '(time_now)\n', (11776, 11786), False, 'from sys_utils import tohms\n'), ((13226, 13280), 'skimage.transform.resize', 'skimage.transform.resize', (['image'], {'output_shape': '(w_, h_)'}), '(image, output_shape=(w_, h_))\n', (13250, 13280), False, 'import skimage\n'), ((2698, 2726), 'os.path.isfile', 
'os.path.isfile', (['cfg_files[0]'], {}), '(cfg_files[0])\n', (2712, 2726), False, 'import os\n'), ((4151, 4175), 'os.path.isfile', 'os.path.isfile', (['log_path'], {}), '(log_path)\n', (4165, 4175), False, 'import os\n'), ((4719, 4762), 'numpy.floor', 'np.floor', (['(samples_num / self.hps.batch_size)'], {}), '(samples_num / self.hps.batch_size)\n', (4727, 4762), True, 'import numpy as np\n'), ((12389, 12407), 'numpy.sqrt', 'np.sqrt', (['img_size2'], {}), '(img_size2)\n', (12396, 12407), True, 'import numpy as np\n'), ((13494, 13514), 'numpy.ones', 'np.ones', (['(2, w_ - 4)'], {}), '((2, w_ - 4))\n', (13501, 13514), True, 'import numpy as np\n'), ((13673, 13693), 'numpy.ones', 'np.ones', (['(1, w_ - 4)'], {}), '((1, w_ - 4))\n', (13680, 13693), True, 'import numpy as np\n')]
|
import scipy.spatial as sci_spatial
import skimage.draw as ski_draw
import shapely.geometry as shapely_geom
import numpy as np
import os, sys
def create_landscape(no_of_circles, radius):
# create the middle points of the ponds (the ponds should not overlap)
x,y = np.random.randint(0,400), np.random.randint(0,400)
list_of_points = [(x + 400, y + 400),
(x + 400, y),
(x + 800, y + 400),
(x + 400, y + 800),
(x, y + 400)]
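    # every centre is stored together with its four copies shifted by +/-400 in x or y,
    # so overlap tests and corridor distances behave as if the 400x400 world wrapped around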
for i in range(no_of_circles-1):
new_point_found = False
trials = 0
while not new_point_found and trials < 500:
x,y = np.random.randint(0,400), np.random.randint(0,400)
new_point = shapely_geom.Point((x + 400, y + 400))
trials += 1
if not new_point.buffer(radius * 2 + 50).intersects(shapely_geom.MultiPoint(list_of_points)):
new_point_found = True
list_of_points.append((x + 400, y + 400))
list_of_points.append((x + 400, y))
list_of_points.append((x + 800, y + 400))
list_of_points.append((x + 400, y + 800))
list_of_points.append((x, y + 400))
# landscape with ponds
ponds_img = np.full((1200 + 2*radius, 1200 + 2*radius), 55)
# draw the ponds
for point_i in list_of_points:
rr, cc = ski_draw.disk(point_i, radius)
ponds_img[rr + radius, cc + radius] = 105
ponds_img = ponds_img[400+radius : 800+radius, 400+radius : 800+radius]
# pond-id
ponds_id_img = np.full((1200 + 2*radius, 1200 + 2*radius), -999)
# draw the ponds
id_i = 0
    for point_i, id_i in zip(list_of_points, np.repeat(np.arange(len(list_of_points)//5), 5)):
rr, cc = ski_draw.disk(point_i, radius)
ponds_id_img[rr + radius, cc + radius] = id_i
ponds_id_img = ponds_id_img[400+radius : 800+radius, 400+radius : 800+radius]
# create an raster image with the middle points marked
is_center_img = np.zeros_like(ponds_img)
boundary = shapely_geom.Polygon([(399, 399), (799, 399), (799, 799), (399, 799)])
selection = [shapely_geom.Point(point_i).intersects(boundary) for point_i in list_of_points]
x,y = np.array(list_of_points)[selection].T
x -= 400
y -= 400
is_center_img[x, y] = 1
return is_center_img, ponds_img, ponds_id_img
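# make_corridors: connect every pair of pond centres closer than 170 px with a straight
# corridor (patch value 35) drawn on a 3x3 tiled copy of the map, then crop back to
# 400x400 and repaint the ponds (value 105) on top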
def make_corridors(is_center, ponds):
without_boundaries = np.zeros((400*3, 400*3))
without_boundaries[0:400, 400:800] = is_center
without_boundaries[400:800, 0:400] = is_center
without_boundaries[400:800, 400:800] = is_center
without_boundaries[800:1200, 400:800] = is_center
without_boundaries[400:800, 800:1200] = is_center
loc = np.where(without_boundaries == 1)
center_points = np.swapaxes(loc, 0, 1)
result = sci_spatial.distance.cdist(center_points, center_points)
new_img = np.full_like(without_boundaries, 55) # 55 --> green in netlogo
points_with_corridors = np.where(np.logical_and( result != 0, result < 170)) #mean(result[result != 0]) * 0.3
for i in np.arange(0, np.shape(points_with_corridors)[1]):
index_from = points_with_corridors[0][i]
index_to = points_with_corridors[1][i]
x = [loc[1][index_from], loc[1][index_to]]
y = [loc[0][index_from], loc[0][index_to]]
x_corr, y_corr = shapely_geom.LineString([(x[0], y[0]), (x[1], y[1])]).buffer(4.5).exterior.coords.xy
rr, cc = ski_draw.polygon(y_corr, x_corr, without_boundaries.shape)
new_img[rr, cc] = 35 # 35 --> brown in netlogo
final_img = new_img[400:800, 400:800]
final_img[np.where(ponds == 105)] = 105 # 105 --> blue in netlogo
return final_img
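# make_buffers: replace the corridors by a circular buffer around every pond centre whose
# extra area (beyond the pond of radius 15) equals the per-pond share of the corridor area,
# then redraw the ponds on top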
def make_buffers(corridor_img, is_center_img):
radius = 15
corridor_area = np.sum(corridor_img == 35)
no_of_ponds = np.sum(is_center_img)
    buffer_radius = np.sqrt((corridor_area / no_of_ponds + np.pi * radius ** 2) / np.pi)
without_boundaries = np.zeros((400*3, 400*3))
without_boundaries[0:400, 400:800] = is_center_img
without_boundaries[400:800, 0:400] = is_center_img
without_boundaries[400:800, 400:800] = is_center_img
without_boundaries[800:1200, 400:800] = is_center_img
without_boundaries[400:800, 800:1200] = is_center_img
x,y = np.where(without_boundaries == 1)
new_img = np.full_like(without_boundaries, 55) # 55 --> green in netlogo
# make buffers
for x_i, y_i in zip(x,y):
rr, cc = ski_draw.disk((x_i, y_i), buffer_radius)
filter_1 = (rr >= 0) & (rr <= 1199)
filter_2 = (cc >= 0) & (cc <= 1199)
rr = rr[filter_1 & filter_2]
cc = cc[filter_1 & filter_2]
new_img[rr, cc] = 35
# make ponds
for x_i, y_i in zip(x,y):
rr, cc = ski_draw.disk((x_i, y_i), radius)
filter_1 = (rr >= 0) & (rr <= 1199)
filter_2 = (cc >= 0) & (cc <= 1199)
rr = rr[filter_1 & filter_2]
cc = cc[filter_1 & filter_2]
new_img[rr, cc] = 105
return new_img[400:800, 400:800]
if __name__ == "__main__":
#print('Scenario-Number:', sys.argv[1])
os.makedirs('gis_output/' + sys.argv[1])
os.chdir('gis_output/' + sys.argv[1])
is_center_of_pond, pond, pond_id = create_landscape(no_of_circles=int(sys.argv[2]), radius=int(sys.argv[3]))
corridors = make_corridors(is_center_of_pond, pond)
buffers = make_buffers(corridors, is_center_of_pond)
with open("../pcolor.asc") as myfile:
head = [next(myfile) for x in range(6)]
np.savetxt('corridors.asc',corridors, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
np.savetxt('buffers.asc',buffers, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
np.savetxt('center.asc',is_center_of_pond, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
np.savetxt('id.asc',pond_id, fmt='%i', newline='\n', header=''.join(head)[:-1], comments='')
|
[
"numpy.sum",
"numpy.shape",
"numpy.random.randint",
"os.chdir",
"numpy.full",
"numpy.full_like",
"numpy.zeros_like",
"shapely.geometry.Point",
"shapely.geometry.MultiPoint",
"shapely.geometry.Polygon",
"shapely.geometry.LineString",
"numpy.swapaxes",
"scipy.spatial.distance.cdist",
"skimage.draw.polygon",
"os.makedirs",
"numpy.logical_and",
"skimage.draw.disk",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.sqrt"
] |
[((1304, 1355), 'numpy.full', 'np.full', (['(1200 + 2 * radius, 1200 + 2 * radius)', '(55)'], {}), '((1200 + 2 * radius, 1200 + 2 * radius), 55)\n', (1311, 1355), True, 'import numpy as np\n'), ((1632, 1685), 'numpy.full', 'np.full', (['(1200 + 2 * radius, 1200 + 2 * radius)', '(-999)'], {}), '((1200 + 2 * radius, 1200 + 2 * radius), -999)\n', (1639, 1685), True, 'import numpy as np\n'), ((2093, 2117), 'numpy.zeros_like', 'np.zeros_like', (['ponds_img'], {}), '(ponds_img)\n', (2106, 2117), True, 'import numpy as np\n'), ((2138, 2208), 'shapely.geometry.Polygon', 'shapely_geom.Polygon', (['[(399, 399), (799, 399), (799, 799), (399, 799)]'], {}), '([(399, 399), (799, 399), (799, 799), (399, 799)])\n', (2158, 2208), True, 'import shapely.geometry as shapely_geom\n'), ((2528, 2556), 'numpy.zeros', 'np.zeros', (['(400 * 3, 400 * 3)'], {}), '((400 * 3, 400 * 3))\n', (2536, 2556), True, 'import numpy as np\n'), ((2827, 2860), 'numpy.where', 'np.where', (['(without_boundaries == 1)'], {}), '(without_boundaries == 1)\n', (2835, 2860), True, 'import numpy as np\n'), ((2881, 2903), 'numpy.swapaxes', 'np.swapaxes', (['loc', '(0)', '(1)'], {}), '(loc, 0, 1)\n', (2892, 2903), True, 'import numpy as np\n'), ((2917, 2973), 'scipy.spatial.distance.cdist', 'sci_spatial.distance.cdist', (['center_points', 'center_points'], {}), '(center_points, center_points)\n', (2943, 2973), True, 'import scipy.spatial as sci_spatial\n'), ((2989, 3025), 'numpy.full_like', 'np.full_like', (['without_boundaries', '(55)'], {}), '(without_boundaries, 55)\n', (3001, 3025), True, 'import numpy as np\n'), ((3903, 3929), 'numpy.sum', 'np.sum', (['(corridor_img == 35)'], {}), '(corridor_img == 35)\n', (3909, 3929), True, 'import numpy as np\n'), ((3948, 3969), 'numpy.sum', 'np.sum', (['is_center_img'], {}), '(is_center_img)\n', (3954, 3969), True, 'import numpy as np\n'), ((4000, 4068), 'numpy.sqrt', 'np.sqrt', (['((corridor_area / no_of_ponds + np.pi * radius ** 2) / np.pi)'], {}), '((corridor_area / no_of_ponds + np.pi * radius ** 2) / np.pi)\n', (4007, 4068), True, 'import numpy as np\n'), ((4100, 4128), 'numpy.zeros', 'np.zeros', (['(400 * 3, 400 * 3)'], {}), '((400 * 3, 400 * 3))\n', (4108, 4128), True, 'import numpy as np\n'), ((4419, 4452), 'numpy.where', 'np.where', (['(without_boundaries == 1)'], {}), '(without_boundaries == 1)\n', (4427, 4452), True, 'import numpy as np\n'), ((4467, 4503), 'numpy.full_like', 'np.full_like', (['without_boundaries', '(55)'], {}), '(without_boundaries, 55)\n', (4479, 4503), True, 'import numpy as np\n'), ((5251, 5291), 'os.makedirs', 'os.makedirs', (["('gis_output/' + sys.argv[1])"], {}), "('gis_output/' + sys.argv[1])\n", (5262, 5291), False, 'import os, sys\n'), ((5296, 5333), 'os.chdir', 'os.chdir', (["('gis_output/' + sys.argv[1])"], {}), "('gis_output/' + sys.argv[1])\n", (5304, 5333), False, 'import os, sys\n'), ((279, 304), 'numpy.random.randint', 'np.random.randint', (['(0)', '(400)'], {}), '(0, 400)\n', (296, 304), True, 'import numpy as np\n'), ((305, 330), 'numpy.random.randint', 'np.random.randint', (['(0)', '(400)'], {}), '(0, 400)\n', (322, 330), True, 'import numpy as np\n'), ((1430, 1460), 'skimage.draw.disk', 'ski_draw.disk', (['point_i', 'radius'], {}), '(point_i, radius)\n', (1443, 1460), True, 'import skimage.draw as ski_draw\n'), ((1833, 1863), 'skimage.draw.disk', 'ski_draw.disk', (['point_i', 'radius'], {}), '(point_i, radius)\n', (1846, 1863), True, 'import skimage.draw as ski_draw\n'), ((3090, 3131), 'numpy.logical_and', 'np.logical_and', (['(result != 0)', '(result 
< 170)'], {}), '(result != 0, result < 170)\n', (3104, 3131), True, 'import numpy as np\n'), ((3560, 3618), 'skimage.draw.polygon', 'ski_draw.polygon', (['y_corr', 'x_corr', 'without_boundaries.shape'], {}), '(y_corr, x_corr, without_boundaries.shape)\n', (3576, 3618), True, 'import skimage.draw as ski_draw\n'), ((3732, 3754), 'numpy.where', 'np.where', (['(ponds == 105)'], {}), '(ponds == 105)\n', (3740, 3754), True, 'import numpy as np\n'), ((4598, 4638), 'skimage.draw.disk', 'ski_draw.disk', (['(x_i, y_i)', 'buffer_radius'], {}), '((x_i, y_i), buffer_radius)\n', (4611, 4638), True, 'import skimage.draw as ski_draw\n'), ((4896, 4929), 'skimage.draw.disk', 'ski_draw.disk', (['(x_i, y_i)', 'radius'], {}), '((x_i, y_i), radius)\n', (4909, 4929), True, 'import skimage.draw as ski_draw\n'), ((767, 805), 'shapely.geometry.Point', 'shapely_geom.Point', (['(x + 400, y + 400)'], {}), '((x + 400, y + 400))\n', (785, 805), True, 'import shapely.geometry as shapely_geom\n'), ((2316, 2340), 'numpy.array', 'np.array', (['list_of_points'], {}), '(list_of_points)\n', (2324, 2340), True, 'import numpy as np\n'), ((3194, 3225), 'numpy.shape', 'np.shape', (['points_with_corridors'], {}), '(points_with_corridors)\n', (3202, 3225), True, 'import numpy as np\n'), ((692, 717), 'numpy.random.randint', 'np.random.randint', (['(0)', '(400)'], {}), '(0, 400)\n', (709, 717), True, 'import numpy as np\n'), ((718, 743), 'numpy.random.randint', 'np.random.randint', (['(0)', '(400)'], {}), '(0, 400)\n', (735, 743), True, 'import numpy as np\n'), ((2226, 2253), 'shapely.geometry.Point', 'shapely_geom.Point', (['point_i'], {}), '(point_i)\n', (2244, 2253), True, 'import shapely.geometry as shapely_geom\n'), ((895, 934), 'shapely.geometry.MultiPoint', 'shapely_geom.MultiPoint', (['list_of_points'], {}), '(list_of_points)\n', (918, 934), True, 'import shapely.geometry as shapely_geom\n'), ((3457, 3510), 'shapely.geometry.LineString', 'shapely_geom.LineString', (['[(x[0], y[0]), (x[1], y[1])]'], {}), '([(x[0], y[0]), (x[1], y[1])])\n', (3480, 3510), True, 'import shapely.geometry as shapely_geom\n')]
|
import math
from random import randint
from numpy import sqrt
def GCD(a, b):
if b == 0:
return a
return GCD(b, a % b)
#######################################
def ExtendedEuclid(a, b):
if b == 0:
return (1, 0)
(x, y) = ExtendedEuclid(b, a % b)
k = a // b
return (y, x - k * y)
def InvertModulo(a, n):
(b, x) = ExtendedEuclid(a, n)
if b < 0:
b = (b % n + n) % n # we don’t want −ve integers
return b
##################################
def PowMod(a, n, mod):
if n == 0:
return 1 % mod
elif n == 1:
return a % mod
else:
b = PowMod(a, n // 2, mod)
b = b * b % mod
if n % 2 == 0:
return b
else:
return b * a % mod
def ConvertToInt( message_str):
res = 0
for i in range(len(message_str)):
res = res * 256 + ord(message_str[i])
return res
#####################################
def ConvertToStr(n):
res = ""
while n > 0:
res += chr(n % 256)
n //= 256
return res[::-1]
#question1
def Encrypt(m, n, e):
m=ConvertToInt(m)
c=PowMod(m,e,n)
return c
#############################
def Decrypt(c, p, q, e):
euler=(p-1)*(q-1)
d=InvertModulo(e,euler)
n=p*q
m=PowMod(c,d,n)
m=ConvertToStr(m)
return m
cipher_message=Encrypt("attack", 1000000007*1000000009,23917)
print(Decrypt(cipher_message, 1000000007,1000000009,23917))
#question2
def DecipherSimple(c, n, e, potential_messages):
decipheredtext=''
for i in potential_messages:
if Encrypt(i,n,e)==c:
decipheredtext=i
return decipheredtext
modulo = 101
exponent = 12
ciphertext = Encrypt("attack", modulo, exponent)
print(DecipherSimple(ciphertext, modulo, exponent, ["attack", "don't attack", "wait"]))
# get a missing prime number
def get_prime_number(i,j,n):
for i in range(i,j):
if(n%i==0):
return i
return 0
##question3
def DecipherSmallPrime(c, n, e):
p=get_prime_number(2,1000000,n)
decipheredtext=Decrypt(c,p,n//p,e)
return decipheredtext
modulo = 101 *18298970732541109011012304219376080251334480295537316123696052970419466495220522723330315111017831737980079504337868198011077274303193766040393009648852841770668239779097280026631944319501437547002412556176186750790476901358334138818777298389724049250700606462316428106882097210008142941838672676714188593227684360287806974345181893018133710957167334490627178666071809992955566020058374505477745993383434501768887090900283569055646901291270870833498474402084748161755197005050874785474707550376333429671113753137201128897550014524209754619355308207537703754006699795711188492048286436285518105948050401762394690148387
exponent = 239
ciphertext = Encrypt("attack", modulo, exponent)
print(DecipherSmallPrime(ciphertext, modulo, exponent))
#question4
def DecipherSmallDiff(c, n, e):
p=get_prime_number(int(sqrt(n)-5000),int(sqrt(n)),n)
decipheredtext=Decrypt(c,p,n//p,e)
return decipheredtext
p = 1000000007
q = 1000000009
n = p * q
e = 239
ciphertext = Encrypt("attack", n, e)
message = DecipherSmallDiff(ciphertext, n, e)
print(message)
#question5
def DecipherCommonDivisor(c1, n1, e1, c2, n2, e2):
p=GCD(n1,n2)
first_decipheredtext= Decrypt(c1,p,n1//p,e1)
second_decipheredtext=Decrypt(c2,p,n2//p,e2)
return first_decipheredtext, second_decipheredtext
p = 101
q1 = 18298970732541109011012304219376080251334480295537316123696052970419466495220522723330315111017831737980079504337868198011077274303193766040393009648852841770668239779097280026631944319501437547002412556176186750790476901358334138818777298389724049250700606462316428106882097210008142941838672676714188593227684360287806974345181893018133710957167334490627178666071809992955566020058374505477745993383434501768887090900283569055646901291270870833498474402084748161755197005050874785474707550376333429671113753137201128897550014524209754619355308207537703754006699795711188492048286436285518105948050401762394690148387
q2 = 1000000007
first_modulo = p * q1
second_modulo = p * q2
first_exponent = 239
second_exponent = 17
first_ciphertext = Encrypt("attack", first_modulo, first_exponent)
second_ciphertext = Encrypt("wait", second_modulo, second_exponent)
print(DecipherCommonDivisor(first_ciphertext, first_modulo, first_exponent, second_ciphertext, second_modulo, second_exponent))
#question6
def DecipherHastad(c1, n1, c2, n2, e):
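    # Hastad-style broadcast attack for e = 2 (as used here): combine c1 (mod n1) and
    # c2 (mod n2) with the Chinese Remainder Theorem to recover m**2 mod (n1*n2),
    # then take its square root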
N1=(n1*n2)//n1
N2=(n1*n2)//n2
x1=InvertModulo(N1,n1)
x2=InvertModulo(N2,n2)
c_square=(c1*N1*x1+c2*N2*x2)%(n1*n2)
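    # note: the float sqrt below is only exact while m**2 fits comfortably into a double;
    # math.isqrt(c_square) (Python 3.8+) would be the robust choice for longer messages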
c=int(round(sqrt(float(c_square))))
broadcast_message=ConvertToStr(c)
# m1= int(round(sqrt(float(c1))))
#m2= int(round(sqrt(float(c2))))
#if(m1==m2):
# broadcast_message=ConvertToStr(m1)
return broadcast_message
p1 = 790383132652258876190399065097
q1 = 662503581792812531719955475509
p2 = 656917682542437675078478868539
q2 = 1263581691331332127259083713503
n1 = p1 * q1
n2 = p2 * q2
e = 2
ciphertext1 = Encrypt("attack", n1, e)
ciphertext2 = Encrypt("attack", n2, e)
message = DecipherHastad(ciphertext1, n1, ciphertext2, n2, e)
print(message)
|
[
"numpy.sqrt"
] |
[((2985, 2992), 'numpy.sqrt', 'sqrt', (['n'], {}), '(n)\n', (2989, 2992), False, 'from numpy import sqrt\n'), ((2967, 2974), 'numpy.sqrt', 'sqrt', (['n'], {}), '(n)\n', (2971, 2974), False, 'from numpy import sqrt\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import with_statement
import abc
import json
import logging
import numpy as np
import os
import keras
from keras.optimizers import Adadelta, SGD, RMSprop, Adam
from nlplingo.nn.constants import supported_pytorch_models
from nlplingo.nn.keras_models.common import keras_custom_objects
import time
from datetime import datetime
from shutil import copyfile
import random
import math
from nlplingo.nn.framework.sentence_re import SentenceRETrain
logger = logging.getLogger(__name__)
class ExtractionModel(abc.ABC):
verbosity = 0
def __init__(self, params, extractor_params, event_domain, embeddings, hyper_params, features):
"""
:type event_domain: nlplingo.tasks.event_domain.EventDomain
:type embeddings: dict[str : nlplingo.embeddings.word_embeddings.WordEmbedding]
:type model_name: str
:type features: object containing a 'feature_strings' attribute
"""
self.hyper_params = hyper_params
self.params = params
self.extractor_params = extractor_params
self.event_domain = event_domain
self.num_event_types = len(event_domain.event_types)
self.num_role_types = len(event_domain.event_roles)
self.num_ne_types = len(event_domain.entity_types)
self.num_ne_bio_types = None
self.num_entity_relation_types = len(event_domain.entity_relation_types)
self.num_eer_types = len(event_domain.eer_types)
self.word_vec_length = 1 # because we use word vector index
self.embeddings_vector_size = None
if 'embeddings' in extractor_params:
self.embeddings_vector_size = extractor_params['embeddings']['vector_size']
self.word_embeddings = None
if embeddings is not None and 'word_embeddings' in embeddings:
self.word_embeddings = embeddings['word_embeddings'].word_vec
""":type: numpy.ndarray"""
self.model_type = extractor_params['model_type']
self.optimizer = self._configure_optimizer(extractor_params)
self.model_file = extractor_params['model_file']
self.data_keys = []
self.num_output = None
self.model_dir = None
self.model = None
self.id2label = dict([(v, k) for k, v in self.event_domain.event_roles.items()])
self.trained_model = None
self.features = features
if 'engine' in extractor_params and (extractor_params['engine'] == 'pytorch'):
import torch
import random
torch.manual_seed(extractor_params['seed'])
np.random.seed(extractor_params['seed'])
random.seed(1234)
self.extractor_params['cuda'] = torch.cuda.is_available()
if extractor_params.get('cpu', False):
self.extractor_params['cuda'] = False
elif extractor_params.get('cuda', False):
torch.cuda.manual_seed(extractor_params['seed'])
self.layers = None
def _get_framework_class(self):
if self.model_type in supported_pytorch_models:
return SentenceRETrain
else:
raise Exception('model type ' + self.model_type + ' is not supported')
def fit_txt(self, train_path, dev_path, test_path):
# uses framework (with distinct initialization args)
framework_class = self._get_framework_class()
framework = framework_class(self.model, train_path, dev_path, test_path, self.extractor_params, self.hyper_params, self.features, self.event_domain)
framework.train_model()
def fit_model(self, train_data_list, train_label, test_data_list, test_label):
# uses framework
if self.extractor_params.get('engine') == 'pytorch':
framework_class = self._get_framework_class()
framework = framework_class(self.model, train_data_list, train_label, test_data_list, test_label, self.extractor_params, self.hyper_params, self.features, self.event_domain)
framework.train_model()
elif 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
raise IOError(
"Extractor engine in {'keras', None} but KerasExtractionModel "
"should have implemented its own fit method overriding "
"ExtractionModel.fit_model. This error should no longer exist "
"once KerasExtractionModel is part of framework_class system.")
else:
raise Exception('Only Keras or PyTorch engines are supported.')
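    # _configure_optimizer: for the Keras engine this instantiates the optimizer object
    # (SGD/RMSprop/Adam, defaulting to Adadelta); for the PyTorch engine it only fills in
    # default values in the optimizer params dict and returns it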
def _configure_optimizer(self, params):
optimizer_params = params.get('optimizer', dict())
tunable_params = {}
if 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
if optimizer_params.get('name') == 'SGD':
tunable_params = {
'name': 'SGD',
'lr': optimizer_params.get('lr', 0.01),
'momentum': optimizer_params.get('momentum', 0.0),
'decay': optimizer_params.get('decay', 0.0),
'nesterov': optimizer_params.get('nesterov', False)
}
optimizer = SGD(
lr=tunable_params['lr'],
momentum=tunable_params['momentum'],
decay=tunable_params['decay'],
nesterov=tunable_params['nesterov']
)
elif optimizer_params.get('name') == 'RMSprop':
tunable_params = {
'name': 'RMSprop',
'lr': optimizer_params.get('lr', 0.001),
'rho': optimizer_params.get('rho', 0.9),
'epsilon': optimizer_params.get('epsilon', None),
'decay': optimizer_params.get('decay', 0.0)
}
optimizer = RMSprop(
lr=tunable_params['lr'],
rho=tunable_params['rho'],
epsilon=tunable_params['epsilon'],
decay=tunable_params['decay']
)
elif optimizer_params.get('name') == 'Adam':
tunable_params = {
'name': 'Adam',
'lr': optimizer_params.get('lr', 0.001)
}
optimizer = Adam(
lr=tunable_params['lr']
)
else:
tunable_params = {
'name': 'Adadelta',
'lr': optimizer_params.get('lr', 0.1),
'rho': optimizer_params.get('rho', 0.95),
'epsilon': optimizer_params.get('epsilon', 1e-6),
'decay': optimizer_params.get('decay', 0.0)
}
# Default Adadelta
optimizer = Adadelta(
lr=tunable_params['lr'],
rho=tunable_params['rho'],
epsilon=tunable_params['epsilon']
)
print('=== Optimization parameters ===')
print(json.dumps(tunable_params, sort_keys=True, indent=4))
print('=== Optimization parameters ===')
return optimizer
elif self.extractor_params['engine'] == 'pytorch':
# TODO: make optimizer more configurable
optimizer_params['name'] = optimizer_params.get('name', 'sgd')
optimizer_params['lr'] = optimizer_params.get('lr', 0.3)
optimizer_params['lr_decay'] = optimizer_params.get('lr_decay', 0.9)
optimizer_params['decay_epoch'] = optimizer_params.get('decay_epoch', 5)
return optimizer_params
elif self.extractor_params['engine'] == 'transformers':
pass
else:
raise Exception('Only Keras or PyTorch engines are supported.')
def create_model(self):
pass
def __getstate__(self):
u"""Defines what is to be pickled.
Keras models cannot be pickled. Should call save_keras_model() and load_keras_model() separately.
The sequence is :
obj.save_keras_model('kerasFilename')
pickle.dump(obj, fileHandle)
...
obj = pickle.load(fileHandle)
obj.load_keras_model()"""
# Create state without self.keras_model
state = dict(self.__dict__)
#state.pop(u'keras_model') # probably not needed anymore, now that we've made keras_model global
return state
def __setstate__(self, state):
# Reload state for unpickling
self.__dict__ = state
def load_keras_model(self, filename=None):
self.model = keras.models.load_model(filename, keras_custom_objects)
def save_keras_model(self, filename):
self.model.save(filename)
print(self.model.summary())
def predict(self, test_data_list):
if 'engine' not in self.extractor_params or (('engine' in self.extractor_params) and (self.extractor_params['engine'] == 'keras')):
return self.model.predict(test_data_list)
elif self.extractor_params['engine'] == 'pytorch':
from data.loader import DataLoader as BatchDataLoader
print("Evaluating on test set...")
predictions = []
test_batch = BatchDataLoader(test_data_list, self.features.feature_strings, None, self.hyper_params.dict['batch_size'], self.hyper_params.dict, self.event_domain.event_roles, evaluation=True, test_mode=True)
for i, batch in enumerate(test_batch):
preds, _ = self.trained_model.predict(batch, compute_loss=False, compute_logits=True)
predictions.append(preds)
return np.vstack(predictions)
else:
raise Exception('Only Keras or PyTorch engines are supported.')
|
[
"keras.models.load_model",
"keras.optimizers.Adadelta",
"numpy.random.seed",
"keras.optimizers.SGD",
"torch.manual_seed",
"torch.cuda.manual_seed",
"data.loader.DataLoader",
"json.dumps",
"keras.optimizers.Adam",
"random.seed",
"torch.cuda.is_available",
"numpy.vstack",
"keras.optimizers.RMSprop",
"logging.getLogger"
] |
[((544, 571), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (561, 571), False, 'import logging\n'), ((8820, 8875), 'keras.models.load_model', 'keras.models.load_model', (['filename', 'keras_custom_objects'], {}), '(filename, keras_custom_objects)\n', (8843, 8875), False, 'import keras\n'), ((2619, 2662), 'torch.manual_seed', 'torch.manual_seed', (["extractor_params['seed']"], {}), "(extractor_params['seed'])\n", (2636, 2662), False, 'import torch\n'), ((2675, 2715), 'numpy.random.seed', 'np.random.seed', (["extractor_params['seed']"], {}), "(extractor_params['seed'])\n", (2689, 2715), True, 'import numpy as np\n'), ((2728, 2745), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (2739, 2745), False, 'import random\n'), ((2790, 2815), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2813, 2815), False, 'import torch\n'), ((5388, 5526), 'keras.optimizers.SGD', 'SGD', ([], {'lr': "tunable_params['lr']", 'momentum': "tunable_params['momentum']", 'decay': "tunable_params['decay']", 'nesterov': "tunable_params['nesterov']"}), "(lr=tunable_params['lr'], momentum=tunable_params['momentum'], decay=\n tunable_params['decay'], nesterov=tunable_params['nesterov'])\n", (5391, 5526), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n'), ((7256, 7308), 'json.dumps', 'json.dumps', (['tunable_params'], {'sort_keys': '(True)', 'indent': '(4)'}), '(tunable_params, sort_keys=True, indent=4)\n', (7266, 7308), False, 'import json\n'), ((9449, 9653), 'data.loader.DataLoader', 'BatchDataLoader', (['test_data_list', 'self.features.feature_strings', 'None', "self.hyper_params.dict['batch_size']", 'self.hyper_params.dict', 'self.event_domain.event_roles'], {'evaluation': '(True)', 'test_mode': '(True)'}), "(test_data_list, self.features.feature_strings, None, self.\n hyper_params.dict['batch_size'], self.hyper_params.dict, self.\n event_domain.event_roles, evaluation=True, test_mode=True)\n", (9464, 9653), True, 'from data.loader import DataLoader as BatchDataLoader\n'), ((9858, 9880), 'numpy.vstack', 'np.vstack', (['predictions'], {}), '(predictions)\n', (9867, 9880), True, 'import numpy as np\n'), ((2992, 3040), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["extractor_params['seed']"], {}), "(extractor_params['seed'])\n", (3014, 3040), False, 'import torch\n'), ((6056, 6186), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': "tunable_params['lr']", 'rho': "tunable_params['rho']", 'epsilon': "tunable_params['epsilon']", 'decay': "tunable_params['decay']"}), "(lr=tunable_params['lr'], rho=tunable_params['rho'], epsilon=\n tunable_params['epsilon'], decay=tunable_params['decay'])\n", (6063, 6186), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n'), ((6514, 6543), 'keras.optimizers.Adam', 'Adam', ([], {'lr': "tunable_params['lr']"}), "(lr=tunable_params['lr'])\n", (6518, 6543), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n'), ((7011, 7111), 'keras.optimizers.Adadelta', 'Adadelta', ([], {'lr': "tunable_params['lr']", 'rho': "tunable_params['rho']", 'epsilon': "tunable_params['epsilon']"}), "(lr=tunable_params['lr'], rho=tunable_params['rho'], epsilon=\n tunable_params['epsilon'])\n", (7019, 7111), False, 'from keras.optimizers import Adadelta, SGD, RMSprop, Adam\n')]
|
from pydub import AudioSegment
import parselmouth
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def draw_pitch(pitch):
# Extract selected pitch contour, and
# replace unvoiced samples by NaN to not plot
pitch_values = pitch.selected_array['frequency']
pitch_values[pitch_values == 0] = np.nan
plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
plt.grid(False)
plt.ylim(0, pitch.ceiling)
plt.ylabel("fundamental frequency [Hz]")
def draw_spectrogram(spectrogram, dynamic_range=70):
X, Y = spectrogram.x_grid(), spectrogram.y_grid()
sg_db = 10 * np.log10(spectrogram.values)
plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() -
dynamic_range, cmap='afmhot')
plt.ylim([spectrogram.ymin, spectrogram.ymax])
plt.xlabel("time [s]")
plt.ylabel("frequency [Hz]")
def draw_intensity(intensity):
plt.plot(intensity.xs(), intensity.values.T, linewidth=3, color='w')
plt.plot(intensity.xs(), intensity.values.T, linewidth=1)
plt.grid(False)
plt.ylim(0)
plt.ylabel("intensity [dB]")
if __name__ == '__main__':
sns.set() # Use seaborn's default style to make attractive graphs
# Plot nice figures using Python's "standard" matplotlib library
snd = parselmouth.Sound(
'output.mp3')
# plt.figure()
# plt.plot(snd.xs(), snd.values.T)
# plt.xlim([snd.xmin, snd.xmax])
# plt.xlabel("time [s]")
# plt.ylabel("amplitude")
# # or plt.savefig("sound.png"), or plt.savefig("sound.pdf")
# plt.savefig("sound.png")
pitch = snd.to_pitch()
# If desired, pre-emphasize the sound fragment before calculating the spectrogram
pre_emphasized_snd = snd.copy()
pre_emphasized_snd.pre_emphasize()
spectrogram = pre_emphasized_snd.to_spectrogram(
window_length=0.03, maximum_frequency=8000)
plt.figure()
draw_spectrogram(spectrogram)
plt.twinx()
draw_pitch(pitch)
plt.xlim([snd.xmin, snd.xmax])
plt.savefig("pitch.png")
# sound = AudioSegment.from_mp3(
# '/Users/dimashulhin/Desktop/kyky_original.mp3')
# # get raw audio data as a bytestring
# raw_data = sound.raw_data
# # get the frame rate
# sample_rate = sound.frame_rate
# # get amount of bytes contained in one sample
# sample_size = sound.sample_width
# # get channels
# channels = sound.channels
# beginning = sound[13000:17000]
# print(beginning.raw_data)
|
[
"matplotlib.pyplot.xlim",
"parselmouth.Sound",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.twinx",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"seaborn.set",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((471, 486), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (479, 486), True, 'import matplotlib.pyplot as plt\n'), ((491, 517), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'pitch.ceiling'], {}), '(0, pitch.ceiling)\n', (499, 517), True, 'import matplotlib.pyplot as plt\n'), ((522, 562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fundamental frequency [Hz]"""'], {}), "('fundamental frequency [Hz]')\n", (532, 562), True, 'import matplotlib.pyplot as plt\n'), ((822, 868), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[spectrogram.ymin, spectrogram.ymax]'], {}), '([spectrogram.ymin, spectrogram.ymax])\n', (830, 868), True, 'import matplotlib.pyplot as plt\n'), ((873, 895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (883, 895), True, 'import matplotlib.pyplot as plt\n'), ((900, 928), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency [Hz]"""'], {}), "('frequency [Hz]')\n", (910, 928), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1116), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1109, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1132), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)'], {}), '(0)\n', (1129, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""intensity [dB]"""'], {}), "('intensity [dB]')\n", (1147, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1208), 'seaborn.set', 'sns.set', ([], {}), '()\n', (1206, 1208), True, 'import seaborn as sns\n'), ((1346, 1377), 'parselmouth.Sound', 'parselmouth.Sound', (['"""output.mp3"""'], {}), "('output.mp3')\n", (1363, 1377), False, 'import parselmouth\n'), ((1934, 1946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1944, 1946), True, 'import matplotlib.pyplot as plt\n'), ((1985, 1996), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (1994, 1996), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2053), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[snd.xmin, snd.xmax]'], {}), '([snd.xmin, snd.xmax])\n', (2031, 2053), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2082), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pitch.png"""'], {}), "('pitch.png')\n", (2069, 2082), True, 'import matplotlib.pyplot as plt\n'), ((689, 717), 'numpy.log10', 'np.log10', (['spectrogram.values'], {}), '(spectrogram.values)\n', (697, 717), True, 'import numpy as np\n')]
|
import sys
sys.path.append('/Users/kolbt/Desktop/compiled/hoomd-blue/build')
import hoomd
from hoomd import md
from hoomd import dem
from hoomd import deprecated
import numpy as np
# Simulation box mesh into grid delimit by particle diameter
# list of mesh indices random number generator to select index
# remove index from list once particle is placed
tsteps = 5000000
dump_freq = 10000
part_perc_a = 50
part_frac_a = float(part_perc_a) / float(100)
pe_a = 80
pe_b = 300
phi = 0.6
part_num = 24102
dumps = tsteps/dump_freq
diameter = 1
# find the box parameters
area_part = np.pi * ((float(diameter)/float(2))**2) * part_num
box_area = area_part / phi
side = int(np.sqrt(box_area))
side = 140
#while side % 10 != 0: # this is sub par... fix it
#side += 1 # or just pick part_num so that this is okay
# initialize system randomly
hoomd.context.initialize()
part_num = 13950
part_a = part_num * part_frac_a # get the total number of A particles
part_a = int(part_a)
part_b = part_num - part_a # get the total number of B particles
mid = int(part_a) # starting point for assigning B particles
snap = hoomd.data.make_snapshot(N = part_num,
box = hoomd.data.boxdim(L=side,
dimensions=2),
particle_types = ['A', 'B'])
part = np.zeros(3)
start_y = -69.5 # box is -70:70 for x and y dimensions
sep_row = 0.90 # distance between particles along x axis
sep_col = 0.78 # distance to increment rows (maintains center to center distance)
ith = 0 # particle counter
m = 0 # incrementer for y value
row = 2 # start on an even row (this determines first x placement in row)
# Places particles in lower left quadrant (-70, -70) - (0, 0)
# while loop that increments y value
while 1:
part[0] = start_y + m
n = 0
# while that increments x value (place row at constant height, y value)
while 1:
# ensures rows are offset from one another
if row % 2 == 0:
start_x = -69.50
else:
start_x = -69.05
part[1] = start_x + n
snap.particles.position[ith] = part
snap.particles.typeid[ith] = 0
ith += 1
n += sep_row
# placing into lower left quadrant
if start_x + n > 0:
break
row += 1
m += sep_col
# ensure particles are limited to lower left quadrant
if -69.5 + m > 0:
break
# Places particles in upper right quadrant (0,0) - (70, 70)
m = 0
row = 2
start_y = 0.5
while 1:
part[0] = start_y + m
n = 0
while 1:
if row % 2 == 0:
start_x = 0.5
else:
start_x = 0.95
        part[1] = start_x + n  # use the per-row offset, mirroring the first lattice loop
snap.particles.position[ith] = part
snap.particles.typeid[ith] = 1
ith += 1
n += sep_row
if start_x + n > 70:
break
row += 1
m += sep_col
if start_y + m > 70:
break
print(ith)
print(ith)
# now let's get the quaternion and moment of inertia
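# a rotation about the z axis by angle theta corresponds to the quaternion
# (cos(theta/2), 0, 0, sin(theta/2)), which is what is assembled below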
thetas = np.random.uniform(0, 2*np.pi, (part_num,)) # generate random angles
quats = np.array([np.cos(thetas/2),
np.zeros_like(thetas),
np.zeros_like(thetas),
np.sin(thetas/2)]).T # generate quaternions from the angles
snap.particles.orientation[:] = quats
inertia = float(1)/float(16)
snap.particles.diameter[:] = 1 # set particle diameters
snap.particles.moment_inertia[:] = (inertia, inertia, 0) # set moment of inertia
snap.particles.types = ['A', 'B'] # or 0, 1 in typeid vernacular
####################################
### NOW SET FORCES / INTEGRATORS ###
####################################
# initialize the system
system = hoomd.init.read_snapshot(snap)
all = hoomd.group.all()
gA = hoomd.group.type(type = 'A', update=True)
gB = hoomd.group.type(type = 'B', update=True)
N = len(all)
part_num = N
Na = len(gA)
Nb = len(gB)
print(part_num)
nl = hoomd.md.nlist.cell()
lj = hoomd.md.pair.lj(r_cut=2**(1/6), nlist=nl)
lj.set_params(mode='shift')
lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
lj.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0)
lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0)
angle = np.random.rand(part_num) * 2 * np.pi # random orientation of each particle
if part_perc_a != 0 and part_perc_a != 100:
activity_a = []
for i in range(0,mid):
x = (np.cos(angle[i])) * pe_a
y = (np.sin(angle[i])) * pe_a
z = 0
tuple = (x, y, z)
activity_a.append(tuple)
activity_b = []
for i in range(mid,part_num):
x = (np.cos(angle[i])) * pe_b
y = (np.sin(angle[i])) * pe_b
z = 0
tuple = (x, y, z)
activity_b.append(tuple)
hoomd.md.force.active(group=gA,
seed=123,
f_lst=activity_a,
rotation_diff=3.0,
orientation_link=False)
hoomd.md.force.active(group=gB,
seed=375,
f_lst=activity_b,
rotation_diff=3.0,
orientation_link=False)
else:
if part_perc_a == 0:
activity_b = []
for i in range(0,part_num):
x = (np.cos(angle[i])) * pe_b
y = (np.sin(angle[i])) * pe_b
z = 0
tuple = (x, y, z)
activity_b.append(tuple)
hoomd.md.force.active(group=gB,
seed=375,
f_lst=activity_b,
rotation_diff=3.0,
orientation_link=False)
else:
activity_a = []
for i in range(0,part_num):
x = (np.cos(angle[i])) * pe_a
y = (np.sin(angle[i])) * pe_a
z = 0
tuple = (x, y, z)
activity_a.append(tuple)
hoomd.md.force.active(group=gA,
seed=123,
f_lst=activity_a,
rotation_diff=3.0,
orientation_link=False)
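# each particle is propelled by a constant-magnitude active force (pe_a or pe_b) along its
# random initial orientation; rotation_diff adds rotational diffusion to that direction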
# minimize for no overlaps
fire=hoomd.md.integrate.mode_minimize_fire(group=all,
dt=0.00001,
ftol=1e-2,
Etol=1e-7)
hoomd.run(1000)
# brownian integration
hoomd.md.integrate.mode_standard(dt=0.000002)
bd = hoomd.md.integrate.brownian(group=all, kT=0.5, seed=123)
bd.set_gamma('A', gamma=1.0)
bd.set_gamma_r('A', gamma_r=1.0)
#write dump
hoomd.dump.gsd("hcp_test.gsd", period=1000, group=all, overwrite=True, static=[])
#run
hoomd.run(tsteps)
|
[
"hoomd.md.nlist.cell",
"hoomd.group.type",
"numpy.sin",
"hoomd.dump.gsd",
"sys.path.append",
"numpy.zeros_like",
"hoomd.run",
"hoomd.data.boxdim",
"hoomd.init.read_snapshot",
"hoomd.group.all",
"hoomd.md.pair.lj",
"hoomd.context.initialize",
"hoomd.md.integrate.mode_minimize_fire",
"numpy.cos",
"hoomd.md.integrate.mode_standard",
"numpy.random.uniform",
"hoomd.md.force.active",
"numpy.zeros",
"hoomd.md.integrate.brownian",
"numpy.random.rand",
"numpy.sqrt"
] |
[((11, 76), 'sys.path.append', 'sys.path.append', (['"""/Users/kolbt/Desktop/compiled/hoomd-blue/build"""'], {}), "('/Users/kolbt/Desktop/compiled/hoomd-blue/build')\n", (26, 76), False, 'import sys\n'), ((880, 906), 'hoomd.context.initialize', 'hoomd.context.initialize', ([], {}), '()\n', (904, 906), False, 'import hoomd\n'), ((1434, 1445), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1442, 1445), True, 'import numpy as np\n'), ((3217, 3261), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(part_num,)'], {}), '(0, 2 * np.pi, (part_num,))\n', (3234, 3261), True, 'import numpy as np\n'), ((3984, 4014), 'hoomd.init.read_snapshot', 'hoomd.init.read_snapshot', (['snap'], {}), '(snap)\n', (4008, 4014), False, 'import hoomd\n'), ((4022, 4039), 'hoomd.group.all', 'hoomd.group.all', ([], {}), '()\n', (4037, 4039), False, 'import hoomd\n'), ((4045, 4084), 'hoomd.group.type', 'hoomd.group.type', ([], {'type': '"""A"""', 'update': '(True)'}), "(type='A', update=True)\n", (4061, 4084), False, 'import hoomd\n'), ((4092, 4131), 'hoomd.group.type', 'hoomd.group.type', ([], {'type': '"""B"""', 'update': '(True)'}), "(type='B', update=True)\n", (4108, 4131), False, 'import hoomd\n'), ((4209, 4230), 'hoomd.md.nlist.cell', 'hoomd.md.nlist.cell', ([], {}), '()\n', (4228, 4230), False, 'import hoomd\n'), ((4236, 4282), 'hoomd.md.pair.lj', 'hoomd.md.pair.lj', ([], {'r_cut': '(2 ** (1 / 6))', 'nlist': 'nl'}), '(r_cut=2 ** (1 / 6), nlist=nl)\n', (4252, 4282), False, 'import hoomd\n'), ((6409, 6495), 'hoomd.md.integrate.mode_minimize_fire', 'hoomd.md.integrate.mode_minimize_fire', ([], {'group': 'all', 'dt': '(1e-05)', 'ftol': '(0.01)', 'Etol': '(1e-07)'}), '(group=all, dt=1e-05, ftol=0.01, Etol=\n 1e-07)\n', (6446, 6495), False, 'import hoomd\n'), ((6621, 6636), 'hoomd.run', 'hoomd.run', (['(1000)'], {}), '(1000)\n', (6630, 6636), False, 'import hoomd\n'), ((6661, 6703), 'hoomd.md.integrate.mode_standard', 'hoomd.md.integrate.mode_standard', ([], {'dt': '(2e-06)'}), '(dt=2e-06)\n', (6693, 6703), False, 'import hoomd\n'), ((6712, 6768), 'hoomd.md.integrate.brownian', 'hoomd.md.integrate.brownian', ([], {'group': 'all', 'kT': '(0.5)', 'seed': '(123)'}), '(group=all, kT=0.5, seed=123)\n', (6739, 6768), False, 'import hoomd\n'), ((6844, 6929), 'hoomd.dump.gsd', 'hoomd.dump.gsd', (['"""hcp_test.gsd"""'], {'period': '(1000)', 'group': 'all', 'overwrite': '(True)', 'static': '[]'}), "('hcp_test.gsd', period=1000, group=all, overwrite=True,\n static=[])\n", (6858, 6929), False, 'import hoomd\n'), ((6932, 6949), 'hoomd.run', 'hoomd.run', (['tsteps'], {}), '(tsteps)\n', (6941, 6949), False, 'import hoomd\n'), ((668, 685), 'numpy.sqrt', 'np.sqrt', (['box_area'], {}), '(box_area)\n', (675, 685), True, 'import numpy as np\n'), ((4997, 5104), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gA', 'seed': '(123)', 'f_lst': 'activity_a', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gA, seed=123, f_lst=activity_a, rotation_diff=\n 3.0, orientation_link=False)\n', (5018, 5104), False, 'import hoomd\n'), ((5208, 5315), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gB', 'seed': '(375)', 'f_lst': 'activity_b', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gB, seed=375, f_lst=activity_b, rotation_diff=\n 3.0, orientation_link=False)\n', (5229, 5315), False, 'import hoomd\n'), ((1268, 1307), 'hoomd.data.boxdim', 'hoomd.data.boxdim', ([], {'L': 'side', 'dimensions': '(2)'}), '(L=side, dimensions=2)\n', (1285, 1307), False, 'import 
hoomd\n'), ((4472, 4496), 'numpy.random.rand', 'np.random.rand', (['part_num'], {}), '(part_num)\n', (4486, 4496), True, 'import numpy as np\n'), ((5683, 5790), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gB', 'seed': '(375)', 'f_lst': 'activity_b', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gB, seed=375, f_lst=activity_b, rotation_diff=\n 3.0, orientation_link=False)\n', (5704, 5790), False, 'import hoomd\n'), ((6153, 6260), 'hoomd.md.force.active', 'hoomd.md.force.active', ([], {'group': 'gA', 'seed': '(123)', 'f_lst': 'activity_a', 'rotation_diff': '(3.0)', 'orientation_link': '(False)'}), '(group=gA, seed=123, f_lst=activity_a, rotation_diff=\n 3.0, orientation_link=False)\n', (6174, 6260), False, 'import hoomd\n'), ((3311, 3329), 'numpy.cos', 'np.cos', (['(thetas / 2)'], {}), '(thetas / 2)\n', (3317, 3329), True, 'import numpy as np\n'), ((3347, 3368), 'numpy.zeros_like', 'np.zeros_like', (['thetas'], {}), '(thetas)\n', (3360, 3368), True, 'import numpy as np\n'), ((3388, 3409), 'numpy.zeros_like', 'np.zeros_like', (['thetas'], {}), '(thetas)\n', (3401, 3409), True, 'import numpy as np\n'), ((3429, 3447), 'numpy.sin', 'np.sin', (['(thetas / 2)'], {}), '(thetas / 2)\n', (3435, 3447), True, 'import numpy as np\n'), ((4654, 4670), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (4660, 4670), True, 'import numpy as np\n'), ((4692, 4708), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (4698, 4708), True, 'import numpy as np\n'), ((4857, 4873), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (4863, 4873), True, 'import numpy as np\n'), ((4895, 4911), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (4901, 4911), True, 'import numpy as np\n'), ((5523, 5539), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (5529, 5539), True, 'import numpy as np\n'), ((5565, 5581), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (5571, 5581), True, 'import numpy as np\n'), ((5993, 6009), 'numpy.cos', 'np.cos', (['angle[i]'], {}), '(angle[i])\n', (5999, 6009), True, 'import numpy as np\n'), ((6035, 6051), 'numpy.sin', 'np.sin', (['angle[i]'], {}), '(angle[i])\n', (6041, 6051), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
plt.rcParams['toolbar'] = 'None'
import numpy as np # importando numpy
def genera_montecarlo(N=100000):
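    # estimate pi as 4 * (points falling inside the unit circle) / N, since the circle
    # covers a fraction pi/4 of the square [-1, 1] x [-1, 1]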
plt.figure(figsize=(6,6))
x, y = np.random.uniform(-1, 1, size=(2, N))
interior = (x**2 + y**2) <= 1
pi = interior.sum() * 4 / N
error = abs((pi - np.pi) / pi) * 100
exterior = np.invert(interior)
plt.plot(x[interior], y[interior], 'b.')
plt.plot(x[exterior], y[exterior], 'r.')
    plt.plot(0, 0, label='$\\hat \\pi$ = {:4.4f} \nerror = {:4.4f}%'.format(pi,error), alpha=0, color='g')
plt.axis('square')
plt.legend(frameon=True, framealpha=0.9, fontsize=16)
plt.show()
genera_montecarlo()
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.invert",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
] |
[((143, 169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (153, 169), True, 'import matplotlib.pyplot as plt\n'), ((185, 222), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(2, N)'}), '(-1, 1, size=(2, N))\n', (202, 222), True, 'import numpy as np\n'), ((345, 364), 'numpy.invert', 'np.invert', (['interior'], {}), '(interior)\n', (354, 364), True, 'import numpy as np\n'), ((369, 409), 'matplotlib.pyplot.plot', 'plt.plot', (['x[interior]', 'y[interior]', '"""b."""'], {}), "(x[interior], y[interior], 'b.')\n", (377, 409), True, 'import matplotlib.pyplot as plt\n'), ((414, 454), 'matplotlib.pyplot.plot', 'plt.plot', (['x[exterior]', 'y[exterior]', '"""r."""'], {}), "(x[exterior], y[exterior], 'r.')\n", (422, 454), True, 'import matplotlib.pyplot as plt\n'), ((564, 582), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (572, 582), True, 'import matplotlib.pyplot as plt\n'), ((587, 640), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'framealpha': '(0.9)', 'fontsize': '(16)'}), '(frameon=True, framealpha=0.9, fontsize=16)\n', (597, 640), True, 'import matplotlib.pyplot as plt\n'), ((645, 655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (653, 655), True, 'import matplotlib.pyplot as plt\n')]
|
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.keras import backend
from tensorflow.contrib.keras import layers as keras_layers
class DLGMLayer(keras_layers.Layer):
"""
This layer is inspired by the paper "Stochastic Backpropagation and
Approximate Inference in Deep Generative Models"
incoming (Lasagne Layer): preceding layer in DLGM
num_units (int): number of output units in this layer
srng (theano RandomState): random number generator
rec_nets (dictionary of lasagne NNs): Neural networks that
        parameterize the recognition model
J (theano symbolic matrix): Input to rec model
k (float): regularization term on generative weights
"""
def __init__(self, incoming, num_units, rec_nets, k,
output_layer=False, extra_noise=0.01,
param_init=tf.random_normal_initializer(0, 0.01),
nonlinearity=tf.nn.relu,
**kwargs):
super(DLGMLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.num_units = num_units
self.output_layer = output_layer
self.extra_noise = extra_noise
# Initialize generative/decoding Parameters
self.W = self.add_variable(name='W', shape=(num_inputs, num_units),
initializer=param_init)
self.b = self.add_variable(name='b', shape=(num_units,),
initializer=param_init)
self.unc_G = self.add_variable(name='unc_G',
shape=(num_units, num_units),
initializer=param_init)
self.G = (tf.diag(tf.nn.softplus(tf.diag_part(self.unc_G))) +
self.unc_G - tf.matrix_band_part(self.unc_G, 0, -1))
self.nonlinearity = nonlinearity
# regularization term
self.k = k
# Load recognition/encoding Parameters
self.mu_net = rec_nets['mu_net']
self.u_net = rec_nets['u_net']
self.unc_d_net = rec_nets['unc_d_net']
def build(self, incoming, postJ):
rec_params = (self.mu_net.variables +
self.u_net.variables +
self.unc_d_net.variables)
i = 0
for param in rec_params:
self.add_variable(name="param"+str(i), shape=None,
initializer=param)
i += 1
super(DLGMLayer, self).build(incoming)
def calculate_xi(self, postJ):
"""
Calculate xi based on sampled J from posterior
"""
# get output of rec model
self.batch_mu = self.mu_net(postJ)
self.batch_u = self.u_net(postJ)
self.batch_unc_d = self.unc_d_net(postJ)
# add extra dim to batch_u, so it gets treated as column vectors when
# iterated over
self.batch_u = tf.expand_dims(self.batch_u, -1)
def get_cov(acc, inputs):
# convert output of rec model to rank-1 covariance matrix
# use softplus to get positive constrained d, minimum of -15
# since softplus will turn low numbers into 0, which become NaNs
# when inverted
u, unc_d = inputs
d = tf.nn.softplus(tf.maximum(unc_d, -15.0))
D_inv = tf.diag(1.0 / d)
eta = 1.0 / (tf.matmul(tf.matmul(tf.transpose(u), D_inv), u) + 1.0)
C = D_inv - eta*tf.matmul(tf.matmul(tf.matmul(D_inv, u),
tf.transpose(u)), D_inv)
Tr_C = tf.trace(C)
ld_C = tf.log(eta) - tf.reduce_sum(tf.log(d)) # eq 20 in DLGM
# coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))
# simplified coefficient below is more stable as u -> 0
# original coefficient from paper is above
coeff = eta / (1.0 + tf.sqrt(eta))
R = (tf.sqrt(D_inv) - coeff * tf.matmul
(tf.matmul(tf.matmul(D_inv, u), tf.transpose(u)),
tf.sqrt(D_inv)))
return Tr_C, ld_C, R
(self.batch_Tr_C, self.batch_ld_C, self.batch_R) = tf.scan(
get_cov, [self.batch_u, self.batch_unc_d],
initializer=(0.0, tf.zeros([1, 1]), tf.diag(self.batch_unc_d[0])))
self.batch_xi = (self.batch_mu +
(tf.squeeze(tf.matmul(self.batch_R,
(tf.expand_dims(tf.random_normal(
[tf.shape(self.batch_R)[0],
self.num_units]), -1))))))
def call(self, inputs, add_noise=False, use_rec_model=False):
activation = tf.matmul(self.nonlinearity(inputs), self.W) + self.b
if use_rec_model:
# use sample from rec model
xi = self.batch_xi
if add_noise: # additional noise
xi += (self.extra_noise * tf.random_normal
(tf.shape(self.batch_xi)))
else:
# pure random input
xi = tf.random_normal((tf.shape(inputs)[0], self.num_units))
# we want the mean when training, so don't add noise to
# output of last layer when training.
if not self.output_layer:
activation += tf.matmul(xi, self.G)
elif not add_noise:
activation += tf.matmul(xi, self.G)
return activation
def get_ELBO(self, length):
"""
Get ELBO for this layer
length (theano symbolic int): length of current batch
"""
# KL divergence between posterior and N(0,1) prior
KL_div = (0.5 * (tf.reduce_sum(tf.sqrt(tf.reduce_sum(self.batch_mu**2,
axis=1))) + tf.reduce_sum(self.batch_Tr_C) -
tf.reduce_sum(self.batch_ld_C) - length))
weight_reg = ((0.5 / self.k) *
tf.sqrt(tf.reduce_sum(self.W**2)) *
tf.sqrt(tf.reduce_sum(self.G**2)))
return -(weight_reg + KL_div)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_units)
class PKBiasLayer(keras_layers.Layer):
"""
This layer draws different biases (depending on the mode)
from a normal distribution, then adds them to the input
Default modes are as follows:
0: normal, no biases added
1: saline and DLPFC, bias 0 is added
2: saline and DMPFC, bias 1 is added
3: muscimol and DLPFC, biases 0 and 2 are added
4: muscimol and DMPFC, biases 1 and 3 are added
"""
def __init__(self, incoming, params,
param_init=tf.random_normal_initializer(stddev=0.01),
num_biases=4, **kwargs):
super(PKBiasLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.mode = tf.zeros(num_biases)
self.k = np.cast[backend.floatx()](params['k'])
self.m = self.add_variable(name='m', shape=[num_biases, num_inputs],
initializer=param_init)
self.log_s = self.add_variable(name='log_s',
shape=[num_biases, num_inputs],
initializer=param_init)
# standard deviation will always be positive but optimization over
# log_s can be unconstrained
self.s = tf.exp(self.log_s)
self.draw_biases()
self.draw_on_every_output = True
def build(self, incoming):
if self.draw_on_every_output:
self.draw_biases()
super(PKBiasLayer, self).build(incoming)
def draw_biases(self):
self.biases = self.m + tf.random_normal(shape=self.s.shape,
seed=1234) * self.s
def call(self, inputs):
act_biases = tf.matmul(tf.reshape(tf.cast(
self.mode, backend.floatx()), [1, -1]), self.biases)
return inputs + act_biases
def set_mode(self, mode):
self.mode = mode
def get_ELBO(self, nbatches):
"""
Return the contribution to the ELBO for these biases
Normalized by nbatches (number of batches in dataset)
"""
ELBO = (tf.reduce_sum(-tf.abs(self.biases) / self.k -
tf.log(tf.constant(2.0) * self.k)))
ELBO += tf.reduce_sum(tf.log(self.s))
return ELBO / nbatches
class PKRowBiasLayer(keras_layers.Layer):
"""
This layer draws different biases (depending on the mode)
from a normal distribution, then adds them to the input.
This layer has sparsity at the row level, instead of the individual
sparsity of the PKBiasLayer.
Default modes are as follows:
0: normal, no biases added
1: saline and DLPFC, bias 0 is added
2: saline and DMPFC, bias 1 is added
3: muscimol and DLPFC, biases 0 and 2 are added
4: muscimol and DMPFC, biases 1 and 3 are added
"""
def __init__(self, incoming, params,
param_init=tf.random_normal_initializer(stddev=0.01),
num_biases=4, **kwargs):
super(PKRowBiasLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.mode = tf.zeros(num_biases)
# parameters on prior
self.a = np.cast[backend.floatx()](params['a']) # shape
self.b = np.cast[backend.floatx()](params['b']) # rate
# learnable posterior parameters
# normal dist over biases
self.mu = self.add_variable(name='mu', shape=[num_biases, num_inputs],
initializer=param_init)
self.unc_sig = self.add_variable(name='unc_sig',
shape=[num_biases, num_inputs],
initializer=param_init)
# gamma over rows
self.alpha = tf.Variable(initial_value=self.a * np.ones(
(num_biases, 1)), name='alpha', dtype=tf.float32)
self.beta = tf.Variable(initial_value=self.b * np.ones(
(num_biases, 1)), name='beta', dtype=tf.float32)
# update for alpha
self.alpha += (num_inputs / 2.0)
# standard deviation will always be positive but optimization over
# unc_sig can be unconstrained
self.sigma = tf.nn.softplus(self.unc_sig)
self.draw_biases()
self.draw_on_every_output = True
def build(self, incoming):
if self.draw_on_every_output:
self.draw_biases()
super(PKRowBiasLayer, self).build(incoming)
def draw_biases(self):
self.gamma = self.mu + tf.random_normal(
shape=self.sigma.shape, seed=1234) * self.sigma
def call(self, input):
act_biases = tf.matmul(tf.reshape(tf.cast(
self.mode, backend.floatx()), [1, -1]), self.gamma)
return input + act_biases
def set_mode(self, mode):
self.mode = mode
def coord_update(self):
self.beta = self.b + 0.5 * tf.reduce_sum(self.mu**2 + self.sigma**2,
axis=1,
keep_dims=True)
def get_ELBO(self, nbatches):
"""
Return the contribution to the ELBO for these biases
Normalized by nbatches (number of batches in dataset)
"""
self.coord_update()
# Log Density
ELBO = (tf.reduce_sum(-0.5 * (self.mu**2 + self.sigma**2) *
(self.alpha / self.beta) + 0.5 * (tf.digamma(self.alpha) -
tf.log(self.beta)) - 0.5 * tf.log(2 * np.pi)))
ELBO += (tf.reduce_sum((self.a - 1) * (tf.digamma(self.alpha) -
tf.log(self.beta)) - self.b * (self.alpha / self.beta) +
self.a * tf.log(self.b) - tf.lgamma(self.a)))
# entropy
ELBO += (tf.reduce_sum(0.5 * tf.log(2 * np.pi) + 0.5 +
tf.log(self.sigma)))
ELBO += (tf.reduce_sum(self.alpha - tf.log(self.beta) +
tf.lgamma(self.alpha) + (1 - self.alpha) *
tf.digamma(self.alpha)))
return ELBO / nbatches
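# Hedged sanity sketch (editor's addition, not part of the original module):
# DLGMLayer.calculate_xi builds the posterior covariance via the
# Sherman-Morrison identity
#     C = (D + u u^T)^{-1} = D^{-1} - eta * D^{-1} u u^T D^{-1},
#     eta = 1 / (1 + u^T D^{-1} u),
# which is what get_cov computes.  The NumPy check below verifies that identity
# on random data without touching the TensorFlow graph.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n = 4
    d = np.log1p(np.exp(rng.randn(n)))  # softplus-constrained diagonal, as in get_cov
    u = rng.randn(n, 1)
    D_inv = np.diag(1.0 / d)
    eta = 1.0 / (1.0 + u.T.dot(D_inv).dot(u)[0, 0])
    C = D_inv - eta * D_inv.dot(u).dot(u.T).dot(D_inv)
    assert np.allclose(C, np.linalg.inv(np.diag(d) + u.dot(u.T)))
    print("rank-1 covariance identity verified")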
|
[
"tensorflow.trace",
"tensorflow.matrix_band_part",
"tensorflow.reduce_sum",
"tensorflow.maximum",
"numpy.ones",
"tensorflow.diag_part",
"tensorflow.matmul",
"tensorflow.sqrt",
"tensorflow.contrib.keras.backend.floatx",
"tensorflow.abs",
"tensorflow.diag",
"tensorflow.exp",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.random_normal_initializer",
"tensorflow.random_normal",
"tensorflow.log",
"tensorflow.digamma",
"tensorflow.expand_dims",
"tensorflow.lgamma",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.nn.softplus"
] |
[((1930, 1967), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (1958, 1967), True, 'import tensorflow as tf\n'), ((3970, 4002), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.batch_u', '(-1)'], {}), '(self.batch_u, -1)\n', (3984, 4002), True, 'import tensorflow as tf\n'), ((7644, 7685), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (7672, 7685), True, 'import tensorflow as tf\n'), ((7848, 7868), 'tensorflow.zeros', 'tf.zeros', (['num_biases'], {}), '(num_biases)\n', (7856, 7868), True, 'import tensorflow as tf\n'), ((8378, 8396), 'tensorflow.exp', 'tf.exp', (['self.log_s'], {}), '(self.log_s)\n', (8384, 8396), True, 'import tensorflow as tf\n'), ((10013, 10054), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (10041, 10054), True, 'import tensorflow as tf\n'), ((10220, 10240), 'tensorflow.zeros', 'tf.zeros', (['num_biases'], {}), '(num_biases)\n', (10228, 10240), True, 'import tensorflow as tf\n'), ((11295, 11323), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['self.unc_sig'], {}), '(self.unc_sig)\n', (11309, 11323), True, 'import tensorflow as tf\n'), ((2849, 2887), 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['self.unc_G', '(0)', '(-1)'], {}), '(self.unc_G, 0, -1)\n', (2868, 2887), True, 'import tensorflow as tf\n'), ((4394, 4410), 'tensorflow.diag', 'tf.diag', (['(1.0 / d)'], {}), '(1.0 / d)\n', (4401, 4410), True, 'import tensorflow as tf\n'), ((4642, 4653), 'tensorflow.trace', 'tf.trace', (['C'], {}), '(C)\n', (4650, 4653), True, 'import tensorflow as tf\n'), ((6314, 6335), 'tensorflow.matmul', 'tf.matmul', (['xi', 'self.G'], {}), '(xi, self.G)\n', (6323, 6335), True, 'import tensorflow as tf\n'), ((9357, 9371), 'tensorflow.log', 'tf.log', (['self.s'], {}), '(self.s)\n', (9363, 9371), True, 'import tensorflow as tf\n'), ((4348, 4372), 'tensorflow.maximum', 'tf.maximum', (['unc_d', '(-15.0)'], {}), '(unc_d, -15.0)\n', (4358, 4372), True, 'import tensorflow as tf\n'), ((4673, 4684), 'tensorflow.log', 'tf.log', (['eta'], {}), '(eta)\n', (4679, 4684), True, 'import tensorflow as tf\n'), ((4984, 4998), 'tensorflow.sqrt', 'tf.sqrt', (['D_inv'], {}), '(D_inv)\n', (4991, 4998), True, 'import tensorflow as tf\n'), ((6390, 6411), 'tensorflow.matmul', 'tf.matmul', (['xi', 'self.G'], {}), '(xi, self.G)\n', (6399, 6411), True, 'import tensorflow as tf\n'), ((6981, 7007), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.G ** 2)'], {}), '(self.G ** 2)\n', (6994, 7007), True, 'import tensorflow as tf\n'), ((7894, 7910), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (7908, 7910), False, 'from tensorflow.contrib.keras import backend\n'), ((8675, 8722), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'self.s.shape', 'seed': '(1234)'}), '(shape=self.s.shape, seed=1234)\n', (8691, 8722), True, 'import tensorflow as tf\n'), ((10296, 10312), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (10310, 10312), False, 'from tensorflow.contrib.keras import backend\n'), ((10361, 10377), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (10375, 10377), False, 'from tensorflow.contrib.keras import backend\n'), ((11605, 11656), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'self.sigma.shape', 'seed': '(1234)'}), '(shape=self.sigma.shape, seed=1234)\n', (11621, 11656), True, 
'import tensorflow as tf\n'), ((11980, 12049), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.mu ** 2 + self.sigma ** 2)'], {'axis': '(1)', 'keep_dims': '(True)'}), '(self.mu ** 2 + self.sigma ** 2, axis=1, keep_dims=True)\n', (11993, 12049), True, 'import tensorflow as tf\n'), ((12772, 12789), 'tensorflow.lgamma', 'tf.lgamma', (['self.a'], {}), '(self.a)\n', (12781, 12789), True, 'import tensorflow as tf\n'), ((12890, 12908), 'tensorflow.log', 'tf.log', (['self.sigma'], {}), '(self.sigma)\n', (12896, 12908), True, 'import tensorflow as tf\n'), ((4701, 4710), 'tensorflow.log', 'tf.log', (['d'], {}), '(d)\n', (4707, 4710), True, 'import tensorflow as tf\n'), ((4953, 4965), 'tensorflow.sqrt', 'tf.sqrt', (['eta'], {}), '(eta)\n', (4960, 4965), True, 'import tensorflow as tf\n'), ((5308, 5324), 'tensorflow.zeros', 'tf.zeros', (['[1, 1]'], {}), '([1, 1])\n', (5316, 5324), True, 'import tensorflow as tf\n'), ((5326, 5354), 'tensorflow.diag', 'tf.diag', (['self.batch_unc_d[0]'], {}), '(self.batch_unc_d[0])\n', (5333, 5354), True, 'import tensorflow as tf\n'), ((6812, 6842), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.batch_ld_C'], {}), '(self.batch_ld_C)\n', (6825, 6842), True, 'import tensorflow as tf\n'), ((6923, 6949), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.W ** 2)'], {}), '(self.W ** 2)\n', (6936, 6949), True, 'import tensorflow as tf\n'), ((8883, 8899), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (8897, 8899), False, 'from tensorflow.contrib.keras import backend\n'), ((10894, 10918), 'numpy.ones', 'np.ones', (['(num_biases, 1)'], {}), '((num_biases, 1))\n', (10901, 10918), True, 'import numpy as np\n'), ((11020, 11044), 'numpy.ones', 'np.ones', (['(num_biases, 1)'], {}), '((num_biases, 1))\n', (11027, 11044), True, 'import numpy as np\n'), ((11785, 11801), 'tensorflow.contrib.keras.backend.floatx', 'backend.floatx', ([], {}), '()\n', (11799, 11801), False, 'from tensorflow.contrib.keras import backend\n'), ((12563, 12580), 'tensorflow.log', 'tf.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (12569, 12580), True, 'import tensorflow as tf\n'), ((12992, 13013), 'tensorflow.lgamma', 'tf.lgamma', (['self.alpha'], {}), '(self.alpha)\n', (13001, 13013), True, 'import tensorflow as tf\n'), ((13052, 13074), 'tensorflow.digamma', 'tf.digamma', (['self.alpha'], {}), '(self.alpha)\n', (13062, 13074), True, 'import tensorflow as tf\n'), ((2789, 2813), 'tensorflow.diag_part', 'tf.diag_part', (['self.unc_G'], {}), '(self.unc_G)\n', (2801, 2813), True, 'import tensorflow as tf\n'), ((5104, 5118), 'tensorflow.sqrt', 'tf.sqrt', (['D_inv'], {}), '(D_inv)\n', (5111, 5118), True, 'import tensorflow as tf\n'), ((5999, 6022), 'tensorflow.shape', 'tf.shape', (['self.batch_xi'], {}), '(self.batch_xi)\n', (6007, 6022), True, 'import tensorflow as tf\n'), ((6106, 6122), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (6114, 6122), True, 'import tensorflow as tf\n'), ((6761, 6791), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.batch_Tr_C'], {}), '(self.batch_Tr_C)\n', (6774, 6791), True, 'import tensorflow as tf\n'), ((9230, 9249), 'tensorflow.abs', 'tf.abs', (['self.biases'], {}), '(self.biases)\n', (9236, 9249), True, 'import tensorflow as tf\n'), ((9298, 9314), 'tensorflow.constant', 'tf.constant', (['(2.0)'], {}), '(2.0)\n', (9309, 9314), True, 'import tensorflow as tf\n'), ((12755, 12769), 'tensorflow.log', 'tf.log', (['self.b'], {}), '(self.b)\n', (12761, 12769), True, 'import tensorflow as tf\n'), ((12847, 12864), 'tensorflow.log', 
'tf.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (12853, 12864), True, 'import tensorflow as tf\n'), ((12955, 12972), 'tensorflow.log', 'tf.log', (['self.beta'], {}), '(self.beta)\n', (12961, 12972), True, 'import tensorflow as tf\n'), ((4456, 4471), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (4468, 4471), True, 'import tensorflow as tf\n'), ((4539, 4558), 'tensorflow.matmul', 'tf.matmul', (['D_inv', 'u'], {}), '(D_inv, u)\n', (4548, 4558), True, 'import tensorflow as tf\n'), ((4598, 4613), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (4610, 4613), True, 'import tensorflow as tf\n'), ((5047, 5066), 'tensorflow.matmul', 'tf.matmul', (['D_inv', 'u'], {}), '(D_inv, u)\n', (5056, 5066), True, 'import tensorflow as tf\n'), ((5068, 5083), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (5080, 5083), True, 'import tensorflow as tf\n'), ((12495, 12517), 'tensorflow.digamma', 'tf.digamma', (['self.alpha'], {}), '(self.alpha)\n', (12505, 12517), True, 'import tensorflow as tf\n'), ((12536, 12553), 'tensorflow.log', 'tf.log', (['self.beta'], {}), '(self.beta)\n', (12542, 12553), True, 'import tensorflow as tf\n'), ((6699, 6740), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.batch_mu ** 2)'], {'axis': '(1)'}), '(self.batch_mu ** 2, axis=1)\n', (6712, 6740), True, 'import tensorflow as tf\n'), ((12630, 12652), 'tensorflow.digamma', 'tf.digamma', (['self.alpha'], {}), '(self.alpha)\n', (12640, 12652), True, 'import tensorflow as tf\n'), ((12672, 12689), 'tensorflow.log', 'tf.log', (['self.beta'], {}), '(self.beta)\n', (12678, 12689), True, 'import tensorflow as tf\n'), ((5548, 5570), 'tensorflow.shape', 'tf.shape', (['self.batch_R'], {}), '(self.batch_R)\n', (5556, 5570), True, 'import tensorflow as tf\n')]
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: <NAME> (<EMAIL>)
#
from __future__ import absolute_import, division, unicode_literals
import math
import sys
from math import sqrt
from mo_dots import Data, Null, coalesce
from mo_future import text
from mo_logs import Log
from mo_math import OR, almost_equal
from mo_math.vendor import strangman
DEBUG = True
DEBUG_STRANGMAN = False
EPSILON = 0.000000001
ABS_EPSILON = sys.float_info.min * 2 # *2 FOR SAFETY
if DEBUG_STRANGMAN:
try:
import numpy as np
from scipy import stats
import scipy
except Exception as e:
DEBUG_STRANGMAN = False
def chisquare(f_obs, f_exp):
try:
py_result = strangman.stats.chisquare(f_obs, f_exp)
except Exception as e:
Log.error("problem with call", e)
if DEBUG_STRANGMAN:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
sp_result = scipy.stats.chisquare(np.array(f_obs), f_exp=np.array(f_exp))
if not assertAlmostEqualValue(
sp_result[0], py_result[0], digits=9
) and assertAlmostEqualValue(sp_result[1], py_result[1], delta=1e-8):
Log.error("problem with stats lib")
return py_result
def Stats2ZeroMoment(stats):
# MODIFIED FROM http://statsmodels.sourceforge.net/devel/_modules/statsmodels/stats/moment_helpers.html
# ADDED count
mc0, mc1, mc2, skew, kurt = (
stats.count,
coalesce(stats.mean, 0),
coalesce(stats.variance, 0),
coalesce(stats.skew, 0),
coalesce(stats.kurtosis, 0),
)
mz0 = mc0
mz1 = mc1 * mc0
mz2 = (mc2 + mc1 * mc1) * mc0
mc3 = coalesce(skew, 0) * (mc2 ** 1.5) # 3rd central moment
mz3 = (mc3 + 3 * mc1 * mc2 + mc1 ** 3) * mc0 # 3rd non-central moment
mc4 = (coalesce(kurt, 0) + 3.0) * (mc2 ** 2.0) # 4th central moment
mz4 = (mc4 + 4 * mc1 * mc3 + 6 * mc1 * mc1 * mc2 + mc1 ** 4) * mc0
m = ZeroMoment(mz0, mz1, mz2, mz3, mz4)
if DEBUG:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
globals()["DEBUG"] = False
try:
v = ZeroMoment2Stats(m)
assertAlmostEqualValue(v.count, stats.count, places=10)
assertAlmostEqualValue(v.mean, stats.mean, places=10)
assertAlmostEqualValue(v.variance, stats.variance, places=10)
assertAlmostEqualValue(v.skew, stats.skew, places=10)
assertAlmostEqualValue(v.kurtosis, stats.kurtosis, places=10)
except Exception as e:
v = ZeroMoment2Stats(m)
Log.error("programmer error")
globals()["DEBUG"] = True
return m
def ZeroMoment2Stats(z_moment):
Z = z_moment.S
N = Z[0]
if N == 0:
return Stats()
mean = Z[1] / N
Z2 = Z[2] / N
Z3 = Z[3] / N
Z4 = Z[4] / N
if N == 1:
variance = None
skew = None
kurtosis = None
else:
if almost_equal(Z2, mean * mean, digits=9):
variance = 0
skew = None
kurtosis = None
else:
variance = Z2 - mean * mean
mc3 = Z3 - (3 * mean * variance + mean ** 3) # 3rd central moment
mc4 = Z4 - (4 * mean * mc3 + 6 * mean * mean * variance + mean ** 4)
skew = mc3 / (variance ** 1.5)
kurtosis = (mc4 / (variance ** 2.0)) - 3.0
stats = Stats(count=N, mean=mean, variance=variance, skew=skew, kurtosis=kurtosis)
if DEBUG:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
globals()["DEBUG"] = False
v = Null
try:
v = Stats2ZeroMoment(stats)
for i in range(5):
assertAlmostEqualValue(v.S[i], Z[i], places=7)
except Exception as e:
Log.error(
"Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}",
{"from": Z},
stats=stats,
expected=v.S,
cause=e,
)
globals()["DEBUG"] = True
return stats
class Stats(Data):
def __init__(self, **kwargs):
Data.__init__(self)
self.count = 0
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
if "samples" in kwargs:
s = ZeroMoment2Stats(ZeroMoment.new_instance(kwargs["samples"]))
self.count = s.count
self.mean = s.mean
self.variance = s.variance
self.skew = s.skew
self.kurtosis = s.kurtosis
return
if "count" not in kwargs:
self.count = 0
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
elif "mean" not in kwargs:
self.count = kwargs["count"]
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
elif "variance" not in kwargs and "std" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = 0
self.skew = None
self.kurtosis = None
elif "skew" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = None
self.kurtosis = None
elif "kurtosis" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = kwargs["skew"]
self.kurtosis = None
else:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = kwargs["skew"]
self.kurtosis = kwargs["kurtosis"]
@property
def std(self):
return sqrt(self.variance)
class ZeroMoment(object):
"""
ZERO-CENTERED MOMENTS
"""
def __init__(self, *args):
self.S = tuple(args)
def __add__(self, other):
if isinstance(other, ZeroMoment):
return ZeroMoment(*map(add, self.S, other.S))
elif hasattr(other, "__iter__"):
return ZeroMoment(*map(add, self.S, ZeroMoment.new_instance(other)))
elif other == None:
return self
else:
return ZeroMoment(
*map(
add,
self.S,
(
1,
other,
pow(other, 2),
pow(other, 3),
pow(other, 4),
pow(other, 2),
),
)
)
def __sub__(self, other):
if isinstance(other, ZeroMoment):
return ZeroMoment(*map(sub, self.S, other.S))
elif hasattr(other, "__iter__"):
return ZeroMoment(*map(sub, self.S, ZeroMoment.new_instance(other)))
elif other == None:
return self
else:
return ZeroMoment(
*map(
sub, self.S, (1, other, pow(other, 2), pow(other, 3), pow(other, 4))
)
)
@property
def tuple(self):
# RETURN AS ORDERED TUPLE
return self.S
@property
def dict(self):
# RETURN HASH OF SUMS
return {"s" + text(i): m for i, m in enumerate(self.S)}
@staticmethod
def new_instance(values=None):
if values == None:
return ZeroMoment()
vals = [v for v in values if v != None]
return ZeroMoment(
len(vals),
sum(vals),
sum([pow(n, 2) for n in vals]),
sum([pow(n, 3) for n in vals]),
sum([pow(n, 4) for n in vals]),
)
@property
def stats(self, *args, **kwargs):
return ZeroMoment2Stats(self, *args, **kwargs)
def add(a, b):
return coalesce(a, 0) + coalesce(b, 0)
def sub(a, b):
return coalesce(a, 0) - coalesce(b, 0)
def ZeroMoment2dict(z):
# RETURN HASH OF SUMS
return {"s" + text(i): m for i, m in enumerate(z.S)}
def median(values, simple=True, mean_weight=0.0):
"""
RETURN MEDIAN VALUE
IF simple=False THEN IN THE EVENT MULTIPLE INSTANCES OF THE
MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION
IN THE MEDIAN RANGE
mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS
CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
"""
if OR(v == None for v in values):
Log.error("median is not ready to handle None")
try:
if not values:
return Null
l = len(values)
_sorted = sorted(values)
middle = int(l / 2)
_median = float(_sorted[middle])
if len(_sorted) == 1:
return _median
if simple:
if l % 2 == 0:
return (_sorted[middle - 1] + _median) / 2
return _median
# FIND RANGE OF THE median
start_index = middle - 1
while start_index > 0 and _sorted[start_index] == _median:
start_index -= 1
start_index += 1
stop_index = middle + 1
while stop_index < l and _sorted[stop_index] == _median:
stop_index += 1
num_middle = stop_index - start_index
if l % 2 == 0:
if num_middle == 1:
return (_sorted[middle - 1] + _median) / 2
else:
return (_median - 0.5) + (middle - start_index) / num_middle
else:
if num_middle == 1:
return (1 - mean_weight) * _median + mean_weight * (
_sorted[middle - 1] + _sorted[middle + 1]
) / 2
else:
return (_median - 0.5) + (middle + 0.5 - start_index) / num_middle
except Exception as e:
Log.error("problem with median of {{values}}", values=values, cause=e)
def percentile(values, percent):
"""
PERCENTILE WITH INTERPOLATION
RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES
snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
"""
N = sorted(values)
if not N:
return None
k = (len(N) - 1) * percent
f = int(math.floor(k))
c = int(math.ceil(k))
if f == c:
return N[int(k)]
d0 = N[f] * (c - k)
d1 = N[c] * (k - f)
return d0 + d1
zero = Stats()
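# Hedged usage sketch (editor's addition): exercise the moment helpers above on
# a tiny sample; the numbers are illustrative only.
if __name__ == "__main__":
    samples = [1.0, 2.0, 2.0, 3.0, 4.0, 7.0, 9.0]
    s = Stats(samples=samples)
    print("count=%s mean=%s variance=%s" % (s.count, s.mean, s.variance))
    print("median=%s p90=%s" % (median(samples), percentile(samples, 0.9)))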
|
[
"mo_dots.Data.__init__",
"math.sqrt",
"math.ceil",
"mo_math.vendor.strangman.stats.chisquare",
"math.floor",
"mo_testing.fuzzytestcase.assertAlmostEqualValue",
"mo_math.almost_equal",
"mo_dots.coalesce",
"numpy.array",
"mo_future.text",
"mo_logs.Log.error",
"mo_math.OR"
] |
[((9098, 9127), 'mo_math.OR', 'OR', (['(v == None for v in values)'], {}), '(v == None for v in values)\n', (9100, 9127), False, 'from mo_math import OR, almost_equal\n'), ((877, 916), 'mo_math.vendor.strangman.stats.chisquare', 'strangman.stats.chisquare', (['f_obs', 'f_exp'], {}), '(f_obs, f_exp)\n', (902, 916), False, 'from mo_math.vendor import strangman\n'), ((1618, 1641), 'mo_dots.coalesce', 'coalesce', (['stats.mean', '(0)'], {}), '(stats.mean, 0)\n', (1626, 1641), False, 'from mo_dots import Data, Null, coalesce\n'), ((1651, 1678), 'mo_dots.coalesce', 'coalesce', (['stats.variance', '(0)'], {}), '(stats.variance, 0)\n', (1659, 1678), False, 'from mo_dots import Data, Null, coalesce\n'), ((1688, 1711), 'mo_dots.coalesce', 'coalesce', (['stats.skew', '(0)'], {}), '(stats.skew, 0)\n', (1696, 1711), False, 'from mo_dots import Data, Null, coalesce\n'), ((1721, 1748), 'mo_dots.coalesce', 'coalesce', (['stats.kurtosis', '(0)'], {}), '(stats.kurtosis, 0)\n', (1729, 1748), False, 'from mo_dots import Data, Null, coalesce\n'), ((1835, 1852), 'mo_dots.coalesce', 'coalesce', (['skew', '(0)'], {}), '(skew, 0)\n', (1843, 1852), False, 'from mo_dots import Data, Null, coalesce\n'), ((3109, 3148), 'mo_math.almost_equal', 'almost_equal', (['Z2', '(mean * mean)'], {'digits': '(9)'}), '(Z2, mean * mean, digits=9)\n', (3121, 3148), False, 'from mo_math import OR, almost_equal\n'), ((4354, 4373), 'mo_dots.Data.__init__', 'Data.__init__', (['self'], {}), '(self)\n', (4367, 4373), False, 'from mo_dots import Data, Null, coalesce\n'), ((6407, 6426), 'math.sqrt', 'sqrt', (['self.variance'], {}), '(self.variance)\n', (6411, 6426), False, 'from math import sqrt\n'), ((8501, 8515), 'mo_dots.coalesce', 'coalesce', (['a', '(0)'], {}), '(a, 0)\n', (8509, 8515), False, 'from mo_dots import Data, Null, coalesce\n'), ((8518, 8532), 'mo_dots.coalesce', 'coalesce', (['b', '(0)'], {}), '(b, 0)\n', (8526, 8532), False, 'from mo_dots import Data, Null, coalesce\n'), ((8561, 8575), 'mo_dots.coalesce', 'coalesce', (['a', '(0)'], {}), '(a, 0)\n', (8569, 8575), False, 'from mo_dots import Data, Null, coalesce\n'), ((8578, 8592), 'mo_dots.coalesce', 'coalesce', (['b', '(0)'], {}), '(b, 0)\n', (8586, 8592), False, 'from mo_dots import Data, Null, coalesce\n'), ((9137, 9184), 'mo_logs.Log.error', 'Log.error', (['"""median is not ready to handle None"""'], {}), "('median is not ready to handle None')\n", (9146, 9184), False, 'from mo_logs import Log\n'), ((10879, 10892), 'math.floor', 'math.floor', (['k'], {}), '(k)\n', (10889, 10892), False, 'import math\n'), ((10906, 10918), 'math.ceil', 'math.ceil', (['k'], {}), '(k)\n', (10915, 10918), False, 'import math\n'), ((952, 985), 'mo_logs.Log.error', 'Log.error', (['"""problem with call"""', 'e'], {}), "('problem with call', e)\n", (961, 985), False, 'from mo_logs import Log\n'), ((1122, 1137), 'numpy.array', 'np.array', (['f_obs'], {}), '(f_obs)\n', (1130, 1137), True, 'import numpy as np\n'), ((1264, 1327), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['sp_result[1]', 'py_result[1]'], {'delta': '(1e-08)'}), '(sp_result[1], py_result[1], delta=1e-08)\n', (1286, 1327), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((1340, 1375), 'mo_logs.Log.error', 'Log.error', (['"""problem with stats lib"""'], {}), "('problem with stats lib')\n", (1349, 1375), False, 'from mo_logs import Log\n'), ((1976, 1993), 'mo_dots.coalesce', 'coalesce', (['kurt', '(0)'], {}), '(kurt, 0)\n', (1984, 1993), False, 'from mo_dots import Data, Null, 
coalesce\n'), ((2333, 2388), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.count', 'stats.count'], {'places': '(10)'}), '(v.count, stats.count, places=10)\n', (2355, 2388), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2401, 2454), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.mean', 'stats.mean'], {'places': '(10)'}), '(v.mean, stats.mean, places=10)\n', (2423, 2454), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2467, 2528), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.variance', 'stats.variance'], {'places': '(10)'}), '(v.variance, stats.variance, places=10)\n', (2489, 2528), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2541, 2594), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.skew', 'stats.skew'], {'places': '(10)'}), '(v.skew, stats.skew, places=10)\n', (2563, 2594), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2607, 2668), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.kurtosis', 'stats.kurtosis'], {'places': '(10)'}), '(v.kurtosis, stats.kurtosis, places=10)\n', (2629, 2668), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((8663, 8670), 'mo_future.text', 'text', (['i'], {}), '(i)\n', (8667, 8670), False, 'from mo_future import text\n'), ((10468, 10538), 'mo_logs.Log.error', 'Log.error', (['"""problem with median of {{values}}"""'], {'values': 'values', 'cause': 'e'}), "('problem with median of {{values}}', values=values, cause=e)\n", (10477, 10538), False, 'from mo_logs import Log\n'), ((1145, 1160), 'numpy.array', 'np.array', (['f_exp'], {}), '(f_exp)\n', (1153, 1160), True, 'import numpy as np\n'), ((1177, 1237), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['sp_result[0]', 'py_result[0]'], {'digits': '(9)'}), '(sp_result[0], py_result[0], digits=9)\n', (1199, 1237), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((2748, 2777), 'mo_logs.Log.error', 'Log.error', (['"""programmer error"""'], {}), "('programmer error')\n", (2757, 2777), False, 'from mo_logs import Log\n'), ((3863, 3909), 'mo_testing.fuzzytestcase.assertAlmostEqualValue', 'assertAlmostEqualValue', (['v.S[i]', 'Z[i]'], {'places': '(7)'}), '(v.S[i], Z[i], places=7)\n', (3885, 3909), False, 'from mo_testing.fuzzytestcase import assertAlmostEqualValue\n'), ((3953, 4154), 'mo_logs.Log.error', 'Log.error', (['"""Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}"""', "{'from': Z}"], {'stats': 'stats', 'expected': 'v.S', 'cause': 'e'}), '(\n """Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}"""\n , {\'from\': Z}, stats=stats, expected=v.S, cause=e)\n', (3962, 4154), False, 'from mo_logs import Log\n'), ((7946, 7953), 'mo_future.text', 'text', (['i'], {}), '(i)\n', (7950, 7953), False, 'from mo_future import text\n')]
|
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import scipy.sparse as sps
from irspack.definitions import DenseScoreArray, UserIndexArray
from irspack.utils._util_cpp import retrieve_recommend_from_score
from irspack.utils.threading import get_n_threads
if TYPE_CHECKING:
# We should move this module out of "utils".
from irspack.recommenders import BaseRecommender
class IDMappedRecommender:
"""A utility class that helps mapping user/item ids to index, retrieving recommendation score,
and making a recommendation.
Args:
recommender:
The backend base recommender which transforms user/item ids.
user_ids:
            user_ids which correspond to the rows of ``recommender.X_train_all``.
        item_ids:
            item_ids which correspond to the columns of ``recommender.X_train_all``.
Raises:
ValueError: When recommender and user_ids/item_ids are inconsistent.
ValueError: When there is a duplicate in user_ids.
ValueError: When there is a duplicate in item_ids.
"""
def __init__(
self, recommender: "BaseRecommender", user_ids: List[Any], item_ids: List[Any]
):
if (recommender.n_users != len(user_ids)) or (
recommender.n_items != len(item_ids)
):
raise ValueError(
"The recommender and user/item ids have inconsistent lengths."
)
self.recommender = recommender
self.user_ids = user_ids
self.item_ids = item_ids
self.user_id_to_index = {user_id: i for i, user_id in enumerate(user_ids)}
self.item_id_to_index = {item_id: i for i, item_id in enumerate(item_ids)}
def _item_id_list_to_index_list(self, ids: Iterable[Any]) -> List[int]:
return [self.item_id_to_index[id] for id in ids if id in self.item_id_to_index]
def _user_profile_to_data_col(
self, profile: Union[List[Any], Dict[Any, float]]
) -> Tuple[List[float], List[int]]:
data: List[float]
cols: List[int]
# data: np.ndarray
if isinstance(profile, list):
cols = self._item_id_list_to_index_list(profile)
data = [1.0] * len(cols)
else:
data = []
cols = []
for id, score in profile.items():
if id in self.item_id_to_index:
data.append(score)
cols.append(self.item_id_to_index[id])
return data, cols
def _list_of_user_profile_to_matrix(
self, users_info: Sequence[Union[List[Any], Dict[Any, float]]]
) -> sps.csr_matrix:
data: List[float] = []
indptr: List[int] = [0]
col: List[int] = []
indptr_current = 0
for user_info in users_info:
data_u, col_u = self._user_profile_to_data_col(user_info)
data.extend(data_u)
col.extend(col_u)
indptr_current += len(col_u)
indptr.append(indptr_current)
result = sps.csr_matrix(
(data, col, indptr), shape=(len(users_info), len(self.item_ids))
)
return result
def get_recommendation_for_known_user_id(
self,
user_id: Any,
cutoff: int = 20,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
"""Retrieve recommendation result for a known user.
Args:
user_id:
The target user ID.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, recommend the items within this list.
                If ``None``, all known item ids can be recommended (except for those in the ``forbidden_item_ids`` argument).
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, never recommend the items within the list. Defaults to None.
Raises:
RuntimeError: When user_id is not in ``self.user_ids``.
Returns:
A List of tuples consisting of ``(item_id, score)``.
"""
if user_id not in self.user_ids:
raise RuntimeError(f"User with user_id {user_id} not found.")
user_index: UserIndexArray = np.asarray(
[self.user_id_to_index[user_id]], dtype=np.int64
)
score = self.recommender.get_score_remove_seen(user_index)[0, :]
return self._score_to_recommended_items(
score,
cutoff=cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
)
def get_recommendation_for_new_user(
self,
user_profile: Union[List[Any], Dict[Any, float]],
cutoff: int = 20,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
"""Retrieve recommendation result for a previously unseen user using item ids with which he or she interacted.
Args:
user_profile:
                User's profile, given either as a list of item ids the user has had contact with, or as an item id-rating dict.
Previously unseen item ID will be ignored.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, recommend the items within this list.
                If ``None``, all known item ids can be recommended (except for those in the ``forbidden_item_ids`` argument).
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, never recommend the items within the list. Defaults to None.
Returns:
A List of tuples consisting of ``(item_id, score)``.
"""
data, cols = self._user_profile_to_data_col(user_profile)
X_input = sps.csr_matrix(
(data, cols, [0, len(cols)]), shape=(1, len(self.item_ids))
)
score = self.recommender.get_score_cold_user_remove_seen(X_input)[0]
return self._score_to_recommended_items(
score,
cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
)
def get_recommendation_for_new_user_batch(
self,
user_profiles: Sequence[Union[List[Any], Dict[Any, float]]],
cutoff: int = 20,
allowed_item_ids: Optional[List[List[Any]]] = None,
forbidden_item_ids: Optional[List[List[Any]]] = None,
n_threads: Optional[int] = None,
) -> List[List[Tuple[Any, float]]]:
"""Retrieve recommendation result for a previously unseen users using item ids with which they have interacted.
Args:
user_profiles:
A list of user profiles.
                Each profile should be either a list of item ids the user has had contact with, or an item id-rating dict.
Previously unseen item IDs will be ignored.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, defines "a list of list of recommendable item IDs"
                and ``len(allowed_item_ids)`` must be equal to ``len(user_profiles)``.
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, defines "a list of list of forbidden item IDs"
                and ``len(forbidden_item_ids)`` must be equal to ``len(user_profiles)``.
Defaults to ``None``.
Returns:
A list of list of tuples consisting of ``(item_id, score)``.
Each internal list corresponds to the recommender's recommendation output.
"""
X_input = self._list_of_user_profile_to_matrix(user_profiles)
score = self.recommender.get_score_cold_user_remove_seen(X_input)
return self._score_to_recommended_items_batch(
score,
cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
n_threads=get_n_threads(n_threads=n_threads),
)
def _score_to_recommended_items(
self,
score: DenseScoreArray,
cutoff: int,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
if allowed_item_ids is not None:
allowed_item_indices = np.asarray(
self._item_id_list_to_index_list(allowed_item_ids), dtype=np.int64
)
high_score_inds = allowed_item_indices[
score[allowed_item_indices].argsort()[::-1]
]
else:
high_score_inds = score.argsort()[::-1]
recommendations: List[Tuple[Any, float]] = []
for i in high_score_inds:
i_int = int(i)
score_this = score[i_int]
item_id = self.item_ids[i_int]
if np.isinf(score_this):
continue
if forbidden_item_ids is not None:
if item_id in forbidden_item_ids:
continue
recommendations.append((item_id, float(score_this)))
if len(recommendations) >= cutoff:
break
return recommendations
def _score_to_recommended_items_batch(
self,
score: DenseScoreArray,
cutoff: int,
allowed_item_ids: Optional[List[List[Any]]] = None,
forbidden_item_ids: Optional[List[List[Any]]] = None,
n_threads: int = 1,
) -> List[List[Tuple[Any, float]]]:
if forbidden_item_ids is not None:
assert len(forbidden_item_ids) == score.shape[0]
if allowed_item_ids is not None:
assert len(allowed_item_ids) == score.shape[0]
allowed_item_indices: List[List[int]] = []
if allowed_item_ids is not None:
allowed_item_indices = [
self._item_id_list_to_index_list(_) for _ in allowed_item_ids
]
if forbidden_item_ids is not None:
for u, forbidden_ids_per_user in enumerate(forbidden_item_ids):
score[
u, self._item_id_list_to_index_list(forbidden_ids_per_user)
] = -np.inf
raw_result = retrieve_recommend_from_score(
score,
allowed_item_indices,
cutoff,
n_threads=n_threads,
)
return [
[
(self.item_ids[item_index], score)
for item_index, score in user_wise_raw_result
]
for user_wise_raw_result in raw_result
]
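# Hedged usage sketch (editor's addition): the backend recommender, the ids and
# the interaction matrix below are illustrative; any trained BaseRecommender
# (here TopPopRecommender) can be wrapped the same way.  The last call assumes
# the backend implements cold-user scoring.
if __name__ == "__main__":
    from irspack.recommenders import TopPopRecommender

    user_ids = ["alice", "bob", "carol"]
    item_ids = ["item_a", "item_b", "item_c", "item_d"]
    X = sps.csr_matrix(
        np.asarray(
            [[1, 0, 1, 0], [0, 1, 1, 0], [1, 1, 0, 0]], dtype=np.float64
        )
    )
    backend = TopPopRecommender(X).learn()
    mapped = IDMappedRecommender(backend, user_ids=user_ids, item_ids=item_ids)
    print(mapped.get_recommendation_for_known_user_id("alice", cutoff=2))
    print(mapped.get_recommendation_for_new_user(["item_a", "item_b"], cutoff=2))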
|
[
"irspack.utils.threading.get_n_threads",
"numpy.asarray",
"irspack.utils._util_cpp.retrieve_recommend_from_score",
"numpy.isinf"
] |
[((4405, 4465), 'numpy.asarray', 'np.asarray', (['[self.user_id_to_index[user_id]]'], {'dtype': 'np.int64'}), '([self.user_id_to_index[user_id]], dtype=np.int64)\n', (4415, 4465), True, 'import numpy as np\n'), ((10426, 10517), 'irspack.utils._util_cpp.retrieve_recommend_from_score', 'retrieve_recommend_from_score', (['score', 'allowed_item_indices', 'cutoff'], {'n_threads': 'n_threads'}), '(score, allowed_item_indices, cutoff,\n n_threads=n_threads)\n', (10455, 10517), False, 'from irspack.utils._util_cpp import retrieve_recommend_from_score\n'), ((9089, 9109), 'numpy.isinf', 'np.isinf', (['score_this'], {}), '(score_this)\n', (9097, 9109), True, 'import numpy as np\n'), ((8206, 8240), 'irspack.utils.threading.get_n_threads', 'get_n_threads', ([], {'n_threads': 'n_threads'}), '(n_threads=n_threads)\n', (8219, 8240), False, 'from irspack.utils.threading import get_n_threads\n')]
|
"""
Example to read a FITS file.
Created on Jul 9, 2019
Be aware that hdus.close() needs to be called to limit the number of open files at a given time.
@author: skwok
"""
import astropy.io.fits as pf
from astropy.utils.exceptions import AstropyWarning
import warnings
import numpy as np
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.primitives.base_primitive import BasePrimitive
def open_nowarning(filename):
with warnings.catch_warnings():
warnings.simplefilter("ignore", AstropyWarning)
return pf.open(filename, memmap=False)
class SimpleFitsReader_LRIS(BasePrimitive):
def __init__(self, action, context):
"""
Initializes the super class.
"""
BasePrimitive.__init__(self, action, context)
def _perform(self):
"""
Expects action.args.name as fits file name
Returns HDUs or (later) data model
"""
name = self.action.args.name
self.logger.debug(f"Reading {name}")
out_args = Arguments()
out_args.name = name
out_args.img = self.readData(name)
return out_args
def readData(self, name, cutout=True):
"""
Reads FITS file, mostly from KECK instruments.
If there are multiple HDUs, the image is assembled according to
        the keywords DETSEC and DATASEC.
Otherwise hdus[0].data is returned.
        If cutout is True, then only the non-zero portion is returned.
"""
with open_nowarning(name) as hdus:
if len(hdus) == 1:
return hdus[0].data
else:
imgBuf = hdus[1].data
for hdu in hdus[2:]:
imgBuf = np.concatenate((imgBuf, hdu.data), 1)
return imgBuf
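# Hedged usage sketch (editor's addition): write a tiny synthetic FITS file and
# read it back through open_nowarning(); the primitive class above needs a full
# KeckDRP action/context and is not exercised here.
if __name__ == "__main__":
    import os
    import tempfile

    tmp_name = os.path.join(tempfile.mkdtemp(), "example.fits")
    pf.PrimaryHDU(np.arange(12, dtype=np.float32).reshape(3, 4)).writeto(tmp_name)
    with open_nowarning(tmp_name) as hdus:
        print(hdus[0].data.shape)  # -> (3, 4)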
|
[
"warnings.simplefilter",
"keckdrpframework.primitives.base_primitive.BasePrimitive.__init__",
"keckdrpframework.models.arguments.Arguments",
"warnings.catch_warnings",
"astropy.io.fits.open",
"numpy.concatenate"
] |
[((460, 485), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (483, 485), False, 'import warnings\n'), ((495, 542), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'AstropyWarning'], {}), "('ignore', AstropyWarning)\n", (516, 542), False, 'import warnings\n'), ((558, 589), 'astropy.io.fits.open', 'pf.open', (['filename'], {'memmap': '(False)'}), '(filename, memmap=False)\n', (565, 589), True, 'import astropy.io.fits as pf\n'), ((746, 791), 'keckdrpframework.primitives.base_primitive.BasePrimitive.__init__', 'BasePrimitive.__init__', (['self', 'action', 'context'], {}), '(self, action, context)\n', (768, 791), False, 'from keckdrpframework.primitives.base_primitive import BasePrimitive\n'), ((1036, 1047), 'keckdrpframework.models.arguments.Arguments', 'Arguments', ([], {}), '()\n', (1045, 1047), False, 'from keckdrpframework.models.arguments import Arguments\n'), ((1741, 1778), 'numpy.concatenate', 'np.concatenate', (['(imgBuf, hdu.data)', '(1)'], {}), '((imgBuf, hdu.data), 1)\n', (1755, 1778), True, 'import numpy as np\n')]
|
""" timg_denoise.py
"""
import numpy as np
import torch
import torch.nn as nn
class Timg_DenoiseNet_LinT_1Layer(nn.Module):
def __init__(self):
super(Timg_DenoiseNet_LinT_1Layer, self).__init__()
self.C = 64
self.K = 13
self.centre = 3/255.0
self.scale = 2.0
self.conv1 = nn.Conv2d(1, self.C, self.K, padding=self.K//2)
self.norm1 = nn.BatchNorm2d(self.C)
self.relu1 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
# just need time to be above the minimum
self.fix_range_t = nn.Threshold(1/255.0, 1/255.0)
# nn.init.dirac_(self.conv1.weight)
def forward(self, t):
t = self.scale * (t - self.centre)
t = self.conv1(t)
t = self.relu1(t)
t = self.comb(t)
t = self.fix_range_t(t)
return t
class Timg_DenoiseNet_LinT(nn.Module):
def __init__(self, Tmin=1e-3, Tmax=1e3):
super(Timg_DenoiseNet_LinT, self).__init__()
self.C = 64
self.Tmin = Tmin
self.Tmax = Tmax
self.Tmid = 1
self.Tscale = self.Tmid - self.Tmin
self.conv1 = nn.Conv2d(1, self.C, 5, padding=2)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn2 = nn.BatchNorm2d(self.C)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn3 = nn.BatchNorm2d(self.C)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn4 = nn.BatchNorm2d(self.C)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn5 = nn.BatchNorm2d(self.C)
self.relu5 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
self.fix_range1 = nn.Hardtanh(min_val=self.Tmin, max_val=self.Tmax)
self.fix_range2 = nn.Hardtanh(min_val=0, max_val=1)
def forward(self, t):
t = (1.0/self.Tscale) * (t - self.Tmid)
t = self.conv1(t)
t = self.relu1(t)
t = self.conv2(t)
t = self.bn2(t)
t = self.relu2(t)
t = self.conv3(t)
t = self.bn3(t)
t = self.relu3(t)
t = self.conv4(t)
t = self.bn4(t)
t = self.relu4(t)
t = self.conv5(t)
t = self.bn5(t)
t = self.relu5(t)
t = self.comb(t)
t = self.Tmid + (self.Tscale * t)
t = self.fix_range1(t)
y = torch.pow(t, -1)
y = self.fix_range2(y)
return y
class Timg_DenoiseNet(nn.Module):
def __init__(self, Tmin=1e-3, Tmax=1e3):
super(Timg_DenoiseNet, self).__init__()
self.C = 64
self.Tmin = np.log(Tmin)
self.Tmax = np.log(Tmax)
# self.conv1 = nn.Conv2d(1, self.C, 3, padding=1)
self.conv1 = nn.Conv2d(1, self.C, 5, padding=2)
# self.conv1 = nn.Conv2d(1, self.C, 7, padding=3)
# self.conv1 = nn.Conv2d(1, self.C, 9, padding=4)
# self.conv1 = nn.Conv2d(1, self.C, 11, padding=5)
# self.conv1 = nn.Conv2d(1, self.C, 13, padding=6)
# self.conv1 = nn.Conv2d(1, self.C, 15, padding=7)
# self.conv1 = nn.Conv2d(1, self.C, 17, padding=8)
# self.conv1 = nn.Conv2d(1, self.C, 19, padding=9)
# self.conv1 = nn.Conv2d(1, self.C, 21, padding=10)
self.relu1 = nn.ReLU()
# self.conv2 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv2 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn2 = nn.BatchNorm2d(self.C)
self.relu2 = nn.ReLU()
# self.conv3 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv3 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn3 = nn.BatchNorm2d(self.C)
self.relu3 = nn.ReLU()
# self.conv4 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv4 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn4 = nn.BatchNorm2d(self.C)
self.relu4 = nn.ReLU()
# self.conv5 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv5 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn5 = nn.BatchNorm2d(self.C)
self.relu5 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
self.fix_range1 = nn.Hardtanh(min_val=self.Tmin, max_val=self.Tmax)
self.fix_range2 = nn.Hardtanh(min_val=0, max_val=1)
def forward(self, t):
logt = torch.log(t)
logt = self.conv1(logt)
logt = self.relu1(logt)
logt = self.conv2(logt)
logt = self.bn2(logt)
logt = self.relu2(logt)
logt = self.conv3(logt)
logt = self.bn3(logt)
logt = self.relu3(logt)
logt = self.conv4(logt)
logt = self.bn4(logt)
logt = self.relu4(logt)
logt = self.conv5(logt)
logt = self.bn5(logt)
logt = self.relu5(logt)
logt = self.comb(logt)
logt = self.fix_range1(logt)
t = torch.exp(logt)
y = torch.pow(t, -1)
y = self.fix_range2(y)
return y
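# Hedged sanity sketch (editor's addition): push a random, strictly positive
# "time" image through Timg_DenoiseNet and confirm the output is an intensity
# map in [0, 1] (the network operates on log(t) internally and returns a
# clamped 1/t).  Shapes and values are illustrative only.
if __name__ == "__main__":
    net = Timg_DenoiseNet()
    net.eval()
    with torch.no_grad():
        t = torch.rand(1, 1, 32, 32) * 10.0 + 1e-2  # positive input times
        y = net(t)
    print(y.shape, float(y.min()), float(y.max()))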
|
[
"torch.nn.ReLU",
"numpy.log",
"torch.nn.Conv2d",
"torch.nn.Threshold",
"torch.exp",
"torch.nn.BatchNorm2d",
"torch.pow",
"torch.log",
"torch.nn.Hardtanh"
] |
[((327, 376), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.C', 'self.K'], {'padding': '(self.K // 2)'}), '(1, self.C, self.K, padding=self.K // 2)\n', (336, 376), True, 'import torch.nn as nn\n'), ((396, 418), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (410, 418), True, 'import torch.nn as nn\n'), ((440, 449), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (447, 449), True, 'import torch.nn as nn\n'), ((471, 494), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', '(1)', '(1)'], {}), '(self.C, 1, 1)\n', (480, 494), True, 'import torch.nn as nn\n'), ((571, 605), 'torch.nn.Threshold', 'nn.Threshold', (['(1 / 255.0)', '(1 / 255.0)'], {}), '(1 / 255.0, 1 / 255.0)\n', (583, 605), True, 'import torch.nn as nn\n'), ((1142, 1176), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.C', '(5)'], {'padding': '(2)'}), '(1, self.C, 5, padding=2)\n', (1151, 1176), True, 'import torch.nn as nn\n'), ((1198, 1207), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1205, 1207), True, 'import torch.nn as nn\n'), ((1230, 1269), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1239, 1269), True, 'import torch.nn as nn\n'), ((1289, 1311), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1303, 1311), True, 'import torch.nn as nn\n'), ((1333, 1342), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1340, 1342), True, 'import torch.nn as nn\n'), ((1365, 1404), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1374, 1404), True, 'import torch.nn as nn\n'), ((1424, 1446), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1438, 1446), True, 'import torch.nn as nn\n'), ((1468, 1477), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1475, 1477), True, 'import torch.nn as nn\n'), ((1500, 1539), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1509, 1539), True, 'import torch.nn as nn\n'), ((1559, 1581), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1573, 1581), True, 'import torch.nn as nn\n'), ((1603, 1612), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1610, 1612), True, 'import torch.nn as nn\n'), ((1635, 1674), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (1644, 1674), True, 'import torch.nn as nn\n'), ((1694, 1716), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (1708, 1716), True, 'import torch.nn as nn\n'), ((1738, 1747), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1745, 1747), True, 'import torch.nn as nn\n'), ((1769, 1792), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', '(1)', '(1)'], {}), '(self.C, 1, 1)\n', (1778, 1792), True, 'import torch.nn as nn\n'), ((1820, 1869), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': 'self.Tmin', 'max_val': 'self.Tmax'}), '(min_val=self.Tmin, max_val=self.Tmax)\n', (1831, 1869), True, 'import torch.nn as nn\n'), ((1896, 1929), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(0)', 'max_val': '(1)'}), '(min_val=0, max_val=1)\n', (1907, 1929), True, 'import torch.nn as nn\n'), ((2478, 2494), 'torch.pow', 'torch.pow', (['t', '(-1)'], {}), '(t, -1)\n', (2487, 2494), False, 'import torch\n'), ((2712, 2724), 'numpy.log', 'np.log', (['Tmin'], {}), '(Tmin)\n', (2718, 2724), True, 'import numpy as np\n'), ((2745, 2757), 'numpy.log', 'np.log', (['Tmax'], {}), '(Tmax)\n', (2751, 
2757), True, 'import numpy as np\n'), ((2838, 2872), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.C', '(5)'], {'padding': '(2)'}), '(1, self.C, 5, padding=2)\n', (2847, 2872), True, 'import torch.nn as nn\n'), ((3365, 3374), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3372, 3374), True, 'import torch.nn as nn\n'), ((3460, 3499), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (3469, 3499), True, 'import torch.nn as nn\n'), ((3519, 3541), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (3533, 3541), True, 'import torch.nn as nn\n'), ((3563, 3572), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3570, 3572), True, 'import torch.nn as nn\n'), ((3658, 3697), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (3667, 3697), True, 'import torch.nn as nn\n'), ((3717, 3739), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (3731, 3739), True, 'import torch.nn as nn\n'), ((3761, 3770), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3768, 3770), True, 'import torch.nn as nn\n'), ((3856, 3895), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (3865, 3895), True, 'import torch.nn as nn\n'), ((3915, 3937), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (3929, 3937), True, 'import torch.nn as nn\n'), ((3959, 3968), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3966, 3968), True, 'import torch.nn as nn\n'), ((4054, 4093), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', 'self.C', '(5)'], {'padding': '(2)'}), '(self.C, self.C, 5, padding=2)\n', (4063, 4093), True, 'import torch.nn as nn\n'), ((4113, 4135), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.C'], {}), '(self.C)\n', (4127, 4135), True, 'import torch.nn as nn\n'), ((4157, 4166), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4164, 4166), True, 'import torch.nn as nn\n'), ((4188, 4211), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.C', '(1)', '(1)'], {}), '(self.C, 1, 1)\n', (4197, 4211), True, 'import torch.nn as nn\n'), ((4239, 4288), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': 'self.Tmin', 'max_val': 'self.Tmax'}), '(min_val=self.Tmin, max_val=self.Tmax)\n', (4250, 4288), True, 'import torch.nn as nn\n'), ((4315, 4348), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(0)', 'max_val': '(1)'}), '(min_val=0, max_val=1)\n', (4326, 4348), True, 'import torch.nn as nn\n'), ((4391, 4403), 'torch.log', 'torch.log', (['t'], {}), '(t)\n', (4400, 4403), False, 'import torch\n'), ((4931, 4946), 'torch.exp', 'torch.exp', (['logt'], {}), '(logt)\n', (4940, 4946), False, 'import torch\n'), ((4959, 4975), 'torch.pow', 'torch.pow', (['t', '(-1)'], {}), '(t, -1)\n', (4968, 4975), False, 'import torch\n')]
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import jams.const as const
def tcherkez(Rstar, Phi=0.3, T=0.056,
a2=1.0012, a3=1.0058, a4=1.0161,
t1=0.9924, t2=1.0008, g=20e-3,
RG=False, Rchl=False, Rcyt=False, fullmodel=True):
"""
    Calculates the Tcherkez model of 13C-discrimination in the Calvin cycle.
Definition
----------
def tcherkez(Rstar, Phi=0.3, T=0.056, a2=1.0012, a3=1.0058, a4=1.0161,
t1=0.9924, t2=1.0008, g=20e-3
RG=False, Rchl=False, Rcyt=False, fullmodel=True):
Input
-----
Rstar Isotope ratio of assimilated carbon, e.g. of Farquhar et al. (1982) model
Optional Input
--------------
    Phi        Vo/Vc: ratio of oxygenation to carboxylation by Rubisco (default: 0.3)
T Relative flux of starch synthesis [mol(C6 of starch)/mol(CO2 assimilated)] (default: 0.056)
a2 Inverse fractionation associated with aldolase
for the C-2 position of FBP (Fructose-1,6-bisphosphate) (default: 1.0012)
a3 Same for C-3 of FBP (default: 1.0058)
a4 Same for C-4 of FBP (default: 1.0161)
    t1         Inverse fractionation associated with transketolase
for C-1 in E4P (erythrose-4-phosphate) and R5P (ribose-5-phosphate) (default: 0.9924)
t2 Same for C-2 of X5P (xylulose-5-phosphate) (default: 1.0008)
g Isotope discrimination of photorespiratory decarboxylation of Gly (Glycine) (default: 20e-3)
RG If True, output isotope ratio of G3P (3-phosphoglyceraldehyde
or glyceraldehyde-3-phosphate) (default: False)
Rchl If True, output isotope ratio of chloroplastic hexoses and transitory starch (default: False)
Rcyt If True, output isotope ratio of cytoplasmic hexoses (default: False)
fullmodel If True, output RG, Rchl and Rcyt (default: True)
Output
------
RG, Rchl, Rcyt if fullmodel=True
Restrictions
------------
    If at least one of RG, Rchl or Rcyt is given then fullmodel=False.
References
----------
<NAME>, <NAME>, <NAME> & <NAME>, Theoretical considerations about carbon isotope
distribution in glucose of C3 plants, Functional Plant Biology 31, 857-877, 2004
<NAME>, <NAME>, <NAME>, <NAME> & <NAME>, Experimental evidence for diel variations
of the carbon isotope composition in leaf, stem and phloem sap organic matter in Ricinus communis,
Plant, Cell and Environment 31, 941-953, 2008
Examples
--------
>>> a = -4.4e-3
>>> b = -27e-3
>>> ca = 353e-6
>>> ci = 0.7*ca
>>> Delta = a+(b-a)*ci/ca
>>> delta_a1 = -8e-3
>>> Ra1 = (delta_a1+1.)*const.R13VPDB
>>> Rstar1 = (1.-Delta)*Ra1
>>> from autostring import astr
>>> print(astr((np.array(tcherkez(Rstar1, Phi=0.3, T=0.056))/const.R13VPDB-1.)*1000.,3,pp=True))
['12.764' '17.125' '12.978']
>>> delta_a2 = -7.8e-3
>>> Ra2 = (delta_a2+1.)*const.R13VPDB
>>> Rstar2 = (1.-Delta)*Ra2
>>> R1 = (np.array(tcherkez([Rstar1, Rstar2], Rcyt=True))/const.R13VPDB-1.)*1000.
>>> print(astr(R1,3,pp=True))
[['12.978' '13.182']]
>>> R1, R2 = tcherkez([Rstar1, Rstar2], Rchl=True, Rcyt=True)
>>> print(astr((R1/const.R13VPDB-1.)*1000.,3,pp=True))
['17.125' '17.330']
>>> print(astr((R2/const.R13VPDB-1.)*1000.,3,pp=True))
['12.978' '13.182']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Jan 2012
Modified, MC, Feb 2013 - ported to Python 3
"""
#
if (RG | Rchl | Rcyt):
fullmodel = False
if fullmodel:
RG = True
Rchl = True
Rcyt = True
#
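    # effective fractionation factors: the aldolase (a2, a3) and transketolase
    # (t1, t2) fractionations weighted by the photorespiration (Phi) and starch
    # synthesis (T) fluxes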
a2tilde = (1.+0.5*Phi-T) / ((2.*a2+1.)/3.+Phi*(2.*a2-0.5)/3.+T*(a2-2.))
a3tilde = (1.+0.5*Phi-T) / ((2.*a3+1.)/3.+Phi*(2.*a3-0.5)/3.+T*(a3-2.))
t1tilde = (1.+3.*T)/(t1+3.*T)*t1
t2tilde = (1.+3.*T)/(t2+3.*T)*t2
eps = a3*a3tilde
epsdash = (t1tilde+1.5*Phi)*a3*a3tilde/(3.*(1.+0.5*Phi-(1.+t2tilde)*a2*a2tilde/3.))
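    # isotope ratios of G3P (iRG), chloroplastic hexoses/transitory starch (iRchl)
    # and cytoplasmic hexoses (iRcyt), derived from the assimilated ratio Rstar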
iRG = np.array(Rstar) / (1.+Phi*(0.5-(1.+g)/(2.+g)*(eps+2.*a2*a2tilde*epsdash)/3.)+T*(a4-1.))
iRchl = 1./6.*(epsdash*(1.+(a2*a2tilde*t2tilde)/t2)+eps*(2.+t1tilde/t1)+a4) * iRG
iRcyt = 1./6.*(2.*eps+3.*(a2+1.)/(a2+2.)*epsdash*a2tilde+3.*a3tilde/(2.+a3)*(a3+2.*a4/(1.+a4))) * iRG
out = []
if RG:
out += [iRG]
if Rchl:
out += [iRchl]
if Rcyt:
out += [iRcyt]
return out
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
[
"numpy.array",
"doctest.testmod"
] |
[((6383, 6440), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.NORMALIZE_WHITESPACE'}), '(optionflags=doctest.NORMALIZE_WHITESPACE)\n', (6398, 6440), False, 'import doctest\n'), ((5918, 5933), 'numpy.array', 'np.array', (['Rstar'], {}), '(Rstar)\n', (5926, 5933), True, 'import numpy as np\n')]
|
import random
import numpy as np
import cv2
import torch
import torch.utils.data as data
import logging
from . import util
class LQGTDataset3D(data.Dataset):
'''
    Read LQ (Low Quality, here LR) and GT vti file pairs.
If only GT image is provided, generate LQ vti on-the-fly.
    Pairing is ensured by the 'sorted' function, so please check the naming convention.
'''
logger = logging.getLogger('base')
def __init__(self, opt):
super(LQGTDataset3D, self).__init__()
self.opt = opt
        self.paths_GT = None
if opt['set_type'] == 'vtk':
self.paths_GT = util.get_vtk_paths(opt['dataroot_GT'])
# self.paths_LQ = util.get_vtk_paths(opt['dataroot_LQ'])
elif opt['set_type'] == 'tecplot':
self.paths_GT = util.get_tecplot_paths(opt['dataroot_GT'])
# self.paths_LQ = util.get_tecplot_paths(opt['dataroot_LQ'])
else:
            ex = Exception("Type '%s' is not supported" % opt['set_type'])
raise ex
assert self.paths_GT, 'Error: GT path is empty.'
# if self.paths_LQ and self.paths_GT:
# assert len(self.paths_LQ) == len(
# self.paths_GT
# ), 'GT and LQ datasets have different number of images - {}, {}.'.format(
# len(self.paths_LQ), len(self.paths_GT))
self.random_scale_list = [1]
def __getitem__(self, index):
# cv2.setNumThreads(0)
scale = self.opt['scale']
GT_size = self.opt['GT_size']
attr_id = self.opt.get('attr_id', 0)
# get GT image
GT_path = self.paths_GT[index]
vti_GT_generator = util.getTensorGenerator(GT_path, self.opt['data_type'])
vti_GT, component_GT = vti_GT_generator.get_array_by_id(attr_id)
print('origin GT shape: {}'.format(vti_GT.shape))
if self.opt['phase'] != 'train':
vti_GT = util.modcrop_3d(vti_GT, scale)
# if self.paths_LQ:
# LQ_path = self.paths_LQ[index]
# vti_LQ_generator = util.getTensorGenerator(LQ_path)
# vti_LQ_generator.set_type(self.opt['type'])
# vti_LQ, component_LQ = vti_LQ_generator.get_array_by_id(attr_id)
# else:
# if self.opt['phase'] == 'train':
# # random_scale = random.choice(self.random_scale_list)
# # Z_s, Y_s, X_s = vti_GT.shape
#
# # def _mod(n, random_scale, scale, thres):
# # rlt = int(n * random_scale)
# # rlt = (rlt // scale) * scale
# # return thres if rlt < thres else rlt
#
# # Z_s = _mod(Z_s, random_scale, scale, GT_size)
# # Y_s = _mod(Y_s, random_scale, scale, GT_size)
# # X_s = _mod(X_s, random_scale, scale, GT_size)
# vti_GT = util.resize_3d(arr=np.copy(vti_GT), newsize=GT_size)
#
# # using matlab imresize3
# vti_LQ = util.imresize3_np(vti_GT, 1 / scale, True)
# if vti_LQ.ndim != 3:
# ex = Exception("Error: dims not right")
# raise ex
if self.opt['phase'] == 'train':
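            # volumes smaller than GT_size are upsampled to GT_size; larger ones
            # are cropped so every dimension is a multiple of the scale factor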
Z, Y, X = vti_GT.shape
if Z < GT_size or Y < GT_size or X < GT_size:
vti_GT = util.resize_3d(np.copy(vti_GT), newsize=GT_size)
elif Z > GT_size or Y > GT_size or X > GT_size:
vti_GT = util.modcrop_3d(vti_GT, scale)
# using matlab imresize3
# vti_LQ = util.imresize3_np(vti_GT, 1 / scale, True)
# if vti_LQ.ndim != 2:
# ex = Exception("Error: dims not right")
# raise ex
# Z, Y, X = vti_LQ.shape
# LQ_size = GT_size // scale
#
# # randomly crop
# rnd_Z = random.randint(0, max(0, Z - LQ_size))
# rnd_Y = random.randint(0, max(0, Y - LQ_size))
# rnd_X = random.randint(0, max(0, X - LQ_size))
# vti_LQ = vti_LQ[rnd_Z: rnd_Z + LQ_size, rnd_Y: rnd_Y + LQ_size, rnd_X: rnd_X + LQ_size]
# rnd_Z_GT, rnd_Y_GT, rnd_X_GT = int(rnd_Z * scale), int(rnd_Y * scale), int(rnd_X * scale)
# vti_GT = vti_GT[rnd_Z_GT: rnd_Z_GT + GT_size, rnd_Y_GT: rnd_Y_GT + GT_size, rnd_X_GT: rnd_X_GT + GT_size]
        # convert the (Z, Y, X) volume to a float tensor with an added channel dimension
vti_GT = torch.from_numpy(np.ascontiguousarray(vti_GT)).float().unsqueeze(0)
print("vti_GT size: {}".format(vti_GT.size()))
# vti_LQ = torch.from_numpy(np.ascontiguousarray(vti_LQ)).float().unsqueeze(0)
# if LQ_path is None:
# LQ_path = GT_path
return {'GT': vti_GT, 'GT_path': GT_path}
def __len__(self):
return len(self.paths_GT)
|
[
"numpy.copy",
"numpy.ascontiguousarray",
"logging.getLogger"
] |
[((396, 421), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (413, 421), False, 'import logging\n'), ((3340, 3355), 'numpy.copy', 'np.copy', (['vti_GT'], {}), '(vti_GT)\n', (3347, 3355), True, 'import numpy as np\n'), ((4421, 4449), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['vti_GT'], {}), '(vti_GT)\n', (4441, 4449), True, 'import numpy as np\n')]
|
import multiprocessing
import threading
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
import sys
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from shared_adam import SharedAdam
import math, os
import cv2
import torchvision.transforms as transforms
import imageio
os.environ["OMP_NUM_THREADS"] = "1"
device=torch.device("cuda")
np.set_printoptions(precision=4,suppress=True)
simulation_dir = '../simulation'
sys.path.insert(0, simulation_dir)
from Wrench_Manipulation_Env import RobotEnv
ExName = "Wrench_Manipulation"
sys.path.insert(0,'../external/bullet3.git/build_cmake/examples/pybullet')
import pybullet
def v_wrap(np_array,dtype=np.float32):
if np_array.dtype != dtype:
np_array = np_array.astype(dtype)
return torch.from_numpy(np_array).to(device)
def push_and_pull(opt, lnet, gnet, done, s_, bs, ba, br, bdone, gamma):
if done:
v_s_ = 0.
else:
v_s_ = lnet.forward(v_wrap(s_[None,:]))[-1].data.cpu().numpy()[0,0]
buffer_v_target = []
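    # walk the rollout backwards to accumulate discounted returns, restarting
    # the bootstrap value whenever an episode terminated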
for r, termination in zip(br[::-1], bdone[::-1]):
if termination:
v_s_ = 0
v_s_ = r + gamma * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
loss = lnet.loss_func(
v_wrap(np.vstack(bs)),
v_wrap(np.vstack(ba)),
v_wrap(np.array(buffer_v_target)[:, None]))
opt.zero_grad()
loss.backward()
nn.utils.clip_grad_norm(lnet.parameters(),1.0)
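    # copy the local gradients into the shared global network, then step the shared optimizer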
for lp, gp in zip(lnet.parameters(), gnet.parameters()):
gp._grad = lp.grad
opt.step()
# pull global parameters
lnet.load_state_dict(gnet.state_dict())
MAX_EP = 15000
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0., std=0.01)
nn.init.constant_(layer.bias, 0.)
class ACNet(nn.Module):
def __init__(self):
super(ACNet, self).__init__()
self.distribution = torch.distributions.Normal
self.block1 = nn.Sequential(
nn.Conv2d(in_channels=3,out_channels=32,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(32),
)
# 60, 80
self.block2 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=32,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(32),
)
# 30, 40
self.block3 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(64),
)
# 15, 20
self.block4 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=64,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(64),
)
# 8, 10
self.block5 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=128,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(128),
)
# 4, 5
self.block6 = nn.Sequential(
nn.Conv2d(in_channels=128,out_channels=128,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(128),
)
# 2, 3
self.fc_a = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.fc_s = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.fc_v = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.mu_layer = nn.Linear(24,6)
self.sigma_layer = nn.Linear(24,6)
self.v_layer = nn.Linear(24,1)
set_init([self.mu_layer, self.sigma_layer, self.v_layer])
def forward(self, im):
im = im.view(-1, 120, 160, 3)
im = im.permute(0,3,1,2)
im = self.block1(im)
im = self.block2(im)
im = self.block3(im)
im = self.block4(im)
im = self.block5(im)
im = self.block6(im)
im = im.reshape(-1, 2 * 3 * 128)
x_a = self.fc_a(im)
mu = self.mu_layer(x_a)
mu = F.tanh(mu)
x_s = self.fc_s(im)
sigma = self.sigma_layer(x_s)
sigma = F.softplus(sigma) * 0.06 + 0.005
x_v= self.fc_v(im)
values = self.v_layer(x_v)
return mu, sigma, values
def choose_action(self, s):
self.training = False
mu, sigma, _ = self.forward(s)
m = self.distribution(mu.view(-1,).data, sigma.view(-1,).data)
return m.sample().cpu().numpy(), mu.cpu().detach().numpy(), sigma.cpu().detach().numpy()
def loss_func(self, s, a, v_t):
self.train()
mu, sigma, values = self.forward(s)
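        # critic loss: squared TD error; actor loss: policy gradient weighted by the
        # TD error plus an entropy bonus (ENTROPY_BETA) to encourage exploration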
td = v_t - values
c_loss = td.pow(2)
m = self.distribution(mu, sigma)
log_prob = m.log_prob(a)
entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(m.scale)
exp_v = log_prob * td.detach() + ENTROPY_BETA * entropy
a_loss = -exp_v
total_loss = (a_loss + c_loss).mean()
return total_loss
class Worker(mp.Process):
def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, wid, SAVE_TOP_DIR):
super(Worker, self).__init__()
print("wid %d" % wid)
self.wid = wid
self.step = 0
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, opt
self.random_seed = 42 + self.wid + int(np.log(self.wid * 100 + 1))
print("random_seed",self.random_seed,"self.wid",self.wid)
np.random.seed(self.random_seed)
self.lnet = ACNet().to(device)
self.init_step = 0
self.SAVE_TOP_DIR = SAVE_TOP_DIR
def run(self):
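        # standard ImageNet channel statistics, used to normalise the RGB observations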
mean=np.array([0.485, 0.456, 0.406])
std=np.array([0.229, 0.224, 0.225])
mean = np.reshape(mean,(1,1,3))
std = np.reshape(std,(1,1,3))
self.start_pos = [-0.1,-0.4,0.5]
self.dt = 1./30.0
if self.wid == 0:
self.p_id = pybullet.connect(pybullet.GUI)
else:
self.p_id = pybullet.connect(pybullet.DIRECT)
action_dir = os.path.join(self.SAVE_TOP_DIR,"action.npy")
fixture_action = np.zeros((3,))
self.env = RobotEnv(worker_id=self.wid,p_id=pybullet,dt=self.dt,maxSteps=20,fixture_offset=fixture_action)
total_step = 1 + self.init_step
suc_check = 0
reward_check = 0
episode_check = 0
sigma_check1 = 0
sigma_check2 = 0
total_episode = 0
buffer_s, buffer_a, buffer_r, buffer_done = [], [], [], []
while total_step < MAX_EP:
observation = self.env.reset()
observation = observation/255.0
observation = (observation - mean)/std
observation = np.reshape(observation,(-1,))
while True:
action, mu_r, sigma_r = self.lnet.choose_action(v_wrap(observation[None,:]))
action[:3] = action[:3].clip(-0.03,0.03)
action[3:] = action[3:].clip(-0.05,0.05)
#
# if action[2] > 0.005:
                #    action[2] = 0.005
observation_next, reward, done, suc = self.env.step(action)
observation_next = observation_next/255.0
observation_next = (observation_next - mean)/std
recordGif = False
if recordGif and total_step > 10:
imageio.mimsave('pokingSthSlightly.gif',self.env.obs_list)
return
observation_next = np.reshape(observation_next,(-1,))
buffer_s.append(observation)
buffer_r.append(reward)
buffer_a.append(action)
buffer_done.append(done)
if total_step % (UPDATE_GLOBAL_ITER + self.wid) == 0 or done:
push_and_pull(self.opt, self.lnet, self.gnet, done, observation_next, buffer_s, buffer_a, buffer_r, buffer_done, GAMMA)
buffer_s, buffer_a, buffer_r, buffer_done = [], [], [], []
if done:
suc_check += suc
episode_check += 1
total_episode += 1
observation = observation_next
total_step += 1
reward_check += reward
if total_step % 100 == 0:
current_performance = float(suc_check)/episode_check
avg_sigma1 = sigma_check1 / 100.0
avg_sigma2 = sigma_check2 / 100.0
if self.wid == 0:
print(self.SAVE_TOP_DIR,"total step %d, avg suc %f, avg reward %f" % (total_step, suc_check / 100.0, reward_check / 100.0))
save_path = os.path.join(self.SAVE_TOP_DIR,str(total_step)+'model.pth.tar')
if self.wid == 0 and int(total_step) % 1000 == 0:
print("saving to",save_path)
torch.save(self.gnet.state_dict(), save_path)
suc_check = 0
episode_check = 0
sigma_check1 = 0.0
sigma_check2 = 0.0
if done:
break
reward_dir = os.path.join(self.SAVE_TOP_DIR,"reward.txt")
np.savetxt(reward_dir,np.array([reward_check/100.0]),fmt='%f')
print("finising the learning!")
torch.cuda.empty_cache()
print("empyting the cache!")
sys.exit()
os._exit(1)
if __name__ == "__main__":
ExName = 'optimal'#sys.argv[1]
#print(ExName)
SAVE_TOP_DIR = os.path.join('./wrench/',ExName)
if not os.path.exists(SAVE_TOP_DIR):
os.makedirs(SAVE_TOP_DIR)
mp.set_start_method('spawn')
gnet = ACNet() # global network
## loading
Load_model_id = '2000'
Load_path = os.path.join(SAVE_TOP_DIR,Load_model_id + 'model.pth.tar')
#checkpoint = torch.load(Load_path)
#gnet.load_state_dict(checkpoint)
gnet.to(device)
gnet.share_memory()
opt = SharedAdam(gnet.parameters(),lr=0.0001)
global_ep, global_ep_r, res_queue = mp.Value('i',0), mp.Value('d',0.), mp.Queue()
workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i, SAVE_TOP_DIR) for i in range(1)]
[w.start() for w in workers]
res = []
for worker in workers:
worker.init_step = 0
[w.join() for w in workers]
|
[
"numpy.random.seed",
"torch.nn.init.constant_",
"torch.device",
"pybullet.connect",
"torch.nn.functional.tanh",
"os.path.join",
"imageio.mimsave",
"numpy.set_printoptions",
"os.path.exists",
"Wrench_Manipulation_Env.RobotEnv",
"numpy.reshape",
"torch.nn.Linear",
"math.log",
"torch.log",
"torch.nn.Conv2d",
"torch.multiprocessing.set_start_method",
"torch.nn.BatchNorm2d",
"torch.multiprocessing.Value",
"sys.exit",
"torch.from_numpy",
"numpy.vstack",
"torch.nn.ReLU",
"os.makedirs",
"numpy.log",
"numpy.zeros",
"sys.path.insert",
"torch.nn.init.normal_",
"numpy.array",
"os._exit",
"torch.cuda.empty_cache",
"torch.multiprocessing.Queue",
"torch.nn.functional.softplus"
] |
[((410, 430), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (422, 430), False, 'import torch\n'), ((432, 479), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)'}), '(precision=4, suppress=True)\n', (451, 479), True, 'import numpy as np\n'), ((512, 546), 'sys.path.insert', 'sys.path.insert', (['(0)', 'simulation_dir'], {}), '(0, simulation_dir)\n', (527, 546), False, 'import sys\n'), ((624, 699), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../external/bullet3.git/build_cmake/examples/pybullet"""'], {}), "(0, '../external/bullet3.git/build_cmake/examples/pybullet')\n", (639, 699), False, 'import sys\n'), ((9137, 9170), 'os.path.join', 'os.path.join', (['"""./wrench/"""', 'ExName'], {}), "('./wrench/', ExName)\n", (9149, 9170), False, 'import math, os\n'), ((9242, 9270), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (9261, 9270), True, 'import torch.multiprocessing as mp\n'), ((9362, 9421), 'os.path.join', 'os.path.join', (['SAVE_TOP_DIR', "(Load_model_id + 'model.pth.tar')"], {}), "(SAVE_TOP_DIR, Load_model_id + 'model.pth.tar')\n", (9374, 9421), False, 'import math, os\n'), ((1759, 1808), 'torch.nn.init.normal_', 'nn.init.normal_', (['layer.weight'], {'mean': '(0.0)', 'std': '(0.01)'}), '(layer.weight, mean=0.0, std=0.01)\n', (1774, 1808), True, 'import torch.nn as nn\n'), ((1816, 1850), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias', '(0.0)'], {}), '(layer.bias, 0.0)\n', (1833, 1850), True, 'import torch.nn as nn\n'), ((3792, 3808), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(6)'], {}), '(24, 6)\n', (3801, 3808), True, 'import torch.nn as nn\n'), ((3831, 3847), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(6)'], {}), '(24, 6)\n', (3840, 3847), True, 'import torch.nn as nn\n'), ((3866, 3882), 'torch.nn.Linear', 'nn.Linear', (['(24)', '(1)'], {}), '(24, 1)\n', (3875, 3882), True, 'import torch.nn as nn\n'), ((4285, 4295), 'torch.nn.functional.tanh', 'F.tanh', (['mu'], {}), '(mu)\n', (4291, 4295), True, 'import torch.nn.functional as F\n'), ((5618, 5650), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (5632, 5650), True, 'import numpy as np\n'), ((5776, 5807), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (5784, 5807), True, 'import numpy as np\n'), ((5816, 5847), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (5824, 5847), True, 'import numpy as np\n'), ((5859, 5886), 'numpy.reshape', 'np.reshape', (['mean', '(1, 1, 3)'], {}), '(mean, (1, 1, 3))\n', (5869, 5886), True, 'import numpy as np\n'), ((5894, 5920), 'numpy.reshape', 'np.reshape', (['std', '(1, 1, 3)'], {}), '(std, (1, 1, 3))\n', (5904, 5920), True, 'import numpy as np\n'), ((6129, 6174), 'os.path.join', 'os.path.join', (['self.SAVE_TOP_DIR', '"""action.npy"""'], {}), "(self.SAVE_TOP_DIR, 'action.npy')\n", (6141, 6174), False, 'import math, os\n'), ((6195, 6209), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (6203, 6209), True, 'import numpy as np\n'), ((6228, 6331), 'Wrench_Manipulation_Env.RobotEnv', 'RobotEnv', ([], {'worker_id': 'self.wid', 'p_id': 'pybullet', 'dt': 'self.dt', 'maxSteps': '(20)', 'fixture_offset': 'fixture_action'}), '(worker_id=self.wid, p_id=pybullet, dt=self.dt, maxSteps=20,\n fixture_offset=fixture_action)\n', (6236, 6331), False, 'from Wrench_Manipulation_Env import RobotEnv\n'), ((8799, 8844), 'os.path.join', 'os.path.join', 
(['self.SAVE_TOP_DIR', '"""reward.txt"""'], {}), "(self.SAVE_TOP_DIR, 'reward.txt')\n", (8811, 8844), False, 'import math, os\n'), ((8952, 8976), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8974, 8976), False, 'import torch\n'), ((9014, 9024), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9022, 9024), False, 'import sys\n'), ((9029, 9040), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (9037, 9040), False, 'import math, os\n'), ((9179, 9207), 'os.path.exists', 'os.path.exists', (['SAVE_TOP_DIR'], {}), '(SAVE_TOP_DIR)\n', (9193, 9207), False, 'import math, os\n'), ((9213, 9238), 'os.makedirs', 'os.makedirs', (['SAVE_TOP_DIR'], {}), '(SAVE_TOP_DIR)\n', (9224, 9238), False, 'import math, os\n'), ((9624, 9640), 'torch.multiprocessing.Value', 'mp.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (9632, 9640), True, 'import torch.multiprocessing as mp\n'), ((9641, 9659), 'torch.multiprocessing.Value', 'mp.Value', (['"""d"""', '(0.0)'], {}), "('d', 0.0)\n", (9649, 9659), True, 'import torch.multiprocessing as mp\n'), ((9659, 9669), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (9667, 9669), True, 'import torch.multiprocessing as mp\n'), ((832, 858), 'torch.from_numpy', 'torch.from_numpy', (['np_array'], {}), '(np_array)\n', (848, 858), False, 'import torch\n'), ((1285, 1298), 'numpy.vstack', 'np.vstack', (['bs'], {}), '(bs)\n', (1294, 1298), True, 'import numpy as np\n'), ((1312, 1325), 'numpy.vstack', 'np.vstack', (['ba'], {}), '(ba)\n', (1321, 1325), True, 'import numpy as np\n'), ((2022, 2129), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': '(32)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=3, out_channels=32, kernel_size=(3, 3), stride=(2, 2),\n padding=(1, 1), bias=True)\n', (2031, 2129), True, 'import torch.nn as nn\n'), ((2125, 2134), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2132, 2134), True, 'import torch.nn as nn\n'), ((2142, 2160), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (2156, 2160), True, 'import torch.nn as nn\n'), ((2220, 2329), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(2, 2\n ), padding=(1, 1), bias=True)\n', (2229, 2329), True, 'import torch.nn as nn\n'), ((2324, 2333), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2331, 2333), True, 'import torch.nn as nn\n'), ((2341, 2359), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (2355, 2359), True, 'import torch.nn as nn\n'), ((2419, 2528), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(2, 2\n ), padding=(1, 1), bias=True)\n', (2428, 2528), True, 'import torch.nn as nn\n'), ((2523, 2532), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2530, 2532), True, 'import torch.nn as nn\n'), ((2540, 2558), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2554, 2558), True, 'import torch.nn as nn\n'), ((2618, 2727), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(2, 2\n ), padding=(1, 1), 
bias=True)\n', (2627, 2727), True, 'import torch.nn as nn\n'), ((2722, 2731), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2729, 2731), True, 'import torch.nn as nn\n'), ((2739, 2757), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2753, 2757), True, 'import torch.nn as nn\n'), ((2816, 2926), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(2, \n 2), padding=(1, 1), bias=True)\n', (2825, 2926), True, 'import torch.nn as nn\n'), ((2921, 2930), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2928, 2930), True, 'import torch.nn as nn\n'), ((2938, 2957), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2952, 2957), True, 'import torch.nn as nn\n'), ((3015, 3125), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(128)', 'kernel_size': '(3, 3)', 'stride': '(2, 2)', 'padding': '(1, 1)', 'bias': '(True)'}), '(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(2,\n 2), padding=(1, 1), bias=True)\n', (3024, 3125), True, 'import torch.nn as nn\n'), ((3121, 3130), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3128, 3130), True, 'import torch.nn as nn\n'), ((3138, 3157), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3152, 3157), True, 'import torch.nn as nn\n'), ((3217, 3244), 'torch.nn.Linear', 'nn.Linear', (['(2 * 3 * 128)', '(128)'], {}), '(2 * 3 * 128, 128)\n', (3226, 3244), True, 'import torch.nn as nn\n'), ((3256, 3265), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3263, 3265), True, 'import torch.nn as nn\n'), ((3277, 3295), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3286, 3295), True, 'import torch.nn as nn\n'), ((3307, 3316), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3314, 3316), True, 'import torch.nn as nn\n'), ((3328, 3345), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(24)'], {}), '(64, 24)\n', (3337, 3345), True, 'import torch.nn as nn\n'), ((3357, 3366), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3364, 3366), True, 'import torch.nn as nn\n'), ((3415, 3442), 'torch.nn.Linear', 'nn.Linear', (['(2 * 3 * 128)', '(128)'], {}), '(2 * 3 * 128, 128)\n', (3424, 3442), True, 'import torch.nn as nn\n'), ((3454, 3463), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3461, 3463), True, 'import torch.nn as nn\n'), ((3475, 3493), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3484, 3493), True, 'import torch.nn as nn\n'), ((3505, 3514), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3512, 3514), True, 'import torch.nn as nn\n'), ((3526, 3543), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(24)'], {}), '(64, 24)\n', (3535, 3543), True, 'import torch.nn as nn\n'), ((3555, 3564), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3562, 3564), True, 'import torch.nn as nn\n'), ((3614, 3641), 'torch.nn.Linear', 'nn.Linear', (['(2 * 3 * 128)', '(128)'], {}), '(2 * 3 * 128, 128)\n', (3623, 3641), True, 'import torch.nn as nn\n'), ((3653, 3662), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3660, 3662), True, 'import torch.nn as nn\n'), ((3674, 3692), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3683, 3692), True, 'import torch.nn as nn\n'), ((3704, 3713), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3711, 3713), True, 'import torch.nn as nn\n'), ((3725, 3742), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(24)'], {}), '(64, 24)\n', 
(3734, 3742), True, 'import torch.nn as nn\n'), ((3754, 3763), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3761, 3763), True, 'import torch.nn as nn\n'), ((4989, 5007), 'torch.log', 'torch.log', (['m.scale'], {}), '(m.scale)\n', (4998, 5007), False, 'import torch\n'), ((6017, 6047), 'pybullet.connect', 'pybullet.connect', (['pybullet.GUI'], {}), '(pybullet.GUI)\n', (6033, 6047), False, 'import pybullet\n'), ((6076, 6109), 'pybullet.connect', 'pybullet.connect', (['pybullet.DIRECT'], {}), '(pybullet.DIRECT)\n', (6092, 6109), False, 'import pybullet\n'), ((6724, 6754), 'numpy.reshape', 'np.reshape', (['observation', '(-1,)'], {}), '(observation, (-1,))\n', (6734, 6754), True, 'import numpy as np\n'), ((8870, 8902), 'numpy.array', 'np.array', (['[reward_check / 100.0]'], {}), '([reward_check / 100.0])\n', (8878, 8902), True, 'import numpy as np\n'), ((1339, 1364), 'numpy.array', 'np.array', (['buffer_v_target'], {}), '(buffer_v_target)\n', (1347, 1364), True, 'import numpy as np\n'), ((4366, 4383), 'torch.nn.functional.softplus', 'F.softplus', (['sigma'], {}), '(sigma)\n', (4376, 4383), True, 'import torch.nn.functional as F\n'), ((5524, 5550), 'numpy.log', 'np.log', (['(self.wid * 100 + 1)'], {}), '(self.wid * 100 + 1)\n', (5530, 5550), True, 'import numpy as np\n'), ((7402, 7437), 'numpy.reshape', 'np.reshape', (['observation_next', '(-1,)'], {}), '(observation_next, (-1,))\n', (7412, 7437), True, 'import numpy as np\n'), ((4965, 4986), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (4973, 4986), False, 'import math, os\n'), ((7289, 7348), 'imageio.mimsave', 'imageio.mimsave', (['"""pokingSthSlightly.gif"""', 'self.env.obs_list'], {}), "('pokingSthSlightly.gif', self.env.obs_list)\n", (7304, 7348), False, 'import imageio\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def reorder_image(img, input_order='HWC'):
"""Reorder images to 'HWC' order.
If the input_order is (h, w), return (h, w, 1);
If the input_order is (c, h, w), return (h, w, c);
If the input_order is (h, w, c), return as it is.
Args:
img (ndarray): Input image.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
If the input image shape is (h, w), input_order will not have
effects. Default: 'HWC'.
Returns:
ndarray: reordered image.
"""
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
"'HWC' and 'CHW'")
if len(img.shape) == 2:
img = img[..., None]
return img
if input_order == 'CHW':
img = img.transpose(1, 2, 0)
return img
def bgr2ycbcr(img, y_only=False):
"""Convert a BGR image to YCbCr image.
The bgr version of rgb2ycbcr.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
#img = _convert_input_type_range(img)
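    # ITU-R BT.601 coefficients, ordered for the BGR channel layout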
if y_only:
out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
else:
out_img = np.matmul(
img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) + [16, 128, 128]
#out_img = _convert_output_type_range(out_img, img_type)
return out_img
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
|
[
"numpy.dot",
"numpy.matmul"
] |
[((2452, 2490), 'numpy.dot', 'np.dot', (['img', '[24.966, 128.553, 65.481]'], {}), '(img, [24.966, 128.553, 65.481])\n', (2458, 2490), True, 'import numpy as np\n'), ((2526, 2628), 'numpy.matmul', 'np.matmul', (['img', '[[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, \n 112.0]]'], {}), '(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [\n 65.481, -37.797, 112.0]])\n', (2535, 2628), True, 'import numpy as np\n')]
|
import os
import pytest
import numpy as np
import easy_dna as dna
def test_extract_from_input(tmpdir):
parts = []
for i in range(10):
part_id = "part_%s" % ("ABCDEFGHAB"[i]) # id is nonunique on purpose
alias = "part_%d" % i # alias is unique
part_length = np.random.randint(1000, 1500)
sequence = dna.random_dna_sequence(part_length)
record = dna.sequence_to_biopython_record(sequence, id=part_id)
record.name = part_id
dna.annotate_record(record, label=part_id, alias=alias)
parts.append(record)
constructs = []
for position_of_last_part in [8, 10]:
# 8: parts A-H; 10: parts A--H and A, B again
construct_record = sum(parts[1:position_of_last_part], parts[0])
construct_record.id = "construct_%02d" % (position_of_last_part)
construct_record.name = construct_record.id
constructs.append(construct_record)
target_dir = os.path.join(str(tmpdir), "test_dir")
records_dict = dna.extract_from_input(
construct_list=constructs, output_path=target_dir
)
assert records_dict["processed_report"]["shared_with"].count() == 16
with pytest.raises(TypeError):
dna.extract_from_input(output_path=target_dir)
|
[
"easy_dna.annotate_record",
"easy_dna.sequence_to_biopython_record",
"easy_dna.random_dna_sequence",
"pytest.raises",
"numpy.random.randint",
"easy_dna.extract_from_input"
] |
[((1009, 1082), 'easy_dna.extract_from_input', 'dna.extract_from_input', ([], {'construct_list': 'constructs', 'output_path': 'target_dir'}), '(construct_list=constructs, output_path=target_dir)\n', (1031, 1082), True, 'import easy_dna as dna\n'), ((293, 322), 'numpy.random.randint', 'np.random.randint', (['(1000)', '(1500)'], {}), '(1000, 1500)\n', (310, 322), True, 'import numpy as np\n'), ((342, 378), 'easy_dna.random_dna_sequence', 'dna.random_dna_sequence', (['part_length'], {}), '(part_length)\n', (365, 378), True, 'import easy_dna as dna\n'), ((396, 450), 'easy_dna.sequence_to_biopython_record', 'dna.sequence_to_biopython_record', (['sequence'], {'id': 'part_id'}), '(sequence, id=part_id)\n', (428, 450), True, 'import easy_dna as dna\n'), ((489, 544), 'easy_dna.annotate_record', 'dna.annotate_record', (['record'], {'label': 'part_id', 'alias': 'alias'}), '(record, label=part_id, alias=alias)\n', (508, 544), True, 'import easy_dna as dna\n'), ((1180, 1204), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1193, 1204), False, 'import pytest\n'), ((1214, 1260), 'easy_dna.extract_from_input', 'dna.extract_from_input', ([], {'output_path': 'target_dir'}), '(output_path=target_dir)\n', (1236, 1260), True, 'import easy_dna as dna\n')]
|