code | apis | extract_api |
---|---|---|
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array

# Load the model architecture (change the path according to your files)
model = model_from_json(open("fer.json", "r").read())
# Load the trained weights (change the path according to your files)
model.load_weights('fer.h5')
# Haar cascade used for face detection (change the path according to your files)
detection_model_path = "C:/Users/panur/.spyder-py3/FaceMaskDetection/cascadeH5.xml"
face_detection = cv2.CascadeClassifier(detection_model_path)

ret = 1
flag = True
cap = cv2.VideoCapture(0)  # default 0 for the webcam
frameRate = cap.get(cv2.CAP_PROP_FPS)

while cap.isOpened():
    ret, fm = cap.read()
    fm = cv2.resize(fm, (224, 224))
    file = cv2.cvtColor(fm, cv2.COLOR_BGR2RGB)
    orig_frame = file
    frame = file
    faces = face_detection.detectMultiScale(frame, scaleFactor=1.1, minNeighbors=5,
                                             minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(faces):
        # keep the largest detection; detectMultiScale returns (x, y, w, h) boxes
        faces = sorted(faces, reverse=True, key=lambda x: x[2] * x[3])[0]
        (fX, fY, fW, fH) = faces
        roi = frame[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = model.predict_classes(roi)[0]
        if preds == 0:
            print("Mask worn")
            test = 'Mask worn'
        elif preds == 1:
            print("Danger: No Mask")
            test = 'Danger: No Mask'
        cv2.putText(fm, test, (fX - 15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(fm, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)
    cv2.imshow("Live Video", fm)
    k = cv2.waitKey(25)  # press ESC to stop/exit
    if k == 27:
        ret = 0
        break

print("closed")
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"keras.preprocessing.image.img_to_array",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.expand_dims",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.waitKey"
] |
[((472, 515), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['detection_model_path'], {}), '(detection_model_path)\n', (493, 515), False, 'import cv2\n'), ((564, 583), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (580, 583), False, 'import cv2\n'), ((1747, 1770), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1768, 1770), False, 'import cv2\n'), ((685, 711), 'cv2.resize', 'cv2.resize', (['fm', '(224, 224)'], {}), '(fm, (224, 224))\n', (695, 711), False, 'import cv2\n'), ((720, 755), 'cv2.cvtColor', 'cv2.cvtColor', (['fm', 'cv2.COLOR_BGR2RGB'], {}), '(fm, cv2.COLOR_BGR2RGB)\n', (732, 755), False, 'import cv2\n'), ((1596, 1624), 'cv2.imshow', 'cv2.imshow', (['"""Live Video"""', 'fm'], {}), "('Live Video', fm)\n", (1606, 1624), False, 'import cv2\n'), ((1629, 1644), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (1640, 1644), False, 'import cv2\n'), ((1101, 1129), 'cv2.resize', 'cv2.resize', (['roi', '(48, 48)', '(3)'], {}), '(roi, (48, 48), 3)\n', (1111, 1129), False, 'import cv2\n'), ((1184, 1201), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1196, 1201), False, 'from keras.preprocessing.image import img_to_array\n'), ((1212, 1239), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1226, 1239), True, 'import numpy as np\n'), ((1435, 1529), 'cv2.putText', 'cv2.putText', (['fm', 'test', '(fX - 15, fY - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(fm, test, (fX - 15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (\n 0, 0, 255), 2)\n', (1446, 1529), False, 'import cv2\n'), ((1526, 1589), 'cv2.rectangle', 'cv2.rectangle', (['fm', '(fX, fY)', '(fX + fW, fY + fH)', '(0, 0, 255)', '(2)'], {}), '(fm, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)\n', (1539, 1589), False, 'import cv2\n')]
|
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import os
import numpy as np
import random
import unittest
import logging
import warnings

import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass
from paddle.fluid.dygraph.container import Sequential
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU
from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D
from paddle.fluid.dygraph.nn import Pool2D
from paddle.fluid.log_helper import get_logger
from paddle.fluid.dygraph import nn

paddle.enable_static()

os.environ["CPU_NUM"] = "1"
if core.is_compiled_with_cuda():
    fluid.set_flags({"FLAGS_cudnn_deterministic": True})

_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')


def get_vaild_warning_num(warning, w):
    num = 0
    for i in range(len(w)):
        if warning in str(w[i].message):
            num += 1
    return num
def StaticLenet(data, num_classes=10, classifier_activation='softmax'):
    conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
    conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
    fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
    fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
    fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
    conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
    fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
    fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
    fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
    conv1 = fluid.layers.conv2d(
        data,
        num_filters=6,
        filter_size=3,
        stride=1,
        padding=1,
        param_attr=conv2d_w1_attr,
        bias_attr=False)
    batch_norm1 = layers.batch_norm(conv1)
    relu1 = layers.relu(batch_norm1)
    pool1 = fluid.layers.pool2d(
        relu1, pool_size=2, pool_type='max', pool_stride=2)
    conv2 = fluid.layers.conv2d(
        pool1,
        num_filters=16,
        filter_size=5,
        stride=1,
        padding=0,
        param_attr=conv2d_w2_attr,
        bias_attr=conv2d_b2_attr)
    batch_norm2 = layers.batch_norm(conv2)
    prelu1 = layers.prelu(batch_norm2, mode='all')
    pool2 = fluid.layers.pool2d(
        prelu1, pool_size=2, pool_type='max', pool_stride=2)
    fc1 = fluid.layers.fc(input=pool2,
                          size=120,
                          param_attr=fc_w1_attr,
                          bias_attr=fc_b1_attr)
    leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01)
    fc2 = fluid.layers.fc(input=leaky_relu1,
                          size=84,
                          param_attr=fc_w2_attr,
                          bias_attr=fc_b2_attr)
    sigmoid1 = layers.sigmoid(fc2)
    fc3 = fluid.layers.fc(input=sigmoid1,
                          size=num_classes,
                          param_attr=fc_w3_attr,
                          bias_attr=fc_b3_attr)
    softmax1 = layers.softmax(fc3, use_cudnn=True)
    return softmax1
class ImperativeLenet(fluid.dygraph.Layer):
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=False),
            BatchNorm2D(6),
            ReLU(),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            BatchNorm2D(16),
            PReLU(),
            MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            LeakyReLU(),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Sigmoid(),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())

    def forward(self, inputs):
        x = self.features(inputs)
        x = fluid.layers.flatten(x, 1)
        x = self.fc(x)
        return x
class TestImperativeOutSclae(unittest.TestCase):
    def test_out_scale_acc(self):
        def _build_static_lenet(main, startup, is_test=False, seed=1000):
            with fluid.unique_name.guard():
                with fluid.program_guard(main, startup):
                    main.random_seed = seed
                    startup.random_seed = seed
                    img = fluid.layers.data(
                        name='image', shape=[1, 28, 28], dtype='float32')
                    label = fluid.layers.data(
                        name='label', shape=[1], dtype='int64')
                    prediction = StaticLenet(img)
                    if not is_test:
                        loss = fluid.layers.cross_entropy(
                            input=prediction, label=label)
                        avg_loss = fluid.layers.mean(loss)
                    else:
                        avg_loss = prediction
            return img, label, avg_loss

        reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=32, drop_last=True)
        weight_quantize_type = 'abs_max'
        activation_quantize_type = 'moving_average_abs_max'
        param_init_map = {}
        seed = 1000
        lr = 0.001
        dynamic_out_scale_list = []
        static_out_scale_list = []

        # imperative train
        _logger.info(
            "--------------------------dynamic graph qat--------------------------"
        )
        imperative_out_scale = ImperativeQuantAware(
            weight_quantize_type=weight_quantize_type,
            activation_quantize_type=activation_quantize_type)

        with fluid.dygraph.guard():
            np.random.seed(seed)
            fluid.default_main_program().random_seed = seed
            fluid.default_startup_program().random_seed = seed

            lenet = ImperativeLenet()
            fixed_state = {}
            for name, param in lenet.named_parameters():
                p_shape = param.numpy().shape
                p_value = param.numpy()
                if name.endswith("bias"):
                    value = np.zeros_like(p_value).astype('float32')
                else:
                    value = np.random.normal(
                        loc=0.0, scale=0.01, size=np.product(p_shape)).reshape(
                            p_shape).astype('float32')
                fixed_state[name] = value
                param_init_map[param.name] = value
            lenet.set_dict(fixed_state)
            imperative_out_scale.quantize(lenet)
            adam = AdamOptimizer(
                learning_rate=lr, parameter_list=lenet.parameters())
            dynamic_loss_rec = []
            lenet.train()
            for batch_id, data in enumerate(reader()):
                x_data = np.array([x[0].reshape(1, 28, 28)
                                   for x in data]).astype('float32')
                y_data = np.array(
                    [x[1] for x in data]).astype('int64').reshape(-1, 1)

                img = fluid.dygraph.to_variable(x_data)
                label = fluid.dygraph.to_variable(y_data)

                out = lenet(img)
                loss = fluid.layers.cross_entropy(out, label)
                avg_loss = fluid.layers.mean(loss)
                avg_loss.backward()
                adam.minimize(avg_loss)
                lenet.clear_gradients()
                dynamic_loss_rec.append(avg_loss.numpy()[0])
                if batch_id % 100 == 0:
                    _logger.info('{}: {}'.format('loss', avg_loss.numpy()))
            lenet.eval()

            param_save_path = "test_save_quantized_model/lenet.pdparams"
            save_dict = lenet.state_dict()
            paddle.save(save_dict, param_save_path)

            path = "./dynamic_outscale_infer_model/lenet"
            dynamic_save_dir = "./dynamic_outscale_infer_model"
            imperative_out_scale.save_quantized_model(
                layer=lenet,
                path=path,
                input_spec=[
                    paddle.static.InputSpec(
                        shape=[None, 1, 28, 28], dtype='float32')
                ])

        _logger.info(
            "--------------------------static graph qat--------------------------"
        )
        static_loss_rec = []
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)

        main = fluid.Program()
        infer = fluid.Program()
        startup = fluid.Program()
        static_img, static_label, static_loss = _build_static_lenet(
            main, startup, False, seed)
        infer_img, _, infer_pre = _build_static_lenet(infer, startup, True,
                                                       seed)
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                opt = AdamOptimizer(learning_rate=lr)
                opt.minimize(static_loss)

        scope = core.Scope()
        with fluid.scope_guard(scope):
            exe.run(startup)
        for param in main.all_parameters():
            if "batch_norm" in param.name:
                param_name = param.name.replace("norm", "norm2d")
            elif 'prelu' in param.name:
                param_name = param.name.replace("prelu", 'p_re_lu')
            else:
                param_name = param.name
            param_tensor = scope.var(param.name).get_tensor()
            param_tensor.set(param_init_map[param_name], place)

        main_graph = IrGraph(core.Graph(main.desc), for_test=False)
        infer_graph = IrGraph(core.Graph(infer.desc), for_test=True)
        transform_pass = QuantizationTransformPass(
            scope=scope,
            place=place,
            activation_quantize_type=activation_quantize_type,
            weight_quantize_type=weight_quantize_type,
            quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'])
        transform_pass.apply(main_graph)
        transform_pass.apply(infer_graph)
        outscale_pass = OutScaleForTrainingPass(scope=scope, place=place)
        outscale_pass.apply(main_graph)
        build_strategy = fluid.BuildStrategy()
        build_strategy.fuse_all_reduce_ops = False
        binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
            loss_name=static_loss.name, build_strategy=build_strategy)

        feeder = fluid.DataFeeder(
            feed_list=[static_img, static_label], place=place)
        with fluid.scope_guard(scope):
            for batch_id, data in enumerate(reader()):
                loss_v, = exe.run(binary,
                                  feed=feeder.feed(data),
                                  fetch_list=[static_loss])
                static_loss_rec.append(loss_v[0])
                if batch_id % 100 == 0:
                    _logger.info('{}: {}'.format('loss', loss_v))
        scale_inference_pass = OutScaleForInferencePass(scope=scope)
        scale_inference_pass.apply(infer_graph)

        save_program = infer_graph.to_program()
        static_save_dir = "./static_outscale_infer_model"
        with fluid.scope_guard(scope):
            fluid.io.save_inference_model(
                dirname=static_save_dir,
                feeded_var_names=[infer_img.name],
                target_vars=[infer_pre],
                executor=exe,
                main_program=save_program,
                model_filename="lenet" + INFER_MODEL_SUFFIX,
                params_filename="lenet" + INFER_PARAMS_SUFFIX)

        rtol = 1e-05
        atol = 1e-08
        for i, (loss_d,
                loss_s) in enumerate(zip(dynamic_loss_rec, static_loss_rec)):
            diff = np.abs(loss_d - loss_s)
            if diff > (atol + rtol * np.abs(loss_s)):
                _logger.info(
                    "diff({}) at {}, dynamic loss = {}, static loss = {}".
                    format(diff, i, loss_d, loss_s))
                break

        self.assertTrue(
            np.allclose(
                np.array(dynamic_loss_rec),
                np.array(static_loss_rec),
                rtol=rtol,
                atol=atol,
                equal_nan=True),
            msg='Failed to do the imperative qat.')

        # load dynamic model
        [dynamic_inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(
                dirname=dynamic_save_dir,
                executor=exe,
                model_filename="lenet" + INFER_MODEL_SUFFIX,
                params_filename="lenet" + INFER_PARAMS_SUFFIX))
        # load static model
        [static_inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(
                dirname=static_save_dir,
                executor=exe,
                model_filename="lenet" + INFER_MODEL_SUFFIX,
                params_filename="lenet" + INFER_PARAMS_SUFFIX))

        dynamic_ops = dynamic_inference_program.global_block().ops
        static_ops = static_inference_program.global_block().ops

        for op in dynamic_ops[:]:
            if op.type == "flatten2" or 'fake' in op.type:
                dynamic_ops.remove(op)
        for op in static_ops[:]:
            if 'fake' in op.type:
                static_ops.remove(op)

        op_count = 0
        for i in range(len(dynamic_ops)):
            if dynamic_ops[i].has_attr("out_threshold"):
                op_count += 1
                self.assertTrue(dynamic_ops[i].type == static_ops[i].type)
                self.assertTrue(dynamic_ops[i].attr("out_threshold") ==
                                static_ops[i].attr("out_threshold"))
        self.assertTrue(op_count == 13)
class TestSaveQuanztizedModelFromCheckPoint(unittest.TestCase):
    def test_save_quantized_model(self):
        weight_quantize_type = 'abs_max'
        activation_quantize_type = 'moving_average_abs_max'
        load_param_path = "test_save_quantized_model/lenet.pdparams"
        path = "./dynamic_outscale_infer_model_from_checkpoint/lenet"
        dynamic_model_save_dir = "./dynamic_outscale_infer_model_from_checkpoint"
        static_model_save_dir = "./static_outscale_infer_model"

        imperative_out_scale = ImperativeQuantAware(
            weight_quantize_type=weight_quantize_type,
            activation_quantize_type=activation_quantize_type)

        with fluid.dygraph.guard():
            lenet = ImperativeLenet()
            load_dict = paddle.load(load_param_path)
            imperative_out_scale.quantize(lenet)
            lenet.set_dict(load_dict)

            imperative_out_scale.save_quantized_model(
                layer=lenet,
                path=path,
                input_spec=[
                    paddle.static.InputSpec(
                        shape=[None, 1, 28, 28], dtype='float32')
                ])

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)

        # load dynamic model
        [dynamic_inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(
                dirname=dynamic_model_save_dir,
                executor=exe,
                model_filename="lenet" + INFER_MODEL_SUFFIX,
                params_filename="lenet" + INFER_PARAMS_SUFFIX))
        # load static model
        [static_inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(
                dirname=static_model_save_dir,
                executor=exe,
                model_filename="lenet" + INFER_MODEL_SUFFIX,
                params_filename="lenet" + INFER_PARAMS_SUFFIX))

        dynamic_ops = dynamic_inference_program.global_block().ops
        static_ops = static_inference_program.global_block().ops

        for op in dynamic_ops[:]:
            if op.type == "flatten2" or 'fake' in op.type:
                dynamic_ops.remove(op)
        for op in static_ops[:]:
            if 'fake' in op.type:
                static_ops.remove(op)

        op_count = 0
        for i in range(len(dynamic_ops)):
            if dynamic_ops[i].has_attr("out_threshold"):
                op_count += 1
                self.assertTrue(dynamic_ops[i].type == static_ops[i].type)
                self.assertTrue(dynamic_ops[i].attr("out_threshold") ==
                                static_ops[i].attr("out_threshold"))
        self.assertTrue(op_count == 13)
class TestSaveQuantizedModel_Warning(unittest.TestCase):
    def test_warning(self):
        path = "./dynamic_outscale_infer_model_with_warnings/lenet"
        imperative_out_scale = ImperativeQuantAware()
        with fluid.dygraph.guard():
            lenet = ImperativeLenet()

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            imperative_out_scale.save_quantized_model(
                layer=lenet,
                path=path,
                input_spec=[
                    paddle.static.InputSpec(
                        shape=[None, 1, 28, 28], dtype='float32')
                ])

        warning_message = "Warning: No Layer of the model while to be saved contains the out_threshold attribute, " \
                          "so the generated inference model would not contain the out_threshold."
        num = get_vaild_warning_num(warning_message, w)
        assert num == 1


if __name__ == '__main__':
    unittest.main()
|
[
"numpy.product",
"paddle.fluid.DataFeeder",
"paddle.nn.layer.ReLU",
"paddle.fluid.dygraph.guard",
"paddle.fluid.dygraph.to_variable",
"paddle.static.InputSpec",
"paddle.fluid.layers.cross_entropy",
"paddle.fluid.layers.data",
"numpy.array",
"paddle.fluid.Executor",
"paddle.fluid.log_helper.get_logger",
"paddle.fluid.core.Graph",
"unittest.main",
"paddle.dataset.mnist.test",
"paddle.fluid.layers.prelu",
"paddle.nn.layer.Sigmoid",
"paddle.fluid.default_startup_program",
"paddle.fluid.layers.mean",
"paddle.enable_static",
"paddle.fluid.default_main_program",
"paddle.fluid.layers.conv2d",
"numpy.random.seed",
"warnings.simplefilter",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.fluid.contrib.slim.quantization.ImperativeQuantAware",
"paddle.nn.MaxPool2D",
"paddle.fluid.contrib.slim.quantization.OutScaleForInferencePass",
"paddle.fluid.contrib.slim.quantization.QuantizationTransformPass",
"numpy.abs",
"paddle.nn.layer.PReLU",
"paddle.fluid.ParamAttr",
"paddle.fluid.Program",
"paddle.fluid.set_flags",
"paddle.nn.layer.LeakyReLU",
"paddle.fluid.BuildStrategy",
"paddle.fluid.layers.softmax",
"paddle.fluid.CompiledProgram",
"paddle.fluid.layers.sigmoid",
"paddle.fluid.layers.leaky_relu",
"paddle.fluid.io.save_inference_model",
"paddle.nn.Softmax",
"paddle.fluid.contrib.slim.quantization.OutScaleForTrainingPass",
"paddle.nn.Linear",
"paddle.fluid.layers.batch_norm",
"paddle.fluid.dygraph.nn.Pool2D",
"paddle.nn.BatchNorm2D",
"paddle.fluid.scope_guard",
"paddle.fluid.layers.flatten",
"paddle.fluid.layers.relu",
"paddle.fluid.core.Scope",
"paddle.fluid.io.load_inference_model",
"paddle.nn.Conv2D",
"paddle.fluid.optimizer.AdamOptimizer",
"paddle.fluid.layers.fc",
"warnings.catch_warnings",
"paddle.fluid.layers.pool2d",
"paddle.save",
"paddle.fluid.unique_name.guard",
"paddle.load",
"paddle.fluid.core.CUDAPlace",
"numpy.zeros_like",
"paddle.fluid.program_guard",
"paddle.fluid.core.CPUPlace"
] |
[((1541, 1563), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (1561, 1563), False, 'import paddle\n'), ((1596, 1624), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (1622, 1624), False, 'from paddle.fluid import core\n'), ((1694, 1779), 'paddle.fluid.log_helper.get_logger', 'get_logger', (['__name__', 'logging.INFO'], {'fmt': '"""%(asctime)s-%(levelname)s: %(message)s"""'}), "(__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'\n )\n", (1704, 1779), False, 'from paddle.fluid.log_helper import get_logger\n'), ((1630, 1682), 'paddle.fluid.set_flags', 'fluid.set_flags', (["{'FLAGS_cudnn_deterministic': True}"], {}), "({'FLAGS_cudnn_deterministic': True})\n", (1645, 1682), True, 'import paddle.fluid as fluid\n'), ((2033, 2067), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_1"""'}), "(name='conv2d_w_1')\n", (2048, 2067), True, 'import paddle.fluid as fluid\n'), ((2089, 2123), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_2"""'}), "(name='conv2d_w_2')\n", (2104, 2123), True, 'import paddle.fluid as fluid\n'), ((2141, 2171), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_1"""'}), "(name='fc_w_1')\n", (2156, 2171), True, 'import paddle.fluid as fluid\n'), ((2189, 2219), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_2"""'}), "(name='fc_w_2')\n", (2204, 2219), True, 'import paddle.fluid as fluid\n'), ((2237, 2267), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_3"""'}), "(name='fc_w_3')\n", (2252, 2267), True, 'import paddle.fluid as fluid\n'), ((2289, 2323), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_b_2"""'}), "(name='conv2d_b_2')\n", (2304, 2323), True, 'import paddle.fluid as fluid\n'), ((2341, 2371), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_1"""'}), "(name='fc_b_1')\n", (2356, 2371), True, 'import paddle.fluid as fluid\n'), ((2389, 2419), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_2"""'}), "(name='fc_b_2')\n", (2404, 2419), True, 'import paddle.fluid as fluid\n'), ((2437, 2467), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_3"""'}), "(name='fc_b_3')\n", (2452, 2467), True, 'import paddle.fluid as fluid\n'), ((2480, 2604), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['data'], {'num_filters': '(6)', 'filter_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'param_attr': 'conv2d_w1_attr', 'bias_attr': '(False)'}), '(data, num_filters=6, filter_size=3, stride=1, padding=1,\n param_attr=conv2d_w1_attr, bias_attr=False)\n', (2499, 2604), True, 'import paddle.fluid as fluid\n'), ((2676, 2700), 'paddle.fluid.layers.batch_norm', 'layers.batch_norm', (['conv1'], {}), '(conv1)\n', (2693, 2700), True, 'import paddle.fluid.layers as layers\n'), ((2713, 2737), 'paddle.fluid.layers.relu', 'layers.relu', (['batch_norm1'], {}), '(batch_norm1)\n', (2724, 2737), True, 'import paddle.fluid.layers as layers\n'), ((2750, 2821), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['relu1'], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(relu1, pool_size=2, pool_type='max', pool_stride=2)\n", (2769, 2821), True, 'import paddle.fluid as fluid\n'), ((2843, 2979), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['pool1'], {'num_filters': '(16)', 'filter_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'param_attr': 'conv2d_w2_attr', 'bias_attr': 'conv2d_b2_attr'}), '(pool1, 
num_filters=16, filter_size=5, stride=1, padding\n =0, param_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr)\n', (2862, 2979), True, 'import paddle.fluid as fluid\n'), ((3050, 3074), 'paddle.fluid.layers.batch_norm', 'layers.batch_norm', (['conv2'], {}), '(conv2)\n', (3067, 3074), True, 'import paddle.fluid.layers as layers\n'), ((3088, 3125), 'paddle.fluid.layers.prelu', 'layers.prelu', (['batch_norm2'], {'mode': '"""all"""'}), "(batch_norm2, mode='all')\n", (3100, 3125), True, 'import paddle.fluid.layers as layers\n'), ((3138, 3210), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['prelu1'], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(prelu1, pool_size=2, pool_type='max', pool_stride=2)\n", (3157, 3210), True, 'import paddle.fluid as fluid\n'), ((3231, 3319), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'pool2', 'size': '(120)', 'param_attr': 'fc_w1_attr', 'bias_attr': 'fc_b1_attr'}), '(input=pool2, size=120, param_attr=fc_w1_attr, bias_attr=\n fc_b1_attr)\n', (3246, 3319), True, 'import paddle.fluid as fluid\n'), ((3411, 3445), 'paddle.fluid.layers.leaky_relu', 'layers.leaky_relu', (['fc1'], {'alpha': '(0.01)'}), '(fc1, alpha=0.01)\n', (3428, 3445), True, 'import paddle.fluid.layers as layers\n'), ((3456, 3548), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'leaky_relu1', 'size': '(84)', 'param_attr': 'fc_w2_attr', 'bias_attr': 'fc_b2_attr'}), '(input=leaky_relu1, size=84, param_attr=fc_w2_attr,\n bias_attr=fc_b2_attr)\n', (3471, 3548), True, 'import paddle.fluid as fluid\n'), ((3638, 3657), 'paddle.fluid.layers.sigmoid', 'layers.sigmoid', (['fc2'], {}), '(fc2)\n', (3652, 3657), True, 'import paddle.fluid.layers as layers\n'), ((3668, 3766), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'sigmoid1', 'size': 'num_classes', 'param_attr': 'fc_w3_attr', 'bias_attr': 'fc_b3_attr'}), '(input=sigmoid1, size=num_classes, param_attr=fc_w3_attr,\n bias_attr=fc_b3_attr)\n', (3683, 3766), True, 'import paddle.fluid as fluid\n'), ((3856, 3891), 'paddle.fluid.layers.softmax', 'layers.softmax', (['fc3'], {'use_cudnn': '(True)'}), '(fc3, use_cudnn=True)\n', (3870, 3891), True, 'import paddle.fluid.layers as layers\n'), ((19436, 19451), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19449, 19451), False, 'import unittest\n'), ((4071, 4105), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_1"""'}), "(name='conv2d_w_1')\n", (4086, 4105), True, 'import paddle.fluid as fluid\n'), ((4131, 4165), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_2"""'}), "(name='conv2d_w_2')\n", (4146, 4165), True, 'import paddle.fluid as fluid\n'), ((4187, 4217), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_1"""'}), "(name='fc_w_1')\n", (4202, 4217), True, 'import paddle.fluid as fluid\n'), ((4239, 4269), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_2"""'}), "(name='fc_w_2')\n", (4254, 4269), True, 'import paddle.fluid as fluid\n'), ((4291, 4321), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_3"""'}), "(name='fc_w_3')\n", (4306, 4321), True, 'import paddle.fluid as fluid\n'), ((4347, 4381), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_b_2"""'}), "(name='conv2d_b_2')\n", (4362, 4381), True, 'import paddle.fluid as fluid\n'), ((4403, 4433), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_1"""'}), "(name='fc_b_1')\n", (4418, 4433), True, 'import paddle.fluid as fluid\n'), ((4455, 4485), 
'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_2"""'}), "(name='fc_b_2')\n", (4470, 4485), True, 'import paddle.fluid as fluid\n'), ((4507, 4537), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_3"""'}), "(name='fc_b_3')\n", (4522, 4537), True, 'import paddle.fluid as fluid\n'), ((6004, 6030), 'paddle.fluid.layers.flatten', 'fluid.layers.flatten', (['x', '(1)'], {}), '(x, 1)\n', (6024, 6030), True, 'import paddle.fluid as fluid\n'), ((7545, 7663), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {'weight_quantize_type': 'weight_quantize_type', 'activation_quantize_type': 'activation_quantize_type'}), '(weight_quantize_type=weight_quantize_type,\n activation_quantize_type=activation_quantize_type)\n', (7565, 7663), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((9736, 9775), 'paddle.save', 'paddle.save', (['save_dict', 'param_save_path'], {}), '(save_dict, param_save_path)\n', (9747, 9775), False, 'import paddle\n'), ((10290, 10318), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (10316, 10318), False, 'from paddle.fluid import core\n'), ((10422, 10443), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (10436, 10443), True, 'import paddle.fluid as fluid\n'), ((10460, 10475), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10473, 10475), True, 'import paddle.fluid as fluid\n'), ((10492, 10507), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10505, 10507), True, 'import paddle.fluid as fluid\n'), ((10526, 10541), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10539, 10541), True, 'import paddle.fluid as fluid\n'), ((10993, 11005), 'paddle.fluid.core.Scope', 'core.Scope', ([], {}), '()\n', (11003, 11005), False, 'from paddle.fluid import core\n'), ((11681, 11898), 'paddle.fluid.contrib.slim.quantization.QuantizationTransformPass', 'QuantizationTransformPass', ([], {'scope': 'scope', 'place': 'place', 'activation_quantize_type': 'activation_quantize_type', 'weight_quantize_type': 'weight_quantize_type', 'quantizable_op_type': "['conv2d', 'depthwise_conv2d', 'mul']"}), "(scope=scope, place=place,\n activation_quantize_type=activation_quantize_type, weight_quantize_type\n =weight_quantize_type, quantizable_op_type=['conv2d',\n 'depthwise_conv2d', 'mul'])\n", (11706, 11898), False, 'from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((12054, 12103), 'paddle.fluid.contrib.slim.quantization.OutScaleForTrainingPass', 'OutScaleForTrainingPass', ([], {'scope': 'scope', 'place': 'place'}), '(scope=scope, place=place)\n', (12077, 12103), False, 'from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((12169, 12190), 'paddle.fluid.BuildStrategy', 'fluid.BuildStrategy', ([], {}), '()\n', (12188, 12190), True, 'import paddle.fluid as fluid\n'), ((12408, 12475), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'feed_list': '[static_img, static_label]', 'place': 'place'}), '(feed_list=[static_img, static_label], place=place)\n', (12424, 12475), True, 'import paddle.fluid as fluid\n'), ((12930, 12967), 'paddle.fluid.contrib.slim.quantization.OutScaleForInferencePass', 'OutScaleForInferencePass', ([], {'scope': 'scope'}), '(scope=scope)\n', (12954, 12967), False, 'from paddle.fluid.contrib.slim.quantization import 
OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((14350, 14519), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'dynamic_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=dynamic_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (14379, 14519), True, 'import paddle.fluid as fluid\n'), ((14691, 14859), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'static_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (14720, 14859), True, 'import paddle.fluid as fluid\n'), ((16223, 16341), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {'weight_quantize_type': 'weight_quantize_type', 'activation_quantize_type': 'activation_quantize_type'}), '(weight_quantize_type=weight_quantize_type,\n activation_quantize_type=activation_quantize_type)\n', (16243, 16341), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((16833, 16861), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (16859, 16861), False, 'from paddle.fluid import core\n'), ((16965, 16986), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (16979, 16986), True, 'import paddle.fluid as fluid\n'), ((17103, 17278), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'dynamic_model_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=dynamic_model_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (17132, 17278), True, 'import paddle.fluid as fluid\n'), ((17450, 17624), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'static_model_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_model_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (17479, 17624), True, 'import paddle.fluid as fluid\n'), ((18648, 18670), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {}), '()\n', (18668, 18670), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((4586, 4708), 'paddle.nn.Conv2D', 'Conv2D', ([], {'in_channels': '(1)', 'out_channels': '(6)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'weight_attr': 'conv2d_w1_attr', 'bias_attr': '(False)'}), '(in_channels=1, out_channels=6, kernel_size=3, stride=1, padding=1,\n weight_attr=conv2d_w1_attr, bias_attr=False)\n', (4592, 4708), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((4831, 4845), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', (['(6)'], {}), '(6)\n', (4842, 4845), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((4859, 4865), 'paddle.nn.layer.ReLU', 'ReLU', ([], {}), '()\n', (4863, 4865), 
False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((4879, 4930), 'paddle.fluid.dygraph.nn.Pool2D', 'Pool2D', ([], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(pool_size=2, pool_type='max', pool_stride=2)\n", (4885, 4930), False, 'from paddle.fluid.dygraph.nn import Pool2D\n'), ((4961, 5093), 'paddle.nn.Conv2D', 'Conv2D', ([], {'in_channels': '(6)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'weight_attr': 'conv2d_w2_attr', 'bias_attr': 'conv2d_b2_attr'}), '(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0,\n weight_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr)\n', (4967, 5093), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5216, 5231), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', (['(16)'], {}), '(16)\n', (5227, 5231), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5245, 5252), 'paddle.nn.layer.PReLU', 'PReLU', ([], {}), '()\n', (5250, 5252), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5266, 5300), 'paddle.nn.MaxPool2D', 'MaxPool2D', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (5275, 5300), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5362, 5454), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(400)', 'out_features': '(120)', 'weight_attr': 'fc_w1_attr', 'bias_attr': 'fc_b1_attr'}), '(in_features=400, out_features=120, weight_attr=fc_w1_attr, bias_attr\n =fc_b1_attr)\n', (5368, 5454), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5528, 5539), 'paddle.nn.layer.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (5537, 5539), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5553, 5644), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(120)', 'out_features': '(84)', 'weight_attr': 'fc_w2_attr', 'bias_attr': 'fc_b2_attr'}), '(in_features=120, out_features=84, weight_attr=fc_w2_attr, bias_attr=\n fc_b2_attr)\n', (5559, 5644), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5718, 5727), 'paddle.nn.layer.Sigmoid', 'Sigmoid', ([], {}), '()\n', (5725, 5727), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5741, 5839), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(84)', 'out_features': 'num_classes', 'weight_attr': 'fc_w3_attr', 'bias_attr': 'fc_b3_attr'}), '(in_features=84, out_features=num_classes, weight_attr=fc_w3_attr,\n bias_attr=fc_b3_attr)\n', (5747, 5839), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5914, 5923), 'paddle.nn.Softmax', 'Softmax', ([], {}), '()\n', (5921, 5923), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((7071, 7098), 'paddle.dataset.mnist.test', 'paddle.dataset.mnist.test', ([], {}), '()\n', (7096, 7098), False, 'import paddle\n'), ((7699, 7720), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (7718, 7720), True, 'import paddle.fluid as fluid\n'), ((7734, 7754), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7748, 7754), True, 'import numpy as np\n'), ((10340, 10357), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (10354, 10357), False, 'from paddle.fluid import core\n'), ((10392, 10407), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (10405, 10407), False, 
'from paddle.fluid import core\n'), ((10800, 10825), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (10823, 10825), True, 'import paddle.fluid as fluid\n'), ((11019, 11043), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (11036, 11043), True, 'import paddle.fluid as fluid\n'), ((11548, 11569), 'paddle.fluid.core.Graph', 'core.Graph', (['main.desc'], {}), '(main.desc)\n', (11558, 11569), False, 'from paddle.fluid import core\n'), ((11617, 11639), 'paddle.fluid.core.Graph', 'core.Graph', (['infer.desc'], {}), '(infer.desc)\n', (11627, 11639), False, 'from paddle.fluid import core\n'), ((12502, 12526), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (12519, 12526), True, 'import paddle.fluid as fluid\n'), ((13136, 13160), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (13153, 13160), True, 'import paddle.fluid as fluid\n'), ((13174, 13435), 'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', ([], {'dirname': 'static_save_dir', 'feeded_var_names': '[infer_img.name]', 'target_vars': '[infer_pre]', 'executor': 'exe', 'main_program': 'save_program', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_save_dir, feeded_var_names=[\n infer_img.name], target_vars=[infer_pre], executor=exe, main_program=\n save_program, model_filename='lenet' + INFER_MODEL_SUFFIX,\n params_filename='lenet' + INFER_PARAMS_SUFFIX)\n", (13203, 13435), True, 'import paddle.fluid as fluid\n'), ((13699, 13722), 'numpy.abs', 'np.abs', (['(loss_d - loss_s)'], {}), '(loss_d - loss_s)\n', (13705, 13722), True, 'import numpy as np\n'), ((16377, 16398), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (16396, 16398), True, 'import paddle.fluid as fluid\n'), ((16462, 16490), 'paddle.load', 'paddle.load', (['load_param_path'], {}), '(load_param_path)\n', (16473, 16490), False, 'import paddle\n'), ((16883, 16900), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (16897, 16900), False, 'from paddle.fluid import core\n'), ((16935, 16950), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (16948, 16950), False, 'from paddle.fluid import core\n'), ((18684, 18705), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (18703, 18705), True, 'import paddle.fluid as fluid\n'), ((18759, 18795), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (18782, 18795), False, 'import warnings\n'), ((18814, 18845), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (18835, 18845), False, 'import warnings\n'), ((6247, 6272), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (6270, 6272), True, 'import paddle.fluid as fluid\n'), ((7767, 7795), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (7793, 7795), True, 'import paddle.fluid as fluid\n'), ((7827, 7858), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (7856, 7858), True, 'import paddle.fluid as fluid\n'), ((9061, 9094), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['x_data'], {}), '(x_data)\n', (9086, 9094), True, 'import paddle.fluid as fluid\n'), ((9119, 9152), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['y_data'], {}), '(y_data)\n', (9144, 9152), True, 'import 
paddle.fluid as fluid\n'), ((9210, 9248), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', (['out', 'label'], {}), '(out, label)\n', (9236, 9248), True, 'import paddle.fluid as fluid\n'), ((9276, 9299), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (9293, 9299), True, 'import paddle.fluid as fluid\n'), ((10844, 10878), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main', 'startup'], {}), '(main, startup)\n', (10863, 10878), True, 'import paddle.fluid as fluid\n'), ((10902, 10933), 'paddle.fluid.optimizer.AdamOptimizer', 'AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (10915, 10933), False, 'from paddle.fluid.optimizer import AdamOptimizer\n'), ((12259, 12298), 'paddle.fluid.CompiledProgram', 'fluid.CompiledProgram', (['main_graph.graph'], {}), '(main_graph.graph)\n', (12280, 12298), True, 'import paddle.fluid as fluid\n'), ((14024, 14050), 'numpy.array', 'np.array', (['dynamic_loss_rec'], {}), '(dynamic_loss_rec)\n', (14032, 14050), True, 'import numpy as np\n'), ((14068, 14093), 'numpy.array', 'np.array', (['static_loss_rec'], {}), '(static_loss_rec)\n', (14076, 14093), True, 'import numpy as np\n'), ((6295, 6329), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main', 'startup'], {}), '(main, startup)\n', (6314, 6329), True, 'import paddle.fluid as fluid\n'), ((6448, 6515), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""image"""', 'shape': '[1, 28, 28]', 'dtype': '"""float32"""'}), "(name='image', shape=[1, 28, 28], dtype='float32')\n", (6465, 6515), True, 'import paddle.fluid as fluid\n'), ((6569, 6626), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""label"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1], dtype='int64')\n", (6586, 6626), True, 'import paddle.fluid as fluid\n'), ((10032, 10097), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (10055, 10097), False, 'import paddle\n'), ((16719, 16784), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (16742, 16784), False, 'import paddle\n'), ((6769, 6826), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'prediction', 'label': 'label'}), '(input=prediction, label=label)\n', (6795, 6826), True, 'import paddle.fluid as fluid\n'), ((6891, 6914), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (6908, 6914), True, 'import paddle.fluid as fluid\n'), ((13760, 13774), 'numpy.abs', 'np.abs', (['loss_s'], {}), '(loss_s)\n', (13766, 13774), True, 'import numpy as np\n'), ((19006, 19071), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (19029, 19071), False, 'import paddle\n'), ((8158, 8180), 'numpy.zeros_like', 'np.zeros_like', (['p_value'], {}), '(p_value)\n', (8171, 8180), True, 'import numpy as np\n'), ((8955, 8985), 'numpy.array', 'np.array', (['[x[1] for x in data]'], {}), '([x[1] for x in data])\n', (8963, 8985), True, 'import numpy as np\n'), ((8317, 8336), 'numpy.product', 'np.product', (['p_shape'], {}), '(p_shape)\n', (8327, 8336), True, 'import numpy as np\n')]
|
import os
import glob
import shutil

import yaml
from IPython import embed

import pytest
import numpy as np

from pypeit.par.util import parse_pypeit_file
from pypeit.pypeitsetup import PypeItSetup
from pypeit.tests.tstutils import dev_suite_required, data_path
from pypeit.metadata import PypeItMetaData
from pypeit.spectrographs.util import load_spectrograph
from pypeit.scripts.setup import Setup


def test_read_combid():
    # ------------------------------------------------------------------
    # In case of failed tests
    setup_dir = data_path('setup_files')
    if os.path.isdir(setup_dir):
        shutil.rmtree(setup_dir)
    config_dir = data_path('shane_kast_blue_A')
    if os.path.isdir(config_dir):
        shutil.rmtree(config_dir)
    # ------------------------------------------------------------------

    # Generate the pypeit file with the comb_id
    droot = data_path('b')
    pargs = Setup.parse_args(['-r', droot, '-s', 'shane_kast_blue', '-c=all', '-b',
                              '--extension=fits.gz', '--output_path={:s}'.format(data_path(''))])
    Setup.main(pargs)
    shutil.rmtree(setup_dir)

    pypeit_file = os.path.join(config_dir, 'shane_kast_blue_A.pypeit')
    cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(pypeit_file)

    # Get the spectrograph
    spectrograph = None
    for l in cfg_lines:
        if 'spectrograph' in l:
            spectrograph = load_spectrograph(l.split(' ')[-1])
            break
    assert spectrograph is not None, 'Did not appropriately read spectrograph'

    # Set the metadata
    pmd = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), files=data_files,
                         usrdata=usrdata, strict=False)

    indx = pmd['filename'] == 'b27.fits.gz'
    assert pmd['comb_id'][indx] == [1], 'Incorrect combination group ID'
    assert pmd['comb_id'][np.where(~indx)[0]][0] == -1, 'Incorrect combination group ID'

    shutil.rmtree(config_dir)


@dev_suite_required
def test_lris_red_multi_400():
    file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
                                       'multi_400_8500_d560', '*.fits.gz'))
    cfg_lines = ['[rdx]',
                 'spectrograph = keck_lris_red']
    ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
    ps.build_fitstbl()
    ps.get_frame_types(flag_unknown=True)
    cfgs = ps.fitstbl.unique_configurations()
    ps.fitstbl.set_configurations(cfgs)
    ps.fitstbl.set_calibration_groups()  # global_frames=['bias', 'dark'])
    # Test
    assert np.all(ps.fitstbl['setup'] == 'A')


@dev_suite_required
def test_lris_red_multi():
    file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
                                       'multi*', '*.fits*'))
    cfg_lines = ['[rdx]',
                 'spectrograph = keck_lris_red']
    ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
    ps.build_fitstbl()
    ps.get_frame_types(flag_unknown=True)
    cfgs = ps.fitstbl.unique_configurations()
    ps.fitstbl.set_configurations(cfgs)
    ps.fitstbl.set_calibration_groups()  # global_frames=['bias', 'dark'])


@dev_suite_required
def test_lris_red_multi_calib():
    file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
                                       'multi_400_8500_d560', '*.fits.gz'))
    cfg_lines = ['[rdx]',
                 'spectrograph = keck_lris_red']
    ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
    ps.build_fitstbl()
    ps.get_frame_types(flag_unknown=True)
    cfgs = ps.fitstbl.unique_configurations()
    ps.fitstbl.set_configurations(cfgs)
    ps.fitstbl.set_calibration_groups()  # global_frames=['bias', 'dark'])

    cfile = data_path('test.calib')
    ps.fitstbl.write_calib(cfile)
    with open(cfile, 'r') as f:
        calib = yaml.load(f, Loader=yaml.FullLoader)
    assert np.array_equal(list(calib['A'].keys()), ['--', 1]), \
        'Calibrations dictionary read incorrectly.'
    os.remove(cfile)


@dev_suite_required
def test_lris_red_multi_run():
    # Perform the setup
    file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
                                       'multi*', '*.fits*'))
    cfg_lines = ['[rdx]',
                 'spectrograph = keck_lris_red']
    ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
    ps.run(setup_only=True)

    # Test
    # assert len(ps.setup_dict) == 2, 'Should find two setups'
    assert len(ps.fitstbl) >= 40, 'Should find 40+ files'
    arcs = ps.fitstbl['filename'][ps.fitstbl.find_frames('arc')]
    assert len(arcs) >= 2, 'Should find two or more arcs'
    assert 'r170320_2017.fits.gz' in arcs, \
        'Should have identified r170320_2017.fits.gz as an arc'
    assert 'r170816_0057.fits' in ps.fitstbl['filename'][ps.fitstbl.find_frames('science')], \
        'Should have identified r170816_0057.fits as a science frame'

    # Clean-up
    # os.remove('keck_lris_red.lst')
    # os.remove('keck_lris_red.setups')
    os.remove('keck_lris_red.sorted')


@dev_suite_required
def test_lris_blue_pypeit_overwrite():
    f = os.path.join(os.environ['PYPEIT_DEV'],
                     'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit')
    assert os.path.isfile(f), 'Could not find pypeit file.'
    cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(f, file_check=False)

    # Change the dev path
    for i in range(len(data_files)):
        path_list = data_files[i].split('/')
        for j, p in enumerate(path_list):
            if p == 'RAW_DATA':
                break
        data_files[i] = os.path.join(os.environ['PYPEIT_DEV'], '/'.join(path_list[j:]))

    # Read the fits table with and without the user data
    spectrograph = load_spectrograph('keck_lris_blue')
    par = spectrograph.default_pypeit_par()
    fitstbl = PypeItMetaData(spectrograph, par, files=data_files)
    fitstbl_usr = PypeItMetaData(spectrograph, par, files=data_files, usrdata=usrdata)

    assert fitstbl['target'][0] == 'unknown', 'Grating name changed in file header'
    assert fitstbl_usr['target'][0] == 'test', 'Grating name changed in pypeit file'
    assert fitstbl['target'][0] != fitstbl_usr['target'][0], \
        'Fits header value and input pypeit file value expected to be different.'
|
[
"pypeit.scripts.setup.Setup.main",
"numpy.where",
"os.path.join",
"yaml.load",
"pypeit.par.util.parse_pypeit_file",
"os.path.isfile",
"pypeit.tests.tstutils.data_path",
"os.path.isdir",
"pypeit.metadata.PypeItMetaData",
"shutil.rmtree",
"numpy.all",
"pypeit.spectrographs.util.load_spectrograph",
"pypeit.pypeitsetup.PypeItSetup",
"os.remove"
] |
[((547, 571), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""setup_files"""'], {}), "('setup_files')\n", (556, 571), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((579, 603), 'os.path.isdir', 'os.path.isdir', (['setup_dir'], {}), '(setup_dir)\n', (592, 603), False, 'import os\n'), ((655, 685), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""shane_kast_blue_A"""'], {}), "('shane_kast_blue_A')\n", (664, 685), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((693, 718), 'os.path.isdir', 'os.path.isdir', (['config_dir'], {}), '(config_dir)\n', (706, 718), False, 'import os\n'), ((888, 902), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""b"""'], {}), "('b')\n", (897, 902), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((1088, 1105), 'pypeit.scripts.setup.Setup.main', 'Setup.main', (['pargs'], {}), '(pargs)\n', (1098, 1105), False, 'from pypeit.scripts.setup import Setup\n'), ((1110, 1134), 'shutil.rmtree', 'shutil.rmtree', (['setup_dir'], {}), '(setup_dir)\n', (1123, 1134), False, 'import shutil\n'), ((1154, 1206), 'os.path.join', 'os.path.join', (['config_dir', '"""shane_kast_blue_A.pypeit"""'], {}), "(config_dir, 'shane_kast_blue_A.pypeit')\n", (1166, 1206), False, 'import os\n'), ((1266, 1296), 'pypeit.par.util.parse_pypeit_file', 'parse_pypeit_file', (['pypeit_file'], {}), '(pypeit_file)\n', (1283, 1296), False, 'from pypeit.par.util import parse_pypeit_file\n'), ((1949, 1974), 'shutil.rmtree', 'shutil.rmtree', (['config_dir'], {}), '(config_dir)\n', (1962, 1974), False, 'import shutil\n'), ((2268, 2311), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (2279, 2311), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((2558, 2592), 'numpy.all', 'np.all', (["(ps.fitstbl['setup'] == 'A')"], {}), "(ps.fitstbl['setup'] == 'A')\n", (2564, 2592), True, 'import numpy as np\n'), ((2868, 2911), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (2879, 2911), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((3432, 3475), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (3443, 3475), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((3713, 3736), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""test.calib"""'], {}), "('test.calib')\n", (3722, 3736), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((3984, 4000), 'os.remove', 'os.remove', (['cfile'], {}), '(cfile)\n', (3993, 4000), False, 'import os\n'), ((4304, 4347), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (4315, 4347), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((5008, 5041), 'os.remove', 'os.remove', (['"""keck_lris_red.sorted"""'], {}), "('keck_lris_red.sorted')\n", (5017, 5041), False, 'import os\n'), ((5111, 5210), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit"""'], {}), "(os.environ['PYPEIT_DEV'],\n 'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit')\n", (5123, 5210), False, 'import os\n'), ((5239, 5256), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (5253, 5256), False, 'import os\n'), ((5356, 5394), 'pypeit.par.util.parse_pypeit_file', 
'parse_pypeit_file', (['f'], {'file_check': '(False)'}), '(f, file_check=False)\n', (5373, 5394), False, 'from pypeit.par.util import parse_pypeit_file\n'), ((5764, 5799), 'pypeit.spectrographs.util.load_spectrograph', 'load_spectrograph', (['"""keck_lris_blue"""'], {}), "('keck_lris_blue')\n", (5781, 5799), False, 'from pypeit.spectrographs.util import load_spectrograph\n'), ((5858, 5909), 'pypeit.metadata.PypeItMetaData', 'PypeItMetaData', (['spectrograph', 'par'], {'files': 'data_files'}), '(spectrograph, par, files=data_files)\n', (5872, 5909), False, 'from pypeit.metadata import PypeItMetaData\n'), ((5928, 5996), 'pypeit.metadata.PypeItMetaData', 'PypeItMetaData', (['spectrograph', 'par'], {'files': 'data_files', 'usrdata': 'usrdata'}), '(spectrograph, par, files=data_files, usrdata=usrdata)\n', (5942, 5996), False, 'from pypeit.metadata import PypeItMetaData\n'), ((613, 637), 'shutil.rmtree', 'shutil.rmtree', (['setup_dir'], {}), '(setup_dir)\n', (626, 637), False, 'import shutil\n'), ((728, 753), 'shutil.rmtree', 'shutil.rmtree', (['config_dir'], {}), '(config_dir)\n', (741, 753), False, 'import shutil\n'), ((2053, 2160), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi_400_8500_d560"""', '"""*.fits.gz"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi_400_8500_d560', '*.fits.gz')\n", (2065, 2160), False, 'import os\n'), ((2668, 2760), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi*"""', '"""*.fits*"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi*', '*.fits*')\n", (2680, 2760), False, 'import os\n'), ((3217, 3324), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi_400_8500_d560"""', '"""*.fits.gz"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi_400_8500_d560', '*.fits.gz')\n", (3229, 3324), False, 'import os\n'), ((3820, 3856), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3829, 3856), False, 'import yaml\n'), ((4104, 4196), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi*"""', '"""*.fits*"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi*', '*.fits*')\n", (4116, 4196), False, 'import os\n'), ((1067, 1080), 'pypeit.tests.tstutils.data_path', 'data_path', (['""""""'], {}), "('')\n", (1076, 1080), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((1881, 1896), 'numpy.where', 'np.where', (['(~indx)'], {}), '(~indx)\n', (1889, 1896), True, 'import numpy as np\n')]
|
import numpy as np
import pickle
import os
def GenerateFeature_alpha(ligand_name, working_dir):
Cut = 12.0
LIGELE = ['C','N','O','S','CN','CO','CS','NO','NS','OS','CCl','CBr','CP','CF','CNO','CNS','COS','NOS','CNOS','CNOSPFClBrI','H','CH','NH','OH','SH','CNH','COH','CSH','NOH','NSH','OSH','CNOH','CNSH','COSH','NOSH','CNOSH','CNOSPFClBrIH','CClH','CBrH','CPH','CFH']
Feature_i = []
pdb = ligand_name
    # the barcodes are stored as a pickled dictionary; open in binary mode so pickle.load works under Python 3
    InFile = open(working_dir+'/'+ligand_name+'_alpha.pkl', 'rb')
    BarCollection = pickle.load(InFile)
    InFile.close()
for el in LIGELE:
if 'lig_'+el in BarCollection.keys():
Bars = BarCollection['lig_'+el]
Bar0Birth = []; Bar0Death = []; Bar1Birth = []; Bar1Death = []; Bar2Birth = []; Bar2Death = [];
for Bar in Bars:
if Bar[2] < Bar[1]:
continue
if Bar[2] > 12.0 and Bar[0] == 0: continue
if Bar[2] > 12.0 and Bar[0] > 0: Bar[2] = 12.0
if Bar[0] == 0:
Bar0Birth.append(Bar[1])
Bar0Death.append(Bar[2])
if Bar[0] == 1:
Bar1Birth.append(Bar[1])
Bar1Death.append(Bar[2])
if Bar[0] == 2:
Bar2Birth.append(Bar[1])
Bar2Death.append(Bar[2])
if len(Bar0Birth) > 0:
Bar0Birth = np.asarray(Bar0Birth, float)
Bar0Death = np.asarray(Bar0Death, float)
if len(Bar1Birth) > 0:
Bar1Birth = np.asarray(Bar1Birth, float)
Bar1Death = np.asarray(Bar1Death, float)
if len(Bar2Birth) > 0:
Bar2Birth = np.asarray(Bar2Birth, float)
Bar2Death = np.asarray(Bar2Death, float)
if len(Bar0Death) > 0:
Feature_i.append(np.mean(Bar0Death[:]))
Feature_i.append(np.std(Bar0Death[:]))
Feature_i.append(np.max(Bar0Death[:]))
Feature_i.append(np.min(Bar0Death[:]))
Feature_i.append(np.sum(Bar0Death[:]))
Feature_i.append(len(Bar0Death))
else:
Feature_i.extend([0.]*6)
if len(Bar1Death) > 0:
Feature_i.append(np.mean(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.std(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.max(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.min(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(Bar1Birth[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(Bar1Death[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(np.mean(Bar1Birth[:]))
Feature_i.append(np.std(Bar1Birth[:]))
Feature_i.append(np.max(Bar1Birth[:]))
Feature_i.append(np.min(Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Birth[:]))
Feature_i.append(np.mean(Bar1Death[:]))
Feature_i.append(np.std(Bar1Death[:]))
Feature_i.append(np.max(Bar1Death[:]))
Feature_i.append(np.min(Bar1Death[:]))
Feature_i.append(np.sum(Bar1Death[:]))
Feature_i.append(len(Bar1Death))
else:
Feature_i.extend([0.]*18)
if len(Bar2Death) > 0:
Feature_i.append(np.mean(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.std(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.max(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.min(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(Bar2Birth[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(Bar2Death[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(np.mean(Bar2Birth[:]))
Feature_i.append(np.std(Bar2Birth[:]))
Feature_i.append(np.max(Bar2Birth[:]))
Feature_i.append(np.min(Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Birth[:]))
Feature_i.append(np.mean(Bar2Death[:]))
Feature_i.append(np.std(Bar2Death[:]))
Feature_i.append(np.max(Bar2Death[:]))
Feature_i.append(np.min(Bar2Death[:]))
Feature_i.append(np.sum(Bar2Death[:]))
Feature_i.append(len(Bar2Death))
else:
Feature_i.extend([0.]*18)
else:
Feature_i.extend([0.]*42)
Feature_i = np.asarray(Feature_i, float)
    outfile = open(working_dir+'/'+ligand_name+'_feature_alpha_handcrafted.npy', 'wb')  # np.save writes binary .npy data
np.save(outfile, Feature_i)
outfile.close()
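# Note added for clarity (not part of the original feature code): for every element
# group in LIGELE, GenerateFeature_alpha above and GenerateFeature_level1 below emit
# the same fixed-length block of barcode statistics --
#   6 values for dimension-0 bars (mean/std/max/min/sum of death values, plus a count),
#   18 values for dimension-1 bars (persistence, birth and death statistics),
#   18 values for dimension-2 bars (same layout as dimension 1),
# i.e. 6 + 18 + 18 = 42 entries per element group, matching the [0.]*42 fallback used
# when an element group is absent. With the 41 element groups listed in LIGELE, each
# feature vector should therefore contain 41*42 = 1722 entries.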
def GenerateFeature_level1(ligand_name, working_dir):
small = 0.01
Feature_i = []
Cut = 12.0
LIGELE = ['C','N','O','S','CN','CO','CS','NO','NS','OS','CCl','CBr','CP','CF','CNO','CNS','COS','NOS','CNOS','CNOSPFClBrI','H','CH','NH','OH','SH','CNH','COH','CSH','NOH','NSH','OSH','CNOH','CNSH','COSH','NOSH','CNOSH','CNOSPFClBrIH','CClH','CBrH','CPH','CFH']
pdb = ligand_name
for el in LIGELE:
if os.path.exists(working_dir+'/'+ligand_name+'_'+el+'_level1.PH'):
InFile = open(working_dir+'/'+ligand_name+'_'+el+'_level1.PH')
lines = InFile.read().splitlines()
Bars = []
for line in lines:
a,b,c = line.split()
Bars.append([int(a), float(b), float(c)])
InFile.close()
Bar0Birth = []; Bar0Death = []; Bar1Birth = []; Bar1Death = []; Bar2Birth = []; Bar2Death = [];
for Bar in Bars:
if Bar[2] < Bar[1]:
continue
if Bar[2] > 12.0 and Bar[0] == 0: continue
if Bar[2] > 12.0 and Bar[0] > 0: Bar[2] = 12.0
if Bar[0] == 0 and Bar[2]-Bar[1] >= small:
Bar0Birth.append(Bar[1])
Bar0Death.append(Bar[2])
if Bar[0] == 1 and Bar[2]-Bar[1] >= small:
Bar1Birth.append(Bar[1])
Bar1Death.append(Bar[2])
if Bar[0] == 2 and Bar[2]-Bar[1] >= small:
Bar2Birth.append(Bar[1])
Bar2Death.append(Bar[2])
if len(Bar0Birth) > 0:
Bar0Birth = np.asarray(Bar0Birth, float)
Bar0Death = np.asarray(Bar0Death, float)
if len(Bar1Birth) > 0:
Bar1Birth = np.asarray(Bar1Birth, float)
Bar1Death = np.asarray(Bar1Death, float)
if len(Bar2Birth) > 0:
Bar2Birth = np.asarray(Bar2Birth, float)
Bar2Death = np.asarray(Bar2Death, float)
if len(Bar0Death) > 0:
Feature_i.append(np.mean(Bar0Death[:]))
Feature_i.append(np.std(Bar0Death[:]))
Feature_i.append(np.max(Bar0Death[:]))
Feature_i.append(np.min(Bar0Death[:]))
Feature_i.append(np.sum(Bar0Death[:]))
Feature_i.append(len(Bar0Death))
else:
Feature_i.extend([0.]*6)
if len(Bar1Death) > 0:
Feature_i.append(np.mean(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.std(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.max(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.min(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Death[:] - Bar1Birth[:]))
Feature_i.append(Bar1Birth[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(Bar1Death[np.argmax(Bar1Death[:] - Bar1Birth[:])])
Feature_i.append(np.mean(Bar1Birth[:]))
Feature_i.append(np.std(Bar1Birth[:]))
Feature_i.append(np.max(Bar1Birth[:]))
Feature_i.append(np.min(Bar1Birth[:]))
Feature_i.append(np.sum(Bar1Birth[:]))
Feature_i.append(np.mean(Bar1Death[:]))
Feature_i.append(np.std(Bar1Death[:]))
Feature_i.append(np.max(Bar1Death[:]))
Feature_i.append(np.min(Bar1Death[:]))
Feature_i.append(np.sum(Bar1Death[:]))
Feature_i.append(len(Bar1Death))
else:
Feature_i.extend([0.]*18)
if len(Bar2Death) > 0:
Feature_i.append(np.mean(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.std(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.max(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.min(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Death[:] - Bar2Birth[:]))
Feature_i.append(Bar2Birth[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(Bar2Death[np.argmax(Bar2Death[:] - Bar2Birth[:])])
Feature_i.append(np.mean(Bar2Birth[:]))
Feature_i.append(np.std(Bar2Birth[:]))
Feature_i.append(np.max(Bar2Birth[:]))
Feature_i.append(np.min(Bar2Birth[:]))
Feature_i.append(np.sum(Bar2Birth[:]))
Feature_i.append(np.mean(Bar2Death[:]))
Feature_i.append(np.std(Bar2Death[:]))
Feature_i.append(np.max(Bar2Death[:]))
Feature_i.append(np.min(Bar2Death[:]))
Feature_i.append(np.sum(Bar2Death[:]))
Feature_i.append(len(Bar2Death))
else:
Feature_i.extend([0.]*18)
else:
Feature_i.extend([0.]*42)
Feature_i = np.asarray(Feature_i, float)
    outfile = open(working_dir+'/'+ligand_name+'_feature_ligand_level1_handcrafted.npy', 'wb')  # np.save writes binary .npy data
np.save(outfile, Feature_i)
outfile.close()
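# Minimal usage sketch (added for illustration; not part of the original module).
# It assumes the persistence inputs already exist in the working directory:
# `<ligand>_alpha.pkl` for the alpha-complex barcodes and one
# `<ligand>_<element>_level1.PH` file per element group. The ligand name and
# directory below are hypothetical placeholders.
if __name__ == "__main__":
    ligand = '1a2b_ligand'      # hypothetical ligand identifier
    work_dir = './features'     # hypothetical working directory
    GenerateFeature_alpha(ligand, work_dir)
    GenerateFeature_level1(ligand, work_dir)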
|
[
"os.path.exists",
"numpy.mean",
"numpy.asarray",
"pickle.load",
"numpy.min",
"numpy.max",
"numpy.argmax",
"numpy.sum",
"numpy.std",
"numpy.save"
] |
[((500, 519), 'pickle.load', 'pickle.load', (['InFile'], {}), '(InFile)\n', (511, 519), False, 'import pickle\n'), ((4686, 4714), 'numpy.asarray', 'np.asarray', (['Feature_i', 'float'], {}), '(Feature_i, float)\n', (4696, 4714), True, 'import numpy as np\n'), ((4806, 4833), 'numpy.save', 'np.save', (['outfile', 'Feature_i'], {}), '(outfile, Feature_i)\n', (4813, 4833), True, 'import numpy as np\n'), ((9780, 9808), 'numpy.asarray', 'np.asarray', (['Feature_i', 'float'], {}), '(Feature_i, float)\n', (9790, 9808), True, 'import numpy as np\n'), ((9908, 9935), 'numpy.save', 'np.save', (['outfile', 'Feature_i'], {}), '(outfile, Feature_i)\n', (9915, 9935), True, 'import numpy as np\n'), ((5283, 5356), 'os.path.exists', 'os.path.exists', (["(working_dir + '/' + ligand_name + '_' + el + '_level1.PH')"], {}), "(working_dir + '/' + ligand_name + '_' + el + '_level1.PH')\n", (5297, 5356), False, 'import os\n'), ((1385, 1413), 'numpy.asarray', 'np.asarray', (['Bar0Birth', 'float'], {}), '(Bar0Birth, float)\n', (1395, 1413), True, 'import numpy as np\n'), ((1442, 1470), 'numpy.asarray', 'np.asarray', (['Bar0Death', 'float'], {}), '(Bar0Death, float)\n', (1452, 1470), True, 'import numpy as np\n'), ((1534, 1562), 'numpy.asarray', 'np.asarray', (['Bar1Birth', 'float'], {}), '(Bar1Birth, float)\n', (1544, 1562), True, 'import numpy as np\n'), ((1591, 1619), 'numpy.asarray', 'np.asarray', (['Bar1Death', 'float'], {}), '(Bar1Death, float)\n', (1601, 1619), True, 'import numpy as np\n'), ((1683, 1711), 'numpy.asarray', 'np.asarray', (['Bar2Birth', 'float'], {}), '(Bar2Birth, float)\n', (1693, 1711), True, 'import numpy as np\n'), ((1740, 1768), 'numpy.asarray', 'np.asarray', (['Bar2Death', 'float'], {}), '(Bar2Death, float)\n', (1750, 1768), True, 'import numpy as np\n'), ((6479, 6507), 'numpy.asarray', 'np.asarray', (['Bar0Birth', 'float'], {}), '(Bar0Birth, float)\n', (6489, 6507), True, 'import numpy as np\n'), ((6536, 6564), 'numpy.asarray', 'np.asarray', (['Bar0Death', 'float'], {}), '(Bar0Death, float)\n', (6546, 6564), True, 'import numpy as np\n'), ((6628, 6656), 'numpy.asarray', 'np.asarray', (['Bar1Birth', 'float'], {}), '(Bar1Birth, float)\n', (6638, 6656), True, 'import numpy as np\n'), ((6685, 6713), 'numpy.asarray', 'np.asarray', (['Bar1Death', 'float'], {}), '(Bar1Death, float)\n', (6695, 6713), True, 'import numpy as np\n'), ((6777, 6805), 'numpy.asarray', 'np.asarray', (['Bar2Birth', 'float'], {}), '(Bar2Birth, float)\n', (6787, 6805), True, 'import numpy as np\n'), ((6834, 6862), 'numpy.asarray', 'np.asarray', (['Bar2Death', 'float'], {}), '(Bar2Death, float)\n', (6844, 6862), True, 'import numpy as np\n'), ((1837, 1858), 'numpy.mean', 'np.mean', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1844, 1858), True, 'import numpy as np\n'), ((1893, 1913), 'numpy.std', 'np.std', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1899, 1913), True, 'import numpy as np\n'), ((1948, 1968), 'numpy.max', 'np.max', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1954, 1968), True, 'import numpy as np\n'), ((2003, 2023), 'numpy.min', 'np.min', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (2009, 2023), True, 'import numpy as np\n'), ((2058, 2078), 'numpy.sum', 'np.sum', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (2064, 2078), True, 'import numpy as np\n'), ((2256, 2292), 'numpy.mean', 'np.mean', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2263, 2292), True, 'import numpy as np\n'), ((2327, 2362), 'numpy.std', 'np.std', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - 
Bar1Birth[:])\n', (2333, 2362), True, 'import numpy as np\n'), ((2397, 2432), 'numpy.max', 'np.max', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2403, 2432), True, 'import numpy as np\n'), ((2467, 2502), 'numpy.min', 'np.min', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2473, 2502), True, 'import numpy as np\n'), ((2537, 2572), 'numpy.sum', 'np.sum', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2543, 2572), True, 'import numpy as np\n'), ((2775, 2796), 'numpy.mean', 'np.mean', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2782, 2796), True, 'import numpy as np\n'), ((2831, 2851), 'numpy.std', 'np.std', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2837, 2851), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.max', 'np.max', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2892, 2906), True, 'import numpy as np\n'), ((2941, 2961), 'numpy.min', 'np.min', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2947, 2961), True, 'import numpy as np\n'), ((2996, 3016), 'numpy.sum', 'np.sum', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (3002, 3016), True, 'import numpy as np\n'), ((3051, 3072), 'numpy.mean', 'np.mean', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3058, 3072), True, 'import numpy as np\n'), ((3107, 3127), 'numpy.std', 'np.std', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3113, 3127), True, 'import numpy as np\n'), ((3162, 3182), 'numpy.max', 'np.max', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3168, 3182), True, 'import numpy as np\n'), ((3217, 3237), 'numpy.min', 'np.min', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3223, 3237), True, 'import numpy as np\n'), ((3272, 3292), 'numpy.sum', 'np.sum', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3278, 3292), True, 'import numpy as np\n'), ((3471, 3507), 'numpy.mean', 'np.mean', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3478, 3507), True, 'import numpy as np\n'), ((3542, 3577), 'numpy.std', 'np.std', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3548, 3577), True, 'import numpy as np\n'), ((3612, 3647), 'numpy.max', 'np.max', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3618, 3647), True, 'import numpy as np\n'), ((3682, 3717), 'numpy.min', 'np.min', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3688, 3717), True, 'import numpy as np\n'), ((3752, 3787), 'numpy.sum', 'np.sum', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3758, 3787), True, 'import numpy as np\n'), ((3990, 4011), 'numpy.mean', 'np.mean', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (3997, 4011), True, 'import numpy as np\n'), ((4046, 4066), 'numpy.std', 'np.std', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4052, 4066), True, 'import numpy as np\n'), ((4101, 4121), 'numpy.max', 'np.max', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4107, 4121), True, 'import numpy as np\n'), ((4156, 4176), 'numpy.min', 'np.min', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4162, 4176), True, 'import numpy as np\n'), ((4211, 4231), 'numpy.sum', 'np.sum', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4217, 4231), True, 'import numpy as np\n'), ((4266, 4287), 'numpy.mean', 'np.mean', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4273, 4287), True, 'import numpy as np\n'), ((4322, 4342), 'numpy.std', 'np.std', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4328, 4342), True, 'import numpy as np\n'), ((4377, 4397), 'numpy.max', 'np.max', 
(['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4383, 4397), True, 'import numpy as np\n'), ((4432, 4452), 'numpy.min', 'np.min', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4438, 4452), True, 'import numpy as np\n'), ((4487, 4507), 'numpy.sum', 'np.sum', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4493, 4507), True, 'import numpy as np\n'), ((6931, 6952), 'numpy.mean', 'np.mean', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (6938, 6952), True, 'import numpy as np\n'), ((6987, 7007), 'numpy.std', 'np.std', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (6993, 7007), True, 'import numpy as np\n'), ((7042, 7062), 'numpy.max', 'np.max', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7048, 7062), True, 'import numpy as np\n'), ((7097, 7117), 'numpy.min', 'np.min', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7103, 7117), True, 'import numpy as np\n'), ((7152, 7172), 'numpy.sum', 'np.sum', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7158, 7172), True, 'import numpy as np\n'), ((7350, 7386), 'numpy.mean', 'np.mean', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7357, 7386), True, 'import numpy as np\n'), ((7421, 7456), 'numpy.std', 'np.std', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7427, 7456), True, 'import numpy as np\n'), ((7491, 7526), 'numpy.max', 'np.max', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7497, 7526), True, 'import numpy as np\n'), ((7561, 7596), 'numpy.min', 'np.min', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7567, 7596), True, 'import numpy as np\n'), ((7631, 7666), 'numpy.sum', 'np.sum', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7637, 7666), True, 'import numpy as np\n'), ((7869, 7890), 'numpy.mean', 'np.mean', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7876, 7890), True, 'import numpy as np\n'), ((7925, 7945), 'numpy.std', 'np.std', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7931, 7945), True, 'import numpy as np\n'), ((7980, 8000), 'numpy.max', 'np.max', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7986, 8000), True, 'import numpy as np\n'), ((8035, 8055), 'numpy.min', 'np.min', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (8041, 8055), True, 'import numpy as np\n'), ((8090, 8110), 'numpy.sum', 'np.sum', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (8096, 8110), True, 'import numpy as np\n'), ((8145, 8166), 'numpy.mean', 'np.mean', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8152, 8166), True, 'import numpy as np\n'), ((8201, 8221), 'numpy.std', 'np.std', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8207, 8221), True, 'import numpy as np\n'), ((8256, 8276), 'numpy.max', 'np.max', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8262, 8276), True, 'import numpy as np\n'), ((8311, 8331), 'numpy.min', 'np.min', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8317, 8331), True, 'import numpy as np\n'), ((8366, 8386), 'numpy.sum', 'np.sum', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8372, 8386), True, 'import numpy as np\n'), ((8565, 8601), 'numpy.mean', 'np.mean', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8572, 8601), True, 'import numpy as np\n'), ((8636, 8671), 'numpy.std', 'np.std', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8642, 8671), True, 'import numpy as np\n'), ((8706, 8741), 'numpy.max', 'np.max', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8712, 8741), True, 'import numpy as np\n'), ((8776, 8811), 
'numpy.min', 'np.min', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8782, 8811), True, 'import numpy as np\n'), ((8846, 8881), 'numpy.sum', 'np.sum', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8852, 8881), True, 'import numpy as np\n'), ((9084, 9105), 'numpy.mean', 'np.mean', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9091, 9105), True, 'import numpy as np\n'), ((9140, 9160), 'numpy.std', 'np.std', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9146, 9160), True, 'import numpy as np\n'), ((9195, 9215), 'numpy.max', 'np.max', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9201, 9215), True, 'import numpy as np\n'), ((9250, 9270), 'numpy.min', 'np.min', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9256, 9270), True, 'import numpy as np\n'), ((9305, 9325), 'numpy.sum', 'np.sum', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9311, 9325), True, 'import numpy as np\n'), ((9360, 9381), 'numpy.mean', 'np.mean', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9367, 9381), True, 'import numpy as np\n'), ((9416, 9436), 'numpy.std', 'np.std', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9422, 9436), True, 'import numpy as np\n'), ((9471, 9491), 'numpy.max', 'np.max', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9477, 9491), True, 'import numpy as np\n'), ((9526, 9546), 'numpy.min', 'np.min', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9532, 9546), True, 'import numpy as np\n'), ((9581, 9601), 'numpy.sum', 'np.sum', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9587, 9601), True, 'import numpy as np\n'), ((2617, 2655), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2626, 2655), True, 'import numpy as np\n'), ((2701, 2739), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2710, 2739), True, 'import numpy as np\n'), ((3832, 3870), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3841, 3870), True, 'import numpy as np\n'), ((3916, 3954), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3925, 3954), True, 'import numpy as np\n'), ((7711, 7749), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7720, 7749), True, 'import numpy as np\n'), ((7795, 7833), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7804, 7833), True, 'import numpy as np\n'), ((8926, 8964), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8935, 8964), True, 'import numpy as np\n'), ((9010, 9048), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (9019, 9048), True, 'import numpy as np\n')]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class LocalVariabletest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
def test_assert_scalar_int(self):
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
tensor_util.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
tensor_util.assert_scalar_int(
constant_op.constant(
3, dtype=dtypes.float32))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
tensor_util.assert_scalar_int(
constant_op.constant(
[3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
def _assert_with_shape(self, tensor, expected_value, expected_shape,
unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_shape(
constant_op.constant(unexpected_shape),
tensor).eval)
expected_placeholder = array_ops.placeholder(dtypes.float32)
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_same_shape(expected_placeholder,
tensor).eval,
{expected_placeholder: np.ones(unexpected_shape)})
self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
self.assertIs(
tensor,
tensor_util.with_same_shape(
constant_op.constant(
1, shape=expected_shape), tensor))
tensor_with_shape = tensor_util.with_shape(
constant_op.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
tensor)
np.testing.assert_array_equal(expected_value,
tensor_with_same_shape.eval({
expected_placeholder:
np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
np.array([1.1]), constant_op.constant(1.0))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
constant_op.constant(np.array([1.1])),
constant_op.constant(1.0))
def test_with_shape_0(self):
with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_1(self):
with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2(self):
with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2_with_partial_expected_shape(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
actual_shape = [2, 2]
tensor = constant_op.constant(value, shape=actual_shape)
partial_expected_shape = tensor_shape.TensorShape([None, 2])
# Won't raise any exception here:
tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor)
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_no_shape: array_2x2
}))
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval, {tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 2 and 1",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
tensor_util.with_shape, incompatible_shape,
tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError,
r"Dimension 1 in both shapes must be equal, but are 2 and 1. "
r"Shapes are \[\?,2\] and \[2,1\].",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_partial_shape: array_2x2
}))
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0]})
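# Illustrative note (added; not part of the original test file): outside a test
# harness, remove_squeezable_dimensions is typically used to reconcile a trailing
# singleton dimension between predictions and labels before computing metrics,
# e.g. (values are arbitrary):
#
#   predictions = constant_op.constant([[0], [1], [1]])   # shape (3, 1)
#   labels = constant_op.constant([0, 1, 0])               # shape (3,)
#   predictions, labels = tensor_util.remove_squeezable_dimensions(
#       predictions, labels)
#   # predictions is squeezed to shape (3,) so that it matches labels.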
class RemoveSqueezableDimensionsTest(test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = ([[p] for p in predictions_value] if
predictions_have_extra_dim else
predictions_value)
input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
else labels_value)
with ops.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = constant_op.constant(
input_predictions_value, dtype=dtypes.int32)
else:
predictions = array_ops.placeholder(
dtype=dtypes.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
else:
labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tensor_util.remove_squeezable_dimensions(predictions, labels))
with self.session(g):
variables_lib.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
|
[
"tensorflow.contrib.framework.python.ops.variables.local_variable",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.contrib.framework.python.framework.tensor_util.reduce_sum_n",
"numpy.ones",
"re.compile",
"tensorflow.python.ops.variables.local_variables_initializer",
"tensorflow.python.ops.variables.local_variables",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.contrib.framework.python.framework.tensor_util.remove_squeezable_dimensions",
"tensorflow.contrib.framework.python.framework.tensor_util.assert_scalar_int",
"tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape",
"numpy.array",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.ops.Graph",
"tensorflow.contrib.framework.python.framework.tensor_util.with_shape",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.variables_initializer"
] |
[((16270, 16281), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (16279, 16281), False, 'from tensorflow.python.platform import test\n'), ((2684, 2716), 'tensorflow.contrib.framework.python.framework.tensor_util.assert_scalar_int', 'tensor_util.assert_scalar_int', (['(3)'], {}), '(3)\n', (2713, 2716), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4772, 4829), 'tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape', 'tensor_util.with_same_shape', (['expected_placeholder', 'tensor'], {}), '(expected_placeholder, tensor)\n', (4799, 4829), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((1730, 1767), 'tensorflow.contrib.framework.python.ops.variables.local_variable', 'variables_lib2.local_variable', (['value0'], {}), '(value0)\n', (1759, 1767), True, 'from tensorflow.contrib.framework.python.ops import variables as variables_lib2\n'), ((1794, 1831), 'tensorflow.contrib.framework.python.ops.variables.local_variable', 'variables_lib2.local_variable', (['value1'], {}), '(value1)\n', (1823, 1831), True, 'from tensorflow.contrib.framework.python.ops import variables as variables_lib2\n'), ((1851, 1882), 'tensorflow.python.ops.variables.local_variables', 'variables_lib.local_variables', ([], {}), '()\n', (1880, 1882), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((2250, 2273), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1)'], {}), '(1)\n', (2270, 2273), False, 'from tensorflow.python.framework import constant_op\n'), ((2285, 2310), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[2]'], {}), '([2])\n', (2305, 2310), False, 'from tensorflow.python.framework import constant_op\n'), ((2322, 2360), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[3, 4], [5, 6]]'], {}), '([[3, 4], [5, 6]])\n', (2342, 2360), False, 'from tensorflow.python.framework import constant_op\n'), ((2554, 2597), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.int32'}), '(3, dtype=dtypes.int32)\n', (2574, 2597), False, 'from tensorflow.python.framework import constant_op\n'), ((2634, 2677), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.int64'}), '(3, dtype=dtypes.int64)\n', (2654, 2677), False, 'from tensorflow.python.framework import constant_op\n'), ((3944, 3981), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (3965, 3981), False, 'from tensorflow.python.ops import array_ops\n'), ((4351, 4397), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['expected_shape', 'tensor'], {}), '(expected_shape, tensor)\n', (4373, 4397), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4619, 4655), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['expected_shape'], {}), '(expected_shape)\n', (4639, 4655), False, 'from tensorflow.python.framework import constant_op\n'), ((7613, 7660), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'actual_shape'}), '(value, shape=actual_shape)\n', (7633, 7660), False, 'from tensorflow.python.framework import constant_op\n'), ((7693, 7728), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 
(['[None, 2]'], {}), '([None, 2])\n', (7717, 7728), False, 'from tensorflow.python.framework import tensor_shape\n'), ((7797, 7851), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['partial_expected_shape', 'tensor'], {}), '(partial_expected_shape, tensor)\n', (7819, 7851), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((8017, 8054), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (8038, 8054), False, 'from tensorflow.python.ops import array_ops\n'), ((8116, 8173), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['compatible_shape', 'tensor_no_shape'], {}), '(compatible_shape, tensor_no_shape)\n', (8138, 8173), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((9184, 9221), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (9205, 9221), False, 'from tensorflow.python.ops import array_ops\n'), ((10121, 10183), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['compatible_shape', 'tensor_partial_shape'], {}), '(compatible_shape, tensor_partial_shape)\n', (10143, 10183), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((15874, 15935), 'tensorflow.contrib.framework.python.framework.tensor_util.remove_squeezable_dimensions', 'tensor_util.remove_squeezable_dimensions', (['predictions', 'labels'], {}), '(predictions, labels)\n', (15914, 15935), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((1671, 1702), 'tensorflow.python.ops.variables.local_variables', 'variables_lib.local_variables', ([], {}), '()\n', (1700, 1702), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((2833, 2878), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.float32'}), '(3, dtype=dtypes.float32)\n', (2853, 2878), False, 'from tensorflow.python.framework import constant_op\n'), ((3011, 3059), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[3, 4]'], {'dtype': 'dtypes.int32'}), '([3, 4], dtype=dtypes.int32)\n', (3031, 3059), False, 'from tensorflow.python.framework import constant_op\n'), ((3714, 3733), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (3724, 3733), False, 'import re\n'), ((4065, 4084), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (4075, 4084), False, 'import re\n'), ((4487, 4532), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1)'], {'shape': 'expected_shape'}), '(1, shape=expected_shape)\n', (4507, 4532), False, 'from tensorflow.python.framework import constant_op\n'), ((5416, 5441), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (5436, 5441), False, 'from tensorflow.python.framework import constant_op\n'), ((5674, 5701), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (5694, 5701), False, 'from tensorflow.python.framework import constant_op\n'), ((5849, 5864), 'numpy.array', 'np.array', (['[1.1]'], {}), '([1.1])\n', (5857, 5864), True, 'import numpy as np\n'), ((5866, 5891), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (5886, 5891), 
False, 'from tensorflow.python.framework import constant_op\n'), ((6109, 6134), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (6129, 6134), False, 'from tensorflow.python.framework import constant_op\n'), ((6329, 6369), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6349, 6369), False, 'from tensorflow.python.framework import constant_op\n'), ((6648, 6688), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6668, 6688), False, 'from tensorflow.python.framework import constant_op\n'), ((6971, 7011), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6991, 7011), False, 'from tensorflow.python.framework import constant_op\n'), ((7311, 7351), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (7331, 7351), False, 'from tensorflow.python.framework import constant_op\n'), ((8361, 8399), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['compatible_shape'], {}), '(compatible_shape)\n', (8381, 8399), False, 'from tensorflow.python.framework import constant_op\n'), ((10371, 10409), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['compatible_shape'], {}), '(compatible_shape)\n', (10391, 10409), False, 'from tensorflow.python.framework import constant_op\n'), ((15311, 15376), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_predictions_value'], {'dtype': 'dtypes.int32'}), '(input_predictions_value, dtype=dtypes.int32)\n', (15331, 15376), False, 'from tensorflow.python.framework import constant_op\n'), ((15427, 15488), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""predictions"""'}), "(dtype=dtypes.int32, name='predictions')\n", (15448, 15488), False, 'from tensorflow.python.ops import array_ops\n'), ((15615, 15675), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_labels_value'], {'dtype': 'dtypes.int32'}), '(input_labels_value, dtype=dtypes.int32)\n', (15635, 15675), False, 'from tensorflow.python.framework import constant_op\n'), ((15707, 15763), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""labels"""'}), "(dtype=dtypes.int32, name='labels')\n", (15728, 15763), False, 'from tensorflow.python.ops import array_ops\n'), ((2001, 2047), 'tensorflow.python.ops.variables.variables_initializer', 'variables_lib.variables_initializer', (['variables'], {}), '(variables)\n', (2036, 2047), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((4117, 4174), 'tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape', 'tensor_util.with_same_shape', (['expected_placeholder', 'tensor'], {}), '(expected_placeholder, tensor)\n', (4144, 4174), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4294, 4319), 'numpy.ones', 'np.ones', (['unexpected_shape'], {}), '(unexpected_shape)\n', (4301, 4319), True, 'import numpy as np\n'), ((5108, 5131), 'numpy.ones', 'np.ones', (['expected_shape'], {}), '(expected_shape)\n', (5115, 5131), True, 'import numpy as np\n'), ((6060, 6075), 'numpy.array', 
'np.array', (['[1.1]'], {}), '([1.1])\n', (6068, 6075), True, 'import numpy as np\n'), ((15194, 15205), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (15203, 15205), False, 'from tensorflow.python.framework import ops\n'), ((2389, 2424), 'tensorflow.contrib.framework.python.framework.tensor_util.reduce_sum_n', 'tensor_util.reduce_sum_n', (['[a, b, c]'], {}), '([a, b, c])\n', (2413, 2424), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((3825, 3863), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['unexpected_shape'], {}), '(unexpected_shape)\n', (3845, 3863), False, 'from tensorflow.python.framework import constant_op\n'), ((15975, 16018), 'tensorflow.python.ops.variables.local_variables_initializer', 'variables_lib.local_variables_initializer', ([], {}), '()\n', (16016, 16018), True, 'from tensorflow.python.ops import variables as variables_lib\n')]
|
""" Code for wrapping the motion primitive action in an object. """
from __future__ import division
from __future__ import absolute_import
import attr
import numpy as np
from bc_gym_planning_env.utilities.serialize import Serializable
@attr.s(cmp=False)
class Action(Serializable):
""" Object representing an 'action' - a motion primitive to execute in the environment """
VERSION = 1
command = attr.ib(type=np.ndarray)
@classmethod
def from_cmds(cls, wanted_linear_velocity_of_baselink, wanted_front_wheel_angle):
return cls(command=np.array([wanted_linear_velocity_of_baselink, wanted_front_wheel_angle]))
def __eq__(self, other):
if not isinstance(other, Action):
return False
if (self.command != other.command).any():
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
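# Minimal usage sketch (added for illustration; not part of the original module):
# building an Action from raw commands and comparing two actions. The
# velocity/steering values below are arbitrary.
if __name__ == "__main__":
    a = Action.from_cmds(wanted_linear_velocity_of_baselink=0.5,
                         wanted_front_wheel_angle=0.1)
    b = Action(command=np.array([0.5, 0.1]))
    assert a == b                                # equality compares the underlying command arrays
    assert a != Action(command=np.array([0.0, 0.0]))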
|
[
"numpy.array",
"attr.s",
"attr.ib"
] |
[((240, 257), 'attr.s', 'attr.s', ([], {'cmp': '(False)'}), '(cmp=False)\n', (246, 257), False, 'import attr\n'), ((411, 435), 'attr.ib', 'attr.ib', ([], {'type': 'np.ndarray'}), '(type=np.ndarray)\n', (418, 435), False, 'import attr\n'), ((567, 639), 'numpy.array', 'np.array', (['[wanted_linear_velocity_of_baselink, wanted_front_wheel_angle]'], {}), '([wanted_linear_velocity_of_baselink, wanted_front_wheel_angle])\n', (575, 639), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
from collections import defaultdict
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.factors import factor_product
class ClusterGraph(UndirectedGraph):
r"""
Base class for representing Cluster Graph.
    A cluster graph is an undirected graph whose nodes are associated with subsets of variables. The graph contains
    undirected edges that connect clusters whose scopes have a non-empty intersection.
    Formally, a cluster graph :math:`\mathcal{U}` for a set of factors :math:`\Phi` over :math:`\mathcal{X}` is an
undirected graph, each of whose nodes :math:`i` is associated with a subset :math:`C_i \subseteq X`. A cluster
graph must be family-preserving - each factor :math:`\phi \in \Phi` must be associated with a cluster C, denoted
:math:`\alpha(\phi)`, such that :math:`Scope[\phi] \subseteq C_i`. Each edge between a pair of clusters :math:`C_i`
and :math:`C_j` is associated with a sepset :math:`S_{i,j} \subseteq C_i \cap C_j`.
Parameters
----------
data: input graph
Data to initialize graph. If data=None (default) an empty graph is created. The data is an edge list
Examples
--------
Create an empty ClusterGraph with no nodes and no edges
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
G can be grown by adding clique nodes.
**Nodes:**
Add a tuple (or list or set) of nodes as single clique node.
>>> G.add_node(('a', 'b', 'c'))
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
**Edges:**
G can also be grown by adding edges.
>>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))
or a list of edges
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
def __init__(self, ebunch=None):
super(ClusterGraph, self).__init__()
if ebunch:
self.add_edges_from(ebunch)
self.factors = []
def add_node(self, node, **kwargs):
"""
Add a single node to the cluster graph.
Parameters
----------
node: node
A node should be a collection of nodes forming a clique. It can be
a list, set or tuple of nodes
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_node(('a', 'b', 'c'))
"""
if not isinstance(node, (list, set, tuple)):
raise TypeError(
"Node can only be a list, set or tuple of nodes forming a clique"
)
node = tuple(node)
super(ClusterGraph, self).add_node(node, **kwargs)
def add_nodes_from(self, nodes, **kwargs):
"""
Add multiple nodes to the cluster graph.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
"""
for node in nodes:
self.add_node(node, **kwargs)
def add_edge(self, u, v, **kwargs):
"""
Add an edge between two clique nodes.
Parameters
----------
u, v: nodes
Nodes can be any list or set or tuple of nodes forming a clique.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
"""
set_u = set(u)
set_v = set(v)
if set_u.isdisjoint(set_v):
raise ValueError("No sepset found between these two edges.")
super(ClusterGraph, self).add_edge(u, v)
def add_factors(self, *factors):
"""
Associate a factor to the graph.
See factors class for the order of potential values
Parameters
----------
*factor: pgmpy.factors.factors object
A factor object on any subset of the variables of the model which
is to be associated with the model.
Returns
-------
None
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> student.add_node(('Alice', 'Bob'))
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[3, 2],
... values=np.random.rand(6))
>>> student.add_factors(factor)
"""
for factor in factors:
factor_scope = set(factor.scope())
nodes = [set(node) for node in self.nodes()]
if factor_scope not in nodes:
raise ValueError(
"Factors defined on clusters of variable not" "present in model"
)
self.factors.append(factor)
def get_factors(self, node=None):
"""
Return the factors that have been added till now to the graph.
If node is not None, it would return the factor corresponding to the
given node.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_factors()
>>> G.get_factors(node=('a', 'b', 'c'))
"""
if node is None:
return self.factors
else:
nodes = [set(n) for n in self.nodes()]
if set(node) not in nodes:
raise ValueError("Node not present in Cluster Graph")
factors = filter(lambda x: set(x.scope()) == set(node), self.factors)
return next(factors)
def remove_factors(self, *factors):
"""
Removes the given factors from the added factors.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
... value=np.random.rand(4))
>>> student.add_factors(factor)
>>> student.remove_factors(factor)
"""
for factor in factors:
self.factors.remove(factor)
def get_cardinality(self, node=None):
"""
Returns the cardinality of the node
Parameters
----------
node: any hashable python object (optional)
            The variable whose cardinality we want. If node is not specified, returns a
            dictionary with all the variables as keys and their respective cardinalities
            as values.
Returns
-------
        int or dict : If node is specified, returns the cardinality of that variable.
                      If node is not specified, returns a dictionary with all the
                      variables as keys and their respective cardinalities as values.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = ClusterGraph()
>>> factor = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> student.add_node(('Alice', 'Bob'))
>>> student.add_factors(factor)
>>> student.get_cardinality()
defaultdict(<class 'int'>, {'Bob': 2, 'Alice': 2})
>>> student.get_cardinality(node='Alice')
2
"""
if node:
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if node == variable:
return cardinality
else:
cardinalities = defaultdict(int)
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
cardinalities[variable] = cardinality
return cardinalities
def get_partition_function(self):
r"""
Returns the partition function for a given undirected graph.
A partition function is defined as
.. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)
where m is the number of factors present in the graph
and X are all the random variables present.
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
>>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
... (('a', 'b', 'c'), ('a', 'c'))])
>>> phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
>>> phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2, phi3)
>>> G.get_partition_function()
"""
if self.check_model():
factor = self.factors[0]
factor = factor_product(
factor, *[self.factors[i] for i in range(1, len(self.factors))]
)
return np.sum(factor.values)
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if factors are defined for all the cliques or not.
        * The check for the running intersection property is not done explicitly
          here, as it is done in the add_edges method.
        * Checks if cardinality information for all the variables is available or not.
          If not, it raises an error.
        * Checks if the cardinality of each random variable remains the same across all
          the factors.
Returns
-------
check: boolean
True if all the checks are passed
"""
for clique in self.nodes():
factors = filter(lambda x: set(x.scope()) == set(clique), self.factors)
if not any(factors):
raise ValueError("Factors for all the cliques or clusters not defined.")
cardinalities = self.get_cardinality()
if len(set((x for clique in self.nodes() for x in clique))) != len(
cardinalities
):
raise ValueError("Factors for all the variables not defined.")
for factor in self.factors:
for variable, cardinality in zip(factor.scope(), factor.cardinality):
if cardinalities[variable] != cardinality:
raise ValueError(
"Cardinality of variable {var} not matching among factors".format(
var=variable
)
)
return True
def copy(self):
"""
Returns a copy of ClusterGraph.
Returns
-------
ClusterGraph: copy of ClusterGraph
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('b', 'c')])
>>> G.add_edge(('a', 'b'), ('b', 'c'))
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
>>> G.add_factors(phi1, phi2)
>>> graph_copy = G.copy()
>>> graph_copy.factors
[<DiscreteFactor representing phi(a:2, b:2) at 0xb71b19cc>,
<DiscreteFactor representing phi(b:2, c:2) at 0xb4eaf3ac>]
>>> graph_copy.edges()
[(('a', 'b'), ('b', 'c'))]
>>> graph_copy.nodes()
[('a', 'b'), ('b', 'c')]
"""
copy = ClusterGraph(self.edges())
if self.factors:
factors_copy = [factor.copy() for factor in self.factors]
copy.add_factors(*factors_copy)
return copy
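# A minimal, hedged usage sketch exercising the methods above end to end. It re-uses only
# names from the docstring examples (ClusterGraph, DiscreteFactor, np.random.rand), assumes
# pgmpy and numpy are installed, and is guarded so it never runs on import.
if __name__ == "__main__":
    from pgmpy.models import ClusterGraph
    from pgmpy.factors.discrete import DiscreteFactor
    import numpy as np
    G = ClusterGraph()
    G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
    G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')), (('a', 'b', 'c'), ('a', 'c'))])
    phi1 = DiscreteFactor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
    phi2 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
    phi3 = DiscreteFactor(['a', 'c'], [2, 2], np.random.rand(4))
    G.add_factors(phi1, phi2, phi3)
    print(G.check_model())             # True when cliques, factors and cardinalities agree
    print(dict(G.get_cardinality()))   # {'a': 2, 'b': 2, 'c': 2}
    print(G.get_partition_function())  # sum over all assignments of the factor product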
|
[
"numpy.sum",
"collections.defaultdict"
] |
[((8516, 8532), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8527, 8532), False, 'from collections import defaultdict\n'), ((9995, 10016), 'numpy.sum', 'np.sum', (['factor.values'], {}), '(factor.values)\n', (10001, 10016), True, 'import numpy as np\n')]
|
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
import pybullet as p
from . import kuka
import random
import pybullet_data
from pkg_resources import parse_version
maxSteps = 1000
RENDER_HEIGHT = 720
RENDER_WIDTH = 960
class KukaCamGymEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self,
urdfRoot=pybullet_data.getDataPath(),
actionRepeat=1,
isEnableSelfCollision=True,
renders=False,
isDiscrete=False):
self._timeStep = 1./240.
self._urdfRoot = urdfRoot
self._actionRepeat = actionRepeat
self._isEnableSelfCollision = isEnableSelfCollision
self._observation = []
self._envStepCounter = 0
self._renders = renders
self._width = 341
self._height = 256
    self._isDiscrete=isDiscrete
    # Camera parameters used by _render(); values mirror the debug visualizer camera set below.
    self._cam_dist = 1.3
    self._cam_yaw = 180
    self._cam_pitch = -41
    self.terminated = 0
self._p = p
if self._renders:
cid = p.connect(p.SHARED_MEMORY)
if (cid<0):
p.connect(p.GUI)
p.resetDebugVisualizerCamera(1.3,180,-41,[0.52,-0.2,-0.33])
else:
p.connect(p.DIRECT)
#timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
self._seed()
self.reset()
observationDim = len(self.getExtendedObservation())
#print("observationDim")
#print(observationDim)
observation_high = np.array([np.finfo(np.float32).max] * observationDim)
if (self._isDiscrete):
self.action_space = spaces.Discrete(7)
else:
action_dim = 3
self._action_bound = 1
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 4))
self.viewer = None
def _reset(self):
self.terminated = 0
p.resetSimulation()
p.setPhysicsEngineParameter(numSolverIterations=150)
p.setTimeStep(self._timeStep)
p.loadURDF(os.path.join(self._urdfRoot,"plane.urdf"),[0,0,-1])
p.loadURDF(os.path.join(self._urdfRoot,"table/table.urdf"), 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)
xpos = 0.5 +0.2*random.random()
ypos = 0 +0.25*random.random()
ang = 3.1415925438*random.random()
orn = p.getQuaternionFromEuler([0,0,ang])
self.blockUid =p.loadURDF(os.path.join(self._urdfRoot,"block.urdf"), xpos,ypos,-0.1,orn[0],orn[1],orn[2],orn[3])
p.setGravity(0,0,-10)
self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
self._envStepCounter = 0
p.stepSimulation()
self._observation = self.getExtendedObservation()
return np.array(self._observation)
def __del__(self):
p.disconnect()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def getExtendedObservation(self):
#camEyePos = [0.03,0.236,0.54]
#distance = 1.06
#pitch=-56
#yaw = 258
#roll=0
#upAxisIndex = 2
#camInfo = p.getDebugVisualizerCamera()
#print("width,height")
#print(camInfo[0])
#print(camInfo[1])
#print("viewMatrix")
#print(camInfo[2])
#print("projectionMatrix")
#print(camInfo[3])
#viewMat = camInfo[2]
#viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos,distance,yaw, pitch,roll,upAxisIndex)
viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
#projMatrix = camInfo[3]#[0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
img_arr = p.getCameraImage(width=self._width,height=self._height,viewMatrix=viewMat,projectionMatrix=projMatrix)
rgb=img_arr[2]
np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
self._observation = np_img_arr
return self._observation
def _step(self, action):
if (self._isDiscrete):
dv = 0.01
dx = [0,-dv,dv,0,0,0,0][action]
dy = [0,0,0,-dv,dv,0,0][action]
da = [0,0,0,0,0,-0.1,0.1][action]
f = 0.3
realAction = [dx,dy,-0.002,da,f]
else:
dv = 0.01
dx = action[0] * dv
dy = action[1] * dv
da = action[2] * 0.1
f = 0.3
realAction = [dx,dy,-0.002,da,f]
return self.step2( realAction)
def step2(self, action):
for i in range(self._actionRepeat):
self._kuka.applyAction(action)
p.stepSimulation()
if self._termination():
break
#self._observation = self.getExtendedObservation()
self._envStepCounter += 1
self._observation = self.getExtendedObservation()
if self._renders:
time.sleep(self._timeStep)
#print("self._envStepCounter")
#print(self._envStepCounter)
done = self._termination()
reward = self._reward()
#print("len=%r" % len(self._observation))
return np.array(self._observation), reward, done, {}
def _render(self, mode='human', close=False):
if mode != "rgb_array":
return np.array([])
    base_pos,orn = self._p.getBasePositionAndOrientation(self._kuka.kukaUid) # track the Kuka base, not a racecar
view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._p.computeProjectionMatrixFOV(
fov=60, aspect=float(RENDER_WIDTH)/RENDER_HEIGHT,
nearVal=0.1, farVal=100.0)
(_, _, px, _, _) = self._p.getCameraImage(
width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
#print (self._kuka.endEffectorPos[2])
state = p.getLinkState(self._kuka.kukaUid,self._kuka.kukaEndEffectorIndex)
actualEndEffectorPos = state[0]
#print("self._envStepCounter")
#print(self._envStepCounter)
if (self.terminated or self._envStepCounter>maxSteps):
self._observation = self.getExtendedObservation()
return True
maxDist = 0.005
closestPoints = p.getClosestPoints(self._kuka.trayUid, self._kuka.kukaUid,maxDist)
if (len(closestPoints)):#(actualEndEffectorPos[2] <= -0.43):
self.terminated = 1
#print("closing gripper, attempting grasp")
#start grasp and terminate
fingerAngle = 0.3
for i in range (100):
graspAction = [0,0,0.0001,0,fingerAngle]
self._kuka.applyAction(graspAction)
p.stepSimulation()
fingerAngle = fingerAngle-(0.3/100.)
if (fingerAngle<0):
fingerAngle=0
for i in range (1000):
graspAction = [0,0,0.001,0,fingerAngle]
self._kuka.applyAction(graspAction)
p.stepSimulation()
blockPos,blockOrn=p.getBasePositionAndOrientation(self.blockUid)
if (blockPos[2] > 0.23):
#print("BLOCKPOS!")
#print(blockPos[2])
break
state = p.getLinkState(self._kuka.kukaUid,self._kuka.kukaEndEffectorIndex)
actualEndEffectorPos = state[0]
if (actualEndEffectorPos[2]>0.5):
break
self._observation = self.getExtendedObservation()
return True
return False
def _reward(self):
#rewards is height of target object
blockPos,blockOrn=p.getBasePositionAndOrientation(self.blockUid)
closestPoints = p.getClosestPoints(self.blockUid,self._kuka.kukaUid,1000, -1, self._kuka.kukaEndEffectorIndex)
reward = -1000
numPt = len(closestPoints)
#print(numPt)
if (numPt>0):
#print("reward:")
reward = -closestPoints[0][8]*10
if (blockPos[2] >0.2):
#print("grasped a block!!!")
#print("self._envStepCounter")
#print(self._envStepCounter)
reward = reward+1000
#print("reward")
#print(reward)
return reward
if parse_version(gym.__version__)>=parse_version('0.9.6'):
render = _render
reset = _reset
seed = _seed
step = _step
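# Minimal smoke-test sketch, assuming pybullet, pybullet_data and gym >= 0.9.6 are installed
# (so the reset/step aliases above exist). The episode length and the random-action policy
# below are illustrative only, not part of the environment.
if __name__ == "__main__":
  env = KukaCamGymEnv(renders=False, isDiscrete=False)
  obs = env.reset()
  for _ in range(10):
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    if done:
      obs = env.reset()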
|
[
"pybullet_data.getDataPath",
"os.sys.path.insert",
"pybullet.setTimeStep",
"pybullet.setGravity",
"numpy.array",
"pybullet.setPhysicsEngineParameter",
"pybullet.disconnect",
"time.sleep",
"gym.utils.seeding.np_random",
"numpy.reshape",
"pybullet.connect",
"pybullet.getCameraImage",
"pybullet.getQuaternionFromEuler",
"pybullet.resetDebugVisualizerCamera",
"pybullet.resetSimulation",
"gym.spaces.Discrete",
"os.path.dirname",
"pkg_resources.parse_version",
"numpy.finfo",
"pybullet.getClosestPoints",
"pybullet.getLinkState",
"pybullet.getBasePositionAndOrientation",
"inspect.currentframe",
"os.path.join",
"gym.spaces.Box",
"pybullet.stepSimulation",
"random.random"
] |
[((164, 196), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (182, 196), False, 'import os, inspect\n'), ((135, 162), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (150, 162), False, 'import os, inspect\n'), ((656, 683), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (681, 683), False, 'import pybullet_data\n'), ((1987, 2052), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(self._height, self._width, 4)'}), '(low=0, high=255, shape=(self._height, self._width, 4))\n', (1997, 2052), False, 'from gym import spaces\n'), ((2125, 2144), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (2142, 2144), True, 'import pybullet as p\n'), ((2149, 2201), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSolverIterations': '(150)'}), '(numSolverIterations=150)\n', (2176, 2201), True, 'import pybullet as p\n'), ((2206, 2235), 'pybullet.setTimeStep', 'p.setTimeStep', (['self._timeStep'], {}), '(self._timeStep)\n', (2219, 2235), True, 'import pybullet as p\n'), ((2543, 2580), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, ang]'], {}), '([0, 0, ang])\n', (2567, 2580), True, 'import pybullet as p\n'), ((2701, 2724), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (2713, 2724), True, 'import pybullet as p\n'), ((2837, 2855), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2853, 2855), True, 'import pybullet as p\n'), ((2921, 2948), 'numpy.array', 'np.array', (['self._observation'], {}), '(self._observation)\n', (2929, 2948), True, 'import numpy as np\n'), ((2975, 2989), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (2987, 2989), True, 'import pybullet as p\n'), ((3048, 3071), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (3065, 3071), False, 'from gym.utils import seeding\n'), ((4188, 4297), 'pybullet.getCameraImage', 'p.getCameraImage', ([], {'width': 'self._width', 'height': 'self._height', 'viewMatrix': 'viewMat', 'projectionMatrix': 'projMatrix'}), '(width=self._width, height=self._height, viewMatrix=viewMat,\n projectionMatrix=projMatrix)\n', (4204, 4297), True, 'import pybullet as p\n'), ((4329, 4376), 'numpy.reshape', 'np.reshape', (['rgb', '(self._height, self._width, 4)'], {}), '(rgb, (self._height, self._width, 4))\n', (4339, 4376), True, 'import numpy as np\n'), ((6273, 6285), 'numpy.array', 'np.array', (['px'], {}), '(px)\n', (6281, 6285), True, 'import numpy as np\n'), ((6424, 6491), 'pybullet.getLinkState', 'p.getLinkState', (['self._kuka.kukaUid', 'self._kuka.kukaEndEffectorIndex'], {}), '(self._kuka.kukaUid, self._kuka.kukaEndEffectorIndex)\n', (6438, 6491), True, 'import pybullet as p\n'), ((6769, 6836), 'pybullet.getClosestPoints', 'p.getClosestPoints', (['self._kuka.trayUid', 'self._kuka.kukaUid', 'maxDist'], {}), '(self._kuka.trayUid, self._kuka.kukaUid, maxDist)\n', (6787, 6836), True, 'import pybullet as p\n'), ((7971, 8017), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.blockUid'], {}), '(self.blockUid)\n', (8002, 8017), True, 'import pybullet as p\n'), ((8038, 8139), 'pybullet.getClosestPoints', 'p.getClosestPoints', (['self.blockUid', 'self._kuka.kukaUid', '(1000)', '(-1)', 'self._kuka.kukaEndEffectorIndex'], {}), '(self.blockUid, self._kuka.kukaUid, 1000, -1, self._kuka.\n kukaEndEffectorIndex)\n', (8056, 8139), True, 'import pybullet as 
p\n'), ((8509, 8539), 'pkg_resources.parse_version', 'parse_version', (['gym.__version__'], {}), '(gym.__version__)\n', (8522, 8539), False, 'from pkg_resources import parse_version\n'), ((8541, 8563), 'pkg_resources.parse_version', 'parse_version', (['"""0.9.6"""'], {}), "('0.9.6')\n", (8554, 8563), False, 'from pkg_resources import parse_version\n'), ((81, 103), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (101, 103), False, 'import os, inspect\n'), ((1211, 1237), 'pybullet.connect', 'p.connect', (['p.SHARED_MEMORY'], {}), '(p.SHARED_MEMORY)\n', (1220, 1237), True, 'import pybullet as p\n'), ((1288, 1352), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', (['(1.3)', '(180)', '(-41)', '[0.52, -0.2, -0.33]'], {}), '(1.3, 180, -41, [0.52, -0.2, -0.33])\n', (1316, 1352), True, 'import pybullet as p\n'), ((1364, 1383), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (1373, 1383), True, 'import pybullet as p\n'), ((1751, 1769), 'gym.spaces.Discrete', 'spaces.Discrete', (['(7)'], {}), '(7)\n', (1766, 1769), False, 'from gym import spaces\n'), ((1850, 1893), 'numpy.array', 'np.array', (['([self._action_bound] * action_dim)'], {}), '([self._action_bound] * action_dim)\n', (1858, 1893), True, 'import numpy as np\n'), ((1920, 1957), 'gym.spaces.Box', 'spaces.Box', (['(-action_high)', 'action_high'], {}), '(-action_high, action_high)\n', (1930, 1957), False, 'from gym import spaces\n'), ((2251, 2293), 'os.path.join', 'os.path.join', (['self._urdfRoot', '"""plane.urdf"""'], {}), "(self._urdfRoot, 'plane.urdf')\n", (2263, 2293), False, 'import os, inspect\n'), ((2319, 2367), 'os.path.join', 'os.path.join', (['self._urdfRoot', '"""table/table.urdf"""'], {}), "(self._urdfRoot, 'table/table.urdf')\n", (2331, 2367), False, 'import os, inspect\n'), ((2517, 2532), 'random.random', 'random.random', ([], {}), '()\n', (2530, 2532), False, 'import random\n'), ((2609, 2651), 'os.path.join', 'os.path.join', (['self._urdfRoot', '"""block.urdf"""'], {}), "(self._urdfRoot, 'block.urdf')\n", (2621, 2651), False, 'import os, inspect\n'), ((4988, 5006), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (5004, 5006), True, 'import pybullet as p\n'), ((5225, 5251), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (5235, 5251), False, 'import time\n'), ((5439, 5466), 'numpy.array', 'np.array', (['self._observation'], {}), '(self._observation)\n', (5447, 5466), True, 'import numpy as np\n'), ((5575, 5587), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5583, 5587), True, 'import numpy as np\n'), ((1265, 1281), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (1274, 1281), True, 'import pybullet as p\n'), ((2443, 2458), 'random.random', 'random.random', ([], {}), '()\n', (2456, 2458), False, 'import random\n'), ((2478, 2493), 'random.random', 'random.random', ([], {}), '()\n', (2491, 2493), False, 'import random\n'), ((7165, 7183), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (7181, 7183), True, 'import pybullet as p\n'), ((7411, 7429), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (7427, 7429), True, 'import pybullet as p\n'), ((7456, 7502), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.blockUid'], {}), '(self.blockUid)\n', (7487, 7502), True, 'import pybullet as p\n'), ((7628, 7695), 'pybullet.getLinkState', 'p.getLinkState', (['self._kuka.kukaUid', 'self._kuka.kukaEndEffectorIndex'], {}), '(self._kuka.kukaUid, 
self._kuka.kukaEndEffectorIndex)\n', (7642, 7695), True, 'import pybullet as p\n'), ((1654, 1674), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1662, 1674), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# These tiling implementations are adapted from PANDA Kaggle solutions, for example:
# https://github.com/kentaroy47/Kaggle-PANDA-1st-place-solution/blob/master/src/data_process/a00_save_tiles.py
from typing import Any, Optional, Tuple
import numpy as np
def get_1d_padding(length: int, tile_size: int) -> Tuple[int, int]:
"""Computes symmetric padding for `length` to be divisible by `tile_size`."""
pad = (tile_size - length % tile_size) % tile_size
return (pad // 2, pad - pad // 2)
def pad_for_tiling_2d(array: np.ndarray, tile_size: int, channels_first: Optional[bool] = True,
**pad_kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:
"""Symmetrically pads a 2D `array` such that both dimensions are divisible by `tile_size`.
:param array: 2D image array.
:param tile_size: Width/height of each tile in pixels.
:param channels_first: Whether `array` is in CHW (`True`, default) or HWC (`False`) layout.
:param pad_kwargs: Keyword arguments to be passed to `np.pad()` (e.g. `constant_values=0`).
:return: A tuple containing:
- `padded_array`: Resulting array, in the same CHW/HWC layout as the input.
- `offset`: XY offset introduced by the padding. Add this to coordinates relative to the
original array to obtain indices for the padded array.
"""
height, width = array.shape[1:] if channels_first else array.shape[:-1]
padding_h = get_1d_padding(height, tile_size)
padding_w = get_1d_padding(width, tile_size)
padding = [padding_h, padding_w]
channels_axis = 0 if channels_first else 2
padding.insert(channels_axis, (0, 0)) # zero padding on channels axis
padded_array = np.pad(array, padding, **pad_kwargs)
offset = (padding_w[0], padding_h[0])
return padded_array, np.array(offset)
def tile_array_2d(array: np.ndarray, tile_size: int, channels_first: Optional[bool] = True,
**pad_kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:
"""Split an image array into square non-overlapping tiles.
The array will be padded symmetrically if its dimensions are not exact multiples of `tile_size`.
:param array: Image array.
:param tile_size: Width/height of each tile in pixels.
:param pad_kwargs: Keyword arguments to be passed to `np.pad()` (e.g. `constant_values=0`).
:param channels_first: Whether `array` is in CHW (`True`, default) or HWC (`False`) layout.
:return: A tuple containing:
- `tiles`: A batch of tiles in NCHW layout.
- `coords`: XY coordinates of each tile, in the same order.
"""
padded_array, (offset_w, offset_h) = pad_for_tiling_2d(array, tile_size, channels_first, **pad_kwargs)
if channels_first:
channels, height, width = padded_array.shape
else:
height, width, channels = padded_array.shape
n_tiles_h = height // tile_size
n_tiles_w = width // tile_size
if channels_first:
intermediate_shape = (channels, n_tiles_h, tile_size, n_tiles_w, tile_size)
axis_order = (1, 3, 0, 2, 4) # (n_tiles_h, n_tiles_w, channels, tile_size, tile_size)
output_shape = (n_tiles_h * n_tiles_w, channels, tile_size, tile_size)
else:
intermediate_shape = (n_tiles_h, tile_size, n_tiles_w, tile_size, channels)
axis_order = (0, 2, 1, 3, 4) # (n_tiles_h, n_tiles_w, tile_size, tile_size, channels)
output_shape = (n_tiles_h * n_tiles_w, tile_size, tile_size, channels)
tiles = padded_array.reshape(intermediate_shape) # Split width and height axes
tiles = tiles.transpose(axis_order)
tiles = tiles.reshape(output_shape) # Flatten tile batch dimension
# Compute top-left coordinates of every tile, relative to the original array's origin
coords_h = tile_size * np.arange(n_tiles_h) - offset_h
coords_w = tile_size * np.arange(n_tiles_w) - offset_w
# Shape: (n_tiles_h * n_tiles_w, 2)
coords = np.stack(np.meshgrid(coords_w, coords_h), axis=-1).reshape(-1, 2)
return tiles, coords
def assemble_tiles_2d(tiles: np.ndarray, coords: np.ndarray, fill_value: Optional[float] = np.nan,
channels_first: Optional[bool] = True) -> Tuple[np.ndarray, np.ndarray]:
"""Assembles a 2D array from sequences of tiles and coordinates.
:param tiles: Stack of tiles with batch dimension first.
    :param coords: XY tile coordinates, assumed to be spaced by multiples of the tile size (shape: [N, 2]).
        The tile size is inferred from the shape of `tiles` and must be >0.
:param fill_value: Value to assign to empty elements (default: `NaN`).
:param channels_first: Whether each tile is in CHW (`True`, default) or HWC (`False`) layout.
:return: A tuple containing:
- `array`: The reassembled 2D array with the smallest dimensions to contain all given tiles.
        - `offset`: XY offset introduced by the assembly. Add this to tile coordinates to obtain
          indices for the assembled array.
"""
if coords.shape[0] != tiles.shape[0]:
raise ValueError(f"Tile coordinates and values must have the same length, "
f"got {coords.shape[0]} and {tiles.shape[0]}")
if channels_first:
n_tiles, channels, tile_size, _ = tiles.shape
else:
n_tiles, tile_size, _, channels = tiles.shape
tile_xs, tile_ys = coords.T
x_min, x_max = min(tile_xs), max(tile_xs + tile_size)
y_min, y_max = min(tile_ys), max(tile_ys + tile_size)
width = x_max - x_min
height = y_max - y_min
output_shape = (channels, height, width) if channels_first else (height, width, channels)
array = np.full(output_shape, fill_value)
offset = np.array([-x_min, -y_min])
for idx in range(n_tiles):
row = coords[idx, 1] + offset[1]
col = coords[idx, 0] + offset[0]
if channels_first:
array[:, row:row + tile_size, col:col + tile_size] = tiles[idx]
else:
array[row:row + tile_size, col:col + tile_size, :] = tiles[idx]
return array, offset
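# Minimal round-trip check for the helpers above: pad a small CHW image, split it into tiles,
# reassemble it, and confirm the original region is recovered. The array size and tile size
# below are arbitrary illustrative values, not part of the original module.
if __name__ == "__main__":
    image = np.random.rand(3, 70, 100)  # CHW layout (channels_first=True)
    tiles, coords = tile_array_2d(image, tile_size=32, constant_values=0)
    assembled, offset = assemble_tiles_2d(tiles, coords)
    # `offset` maps original coordinates to indices in the assembled (padded) array
    x0, y0 = offset
    recovered = assembled[:, y0:y0 + 70, x0:x0 + 100]
    assert np.allclose(recovered, image)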
|
[
"numpy.array",
"numpy.meshgrid",
"numpy.full",
"numpy.pad",
"numpy.arange"
] |
[((2038, 2074), 'numpy.pad', 'np.pad', (['array', 'padding'], {}), '(array, padding, **pad_kwargs)\n', (2044, 2074), True, 'import numpy as np\n'), ((5985, 6018), 'numpy.full', 'np.full', (['output_shape', 'fill_value'], {}), '(output_shape, fill_value)\n', (5992, 6018), True, 'import numpy as np\n'), ((6033, 6059), 'numpy.array', 'np.array', (['[-x_min, -y_min]'], {}), '([-x_min, -y_min])\n', (6041, 6059), True, 'import numpy as np\n'), ((2142, 2158), 'numpy.array', 'np.array', (['offset'], {}), '(offset)\n', (2150, 2158), True, 'import numpy as np\n'), ((4115, 4135), 'numpy.arange', 'np.arange', (['n_tiles_h'], {}), '(n_tiles_h)\n', (4124, 4135), True, 'import numpy as np\n'), ((4174, 4194), 'numpy.arange', 'np.arange', (['n_tiles_w'], {}), '(n_tiles_w)\n', (4183, 4194), True, 'import numpy as np\n'), ((4268, 4299), 'numpy.meshgrid', 'np.meshgrid', (['coords_w', 'coords_h'], {}), '(coords_w, coords_h)\n', (4279, 4299), True, 'import numpy as np\n')]
|
from math import pi
from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace
from numpy.linalg import eig, norm
class HartreeFock():
zeta = array([38.474970, 5.782948, 1.242567, 0.298073])
num_aos = len(zeta)
num_mos = 0
energy_tolerance = 0.0001; density_tolerance = 0.001
prev_energy = 0
prev_density = []
def __init__(self, num_elec):
# Make sure we can pair electrons
if num_elec % 2 != 0:
raise Exception("Can't do a RHF with", num_elec, "electrons.")
else:
print("Restricted Hartree-Fock with", num_elec, "electron(s).")
# We're RHF, so pair up spins in each molecular orbital
self.num_mos = int(num_elec / 2)
if self.num_mos > self.num_aos:
raise Exception("Can't create", self.num_mos, "molecular orbital(s) from", self.num_aos, "atomic orbital(s).")
else:
print(self.num_aos, "atomic orbital(s) and", self.num_mos, "molecular orbital(s).")
print("Zeta: ", self.zeta)
self.prev_density = ndarray(shape=(self.num_aos,self.num_aos),dtype=float, order='C')
def one_electron_integrals(self):
def overlap_kernel(zeta_i, zeta_j):
return pow(pi / (zeta_i + zeta_j), 1.5)
def kinetic_kernel(zeta_i, zeta_j):
return 3 * pow(pi, 1.5) * (zeta_i * zeta_j) / pow(zeta_i + zeta_j, 2.5)
def nucattr_kernel(zeta_i, zeta_j):
return (-4 * pi) / (zeta_i + zeta_j)
# Initialise our matrices
overlap = ndarray(shape=(self.num_aos,self.num_aos), dtype=float, order='C')
kinetic = ndarray(shape=(self.num_aos,self.num_aos), dtype=float, order='C')
nucattr = ndarray(shape=(self.num_aos,self.num_aos), dtype=float, order='C')
for i_ao in range(self.num_aos):
for j_ao in range(self.num_aos):
overlap[i_ao,j_ao] = overlap_kernel(self.zeta[i_ao], self.zeta[j_ao])
kinetic[i_ao,j_ao] = kinetic_kernel(self.zeta[i_ao], self.zeta[j_ao])
nucattr[i_ao,j_ao] = nucattr_kernel(self.zeta[i_ao], self.zeta[j_ao])
return overlap, kinetic, nucattr
def two_electron_integrals(self):
def tei_kernel(zeta_i, zeta_j, zeta_k, zeta_l):
temp_1 = (zeta_i + zeta_j) * (zeta_k + zeta_l)
temp_2 = sqrt(zeta_i + zeta_j + zeta_k + zeta_l)
return 2 * pow(pi, 2.5) / (temp_1 * temp_2)
teis = ndarray(shape=(self.num_aos,self.num_aos,self.num_aos,self.num_aos), dtype=float, order='C')
for i_ao in range(self.num_aos):
for j_ao in range(self.num_aos):
for k_ao in range(self.num_aos):
for l_ao in range(self.num_aos):
teis[i_ao,j_ao,k_ao,l_ao] = tei_kernel(self.zeta[i_ao], self.zeta[j_ao], self.zeta[k_ao], self.zeta[l_ao])
return teis
def basis_transformation_matrix(self, overlap):
# Get the eigenvalues and eigenvectors of the overlap matrix
overlap_evals, overlap_evecs = eig(overlap)
# Create diagonal matrix with entries given by inverse of eigenvalues of
# overlap matrix
try:
inv_sqrt_evals = diag(divide(1., sqrt(overlap_evals)))
except:
raise Exception("Overlap matrix is not positive definite.")
# Construct the basis transformation matrix and return it
return overlap_evecs @ inv_sqrt_evals @ overlap_evecs.T
def fock_matrix(self, core_hamiltonian, teis, density):
fock = ndarray(shape=density.shape, dtype=float, order='C')
for i_ao in range(self.num_aos):
for j_ao in range(self.num_aos):
fock[i_ao,j_ao] = core_hamiltonian[i_ao,j_ao]
for k_ao in range(self.num_aos):
for l_ao in range(self.num_aos):
coulomb = teis[i_ao,k_ao,j_ao,l_ao]
exchange = teis[i_ao,k_ao,l_ao,j_ao]
fock[i_ao,j_ao] += density[k_ao,l_ao] * (coulomb - 0.5*exchange)
return fock
def density_matrix(self, overlap, basis_transform, fock):
def ordered_eigensystem(matrix):
# Generate the eigenvalues and eigenvectors of the matrix
evals, evecs = eig(matrix)
# Sort the eigenvalues in ascending order and keep a track of what index they
# were originally assigned
ordered_indices = argsort(evals)
ordered_evals = sort(evals)
            # Order the eigenvectors in ascending order of their corresponding eigenvalues
ordered_evecs = ndarray(shape=evecs.shape, dtype=float, order='C')
ordered_transform = ndarray(shape=evecs.shape, dtype=float, order='C')
for i_evec in range(len(ordered_evals)):
ordered_evecs[:,i_evec] = evecs[:,ordered_indices[i_evec]]
ordered_transform[i_evec,:] = basis_transform[ordered_indices[i_evec],:]
# Return the ordered eigenvalues and corresponding eigenvectors
return ordered_evals, ordered_evecs, ordered_transform
# Transform Fock matrix to orthogonal basis
fock = basis_transform.T @ fock @ basis_transform
# Get the eigenvalues and eigenvectors of the input Fock matrix
fock_evals, fock_evecs, new_transform = ordered_eigensystem(fock)
# Transform the eigenvectors of the Fock matrix back to the original basis
fock_evecs = new_transform @ fock_evecs
# First of all we make sure the eigenvectors of the Fock matrix are normalised by the
        # overlap matrix (these are molecular orbitals, after all)
for i_mo in range(self.num_aos):
ao_coeffs = fock_evecs[:,i_mo]
norm = ao_coeffs.T @ overlap @ ao_coeffs
fock_evecs[:,i_mo] /= sqrt(norm)
# Initialise the density matrix
density = ndarray(shape=overlap.shape, dtype=float, order='C')
# Loop over all elements in the density matrix and accumulate
for i_ao in range(self.num_aos):
for j_ao in range(self.num_aos):
density[i_ao,j_ao] = 0.0
# We accumulate only over occupied molecular orbitals! Note that we also have
# access to the virtual orbitals at this point, but they're effectively discarded
for i_mo in range(self.num_mos):
density[i_ao,j_ao] += 2 * fock_evecs[i_ao,i_mo] * fock_evecs[j_ao,i_mo]
return fock_evecs, density
def scf_energy(self, density, core_hamiltonian, fock):
energy = 0.0
for i_ao in range(self.num_aos):
for j_ao in range(self.num_aos):
energy += 0.5 * density[i_ao,j_ao] * (core_hamiltonian[i_ao,j_ao] + fock[i_ao,j_ao])
return energy
def check_convergence(self, energy, density):
if abs(energy - self.prev_energy) < self.energy_tolerance:
energy_converged = True
else:
energy_converged = False
self.prev_energy = energy
if norm(density - self.prev_density) < self.density_tolerance:
density_converged = True
else:
density_converged = False
self.prev_density = density
return energy_converged, density_converged
def mulliken(self, overlap, density):
return trace(density @ overlap)
def run(self, num_cycles):
print("Hartree-Fock will run for a maximum of", num_cycles, "SCF iteration(s).")
overlap, kinetic, nucattr = self.one_electron_integrals()
core_hamiltonian = kinetic + nucattr
teis = self.two_electron_integrals()
basis_transform = self.basis_transformation_matrix(overlap)
_, density = self.density_matrix(overlap, basis_transform, core_hamiltonian)
energy = self.scf_energy(density, core_hamiltonian, core_hamiltonian)
for i in range(num_cycles):
fock = self.fock_matrix(core_hamiltonian, teis, density)
fock_evecs, density = self.density_matrix(overlap, basis_transform, fock)
energy = self.scf_energy(density, core_hamiltonian, fock)
print("Iteration", i, "SCF Energy:", energy)
energy_converged, density_converged = self.check_convergence(energy, density)
if energy_converged and density_converged:
print("SCF has converged!")
for i_mo in range(self.num_mos):
print("Molecular Orbital", i_mo, "Coefficients :", fock_evecs[:,i_mo])
print("Mulliken charge:", self.mulliken(overlap, density))
break
if i == num_cycles - 1:
print("SCF failed to converge.")
print("Energy Convergence Check:", energy_converged)
print("Density Convergence Check:", density_converged)
fock_mo_basis = ndarray(shape=(self.num_mos,self.num_mos), dtype=float, order='C')
for i_mo in range(self.num_mos):
for j_mo in range(self.num_mos):
fock_mo_basis[i_mo,j_mo] = 0.0
for i_ao in range(self.num_aos):
for j_ao in range(self.num_aos):
fock_mo_basis[i_mo,j_mo] += fock_evecs[i_ao,j_mo] * fock_evecs[j_ao,i_mo] * fock[i_ao,j_ao]
print(fock_mo_basis)
if __name__ == "__main__":
hf = HartreeFock(4)
hf.run(2000)
|
[
"numpy.trace",
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.sort",
"numpy.argsort",
"numpy.array",
"numpy.ndarray",
"numpy.linalg.norm"
] |
[((170, 217), 'numpy.array', 'array', (['[38.47497, 5.782948, 1.242567, 0.298073]'], {}), '([38.47497, 5.782948, 1.242567, 0.298073])\n', (175, 217), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((1103, 1170), 'numpy.ndarray', 'ndarray', ([], {'shape': '(self.num_aos, self.num_aos)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(self.num_aos, self.num_aos), dtype=float, order='C')\n", (1110, 1170), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((1595, 1662), 'numpy.ndarray', 'ndarray', ([], {'shape': '(self.num_aos, self.num_aos)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(self.num_aos, self.num_aos), dtype=float, order='C')\n", (1602, 1662), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((1681, 1748), 'numpy.ndarray', 'ndarray', ([], {'shape': '(self.num_aos, self.num_aos)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(self.num_aos, self.num_aos), dtype=float, order='C')\n", (1688, 1748), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((1767, 1834), 'numpy.ndarray', 'ndarray', ([], {'shape': '(self.num_aos, self.num_aos)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(self.num_aos, self.num_aos), dtype=float, order='C')\n", (1774, 1834), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((2526, 2625), 'numpy.ndarray', 'ndarray', ([], {'shape': '(self.num_aos, self.num_aos, self.num_aos, self.num_aos)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(self.num_aos, self.num_aos, self.num_aos, self.num_aos),\n dtype=float, order='C')\n", (2533, 2625), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((3135, 3147), 'numpy.linalg.eig', 'eig', (['overlap'], {}), '(overlap)\n', (3138, 3147), False, 'from numpy.linalg import eig, norm\n'), ((3645, 3697), 'numpy.ndarray', 'ndarray', ([], {'shape': 'density.shape', 'dtype': 'float', 'order': '"""C"""'}), "(shape=density.shape, dtype=float, order='C')\n", (3652, 3697), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((6077, 6129), 'numpy.ndarray', 'ndarray', ([], {'shape': 'overlap.shape', 'dtype': 'float', 'order': '"""C"""'}), "(shape=overlap.shape, dtype=float, order='C')\n", (6084, 6129), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((7583, 7607), 'numpy.trace', 'trace', (['(density @ overlap)'], {}), '(density @ overlap)\n', (7588, 7607), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((9159, 9226), 'numpy.ndarray', 'ndarray', ([], {'shape': '(self.num_mos, self.num_mos)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(self.num_mos, self.num_mos), dtype=float, order='C')\n", (9166, 9226), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((2411, 2450), 'numpy.sqrt', 'sqrt', (['(zeta_i + zeta_j + zeta_k + zeta_l)'], {}), '(zeta_i + zeta_j + zeta_k + zeta_l)\n', (2415, 2450), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((4409, 4420), 'numpy.linalg.eig', 'eig', (['matrix'], {}), '(matrix)\n', (4412, 4420), False, 'from numpy.linalg import eig, norm\n'), ((4585, 4599), 'numpy.argsort', 'argsort', (['evals'], {}), '(evals)\n', (4592, 4599), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((4629, 4640), 'numpy.sort', 'sort', (['evals'], {}), 
'(evals)\n', (4633, 4640), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((4763, 4813), 'numpy.ndarray', 'ndarray', ([], {'shape': 'evecs.shape', 'dtype': 'float', 'order': '"""C"""'}), "(shape=evecs.shape, dtype=float, order='C')\n", (4770, 4813), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((4847, 4897), 'numpy.ndarray', 'ndarray', ([], {'shape': 'evecs.shape', 'dtype': 'float', 'order': '"""C"""'}), "(shape=evecs.shape, dtype=float, order='C')\n", (4854, 4897), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((6004, 6014), 'numpy.sqrt', 'sqrt', (['norm'], {}), '(norm)\n', (6008, 6014), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n'), ((7277, 7310), 'numpy.linalg.norm', 'norm', (['(density - self.prev_density)'], {}), '(density - self.prev_density)\n', (7281, 7310), False, 'from numpy.linalg import eig, norm\n'), ((3318, 3337), 'numpy.sqrt', 'sqrt', (['overlap_evals'], {}), '(overlap_evals)\n', (3322, 3337), False, 'from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace\n')]
|
import os
import numpy as np
from skimage.io import imread
def get_file_count(paths, image_format='.tif'):
total_count = 0
for path in paths:
try:
path_list = [_ for _ in os.listdir(path) if _.endswith(image_format)]
total_count += len(path_list)
except OSError:
print("Directory does not exist. Returned file count for this path will be 0")
return total_count
# Function to load image
def load_image(img_path):
img = imread(img_path)
if img.shape[2] == 4:
img = img[:, :, :-1]
# img = np.roll(img, shift=1, axis=2) # CHECK IMAGE FORMAT
return img
# Function to load mask
def load_mask(mask_path):
mask = imread(mask_path)
return mask
def load_mask_from_img(cfg, img_path, img_name, suffixes):
a_mask = imread(os.path.join(img_path, img_name + suffixes[0]))
msk = np.zeros((a_mask.shape[0], a_mask.shape[1], len(suffixes) * cfg.NUMBER_MSK_CHANNELS))
i = 0
for suffix in suffixes:
msk_channel = imread(os.path.join(img_path, img_name + suffix))
if len(msk_channel.shape) == 2:
msk_channel = np.expand_dims(msk_channel, axis=-1)
if len(msk_channel.shape) != 3:
raise ValueError("Mask must be 3-dim here. Does your mask have 1 or more than 3 dimensions? "
"Check the masks.")
msk[:, :, i:i+cfg.NUMBER_MSK_CHANNELS] = msk_channel
i += cfg.NUMBER_MSK_CHANNELS
# print(msk, msk.shape)
return msk
def load_weights(cfg, img_path, img_name, weight_suffixes):
a_weights = np.load(os.path.join(img_path, img_name + weight_suffixes[0]))
weights = np.zeros((a_weights.shape[0], a_weights.shape[1], len(weight_suffixes) * cfg.NUMBER_MSK_CHANNELS))
i = 0
for suffix in weight_suffixes:
weights_channel = np.load(os.path.join(img_path, img_name + suffix))
if len(weights_channel.shape) == 2:
weights_channel = np.expand_dims(weights_channel, axis=-1)
if len(weights_channel.shape) != 3:
raise ValueError("Weights must be 3-dim here. Has your weights 1 or more than 3 dimensions? Check the weights.")
weights[:, :, i:i+cfg.NUMBER_MSK_CHANNELS] = weights_channel
i += cfg.NUMBER_MSK_CHANNELS
return weights
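# Hedged usage sketch: the folder name, file-name suffix convention and the minimal cfg
# stand-in below are illustrative assumptions, not part of this module.
if __name__ == "__main__":
    class _Cfg:
        NUMBER_MSK_CHANNELS = 1  # channels contributed by each mask/weight file
    img_dir = "data/train"  # hypothetical folder of .tif images
    print(get_file_count([img_dir]))  # prints a warning and counts 0 if the folder is missing
    # With an image "sample.tif" and a mask "sample_mask.tif" in img_dir, one could call:
    # msk = load_mask_from_img(_Cfg(), img_dir, "sample", suffixes=["_mask.tif"])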
|
[
"skimage.io.imread",
"os.listdir",
"os.path.join",
"numpy.expand_dims"
] |
[((491, 507), 'skimage.io.imread', 'imread', (['img_path'], {}), '(img_path)\n', (497, 507), False, 'from skimage.io import imread\n'), ((705, 722), 'skimage.io.imread', 'imread', (['mask_path'], {}), '(mask_path)\n', (711, 722), False, 'from skimage.io import imread\n'), ((820, 866), 'os.path.join', 'os.path.join', (['img_path', '(img_name + suffixes[0])'], {}), '(img_path, img_name + suffixes[0])\n', (832, 866), False, 'import os\n'), ((1599, 1652), 'os.path.join', 'os.path.join', (['img_path', '(img_name + weight_suffixes[0])'], {}), '(img_path, img_name + weight_suffixes[0])\n', (1611, 1652), False, 'import os\n'), ((1031, 1072), 'os.path.join', 'os.path.join', (['img_path', '(img_name + suffix)'], {}), '(img_path, img_name + suffix)\n', (1043, 1072), False, 'import os\n'), ((1140, 1176), 'numpy.expand_dims', 'np.expand_dims', (['msk_channel'], {'axis': '(-1)'}), '(msk_channel, axis=-1)\n', (1154, 1176), True, 'import numpy as np\n'), ((1846, 1887), 'os.path.join', 'os.path.join', (['img_path', '(img_name + suffix)'], {}), '(img_path, img_name + suffix)\n', (1858, 1887), False, 'import os\n'), ((1963, 2003), 'numpy.expand_dims', 'np.expand_dims', (['weights_channel'], {'axis': '(-1)'}), '(weights_channel, axis=-1)\n', (1977, 2003), True, 'import numpy as np\n'), ((202, 218), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (212, 218), False, 'import os\n')]
|
# -*- encoding: utf8 -*-
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from lvq import SilvqModel
from lvq.utils import plot2d
def main():
# Load dataset
dataset = np.loadtxt('data/artificial_dataset1.csv', delimiter=',')
x = dataset[:, :-1].astype('float64')
y = dataset[:, -1].astype('int64')
# Split dataset into training set and test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=3, shuffle=True, stratify=y)
# Generating model
model = SilvqModel(x.shape[1], theta=0.8, bias_type='ls')
# Training the model
model.fit(x_train, y_train, epochs=30)
# Predict the response for test dataset
y_predict = model.predict(x_test)
# Evaluating the model
print('Accuracy: %.3f' %accuracy_score(y_test, y_predict))
# Plot prediction results and prototypes
plot2d(model, x, y, title='Artificial dataset1')
if __name__ == '__main__':
main()
|
[
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split",
"lvq.SilvqModel",
"numpy.loadtxt",
"lvq.utils.plot2d"
] |
[((245, 302), 'numpy.loadtxt', 'np.loadtxt', (['"""data/artificial_dataset1.csv"""'], {'delimiter': '""","""'}), "('data/artificial_dataset1.csv', delimiter=',')\n", (255, 302), True, 'import numpy as np\n'), ((474, 553), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(3)', 'shuffle': '(True)', 'stratify': 'y'}), '(x, y, test_size=0.2, random_state=3, shuffle=True, stratify=y)\n', (490, 553), False, 'from sklearn.model_selection import train_test_split\n'), ((590, 639), 'lvq.SilvqModel', 'SilvqModel', (['x.shape[1]'], {'theta': '(0.8)', 'bias_type': '"""ls"""'}), "(x.shape[1], theta=0.8, bias_type='ls')\n", (600, 639), False, 'from lvq import SilvqModel\n'), ((930, 978), 'lvq.utils.plot2d', 'plot2d', (['model', 'x', 'y'], {'title': '"""Artificial dataset1"""'}), "(model, x, y, title='Artificial dataset1')\n", (936, 978), False, 'from lvq.utils import plot2d\n'), ((846, 879), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_predict'], {}), '(y_test, y_predict)\n', (860, 879), False, 'from sklearn.metrics import accuracy_score\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Importing all required libraries
# In[ ]:
from __future__ import absolute_import, division, print_function, unicode_literals
# In[ ]:
#Checking for correct cuda and tf versions
from tensorflow.python.platform import build_info as tf_build_info
print(tf_build_info.cuda_version_number)
# 9.0 in v1.10.0
print(tf_build_info.cudnn_version_number)
# 7 in v1.10.0
# In[ ]:
import tensorflow as tf
import pathlib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import numpy as np
import matplotlib.pyplot as plt
# In[ ]:
AUTOTUNE = tf.data.experimental.AUTOTUNE
# In[ ]:
import IPython.display as display
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import os
# In[ ]:
tf.__version__
# In[ ]:
#Train and test data folder
train_data_dir = "\\hyper-kvasir\\splits\\all\\1"
test_data_dir = "\\hyper-kvasir\\splits\\all\\0"
# In[ ]:
train_data_dir = pathlib.Path(train_data_dir)
test_data_dir = pathlib.Path(test_data_dir)
# In[ ]:
#count how many images are there
image_count = len(list(train_data_dir.glob('*/*.jpg')))
image_count
# In[ ]:
total_train = len(list(train_data_dir.glob('*/*.jpg')))
total_val = len(list(test_data_dir.glob('*/*.jpg')))
# In[ ]:
#get the class names
CLASS_NAMES = np.array([item.name for item in train_data_dir.glob('*') if item.name != "LICENSE.txt"])
CLASS_NAMES
# In[ ]:
#Define parameter for training
batch_size = 32
IMG_HEIGHT = 224
IMG_WIDTH = 224
STEPS_PER_EPOCH = np.ceil(image_count/batch_size)
epochs = 8
num_classes = len(CLASS_NAMES) #23
# In[ ]:
#We use image data generators to load the images and prepare them for the training
train_image_generator = ImageDataGenerator() # Generator for our training data
validation_image_generator = ImageDataGenerator() # Generator for our validation data
train_data_gen = train_image_generator.flow_from_directory(directory=str(train_data_dir),
batch_size=batch_size,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
classes = list(CLASS_NAMES),
class_mode='categorical'
)
val_data_gen = validation_image_generator.flow_from_directory(directory=str(test_data_dir),
batch_size=batch_size,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical',
classes = list(CLASS_NAMES)
)
#get class order from directories
print(train_data_gen.class_indices.keys())
print(val_data_gen.class_indices.keys())
# In[ ]:
IMG_SIZE = 224
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
# base model from the pre-trained model. Resnet 50 in this case
base_model = tf.keras.applications.ResNet50(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
base_model.trainable = False
# In[ ]:
#add new classification layer
x = base_model.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(num_classes,activation='softmax')(x)
model = tf.keras.models.Model(inputs=base_model.input, outputs=x)
base_learning_rate = 0.001
model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
loss='categorical_crossentropy',
metrics=['accuracy'])
# In[ ]:
#fit the model
history = model.fit_generator(
train_data_gen,
steps_per_epoch=total_train // batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=total_val // batch_size
)
# In[ ]:
#create training plots
history
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# In[ ]:
base_model.trainable = True #now we want to train the base model
# In[ ]:
# How many layers are in the base model
print("Layers base model: ", len(base_model.layers))
# Fine tune from layer x
fine_tune_at = 100
# Freeze all the layers before the fine tune starting layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
# In[ ]:
model.compile(loss='categorical_crossentropy',
optimizer = tf.keras.optimizers.RMSprop(lr=base_learning_rate/10),
metrics=['accuracy'])
# In[ ]:
model.summary()
# In[ ]:
#Fine tune step
initial_epochs = 7
fine_tune_epochs = 3
total_epochs = initial_epochs + fine_tune_epochs
train_batches = total_train // batch_size
print(total_val // batch_size)
validation_batches = total_val // batch_size
history_fine = model.fit_generator(
train_data_gen,
steps_per_epoch=total_train // batch_size,
epochs=total_epochs,
initial_epoch = history.epoch[-1],
validation_data=val_data_gen,
validation_steps=total_val // batch_size
)
# In[ ]:
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
# In[ ]:
#Plot fine tuning
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# In[ ]:
#model save and load
import os
# In[ ]:
#some time stamp
from datetime import datetime
# current date and time.
now = datetime.now()
timestamp = datetime.timestamp(now)
print("timestamp =", timestamp)
# In[ ]:
model_filename = str(timestamp)+'mymodel.h5'
model.save(model_filename)
# In[ ]:
#To apply the model on new data
new_model = tf.keras.models.load_model(model_filename)
# Show the model architecture
new_model.summary()
# In[ ]:
from tensorflow.keras.preprocessing import image
#image directory containing images to test
img_dir="\\polyps"
for i,img in enumerate(os.listdir(img_dir)):
tmpimage = image.load_img(os.path.join(img_dir,img), target_size=(IMG_SIZE,IMG_SIZE))
tmpimage = np.expand_dims(tmpimage, axis=0).astype('float32')
result_class=new_model.predict(tmpimage)
print(img,";",CLASS_NAMES[result_class.argmax(axis=-1)])
|
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.applications.ResNet50",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"os.listdir",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.keras.models.Model",
"matplotlib.pyplot.ylim",
"numpy.ceil",
"datetime.datetime.timestamp",
"matplotlib.pyplot.title",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"os.path.join",
"tensorflow.keras.optimizers.Adam",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.expand_dims",
"matplotlib.pyplot.subplot"
] |
[((1111, 1139), 'pathlib.Path', 'pathlib.Path', (['train_data_dir'], {}), '(train_data_dir)\n', (1123, 1139), False, 'import pathlib\n'), ((1156, 1183), 'pathlib.Path', 'pathlib.Path', (['test_data_dir'], {}), '(test_data_dir)\n', (1168, 1183), False, 'import pathlib\n'), ((1680, 1713), 'numpy.ceil', 'np.ceil', (['(image_count / batch_size)'], {}), '(image_count / batch_size)\n', (1687, 1713), True, 'import numpy as np\n'), ((1879, 1899), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (1897, 1899), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1963, 1983), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (1981, 1983), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3374, 3470), 'tensorflow.keras.applications.ResNet50', 'tf.keras.applications.ResNet50', ([], {'input_shape': 'IMG_SHAPE', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=IMG_SHAPE, include_top=False,\n weights='imagenet')\n", (3404, 3470), True, 'import tensorflow as tf\n'), ((3777, 3834), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'base_model.input', 'outputs': 'x'}), '(inputs=base_model.input, outputs=x)\n', (3798, 3834), True, 'import tensorflow as tf\n'), ((4467, 4493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4477, 4493), True, 'import matplotlib.pyplot as plt\n'), ((4494, 4514), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4505, 4514), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4569), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (4523, 4569), True, 'import matplotlib.pyplot as plt\n'), ((4570, 4630), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (4578, 4630), True, 'import matplotlib.pyplot as plt\n'), ((4631, 4660), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4641, 4660), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4706), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (4670, 4706), True, 'import matplotlib.pyplot as plt\n'), ((4708, 4728), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4719, 4728), True, 'import matplotlib.pyplot as plt\n'), ((4729, 4780), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (4737, 4780), True, 'import matplotlib.pyplot as plt\n'), ((4781, 4838), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), "(epochs_range, val_loss, label='Validation Loss')\n", (4789, 4838), True, 'import matplotlib.pyplot as plt\n'), ((4839, 4868), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4849, 4868), True, 'import matplotlib.pyplot as plt\n'), ((4869, 4910), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (4878, 4910), True, 'import matplotlib.pyplot as plt\n'), ((4911, 4921), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4919, 4921), True, 'import matplotlib.pyplot as plt\n'), ((6194, 6220), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (6204, 6220), True, 'import matplotlib.pyplot as plt\n'), ((6221, 6241), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6232, 6241), True, 'import matplotlib.pyplot as plt\n'), ((6242, 6282), 'matplotlib.pyplot.plot', 'plt.plot', (['acc'], {'label': '"""Training Accuracy"""'}), "(acc, label='Training Accuracy')\n", (6250, 6282), True, 'import matplotlib.pyplot as plt\n'), ((6283, 6329), 'matplotlib.pyplot.plot', 'plt.plot', (['val_acc'], {'label': '"""Validation Accuracy"""'}), "(val_acc, label='Validation Accuracy')\n", (6291, 6329), True, 'import matplotlib.pyplot as plt\n'), ((6330, 6348), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.8, 1]'], {}), '([0.8, 1])\n', (6338, 6348), True, 'import matplotlib.pyplot as plt\n'), ((6444, 6473), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6454, 6473), True, 'import matplotlib.pyplot as plt\n'), ((6474, 6519), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (6483, 6519), True, 'import matplotlib.pyplot as plt\n'), ((6521, 6541), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6532, 6541), True, 'import matplotlib.pyplot as plt\n'), ((6542, 6579), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': '"""Training Loss"""'}), "(loss, label='Training Loss')\n", (6550, 6579), True, 'import matplotlib.pyplot as plt\n'), ((6580, 6623), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], {'label': '"""Validation Loss"""'}), "(val_loss, label='Validation Loss')\n", (6588, 6623), True, 'import matplotlib.pyplot as plt\n'), ((6624, 6642), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.0]'], {}), '([0, 1.0])\n', (6632, 6642), True, 'import matplotlib.pyplot as plt\n'), ((6737, 6766), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6747, 6766), True, 'import matplotlib.pyplot as plt\n'), ((6767, 6808), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (6776, 6808), True, 'import matplotlib.pyplot as plt\n'), ((6809, 6828), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6819, 6828), True, 'import matplotlib.pyplot as plt\n'), ((6829, 6839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6837, 6839), True, 'import matplotlib.pyplot as plt\n'), ((6976, 6990), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6988, 6990), False, 'from datetime import datetime\n'), ((7003, 7026), 'datetime.datetime.timestamp', 'datetime.timestamp', (['now'], {}), '(now)\n', (7021, 7026), False, 'from datetime import datetime\n'), ((7200, 7242), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_filename'], {}), '(model_filename)\n', (7226, 7242), True, 'import tensorflow as tf\n'), ((3661, 3701), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (3699, 3701), True, 'import tensorflow as tf\n'), ((3709, 3765), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (3730, 3765), 
True, 'import tensorflow as tf\n'), ((6405, 6415), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (6413, 6415), True, 'import matplotlib.pyplot as plt\n'), ((6698, 6708), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (6706, 6708), True, 'import matplotlib.pyplot as plt\n'), ((7443, 7462), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (7453, 7462), False, 'import os\n'), ((3887, 3934), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'base_learning_rate'}), '(lr=base_learning_rate)\n', (3911, 3934), True, 'import tensorflow as tf\n'), ((5372, 5427), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'lr': '(base_learning_rate / 10)'}), '(lr=base_learning_rate / 10)\n', (5399, 5427), True, 'import tensorflow as tf\n'), ((7493, 7519), 'os.path.join', 'os.path.join', (['img_dir', 'img'], {}), '(img_dir, img)\n', (7505, 7519), False, 'import os\n'), ((7569, 7601), 'numpy.expand_dims', 'np.expand_dims', (['tmpimage'], {'axis': '(0)'}), '(tmpimage, axis=0)\n', (7583, 7601), True, 'import numpy as np\n')]
|
import torch.utils.data as data
import numpy as np
from imageio import imread
from path import Path
import pdb
def crawl_folders(folders_list):
imgs = []
depth = []
for folder in folders_list:
current_imgs = sorted(folder.files('*.jpg'))
current_depth = []
for img in current_imgs:
d = img.dirname()/(img.name[:-4] + '.npy')
assert(d.isfile()), "depth file {} not found".format(str(d))
            current_depth.append(d)
imgs.extend(current_imgs)
depth.extend(current_depth)
return imgs, depth
def load_as_float(path):
return imread(path).astype(np.float32)
class ValidationSet(data.Dataset):
"""A sequence data loader where the files are arranged in this way:
root/scene_1/0000000.jpg
root/scene_1/0000000.npy
root/scene_1/0000001.jpg
root/scene_1/0000001.npy
..
root/scene_2/0000000.jpg
root/scene_2/0000000.npy
.
    transform functions must take in a list of images and a numpy array which can be None
"""
def __init__(self, root, transform=None):
self.root = Path(root)
scene_list_path = self.root/'val.txt'
self.scenes = [self.root/folder[:-1] for folder in open(scene_list_path)]
self.imgs, self.depth = crawl_folders(self.scenes)
self.transform = transform
def __getitem__(self, index):
img = load_as_float(self.imgs[index])
depth = np.load(self.depth[index]).astype(np.float32) #;pdb.set_trace()
if self.transform is not None:
img, _, _ = self.transform([img], depth, None); #this depth is just used to fill the compose transform that is shared(no need for the result)
img = img[0]
return img, depth
def __len__(self):
return len(self.imgs)
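

# A minimal usage sketch, not part of the loader above: it assumes a hypothetical
# dataset root laid out as described in the ValidationSet docstring
# (a 'val.txt' listing scene folders of paired .jpg/.npy files).
if __name__ == '__main__':
    val_set = ValidationSet(root='/path/to/dataset', transform=None)
    print('{} validation samples'.format(len(val_set)))
    img, depth = val_set[0]  # raw float32 image and its depth map
    print(img.shape, depth.shape)
    # The set can also be wrapped in a standard PyTorch loader:
    # loader = data.DataLoader(val_set, batch_size=4, shuffle=False, num_workers=2)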
|
[
"imageio.imread",
"path.Path",
"numpy.load"
] |
[((1183, 1193), 'path.Path', 'Path', (['root'], {}), '(root)\n', (1187, 1193), False, 'from path import Path\n'), ((654, 666), 'imageio.imread', 'imread', (['path'], {}), '(path)\n', (660, 666), False, 'from imageio import imread\n'), ((1513, 1539), 'numpy.load', 'np.load', (['self.depth[index]'], {}), '(self.depth[index])\n', (1520, 1539), True, 'import numpy as np\n')]
|
from DD.utils import PoolByteArray2NumpyArray, NumpyArray2PoolByteArray
from DD.Entity import Entity
import numpy as np
class Terrain(Entity):
def __init__(self, json, width, height, scale=4, terrain_types=4):
super(Terrain, self).__init__(json)
self._scale = scale
self.terrain_types = terrain_types
self.splat = PoolByteArray2NumpyArray(self._json['splat']).reshape(height*self._scale, width*self._scale, self.terrain_types, order='C')
def get_json(self):
json = self._json
json['splat'] = NumpyArray2PoolByteArray(self.splat.reshape(np.prod(self.splat.shape), order='C'))
return json
def pad(self, top, bottom, left, right):
self.splat = np.pad(self.splat,
((top*self._scale, bottom*self._scale), (left*self._scale, right*self._scale), (0,0)),
mode='edge')
def crop(self, top, bottom, left, right):
self.splat = self._crop_map_safe(self.splat, top, bottom, left, right, self._scale)
def fliplr(self, width):
self.splat = np.fliplr(self.splat)
def flipud(self, height):
self.splat = np.flipud(self.splat)
def rot90(self, width, height):
self.splat = self._rot90_map(self.splat)
def rot180(self, width, height):
self.splat = self._rot180_map(self.splat)
def rot270(self, width, height):
self.splat = self._rot270_map(self.splat)
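

# A minimal numpy-only sketch of the splat-map bookkeeping used above, assuming a
# hypothetical 3x2-tile map at scale 4 with 4 terrain types; no DD entity or JSON
# payload is required for this illustration.
if __name__ == '__main__':
    width, height, scale, terrain_types = 3, 2, 4, 4
    flat = np.zeros(height * scale * width * scale * terrain_types, dtype=np.uint8)
    splat = flat.reshape(height * scale, width * scale, terrain_types, order='C')
    print(splat.shape)   # (8, 12, 4)
    # pad() above grows the map by whole tiles on each side (here: 1 tile on top, 2 on the right)
    padded = np.pad(splat, ((1 * scale, 0), (0, 2 * scale), (0, 0)), mode='edge')
    print(padded.shape)  # (12, 20, 4)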
|
[
"numpy.prod",
"numpy.flipud",
"numpy.fliplr",
"DD.utils.PoolByteArray2NumpyArray",
"numpy.pad"
] |
[((720, 852), 'numpy.pad', 'np.pad', (['self.splat', '((top * self._scale, bottom * self._scale), (left * self._scale, right *\n self._scale), (0, 0))'], {'mode': '"""edge"""'}), "(self.splat, ((top * self._scale, bottom * self._scale), (left * self\n ._scale, right * self._scale), (0, 0)), mode='edge')\n", (726, 852), True, 'import numpy as np\n'), ((1090, 1111), 'numpy.fliplr', 'np.fliplr', (['self.splat'], {}), '(self.splat)\n', (1099, 1111), True, 'import numpy as np\n'), ((1168, 1189), 'numpy.flipud', 'np.flipud', (['self.splat'], {}), '(self.splat)\n', (1177, 1189), True, 'import numpy as np\n'), ((351, 396), 'DD.utils.PoolByteArray2NumpyArray', 'PoolByteArray2NumpyArray', (["self._json['splat']"], {}), "(self._json['splat'])\n", (375, 396), False, 'from DD.utils import PoolByteArray2NumpyArray, NumpyArray2PoolByteArray\n'), ((594, 619), 'numpy.prod', 'np.prod', (['self.splat.shape'], {}), '(self.splat.shape)\n', (601, 619), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import os,sys,time
"""
Copied from orphics.mpi
"""
try:
disable_mpi_env = os.environ['DISABLE_MPI']
disable_mpi = True if disable_mpi_env.lower().strip() == "true" else False
except:
disable_mpi = False
"""
Use the below cleanup stuff only for intel-mpi!
If you use it on openmpi, you will have no traceback for errors
causing hours of endless confusion and frustration! - Sincerely, past frustrated Mat
"""
# From Sigurd's enlib.mpi:
# Uncaught exceptions don't cause mpi to abort. This can lead to thousands of
# wasted CPU hours
# def cleanup(type, value, traceback):
# sys.__excepthook__(type, value, traceback)
# MPI.COMM_WORLD.Abort(1)
# sys.excepthook = cleanup
class fakeMpiComm:
"""
A Simple Fake MPI implementation
"""
def __init__(self):
pass
def Get_rank(self):
return 0
def Get_size(self):
return 1
def Barrier(self):
pass
def Abort(self,dummy):
pass
try:
if disable_mpi: raise
from mpi4py import MPI
except:
if not(disable_mpi): print("WARNING: mpi4py could not be loaded. Falling back to fake MPI. This means that if you submitted multiple processes, they will all be assigned the same rank of 0, and they are potentially doing the same thing.")
class template:
pass
MPI = template()
MPI.COMM_WORLD = fakeMpiComm()
def mpi_distribute(num_tasks,avail_cores,allow_empty=False):
# copied to mapsims.convert_noise_templates
if not(allow_empty): assert avail_cores<=num_tasks
min_each, rem = divmod(num_tasks,avail_cores)
num_each = np.array([min_each]*avail_cores) # first distribute equally
if rem>0: num_each[-rem:] += 1 # add the remainder to the last set of cores (so that rank 0 never gets extra jobs)
task_range = list(range(num_tasks)) # the full range of tasks
cumul = np.cumsum(num_each).tolist() # the end indices for each task
task_dist = [task_range[x:y] for x,y in zip([0]+cumul[:-1],cumul)] # a list containing the tasks for each core
assert sum(num_each)==num_tasks
assert len(num_each)==avail_cores
assert len(task_dist)==avail_cores
return num_each,task_dist
def distribute(njobs,verbose=True,**kwargs):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numcores = comm.Get_size()
num_each,each_tasks = mpi_distribute(njobs,numcores,**kwargs)
if rank==0: print ("At most ", max(num_each) , " tasks...")
my_tasks = each_tasks[rank]
return comm,rank,my_tasks
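

# A minimal usage sketch; the job count and the loop body below are hypothetical.
if __name__ == '__main__':
    # mpi_distribute splits tasks as evenly as possible, e.g. 10 tasks over 3 cores:
    num_each, task_dist = mpi_distribute(10, 3)
    print(num_each)   # [3 3 4] -- the remainder goes to the last cores
    print(task_dist)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    # Under MPI (or the fake fallback above), each rank only works on its own share:
    comm, rank, my_tasks = distribute(10)
    for task in my_tasks:
        pass  # do the per-task work here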
|
[
"numpy.array",
"numpy.cumsum"
] |
[((1658, 1692), 'numpy.array', 'np.array', (['([min_each] * avail_cores)'], {}), '([min_each] * avail_cores)\n', (1666, 1692), True, 'import numpy as np\n'), ((1917, 1936), 'numpy.cumsum', 'np.cumsum', (['num_each'], {}), '(num_each)\n', (1926, 1936), True, 'import numpy as np\n')]
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
import sys
import paddle.compat as cpt
from op_test import OpTest
class TestROIPoolOp(OpTest):
def set_data(self):
self.init_test_case()
self.make_rois()
self.calc_roi_pool()
self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)}
self.attrs = {
'spatial_scale': self.spatial_scale,
'pooled_height': self.pooled_height,
'pooled_width': self.pooled_width
}
self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes}
def init_test_case(self):
self.batch_size = 3
self.channels = 3
self.height = 6
self.width = 4
# n, c, h, w
self.x_dim = (self.batch_size, self.channels, self.height, self.width)
self.spatial_scale = 1.0 / 4.0
self.pooled_height = 2
self.pooled_width = 2
self.x = np.random.random(self.x_dim).astype('float32')
def calc_roi_pool(self):
out_data = np.zeros((self.rois_num, self.channels, self.pooled_height,
self.pooled_width))
argmax_data = np.zeros((self.rois_num, self.channels,
self.pooled_height, self.pooled_width))
for i in range(self.rois_num):
roi = self.rois[i]
roi_batch_id = roi[0]
roi_start_w = int(cpt.round(roi[1] * self.spatial_scale))
roi_start_h = int(cpt.round(roi[2] * self.spatial_scale))
roi_end_w = int(cpt.round(roi[3] * self.spatial_scale))
roi_end_h = int(cpt.round(roi[4] * self.spatial_scale))
roi_height = int(max(roi_end_h - roi_start_h + 1, 1))
roi_width = int(max(roi_end_w - roi_start_w + 1, 1))
x_i = self.x[roi_batch_id]
bin_size_h = float(roi_height) / float(self.pooled_height)
bin_size_w = float(roi_width) / float(self.pooled_width)
for c in range(self.channels):
for ph in range(self.pooled_height):
for pw in range(self.pooled_width):
hstart = int(math.floor(ph * bin_size_h))
wstart = int(math.floor(pw * bin_size_w))
hend = int(math.ceil((ph + 1) * bin_size_h))
wend = int(math.ceil((pw + 1) * bin_size_w))
hstart = min(max(hstart + roi_start_h, 0), self.height)
hend = min(max(hend + roi_start_h, 0), self.height)
wstart = min(max(wstart + roi_start_w, 0), self.width)
wend = min(max(wend + roi_start_w, 0), self.width)
is_empty = (hend <= hstart) or (wend <= wstart)
if is_empty:
out_data[i, c, ph, pw] = 0
else:
out_data[i, c, ph, pw] = -sys.float_info.max
argmax_data[i, c, ph, pw] = -1
for h in range(hstart, hend):
for w in range(wstart, wend):
if x_i[c, h, w] > out_data[i, c, ph, pw]:
out_data[i, c, ph, pw] = x_i[c, h, w]
argmax_data[i, c, ph,
pw] = h * self.width + w
self.outs = out_data.astype('float32')
self.argmaxes = argmax_data.astype('int64')
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.random_integers(
0, self.width // self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers(
0, self.height // self.spatial_scale - self.pooled_height)
x2 = np.random.random_integers(x1 + self.pooled_width,
self.width // self.spatial_scale)
y2 = np.random.random_integers(
y1 + self.pooled_height, self.height // self.spatial_scale)
roi = [bno, x1, y1, x2, y2]
rois.append(roi)
self.rois_num = len(rois)
self.rois = np.array(rois).astype("int64")
def setUp(self):
self.op_type = "roi_pool"
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == '__main__':
unittest.main()
|
[
"math.ceil",
"math.floor",
"numpy.random.random_integers",
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"paddle.compat.round",
"unittest.main"
] |
[((5305, 5320), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5318, 5320), False, 'import unittest\n'), ((1679, 1758), 'numpy.zeros', 'np.zeros', (['(self.rois_num, self.channels, self.pooled_height, self.pooled_width)'], {}), '((self.rois_num, self.channels, self.pooled_height, self.pooled_width))\n', (1687, 1758), True, 'import numpy as np\n'), ((1810, 1889), 'numpy.zeros', 'np.zeros', (['(self.rois_num, self.channels, self.pooled_height, self.pooled_width)'], {}), '((self.rois_num, self.channels, self.pooled_height, self.pooled_width))\n', (1818, 1889), True, 'import numpy as np\n'), ((1583, 1611), 'numpy.random.random', 'np.random.random', (['self.x_dim'], {}), '(self.x_dim)\n', (1599, 1611), True, 'import numpy as np\n'), ((2057, 2095), 'paddle.compat.round', 'cpt.round', (['(roi[1] * self.spatial_scale)'], {}), '(roi[1] * self.spatial_scale)\n', (2066, 2095), True, 'import paddle.compat as cpt\n'), ((2127, 2165), 'paddle.compat.round', 'cpt.round', (['(roi[2] * self.spatial_scale)'], {}), '(roi[2] * self.spatial_scale)\n', (2136, 2165), True, 'import paddle.compat as cpt\n'), ((2195, 2233), 'paddle.compat.round', 'cpt.round', (['(roi[3] * self.spatial_scale)'], {}), '(roi[3] * self.spatial_scale)\n', (2204, 2233), True, 'import paddle.compat as cpt\n'), ((2263, 2301), 'paddle.compat.round', 'cpt.round', (['(roi[4] * self.spatial_scale)'], {}), '(roi[4] * self.spatial_scale)\n', (2272, 2301), True, 'import paddle.compat as cpt\n'), ((4385, 4472), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(self.width // self.spatial_scale - self.pooled_width)'], {}), '(0, self.width // self.spatial_scale - self.\n pooled_width)\n', (4410, 4472), True, 'import numpy as np\n'), ((4510, 4599), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(self.height // self.spatial_scale - self.pooled_height)'], {}), '(0, self.height // self.spatial_scale - self.\n pooled_height)\n', (4535, 4599), True, 'import numpy as np\n'), ((4638, 4726), 'numpy.random.random_integers', 'np.random.random_integers', (['(x1 + self.pooled_width)', '(self.width // self.spatial_scale)'], {}), '(x1 + self.pooled_width, self.width // self.\n spatial_scale)\n', (4663, 4726), True, 'import numpy as np\n'), ((4790, 4880), 'numpy.random.random_integers', 'np.random.random_integers', (['(y1 + self.pooled_height)', '(self.height // self.spatial_scale)'], {}), '(y1 + self.pooled_height, self.height // self.\n spatial_scale)\n', (4815, 4880), True, 'import numpy as np\n'), ((5029, 5043), 'numpy.array', 'np.array', (['rois'], {}), '(rois)\n', (5037, 5043), True, 'import numpy as np\n'), ((2806, 2833), 'math.floor', 'math.floor', (['(ph * bin_size_h)'], {}), '(ph * bin_size_h)\n', (2816, 2833), False, 'import math\n'), ((2872, 2899), 'math.floor', 'math.floor', (['(pw * bin_size_w)'], {}), '(pw * bin_size_w)\n', (2882, 2899), False, 'import math\n'), ((2936, 2968), 'math.ceil', 'math.ceil', (['((ph + 1) * bin_size_h)'], {}), '((ph + 1) * bin_size_h)\n', (2945, 2968), False, 'import math\n'), ((3005, 3037), 'math.ceil', 'math.ceil', (['((pw + 1) * bin_size_w)'], {}), '((pw + 1) * bin_size_w)\n', (3014, 3037), False, 'import math\n')]
|
import random
import numpy as np
import math
from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa
from skimage.io import imsave
from skimage.util import random_noise
maxSlope = 10 # restrict the maximum slope of generated lines for stability
minLength = 20 # restrict the minimum length of line segments
class ICircleDataset:
'''
Generator of circle segment images.
Images will have 1 random circle each, filled with noise and distractor lines.
Class also offers functionality for drawing line parameters, hypotheses and point predictions.
'''
def __init__(self, imgW = 64, imgH = 64, margin = -5, bg_clr = 0.5):
'''
Constructor.
imgW -- image width (default 64)
imgH -- image height (default 64)
margin -- lines segments are sampled within this margin, negative value means that a line segment can start or end outside the image (default -5)
bg_clr -- background intensity (default 0.5)
'''
self.imgW = imgW
self.imgH = imgH
self.margin = margin
self.bg_clr = bg_clr
def draw_circle(self, data, cX, cY, r, clr, alpha=1.0):
'''
Draw a circle with the given color and opacity.
data -- image to draw to
cX -- x value of circle center
cY -- y value of circle center
r -- radius of circle
clr -- line color, triple of values
alpha -- opacity (default 1.0)
'''
cY = int(cY * self.imgH)
cX = int(cX * self.imgW)
r = int(r * self.imgW)
rr, cc, val = circle_perimeter_aa(cY, cX, r)
set_color(data, (rr, cc), clr, val)
def draw_hyps(self, labels, scores, data=None):
'''
Draw a set of line hypothesis for a batch of images.
labels -- line parameters, array shape (NxMx2) where
N is the number of images in the batch
M is the number of hypotheses per image
2 is the number of line parameters (intercept, slope)
scores -- hypotheses scores, array shape (NxM), see above, higher score will be drawn with higher opacity
		data -- batch of images to draw to, if empty a new batch will be created according to the shape of labels
'''
n = labels.shape[0] # number of images
m = labels.shape[1] # number of hypotheses
if data is None: # create new batch of images
data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
data.fill(self.bg_clr)
clr = (0, 0, 1)
for i in range (0, n):
for j in range (0, m):
lY1 = int(labels[i, j, 0] * self.imgH)
lY2 = int(labels[i, j, 1] * self.imgW + labels[i, j, 0] * self.imgH)
self.draw_line(data[i], 0, lY1, self.imgW, lY2, clr, scores[i, j])
return data
def draw_models(self, labels, data=None, correct=None):
'''
Draw circles for a batch of images.
labels -- circle parameters, array shape (Nx3) where
N is the number of images in the batch
3 is the number of circles parameters (center x, center y, radius)
		data -- batch of images to draw to, if empty a new batch will be created according to the shape of labels
			and circles will be drawn green, otherwise circles will be drawn blue
correct -- array of shape (N) indicating whether a circle estimate is correct
'''
n = labels.shape[0]
if data is None:
data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
data.fill(self.bg_clr)
clr = (0, 1, 0)
else:
clr = (0, 0, 1)
for i in range (0, n):
self.draw_circle(data[i], labels[i, 0], labels[i, 1], labels[i, 2], clr)
if correct is not None:
				# draw border green if estimate is correct, red otherwise
if correct[i]: borderclr = (0, 1, 0)
else: borderclr = (1, 0, 0)
set_color(data[i], line(0, 0, 0, self.imgW-1), borderclr)
set_color(data[i], line(0, 0, self.imgH-1, 0), borderclr)
set_color(data[i], line(self.imgH-1, 0, self.imgH-1, self.imgW-1), borderclr)
set_color(data[i], line(0, self.imgW-1, self.imgH-1, self.imgW-1), borderclr)
return data
def draw_points(self, points, data, inliers=None):
'''
Draw 2D points for a batch of images.
points -- 2D points, array shape (Nx2xM) where
N is the number of images in the batch
2 is the number of point dimensions (x, y)
M is the number of points
data -- batch of images to draw to
		inliers -- soft inlier score for each point,
			if given and score > 0.5 the point will be drawn as a light circle, otherwise as a dark circle
'''
n = points.shape[0] # number of images
m = points.shape[2] # number of points
for i in range (0, n):
for j in range(0, m):
clr = (0.2, 0.2, 0.2) # draw predicted points as dark circles
if inliers is not None and inliers[i, j] > 0.5:
clr = (0.7, 0.7, 0.7) # draw inliers as light circles
r = int(points[i, 0, j] * self.imgH)
c = int(points[i, 1, j] * self.imgW)
rr, cc = circle(r, c, 2)
set_color(data[i], (rr, cc), clr)
return data
def samples(self, n):
'''
Create new input images of random line segments and distractors along with ground truth parameters.
n -- number of images to create
'''
data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
data.fill(self.bg_clr)
labels = np.zeros((n, 3), dtype=np.float32)
for i in range (0, n):
data[i] = random_noise(data[i], mode='speckle')
return data, labels
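

# A minimal usage sketch; the batch size and the output filename are illustrative only.
if __name__ == '__main__':
	dataset = ICircleDataset(imgW=64, imgH=64)
	inputs, labels = dataset.samples(4)  # 4 noisy background images and zeroed labels
	print(inputs.shape, labels.shape)    # (4, 64, 64, 3) (4, 3)
	# draw_models renders circle parameters (cx, cy, r, all in [0, 1]) onto a fresh batch
	circles = np.array([[0.5, 0.5, 0.25]] * 4, dtype=np.float32)
	rendered = dataset.draw_models(circles)
	imsave('circle_example.png', (rendered[0] * 255).astype(np.uint8))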
|
[
"skimage.draw.circle",
"skimage.draw.circle_perimeter_aa",
"skimage.draw.set_color",
"numpy.zeros",
"skimage.util.random_noise",
"skimage.draw.line"
] |
[((1439, 1469), 'skimage.draw.circle_perimeter_aa', 'circle_perimeter_aa', (['cY', 'cX', 'r'], {}), '(cY, cX, r)\n', (1458, 1469), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((1472, 1507), 'skimage.draw.set_color', 'set_color', (['data', '(rr, cc)', 'clr', 'val'], {}), '(data, (rr, cc), clr, val)\n', (1481, 1507), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((4940, 4996), 'numpy.zeros', 'np.zeros', (['(n, self.imgH, self.imgW, 3)'], {'dtype': 'np.float32'}), '((n, self.imgH, self.imgW, 3), dtype=np.float32)\n', (4948, 4996), True, 'import numpy as np\n'), ((5033, 5067), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {'dtype': 'np.float32'}), '((n, 3), dtype=np.float32)\n', (5041, 5067), True, 'import numpy as np\n'), ((2190, 2246), 'numpy.zeros', 'np.zeros', (['(n, self.imgH, self.imgW, 3)'], {'dtype': 'np.float32'}), '((n, self.imgH, self.imgW, 3), dtype=np.float32)\n', (2198, 2246), True, 'import numpy as np\n'), ((3128, 3184), 'numpy.zeros', 'np.zeros', (['(n, self.imgH, self.imgW, 3)'], {'dtype': 'np.float32'}), '((n, self.imgH, self.imgW, 3), dtype=np.float32)\n', (3136, 3184), True, 'import numpy as np\n'), ((5107, 5144), 'skimage.util.random_noise', 'random_noise', (['data[i]'], {'mode': '"""speckle"""'}), "(data[i], mode='speckle')\n", (5119, 5144), False, 'from skimage.util import random_noise\n'), ((4688, 4703), 'skimage.draw.circle', 'circle', (['r', 'c', '(2)'], {}), '(r, c, 2)\n', (4694, 4703), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((4708, 4741), 'skimage.draw.set_color', 'set_color', (['data[i]', '(rr, cc)', 'clr'], {}), '(data[i], (rr, cc), clr)\n', (4717, 4741), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((3556, 3584), 'skimage.draw.line', 'line', (['(0)', '(0)', '(0)', '(self.imgW - 1)'], {}), '(0, 0, 0, self.imgW - 1)\n', (3560, 3584), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((3621, 3649), 'skimage.draw.line', 'line', (['(0)', '(0)', '(self.imgH - 1)', '(0)'], {}), '(0, 0, self.imgH - 1, 0)\n', (3625, 3649), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((3686, 3738), 'skimage.draw.line', 'line', (['(self.imgH - 1)', '(0)', '(self.imgH - 1)', '(self.imgW - 1)'], {}), '(self.imgH - 1, 0, self.imgH - 1, self.imgW - 1)\n', (3690, 3738), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n'), ((3771, 3823), 'skimage.draw.line', 'line', (['(0)', '(self.imgW - 1)', '(self.imgH - 1)', '(self.imgW - 1)'], {}), '(0, self.imgW - 1, self.imgH - 1, self.imgW - 1)\n', (3775, 3823), False, 'from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa\n')]
|
import numpy as np
import copy
import combo.misc
import cPickle as pickle
from results import history
from .. import utility
from ...variable import variable
from ..call_simulator import call_simulator
from ... import predictor
from ...gp import predictor as gp_predictor
from ...blm import predictor as blm_predictor
import combo.search.score
MAX_SEACH = int(20000)
class policy:
def __init__(self, test_X, config=None):
self.predictor = None
self.training = variable()
self.test = self._set_test(test_X)
self.actions = np.arange(0, self.test.X.shape[0])
self.history = history()
self.config = self._set_config(config)
def set_seed(self, seed):
self.seed = seed
np.random.seed(self.seed)
def delete_actions(self, index, actions=None):
actions = self._set_unchosed_actions(actions)
return np.delete(actions, index)
def write(self, action, t, X=None):
if X is None:
X = self.test.X[action, :]
Z = self.test.Z[action, :] if self.test.Z is not None else None
else:
Z = self.predictor.get_basis(X) \
if self.predictor is not None else None
self.new_data = variable(X, t, Z)
self.history.write(t, action)
self.training.add(X=X, t=t, Z=Z)
def random_search(self, max_num_probes, num_search_each_probe=1,
simulator=None, is_disp=True):
N = int(num_search_each_probe)
if int(max_num_probes) * N > len(self.actions):
raise ValueError('max_num_probes * num_search_each_probe must \
be smaller than the length of candidates')
if is_disp:
utility.show_interactive_mode(simulator, self.history)
for n in xrange(0, max_num_probes):
if is_disp and N > 1:
utility.show_start_message_multi_search(self.history.num_runs)
action = self.get_random_action(N)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def bayes_search(self, training=None, max_num_probes=None,
num_search_each_probe=1,
predictor=None, is_disp=True,
simulator=None, score='TS', interval=0,
num_rand_basis=0):
if max_num_probes is None:
max_num_probes = 1
simulator = None
is_rand_expans = False if num_rand_basis == 0 else True
self.training = self._set_training(training)
if predictor is None:
self.predictor = self._init_predictor(is_rand_expans)
else:
self.predictor = predictor
N = int(num_search_each_probe)
for n in xrange(max_num_probes):
if utility.is_learning(n, interval):
self.predictor.fit(self.training, num_rand_basis)
self.test.Z = self.predictor.get_basis(self.test.X)
self.training.Z = self.predictor.get_basis(self.training.X)
self.predictor.prepare(self.training)
else:
try:
self.predictor.update(self.training, self.new_data)
except:
self.predictor.prepare(self.training)
if num_search_each_probe != 1:
utility.show_start_message_multi_search(self.history.num_runs,
score)
K = self.config.search.multi_probe_num_sampling
alpha = self.config.search.alpha
action = self.get_actions(score, N, K, alpha)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def get_score(self, mode, predictor=None, training=None, alpha=1):
self._set_training(training)
self._set_predictor(predictor)
actions = self.actions
test = self.test.get_subset(actions)
if mode == 'EI':
f = combo.search.score.EI(predictor, training, test)
elif mode == 'PI':
f = combo.search.score.PI(predictor, training, test)
elif mode == 'TS':
f = combo.search.score.TS(predictor, training, test, alpha)
else:
raise NotImplementedError('mode must be EI, PI or TS.')
return f
def get_marginal_score(self, mode, chosed_actions, N, alpha):
f = np.zeros((N, len(self.actions)))
new_test = self.test.get_subset(chosed_actions)
virtual_t \
= self.predictor.get_predict_samples(self.training, new_test, N)
for n in xrange(N):
predictor = copy.deepcopy(self.predictor)
train = copy.deepcopy(self.training)
virtual_train = new_test
virtual_train.t = virtual_t[n, :]
if virtual_train.Z is None:
train.add(virtual_train.X, virtual_train.t)
else:
train.add(virtual_train.X, virtual_train.t, virtual_train.Z)
try:
predictor.update(train, virtual_train)
except:
predictor.prepare(train)
f[n, :] = self.get_score(mode, predictor, train)
return f
def get_actions(self, mode, N, K, alpha):
f = self.get_score(mode, self.predictor, self.training, alpha)
temp = np.argmax(f)
action = self.actions[temp]
self.actions = self.delete_actions(temp)
chosed_actions = np.zeros(N, dtype=int)
chosed_actions[0] = action
for n in xrange(1, N):
f = self.get_marginal_score(mode, chosed_actions[0:n], K, alpha)
temp = np.argmax(np.mean(f, 0))
chosed_actions[n] = self.actions[temp]
self.actions = self.delete_actions(temp)
return chosed_actions
def get_random_action(self, N):
random_index = np.random.permutation(xrange(self.actions.shape[0]))
index = random_index[0:N]
action = self.actions[index]
self.actions = self.delete_actions(index)
return action
def load(self, file_history, file_training=None, file_predictor=None):
self.history.load(file_history)
if file_training is None:
N = self.history.total_num_search
X = self.test.X[self.history.chosed_actions[0:N], :]
t = self.history.fx[0:N]
self.training = variable(X=X, t=t)
else:
self.training = variable()
self.training.load(file_training)
if file_predictor is not None:
with open(file_predictor) as f:
self.predictor = pickle.load(f)
def export_predictor(self):
return self.predictor
def export_training(self):
return self.training
def export_history(self):
return self.history
def _set_predictor(self, predictor=None):
if predictor is None:
predictor = self.predictor
return predictor
def _init_predictor(self, is_rand_expans, predictor=None):
self.predictor = self._set_predictor(predictor)
if self.predictor is None:
if is_rand_expans:
self.predictor = blm_predictor(self.config)
else:
self.predictor = gp_predictor(self.config)
return self.predictor
def _set_training(self, training=None):
if training is None:
training = self.training
return training
def _set_unchosed_actions(self, actions=None):
if actions is None:
actions = self.actions
return actions
def _set_test(self, test_X):
if isinstance(test_X, np.ndarray):
test = variable(X=test_X)
elif isinstance(test_X, variable):
test = test_X
else:
raise TypeError('The type of test_X must \
take ndarray or combo.variable')
return test
def _set_config(self, config=None):
if config is None:
config = combo.misc.set_config()
return config
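

# A minimal usage sketch (Python 2, matching the imports above). The candidate
# matrix and the toy simulator are hypothetical stand-ins: a real simulator
# returns the observed objective value(s) for the chosen action indices.
if __name__ == '__main__':
    test_X = np.random.rand(500, 4)  # candidate descriptors, one row per action
    def simulator(action):
        return -np.sum((test_X[action] - 0.5) ** 2, axis=1)  # toy objective
    searcher = policy(test_X=test_X)
    searcher.set_seed(0)
    searcher.random_search(max_num_probes=10, simulator=simulator)
    res = searcher.bayes_search(max_num_probes=20, simulator=simulator, score='TS')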
|
[
"numpy.mean",
"results.history",
"numpy.delete",
"numpy.argmax",
"numpy.zeros",
"numpy.random.seed",
"copy.deepcopy",
"cPickle.load",
"numpy.arange"
] |
[((559, 593), 'numpy.arange', 'np.arange', (['(0)', 'self.test.X.shape[0]'], {}), '(0, self.test.X.shape[0])\n', (568, 593), True, 'import numpy as np\n'), ((617, 626), 'results.history', 'history', ([], {}), '()\n', (624, 626), False, 'from results import history\n'), ((738, 763), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (752, 763), True, 'import numpy as np\n'), ((885, 910), 'numpy.delete', 'np.delete', (['actions', 'index'], {}), '(actions, index)\n', (894, 910), True, 'import numpy as np\n'), ((2236, 2263), 'copy.deepcopy', 'copy.deepcopy', (['self.history'], {}), '(self.history)\n', (2249, 2263), False, 'import copy\n'), ((4089, 4116), 'copy.deepcopy', 'copy.deepcopy', (['self.history'], {}), '(self.history)\n', (4102, 4116), False, 'import copy\n'), ((5744, 5756), 'numpy.argmax', 'np.argmax', (['f'], {}), '(f)\n', (5753, 5756), True, 'import numpy as np\n'), ((5868, 5890), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (5876, 5890), True, 'import numpy as np\n'), ((5040, 5069), 'copy.deepcopy', 'copy.deepcopy', (['self.predictor'], {}), '(self.predictor)\n', (5053, 5069), False, 'import copy\n'), ((5090, 5118), 'copy.deepcopy', 'copy.deepcopy', (['self.training'], {}), '(self.training)\n', (5103, 5118), False, 'import copy\n'), ((6064, 6077), 'numpy.mean', 'np.mean', (['f', '(0)'], {}), '(f, 0)\n', (6071, 6077), True, 'import numpy as np\n'), ((7032, 7046), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7043, 7046), True, 'import cPickle as pickle\n')]
|
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
font_location = './wordcloud_file/malgun.ttf' # For Windows
font_name = fm.FontProperties(fname=font_location).get_name()
plt.rc('font', family=font_name)
def percent_graph2(movie_review) :
b = movie_review
    labelss = sorted(b['score'].unique())## set the labels. Korean text was not being applied!!!
    c = b['score'].value_counts().sort_index() ## frequency counts
print(c)
print(labelss)
    fig = plt.figure(figsize=(8,8)) ## create the canvas
    fig.set_facecolor('white') ## set the canvas background color to white
    ax = fig.add_subplot() ## create the frame (axes)
    pie = ax.pie(c, ## draw the pie chart
                 startangle=90, ## start at 90 degrees
                 counterclock=False, ## draw clockwise
                 # autopct=lambda p : '{:.2f}%'.format(p), ## print the percentage
wedgeprops=dict(width=0.5),
colors = ['yellowgreen', 'orange'],
labels = labelss,
textprops={'fontsize': 22}
)
    total = np.sum(c) ## total of the counts
    sum_pct = 0 ## running total of the percentages
    for i, l in enumerate(labelss):
        ang1, ang2 = pie[0][i].theta1, pie[0][i].theta2 ## angle 1, angle 2
        r = pie[0][i].r ## radius of the wedge
        x = ((r + 0.5) / 2) * np.cos(np.pi / 180 * ((ang1 + ang2) / 2)) ## x coordinate for the label (wedge center)
        y = ((r + 0.5) / 2) * np.sin(np.pi / 180 * ((ang1 + ang2) / 2)) ## y coordinate for the label (wedge center)
        if i < len(labelss) - 1:
            sum_pct += float(f'{c[i] / total * 100:.2f}') ## accumulate the percentage
            ax.text(x, y, f'{c[i] / total * 100:.2f}%', ha='center', va='center', size=22, color='white',
                    weight='bold') ## display the percentage text
        else: ## so the total comes to exactly 100, the last slice shows 100 minus the accumulated percentage
            ax.text(x, y, f'{100 - sum_pct:.2f}%', ha='center', va='center',size=22,color='white',
                    weight='bold')
    # pie.rc('font', family=font_name)
    # plt.legend(pie[0], labelss) ## show the legend
    plt.savefig('./static/images/pos_neg_ratio.png') # output path
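

# A minimal usage sketch; pandas and the toy review table are hypothetical
# stand-ins for the real movie_review DataFrame (only a 'score' column is needed).
if __name__ == '__main__':
    import os
    import pandas as pd
    os.makedirs('./static/images', exist_ok=True)  # directory expected by savefig above
    reviews = pd.DataFrame({'score': ['neg'] * 30 + ['pos'] * 70})
    percent_graph2(reviews)  # writes ./static/images/pos_neg_ratio.png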
|
[
"matplotlib.pyplot.savefig",
"matplotlib.font_manager.FontProperties",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.rc"
] |
[((211, 243), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': 'font_name'}), "('font', family=font_name)\n", (217, 243), True, 'import matplotlib.pyplot as plt\n'), ((461, 487), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (471, 487), True, 'import matplotlib.pyplot as plt\n'), ((933, 942), 'numpy.sum', 'np.sum', (['c'], {}), '(c)\n', (939, 942), True, 'import numpy as np\n'), ((1844, 1892), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./static/images/pos_neg_ratio.png"""'], {}), "('./static/images/pos_neg_ratio.png')\n", (1855, 1892), True, 'import matplotlib.pyplot as plt\n'), ((161, 199), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'fname': 'font_location'}), '(fname=font_location)\n', (178, 199), True, 'import matplotlib.font_manager as fm\n'), ((1151, 1192), 'numpy.cos', 'np.cos', (['(np.pi / 180 * ((ang1 + ang2) / 2))'], {}), '(np.pi / 180 * ((ang1 + ang2) / 2))\n', (1157, 1192), True, 'import numpy as np\n'), ((1235, 1276), 'numpy.sin', 'np.sin', (['(np.pi / 180 * ((ang1 + ang2) / 2))'], {}), '(np.pi / 180 * ((ang1 + ang2) / 2))\n', (1241, 1276), True, 'import numpy as np\n')]
|
import numpy as np
import pickle
from os.path import exists, realpath
import sys
import math
from topple_data_loader import ToppleData, ToppleDataLoader
import transforms3d
class ToppleNormalizationInfo():
'''
Structure to hold all the normalization information for a dataset.
'''
def __init__(self):
# max element of any linear vel vector
self.max_lin_vel = None
# max element of any angular vel vector
self.max_ang_vel = None
# max distance between positions in two contiguous timesteps
self.max_pos = None
# max change in rotation around any axis between two contiguous timesteps (for euler rot)
self.max_rot = None
# max angle of rotation between two steps for axis-angle representation
self.max_delta_rot = None
# max 2-norm of applied impulse vector
self.force_vec_max = None
# max 2-norm of a point in an object point cloud (used for point cloud and force pos)
self.pc_max = None
# normalization values for shape-related stuff
self.density_offset = None
self.density_max = None
self.mass_offset = None
self.mass_max = None
self.inertia_offset = None
self.inertia_max = None
self.friction_offset = None
self.friction_max = None
def print_out(self):
print({'max_lin_vel' : self.max_lin_vel, 'max_ang_vel' : self.max_ang_vel, 'max_pos' : self.max_pos, \
'max_rot' : self.max_rot, 'max_delta_rot' : self.max_delta_rot, 'force_vec_max' : self.force_vec_max, 'pc_max' : self.pc_max, \
'density_off' : self.density_offset, 'density_max' : self.density_max, 'mass_off' : self.mass_offset, \
'mass_max' : self.mass_max, 'inertia_off' : self.inertia_offset, 'inertia_max' : self.inertia_max, \
'friction_off' : self.friction_offset, 'friction_max' : self.friction_max
})
def save(self, pkl_file):
''' Saves normalization info object to a specified .pkl file. '''
with open(pkl_file, 'wb') as f:
pickle.dump(self, f)
def load_from(self, pkl_file):
''' Load normalization info into this object from a specified .pkl file. '''
with open(pkl_file, 'rb') as f:
norm_info = pickle.load(f)
self.copy_from(norm_info)
def copy_from(self, norm_info):
'''
Takes values from the given normalization info object and copies them to this one
'''
self.max_lin_vel = norm_info.max_lin_vel
self.max_ang_vel = norm_info.max_ang_vel
self.max_pos = norm_info.max_pos
self.max_rot = norm_info.max_rot
try:
self.max_delta_rot = norm_info.max_delta_rot
except:
            # older versions of the data don't have max delta rot
pass
self.force_vec_max = norm_info.force_vec_max
self.pc_max = norm_info.pc_max
self.density_offset = norm_info.density_offset
self.density_max = norm_info.density_max
self.mass_offset = norm_info.mass_offset
self.mass_max = norm_info.mass_max
self.inertia_offset = norm_info.inertia_offset
self.inertia_max = norm_info.inertia_max
try:
self.friction_offset = norm_info.friction_offset
self.friction_max = norm_info.friction_max
except:
# old version doesn't have this
pass
class ToppleBatch(object):
'''
Structure to hold a single batch of data.
'''
def __init__(self, size, seq_len, num_pts):
self.size = size
self.num_steps = seq_len
self.num_pts = num_pts
self.point_cloud = np.zeros((self.size, self.num_pts, 3))
self.lin_vel = np.zeros((self.size, self.num_steps, 3))
self.ang_vel = np.zeros((self.size, self.num_steps, 3))
self.pos = np.zeros((self.size, self.num_steps, 3))
# cummulative euler angles
self.rot = np.zeros((self.size, self.num_steps, 3))
# change in rotation in quaternion rep (w, x, y, z)
self.delta_quat = np.zeros((self.size, self.num_steps, 4))
# change in rotation between steps in axis-angle rep (scaled 3 vec)
self.delta_rot = np.zeros((self.size, self.num_steps, 3))
# change in rotation between steps in split axis-angle rep (4-vec)
self.delta_rot_split = np.zeros((self.size, self.num_steps, 4))
# 0 if before topple idx, 1 if after
self.topple_label = np.zeros((self.size, self.num_steps), dtype=int)
# other meta-data not directly used in network
self.toppled = []
self.shape_name = []
self.body_friction = np.zeros((self.size))
self.mass = np.zeros((self.size))
self.scale = np.zeros((self.size, 3))
self.rot_euler = np.zeros((self.size, self.num_steps, 3))
class ToppleDataset(object):
'''
Loads toppling data and provides batches for training and model evaluation.
'''
def __init__(self, roots, norm_info_file, batch_size=32, num_steps=15, shuffle=False, num_pts=None, perturb_pts=0.0):
'''
- roots : list of directories containing data to load for this dataset
- norm_info_file : .pkl file containing normalization information
- batch_size : number of sequences to return in each batch
- num_steps : number of timesteps to return in each sequence
- shuffle : randomly shuffles the returned sequence ordering
- num_pts : the number of points to use in the returned point cloud. If None uses all points in the data.
        - perturb_pts : the stdev of the Gaussian noise used to randomly perturb point clouds (0.0 disables perturbation)
'''
# settings
self.batch_size = batch_size
self.steps_per_seq = num_steps
self.shuffle = shuffle
self.perturb_std = perturb_pts
self.num_pts = num_pts
# load in data
for root in roots:
if not exists(root):
print('Could not find dataset at ' + root)
return
data_loader = ToppleDataLoader()
self.data = data_loader.load_data(roots)
if num_pts is None:
# use all the points in the point cloud
self.num_pts = self.data.point_cloud.shape[1]
# load in normalization info
if not exists(norm_info_file):
print('Could not find normalization info at ' + norm_info_file)
return
self.norm_info = ToppleNormalizationInfo()
self.norm_info.load_from(norm_info_file)
print('Loaded normalization info!')
# see if we have axis-angle info (for backwards compat)
self.use_aa = False
self.use_aa_split = False
self.use_topple_idx = False
self.use_delta_quat = False
if len(self.data.delta_rot) > 0:
self.use_aa = True
if len(self.data.delta_rot_split) > 0:
self.use_aa_split = True
if len(self.data.topple_idx) > 0:
self.use_topple_idx = True
if len(self.data.body_friction) > 0:
self.use_body_friction = True
if len(self.data.delta_quat) > 0:
self.use_delta_quat = True
# normalize the data
print('Normalizing data...')
self.normalize_data(self.data, self.norm_info)
print('Finished normalizing!')
# order to iterate through data when returning batches (in order by default)
        self.iter_inds = list(range(0, self.data.size))  # list so it can be shuffled in place
# prepare to iterate through
self.reset()
def normalize_data(self, data, norm_info):
'''
Normalizes (in place) the given ToppleData using the ToppleNormalizationInfo.
'''
# point clouds -> [-1, 1]
data.point_cloud /= norm_info.pc_max
# force pos -> [-1, 1]
data.force_pos /= norm_info.pc_max
# force vec -> [-1, 1]
data.force_vec /= norm_info.force_vec_max
# density -> [0, 1]
data.density = (data.density - norm_info.density_offset) / norm_info.density_max
# mass -> [0, 1]
data.mass = (data.mass - norm_info.mass_offset) / norm_info.mass_max
# inertia -> [0, 1]
data.inertia = (data.inertia - norm_info.inertia_offset) / norm_info.inertia_max
# friction -> [0, 1]
if norm_info.friction_offset is not None:
data.body_friction = (data.body_friction - norm_info.friction_offset) / norm_info.friction_max
# now time sequence data
# velocities -> [-1, 1]
for i, lin_vel_steps in enumerate(data.lin_vel):
data.lin_vel[i] = [(x / norm_info.max_lin_vel) for x in lin_vel_steps]
for i, ang_vel_steps in enumerate(data.ang_vel):
data.ang_vel[i] = [(x / norm_info.max_ang_vel) for x in ang_vel_steps]
# delta position -> [-1, 1]
for i, pos_steps in enumerate(data.pos):
data.pos[i] = [(x / norm_info.max_pos) for x in pos_steps]
# delta rotation -> [-1, 1]
for i, rot_steps in enumerate(data.total_rot):
data.total_rot[i] = [(x / norm_info.max_rot) for x in rot_steps]
# delta rot axis-angle -> [-1, 1] norm
if self.use_aa:
for i, delta_rot_steps in enumerate(data.delta_rot):
data.delta_rot[i] = [(x / norm_info.max_delta_rot) for x in delta_rot_steps]
# make axes unit and and normalize angle -> [-1, 1]
if self.use_aa_split:
for i, delta_rot_split_steps in enumerate(data.delta_rot_split):
data.delta_rot_split[i] = [np.append(x[:3] / np.linalg.norm(x[:3]), x[3] / norm_info.max_delta_rot) for x in delta_rot_split_steps]
def reset(self):
'''
Prepares to iterate through dataset.
'''
if self.shuffle:
np.random.shuffle(self.iter_inds)
# we consider an epoch as returning one sequence from every single simulation
# ( though if the sequence length is shorter than sim length the unique sequences contained
# in the dataset will be much more than an epoch length )
self.num_batches = (self.data.size + self.batch_size - 1) // self.batch_size
self.batch_idx = 0
def has_next_batch(self):
'''
Returns false if done with the current "epoch" (seen each sim once).
'''
return self.batch_idx < self.num_batches
def next_batch(self, random_window=True, focus_toppling=False):
'''
Returns the next batch of data. if random_window=True will get a random sequence of correct length (otherwise
starts at 0). If focus_toppling=True, will make sure this sequence includes the part of the sequence where toppling occurs.
'''
# size is either batch_size, or shorter if we're at the end of the data
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx + 1) * self.batch_size, self.data.size)
batch_size = end_idx - start_idx
# get batch data
batch = ToppleBatch(self.batch_size, self.steps_per_seq, self.num_pts)
for i in range(batch_size):
pc, lin_vel, ang_vel, pos, rot, delta_quat, delta_rot, delta_rot_split, topple_label, meta_info = \
self.get_seq(self.iter_inds[start_idx + i], self.steps_per_seq, random_window, focus_toppling)
batch.point_cloud[i] = pc
batch.lin_vel[i] = lin_vel
batch.ang_vel[i] = ang_vel
batch.pos[i] = pos
batch.rot[i] = rot
if self.use_delta_quat:
batch.delta_quat[i] = delta_quat
if self.use_aa:
batch.delta_rot[i] = delta_rot
if self.use_aa_split:
batch.delta_rot_split[i] = delta_rot_split
if self.use_topple_idx:
batch.topple_label[i] = topple_label
batch.toppled.append(meta_info[0])
batch.shape_name.append(meta_info[1])
batch.scale[i] = meta_info[2]
batch.rot_euler[i] = meta_info[3]
if self.use_body_friction:
batch.body_friction[i] = meta_info[4]
batch.mass[i] = meta_info[5]
if batch_size != self.batch_size:
# need to pad the end with repeat of data
for i in range(self.batch_size - batch_size):
batch.point_cloud[batch_size + i] = batch.point_cloud[i]
batch.lin_vel[batch_size + i] = batch.lin_vel[i]
batch.ang_vel[batch_size + i] = batch.ang_vel[i]
batch.pos[batch_size + i] = batch.pos[i]
batch.rot[batch_size + i] = batch.rot[i]
if self.use_delta_quat:
batch.delta_quat[batch_size + i] = batch.delta_quat[i]
batch.toppled.append(batch.toppled[i])
batch.shape_name.append(batch.shape_name[i])
batch.scale[batch_size + i] = batch.scale[i]
batch.rot_euler[batch_size + i] = batch.rot_euler[i]
batch.mass[batch_size + i] = batch.mass[i]
if self.use_aa:
batch.delta_rot[batch_size + i] = batch.delta_rot[i]
if self.use_aa_split:
batch.delta_rot_split[batch_size + i] = batch.delta_rot_split[i]
if self.use_topple_idx:
batch.topple_label[batch_size + i] = batch.topple_label[i]
if self.use_body_friction:
batch.body_friction[batch_size + i] = batch.body_friction[i]
self.batch_idx += 1
return batch
def get_seq(self, idx, num_steps, random_window=True, focus_toppling=False):
'''
Returns a random contiguous sequence from the simulation at the given idx and length num_steps.
        If num_steps > sim_length the final (num_steps - sim_length) steps are padded by repeating the
        last simulation value.
'''
# get the normalized canonical point cloud for this simulation
pc = np.copy(self.data.point_cloud[self.data.shape_idx[idx]])
scale = self.data.scale[idx]
# scale accordingly
pc *= np.reshape(scale, (1, -1))
# randomly perturb point cloud
pc += np.random.normal(0.0, self.perturb_std, pc.shape)
# randomly draw a subset of points if desired
if self.num_pts < pc.shape[0]:
pc_inds = np.random.choice(pc.shape[0], self.num_pts, replace=False)
pc = pc[pc_inds, :]
# randomly choose a size num_steps sequence from the simulation to return time-series data
total_steps = len(self.data.lin_vel[idx])
max_start_step = total_steps - num_steps
start_step = 0
if max_start_step < 0:
# simulation is shorter than desired sequence length
pad_len = abs(max_start_step)
lin_vel_list = self.data.lin_vel[idx]
lin_vel_out = np.array(lin_vel_list + [lin_vel_list[-1]]*pad_len)
ang_vel_list = self.data.ang_vel[idx]
ang_vel_out = np.array(ang_vel_list + [ang_vel_list[-1]]*pad_len)
pos_list = self.data.pos[idx]
pos_out = np.array(pos_list + [pos_list[-1]]*pad_len)
rot_list = self.data.total_rot[idx]
rot_out = np.array(rot_list + [rot_list[-1]]*pad_len)
if self.use_delta_quat:
delta_quat_list = self.data.delta_quat[idx]
delta_quat_out = np.array(delta_quat_list + [delta_quat_list[-1]]*pad_len)
euler_rot_list = self.data.rot_euler[idx]
euler_rot_out = np.array(euler_rot_list + [euler_rot_list[-1]]*pad_len)
if self.use_aa:
delta_rot_list = self.data.delta_rot[idx]
delta_rot_out = np.array(delta_rot_list + [delta_rot_list[-1]]*pad_len)
if self.use_aa_split:
delta_rot_split_list = self.data.delta_rot_split[idx]
delta_rot_split_out = np.array(delta_rot_split_list + [delta_rot_split_list[-1]]*pad_len)
if self.use_topple_idx:
topple_label_out = np.zeros((total_steps + pad_len), dtype=int)
seq_topple_idx = self.data.topple_idx[idx]
if seq_topple_idx > 0:
topple_label_out[seq_topple_idx:] = 1
else:
start_step = 0
if random_window:
if focus_toppling and self.data.toppled[idx]:
# choose window around the index where it topples
topple_idx = self.data.topple_idx[idx]
min_idx = max([topple_idx - num_steps + 1, 0])
if min_idx >= max_start_step:
# just pick the max index
start_step = max_start_step
else:
# our window is guaranteed to see some part of toppling
start_step = np.random.randint(min_idx, max_start_step+1)
else:
start_step = np.random.randint(0, max_start_step+1)
end_step = start_step + num_steps
# print('Range: %d, %d' % (start_step, end_step))
lin_vel_out = np.array(self.data.lin_vel[idx][start_step:end_step])
ang_vel_out = np.array(self.data.ang_vel[idx][start_step:end_step])
pos_out = np.array(self.data.pos[idx][start_step:end_step])
rot_out = np.array(self.data.total_rot[idx][start_step:end_step])
if self.use_delta_quat:
delta_quat_out = np.array(self.data.delta_quat[idx][start_step:end_step])
euler_rot_out = np.array(self.data.rot_euler[idx][start_step:end_step])
if self.use_aa:
delta_rot_out = np.array(self.data.delta_rot[idx][start_step:end_step])
if self.use_aa_split:
delta_rot_split_out = np.array(self.data.delta_rot_split[idx][start_step:end_step])
if self.use_topple_idx:
topple_label_out = np.zeros((num_steps), dtype=int)
seq_topple_idx = self.data.topple_idx[idx]
if seq_topple_idx > 0:
if seq_topple_idx <= start_step:
topple_label_out[:] = 1
elif seq_topple_idx < end_step:
topple_label_out[seq_topple_idx-start_step:] = 1
# rotate point cloud to align with first frame of sequence
init_rot = self.data.rot_euler[idx][start_step]
xrot, yrot, zrot = np.radians(init_rot)
R = transforms3d.euler.euler2mat(zrot, xrot, yrot, axes='szxy') # unity applies euler angles in z, x, y ordering
pc = np.dot(pc, R.T)
toppled = self.data.toppled[idx]
shape_name = self.data.shape_name[idx]
mass = self.data.mass[idx]
body_fric = -1.0
if self.use_body_friction:
body_fric = self.data.body_friction[idx]
meta_info = (toppled, shape_name, scale, euler_rot_out, body_fric, mass)
if not self.use_aa:
delta_rot_out = None
if not self.use_aa_split:
delta_rot_split_out = None
if not self.use_topple_idx:
topple_label_out = None
if not self.use_delta_quat:
delta_quat_out = None
return pc, lin_vel_out, ang_vel_out, pos_out, rot_out, delta_quat_out, delta_rot_out, delta_rot_split_out, topple_label_out, meta_info
def get_norm_info(self):
return self.norm_info
if __name__=='__main__':
# norm_info = ToppleNormalizationInfo()
# norm_info.load_from('../../data/sim/normalization_info/cube_train.pkl')
# norm_info.print_out()
topple_data = ToppleDataset(roots=['./data/sim/Cube/Cube30k_ObjSplit/Cube30kVal'], norm_info_file='./data/sim/normalization_info/cube_30k.pkl', \
batch_size=5, num_steps=10, shuffle=True, num_pts=None, perturb_pts=0.01)
count = 0
while topple_data.has_next_batch():
batch = topple_data.next_batch(random_window=True, focus_toppling=False)
count += 1
# print(batch.lin_vel[0])
# print(batch.toppled[0])
# print(batch.delta_rot_split[0])
# print(batch.delta_rot[0])
# print(batch.topple_label[0])
# print(batch.pos)
# print(batch.body_friction)
# print(batch.delta_quat[0])
# print(np.degrees(2*np.arccos(batch.delta_quat[0, :, 0])))
print('Total num batches: ' + str(count))
topple_data.reset()
count = 0
while topple_data.has_next_batch():
batch = topple_data.next_batch()
count += 1
print(batch.size)
print('Total num batches: ' + str(count))
|
[
"numpy.random.normal",
"numpy.copy",
"numpy.radians",
"os.path.exists",
"numpy.reshape",
"pickle.dump",
"numpy.random.shuffle",
"transforms3d.euler.euler2mat",
"numpy.random.choice",
"pickle.load",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.random.randint",
"numpy.linalg.norm",
"topple_data_loader.ToppleDataLoader"
] |
[((3728, 3766), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_pts, 3)'], {}), '((self.size, self.num_pts, 3))\n', (3736, 3766), True, 'import numpy as np\n'), ((3790, 3830), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 3)'], {}), '((self.size, self.num_steps, 3))\n', (3798, 3830), True, 'import numpy as np\n'), ((3854, 3894), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 3)'], {}), '((self.size, self.num_steps, 3))\n', (3862, 3894), True, 'import numpy as np\n'), ((3914, 3954), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 3)'], {}), '((self.size, self.num_steps, 3))\n', (3922, 3954), True, 'import numpy as np\n'), ((4009, 4049), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 3)'], {}), '((self.size, self.num_steps, 3))\n', (4017, 4049), True, 'import numpy as np\n'), ((4136, 4176), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 4)'], {}), '((self.size, self.num_steps, 4))\n', (4144, 4176), True, 'import numpy as np\n'), ((4278, 4318), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 3)'], {}), '((self.size, self.num_steps, 3))\n', (4286, 4318), True, 'import numpy as np\n'), ((4425, 4465), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 4)'], {}), '((self.size, self.num_steps, 4))\n', (4433, 4465), True, 'import numpy as np\n'), ((4540, 4588), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps)'], {'dtype': 'int'}), '((self.size, self.num_steps), dtype=int)\n', (4548, 4588), True, 'import numpy as np\n'), ((4729, 4748), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (4737, 4748), True, 'import numpy as np\n'), ((4771, 4790), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (4779, 4790), True, 'import numpy as np\n'), ((4814, 4838), 'numpy.zeros', 'np.zeros', (['(self.size, 3)'], {}), '((self.size, 3))\n', (4822, 4838), True, 'import numpy as np\n'), ((4864, 4904), 'numpy.zeros', 'np.zeros', (['(self.size, self.num_steps, 3)'], {}), '((self.size, self.num_steps, 3))\n', (4872, 4904), True, 'import numpy as np\n'), ((6156, 6174), 'topple_data_loader.ToppleDataLoader', 'ToppleDataLoader', ([], {}), '()\n', (6172, 6174), False, 'from topple_data_loader import ToppleData, ToppleDataLoader\n'), ((14138, 14194), 'numpy.copy', 'np.copy', (['self.data.point_cloud[self.data.shape_idx[idx]]'], {}), '(self.data.point_cloud[self.data.shape_idx[idx]])\n', (14145, 14194), True, 'import numpy as np\n'), ((14274, 14300), 'numpy.reshape', 'np.reshape', (['scale', '(1, -1)'], {}), '(scale, (1, -1))\n', (14284, 14300), True, 'import numpy as np\n'), ((14354, 14403), 'numpy.random.normal', 'np.random.normal', (['(0.0)', 'self.perturb_std', 'pc.shape'], {}), '(0.0, self.perturb_std, pc.shape)\n', (14370, 14403), True, 'import numpy as np\n'), ((18650, 18670), 'numpy.radians', 'np.radians', (['init_rot'], {}), '(init_rot)\n', (18660, 18670), True, 'import numpy as np\n'), ((18683, 18742), 'transforms3d.euler.euler2mat', 'transforms3d.euler.euler2mat', (['zrot', 'xrot', 'yrot'], {'axes': '"""szxy"""'}), "(zrot, xrot, yrot, axes='szxy')\n", (18711, 18742), False, 'import transforms3d\n'), ((18805, 18820), 'numpy.dot', 'np.dot', (['pc', 'R.T'], {}), '(pc, R.T)\n', (18811, 18820), True, 'import numpy as np\n'), ((2119, 2139), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (2130, 2139), False, 'import pickle\n'), ((2325, 2339), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2336, 2339), False, 'import pickle\n'), ((6416, 6438), 'os.path.exists', 'exists', 
(['norm_info_file'], {}), '(norm_info_file)\n', (6422, 6438), False, 'from os.path import exists, realpath\n'), ((9912, 9945), 'numpy.random.shuffle', 'np.random.shuffle', (['self.iter_inds'], {}), '(self.iter_inds)\n', (9929, 9945), True, 'import numpy as np\n'), ((14520, 14578), 'numpy.random.choice', 'np.random.choice', (['pc.shape[0]', 'self.num_pts'], {'replace': '(False)'}), '(pc.shape[0], self.num_pts, replace=False)\n', (14536, 14578), True, 'import numpy as np\n'), ((15047, 15100), 'numpy.array', 'np.array', (['(lin_vel_list + [lin_vel_list[-1]] * pad_len)'], {}), '(lin_vel_list + [lin_vel_list[-1]] * pad_len)\n', (15055, 15100), True, 'import numpy as np\n'), ((15175, 15228), 'numpy.array', 'np.array', (['(ang_vel_list + [ang_vel_list[-1]] * pad_len)'], {}), '(ang_vel_list + [ang_vel_list[-1]] * pad_len)\n', (15183, 15228), True, 'import numpy as np\n'), ((15291, 15336), 'numpy.array', 'np.array', (['(pos_list + [pos_list[-1]] * pad_len)'], {}), '(pos_list + [pos_list[-1]] * pad_len)\n', (15299, 15336), True, 'import numpy as np\n'), ((15405, 15450), 'numpy.array', 'np.array', (['(rot_list + [rot_list[-1]] * pad_len)'], {}), '(rot_list + [rot_list[-1]] * pad_len)\n', (15413, 15450), True, 'import numpy as np\n'), ((15718, 15775), 'numpy.array', 'np.array', (['(euler_rot_list + [euler_rot_list[-1]] * pad_len)'], {}), '(euler_rot_list + [euler_rot_list[-1]] * pad_len)\n', (15726, 15775), True, 'import numpy as np\n'), ((17327, 17380), 'numpy.array', 'np.array', (['self.data.lin_vel[idx][start_step:end_step]'], {}), '(self.data.lin_vel[idx][start_step:end_step])\n', (17335, 17380), True, 'import numpy as np\n'), ((17407, 17460), 'numpy.array', 'np.array', (['self.data.ang_vel[idx][start_step:end_step]'], {}), '(self.data.ang_vel[idx][start_step:end_step])\n', (17415, 17460), True, 'import numpy as np\n'), ((17483, 17532), 'numpy.array', 'np.array', (['self.data.pos[idx][start_step:end_step]'], {}), '(self.data.pos[idx][start_step:end_step])\n', (17491, 17532), True, 'import numpy as np\n'), ((17555, 17610), 'numpy.array', 'np.array', (['self.data.total_rot[idx][start_step:end_step]'], {}), '(self.data.total_rot[idx][start_step:end_step])\n', (17563, 17610), True, 'import numpy as np\n'), ((17765, 17820), 'numpy.array', 'np.array', (['self.data.rot_euler[idx][start_step:end_step]'], {}), '(self.data.rot_euler[idx][start_step:end_step])\n', (17773, 17820), True, 'import numpy as np\n'), ((6038, 6050), 'os.path.exists', 'exists', (['root'], {}), '(root)\n', (6044, 6050), False, 'from os.path import exists, realpath\n'), ((15578, 15637), 'numpy.array', 'np.array', (['(delta_quat_list + [delta_quat_list[-1]] * pad_len)'], {}), '(delta_quat_list + [delta_quat_list[-1]] * pad_len)\n', (15586, 15637), True, 'import numpy as np\n'), ((15892, 15949), 'numpy.array', 'np.array', (['(delta_rot_list + [delta_rot_list[-1]] * pad_len)'], {}), '(delta_rot_list + [delta_rot_list[-1]] * pad_len)\n', (15900, 15949), True, 'import numpy as np\n'), ((16090, 16159), 'numpy.array', 'np.array', (['(delta_rot_split_list + [delta_rot_split_list[-1]] * pad_len)'], {}), '(delta_rot_split_list + [delta_rot_split_list[-1]] * pad_len)\n', (16098, 16159), True, 'import numpy as np\n'), ((16229, 16271), 'numpy.zeros', 'np.zeros', (['(total_steps + pad_len)'], {'dtype': 'int'}), '(total_steps + pad_len, dtype=int)\n', (16237, 16271), True, 'import numpy as np\n'), ((17680, 17736), 'numpy.array', 'np.array', (['self.data.delta_quat[idx][start_step:end_step]'], {}), '(self.data.delta_quat[idx][start_step:end_step])\n', 
(17688, 17736), True, 'import numpy as np\n'), ((17881, 17936), 'numpy.array', 'np.array', (['self.data.delta_rot[idx][start_step:end_step]'], {}), '(self.data.delta_rot[idx][start_step:end_step])\n', (17889, 17936), True, 'import numpy as np\n'), ((18009, 18070), 'numpy.array', 'np.array', (['self.data.delta_rot_split[idx][start_step:end_step]'], {}), '(self.data.delta_rot_split[idx][start_step:end_step])\n', (18017, 18070), True, 'import numpy as np\n'), ((18142, 18172), 'numpy.zeros', 'np.zeros', (['num_steps'], {'dtype': 'int'}), '(num_steps, dtype=int)\n', (18150, 18172), True, 'import numpy as np\n'), ((17154, 17194), 'numpy.random.randint', 'np.random.randint', (['(0)', '(max_start_step + 1)'], {}), '(0, max_start_step + 1)\n', (17171, 17194), True, 'import numpy as np\n'), ((17054, 17100), 'numpy.random.randint', 'np.random.randint', (['min_idx', '(max_start_step + 1)'], {}), '(min_idx, max_start_step + 1)\n', (17071, 17100), True, 'import numpy as np\n'), ((9681, 9702), 'numpy.linalg.norm', 'np.linalg.norm', (['x[:3]'], {}), '(x[:3])\n', (9695, 9702), True, 'import numpy as np\n')]
|
import numpy as np
from operator import truediv
def AA_andEachClassAccuracy(confusion_matrix):
    """Return the per-class accuracies and their unweighted mean (average accuracy, AA)
    computed from a square confusion matrix."""
    counter = confusion_matrix.shape[0]  # number of classes
    list_diag = np.diag(confusion_matrix)
    list_raw_sum = np.sum(confusion_matrix, axis=1)
    each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))
    average_acc = np.mean(each_acc)
    return each_acc, average_acc
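# A minimal usage sketch (illustrative only): the 3x3 confusion matrix below is
# made up, but it shows the shape of the result -- a per-class accuracy vector
# and its unweighted mean.
if __name__ == "__main__":
    example_cm = np.array([[8, 1, 1],
                           [2, 7, 1],
                           [0, 2, 8]])
    each_acc, average_acc = AA_andEachClassAccuracy(example_cm)
    print(each_acc)     # [0.8 0.7 0.8] for this example
    print(average_acc)  # mean of the per-class accuracies, ~0.767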
|
[
"operator.truediv",
"numpy.sum",
"numpy.mean",
"numpy.diag"
] |
[((157, 182), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (164, 182), True, 'import numpy as np\n'), ((203, 235), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(1)'}), '(confusion_matrix, axis=1)\n', (209, 235), True, 'import numpy as np\n'), ((319, 336), 'numpy.mean', 'np.mean', (['each_acc'], {}), '(each_acc)\n', (326, 336), True, 'import numpy as np\n'), ((266, 298), 'operator.truediv', 'truediv', (['list_diag', 'list_raw_sum'], {}), '(list_diag, list_raw_sum)\n', (273, 298), False, 'from operator import truediv\n')]
|
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import BoundaryNorm
def plot_images(
num_sample_perclass=10, x=None, y=None, labels=None, title=None, cmap=None
):
grid_x = num_sample_perclass + 1
grid_y = len(labels)
plt.figure(figsize=(grid_y, grid_x))
gs1 = gridspec.GridSpec(grid_y, grid_x)
gs1.update(wspace=0.025, hspace=0.05)
font = {"family": "serif", "weight": "bold"}
plt.suptitle(title)
j = 0
for i in range(grid_y):
idxs = [0] + list(np.where(y == list(labels.keys())[i])[0][: grid_x - 1])
label = labels[list(labels.keys())[i]]
for k, idx in enumerate(idxs):
ax1 = plt.subplot(gs1[j])
if k == 0:
ax1.text(0, 0.25, label, ha="right", wrap=True, fontdict=font)
else:
ax1.imshow(x[idx, ...], cmap=cmap)
plt.axis("off")
j += 1
plt.show()
def plot_2D(x, y, title, axis="off"):
BLUE, ORANGE = "#57B5E8", "#E69E00"
plt.figure(figsize=(8, 8))
plt.scatter(
x[:, 0],
x[:, 1],
s=18,
facecolors="none",
edgecolors=np.array([BLUE, ORANGE])[y],
)
if axis == "off":
plt.axis("off")
elif axis == "on":
plt.xlabel("x_1")
plt.ylabel("x_2")
else:
print("incorrect values for arg: axis (on or off only)")
sys.exit()
plt.title(title)
plt.show()
def plot_dna(df, label):
matrix = df.values
col_names = df.columns
rows = np.arange(matrix.shape[0])
cols = np.arange(matrix.shape[1])
np.random.seed(3)
np.random.shuffle(rows)
np.random.shuffle(cols)
matrix = matrix[:, cols[:100]].T
matrix = matrix[:, rows]
col_names = col_names[cols[:100]]
label = label[rows]
mat_min = np.min(matrix)
mat_max = np.max(matrix)
mat_min = -np.max([np.abs(mat_min), mat_max])
mat_max = np.max([np.abs(mat_min), mat_max])
matrix = np.ma.masked_where(np.abs(matrix) <= 0.3, matrix)
plt.figure(figsize=(6, 12))
cmap_list = ["red", "darkred", "green", "lime", "lightgreen"]
cmap = LinearSegmentedColormap.from_list("Custom cmap", cmap_list, len(cmap_list))
cmap.set_bad("black")
bounds = np.linspace(
mat_min + 6, mat_max - 6, 5
) # np.arange(mat_min + 6, mat_max - 6, 0.1)
idx = np.searchsorted(bounds, 0)
bounds = np.insert(bounds, idx, 0)
norm = BoundaryNorm(bounds, cmap.N)
plt.imshow(matrix, cmap=cmap, norm=norm)
plt.xticks(np.arange(len(label)))
plt.yticks(np.arange(len(col_names)))
ax = plt.gca()
ax.set_xticklabels(label, rotation=90)
ax.set_yticklabels(col_names)
ax.yaxis.tick_right()
    ax.tick_params(axis="both", which="both", labelsize=5, length=0.0)
plt.tight_layout()
fig = plt.gcf()
# fig.set_size_inches((6, 12), forward=False)
# fig.savefig("img/dna.png", dpi=200)
plt.show()
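# A minimal usage sketch for plot_2D (illustrative only, random data): it expects
# an (N, 2) feature array and an integer 0/1 label vector, which is used to pick
# one of the two edge colours per point.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x_demo = rng.randn(200, 2)
    y_demo = (x_demo[:, 0] + x_demo[:, 1] > 0).astype(int)
    plot_2D(x_demo, y_demo, title="demo scatter", axis="on")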
|
[
"matplotlib.pyplot.ylabel",
"numpy.array",
"sys.exit",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.searchsorted",
"matplotlib.pyplot.xlabel",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.random.seed",
"numpy.min",
"matplotlib.pyplot.axis",
"numpy.abs",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"numpy.insert",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.colors.BoundaryNorm",
"matplotlib.pyplot.subplot",
"numpy.random.shuffle"
] |
[((366, 402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(grid_y, grid_x)'}), '(figsize=(grid_y, grid_x))\n', (376, 402), True, 'import matplotlib.pyplot as plt\n'), ((413, 446), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['grid_y', 'grid_x'], {}), '(grid_y, grid_x)\n', (430, 446), True, 'import matplotlib.gridspec as gridspec\n'), ((544, 563), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (556, 563), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1043, 1045), True, 'import matplotlib.pyplot as plt\n'), ((1130, 1156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1140, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1539), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1532, 1539), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1552, 1554), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1669), 'numpy.arange', 'np.arange', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (1652, 1669), True, 'import numpy as np\n'), ((1681, 1707), 'numpy.arange', 'np.arange', (['matrix.shape[1]'], {}), '(matrix.shape[1])\n', (1690, 1707), True, 'import numpy as np\n'), ((1712, 1729), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (1726, 1729), True, 'import numpy as np\n'), ((1734, 1757), 'numpy.random.shuffle', 'np.random.shuffle', (['rows'], {}), '(rows)\n', (1751, 1757), True, 'import numpy as np\n'), ((1762, 1785), 'numpy.random.shuffle', 'np.random.shuffle', (['cols'], {}), '(cols)\n', (1779, 1785), True, 'import numpy as np\n'), ((1929, 1943), 'numpy.min', 'np.min', (['matrix'], {}), '(matrix)\n', (1935, 1943), True, 'import numpy as np\n'), ((1958, 1972), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (1964, 1972), True, 'import numpy as np\n'), ((2140, 2167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 12)'}), '(figsize=(6, 12))\n', (2150, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2401), 'numpy.linspace', 'np.linspace', (['(mat_min + 6)', '(mat_max - 6)', '(5)'], {}), '(mat_min + 6, mat_max - 6, 5)\n', (2372, 2401), True, 'import numpy as np\n'), ((2470, 2496), 'numpy.searchsorted', 'np.searchsorted', (['bounds', '(0)'], {}), '(bounds, 0)\n', (2485, 2496), True, 'import numpy as np\n'), ((2511, 2536), 'numpy.insert', 'np.insert', (['bounds', 'idx', '(0)'], {}), '(bounds, idx, 0)\n', (2520, 2536), True, 'import numpy as np\n'), ((2548, 2576), 'matplotlib.colors.BoundaryNorm', 'BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (2560, 2576), False, 'from matplotlib.colors import BoundaryNorm\n'), ((2582, 2622), 'matplotlib.pyplot.imshow', 'plt.imshow', (['matrix'], {'cmap': 'cmap', 'norm': 'norm'}), '(matrix, cmap=cmap, norm=norm)\n', (2592, 2622), True, 'import matplotlib.pyplot as plt\n'), ((2712, 2721), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2719, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2920), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2918, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2940), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2938, 2940), True, 'import matplotlib.pyplot as plt\n'), ((3037, 3047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3045, 3047), True, 'import matplotlib.pyplot as plt\n'), ((1333, 1348), 'matplotlib.pyplot.axis', 
'plt.axis', (['"""off"""'], {}), "('off')\n", (1341, 1348), True, 'import matplotlib.pyplot as plt\n'), ((789, 808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[j]'], {}), '(gs1[j])\n', (800, 808), True, 'import matplotlib.pyplot as plt\n'), ((995, 1010), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1003, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1397), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x_1"""'], {}), "('x_1')\n", (1390, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1423), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x_2"""'], {}), "('x_2')\n", (1416, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1517), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1515, 1517), False, 'import sys\n'), ((2045, 2060), 'numpy.abs', 'np.abs', (['mat_min'], {}), '(mat_min)\n', (2051, 2060), True, 'import numpy as np\n'), ((2104, 2118), 'numpy.abs', 'np.abs', (['matrix'], {}), '(matrix)\n', (2110, 2118), True, 'import numpy as np\n'), ((1268, 1292), 'numpy.array', 'np.array', (['[BLUE, ORANGE]'], {}), '([BLUE, ORANGE])\n', (1276, 1292), True, 'import numpy as np\n'), ((1996, 2011), 'numpy.abs', 'np.abs', (['mat_min'], {}), '(mat_min)\n', (2002, 2011), True, 'import numpy as np\n')]
|
from data.data_reader import BIZCARD_LABEL_MAP, BizcardDataParser
import argparse
from pathlib import Path
import os
import json
import cv2
import numpy as np
def convert_bizcard_to_coco_format(image_dir, json_dir, id_list, out_dir, out_name):
coco_json = {}
images = []
annotations = []
categories = []
for _, key in enumerate(BIZCARD_LABEL_MAP.keys()):
categories.append({
'id': BIZCARD_LABEL_MAP[key],
'name': key
})
with open(id_list) as fp:
ids = fp.readlines()
for idx, file_id in enumerate(ids):
file_id = Path(file_id.strip())
print(idx, file_id)
if not (image_dir / file_id).with_suffix('.jpg').exists():
file_id = file_id.with_suffix('.jpeg')
else:
file_id = file_id.with_suffix('.jpg')
height, width = cv2.imread(str(image_dir / file_id)).shape[:2]
images.append({
'file_name': str(file_id),
'id': idx,
'height': height,
'width': width
})
try:
            gt = BizcardDataParser.parse_data(
                str((json_dir / file_id).with_suffix('.json')), str(image_dir / file_id))[0]
for word in gt.words:
anno = {
'id': len(annotations),
'image_id': idx,
'bbox': [word.bbox.min_x, word.bbox.min_y, (word.bbox.max_x - word.bbox.min_x),
(word.bbox.max_y - word.bbox.min_y)],
'segmentation': [word.bbox.val],
'category_id': word.label,
'iscrowd': 0,
'area': cv2.contourArea(np.reshape(word.bbox.val, [-1, 2]).astype(np.float32))
}
annotations.append(anno)
except Exception as e:
print(e)
print(str(image_dir / file_id))
coco_json['images'] = images
coco_json['annotations'] = annotations
coco_json['categories'] = categories
with open(Path(out_dir, out_name), 'w') as f:
json.dump(coco_json, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_dir', type=str)
parser.add_argument('--gt_dir', type=str)
parser.add_argument('--data_list', type=str)
parser.add_argument('--out_dir', type=str)
parser.add_argument('--out_name', type=str)
args = parser.parse_args()
if not Path(args.out_dir).exists():
Path(args.out_dir).mkdir()
convert_bizcard_to_coco_format(
Path(args.img_dir),
Path(args.gt_dir),
args.data_list,
args.out_dir,
args.out_name)
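# Example invocation (file and directory names below are placeholders, not part
# of the original project):
#   python <this_script>.py --img_dir ./images --gt_dir ./gt_json \
#       --data_list ./train_ids.txt --out_dir ./coco --out_name train.json
# The --data_list file is expected to hold one image id (without extension) per line.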
|
[
"data.data_reader.BIZCARD_LABEL_MAP.keys",
"numpy.reshape",
"argparse.ArgumentParser",
"pathlib.Path",
"json.dump"
] |
[((2152, 2177), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2175, 2177), False, 'import argparse\n'), ((351, 375), 'data.data_reader.BIZCARD_LABEL_MAP.keys', 'BIZCARD_LABEL_MAP.keys', ([], {}), '()\n', (373, 375), False, 'from data.data_reader import BIZCARD_LABEL_MAP, BizcardDataParser\n'), ((2086, 2109), 'json.dump', 'json.dump', (['coco_json', 'f'], {}), '(coco_json, f)\n', (2095, 2109), False, 'import json\n'), ((2567, 2585), 'pathlib.Path', 'Path', (['args.img_dir'], {}), '(args.img_dir)\n', (2571, 2585), False, 'from pathlib import Path\n'), ((2595, 2612), 'pathlib.Path', 'Path', (['args.gt_dir'], {}), '(args.gt_dir)\n', (2599, 2612), False, 'from pathlib import Path\n'), ((2042, 2065), 'pathlib.Path', 'Path', (['out_dir', 'out_name'], {}), '(out_dir, out_name)\n', (2046, 2065), False, 'from pathlib import Path\n'), ((2458, 2476), 'pathlib.Path', 'Path', (['args.out_dir'], {}), '(args.out_dir)\n', (2462, 2476), False, 'from pathlib import Path\n'), ((2495, 2513), 'pathlib.Path', 'Path', (['args.out_dir'], {}), '(args.out_dir)\n', (2499, 2513), False, 'from pathlib import Path\n'), ((1700, 1734), 'numpy.reshape', 'np.reshape', (['word.bbox.val', '[-1, 2]'], {}), '(word.bbox.val, [-1, 2])\n', (1710, 1734), True, 'import numpy as np\n')]
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
import re
import logging
from nomad.units import ureg
from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser
from nomad.datamodel.metainfo.simulation.run import Run, Program
from nomad.datamodel.metainfo.simulation.method import (
Method, DFT, Electronic, Smearing, XCFunctional, Functional,
GW as GWMethod, Scf, BasisSet
)
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms
)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges,
Forces, ForcesEntry, ScfIteration, BandGap
)
from nomad.datamodel.metainfo.workflow import Workflow, GeometryOptimization
from .metainfo.exciting import x_exciting_section_MT_charge_atom, x_exciting_section_MT_moment_atom,\
x_exciting_section_spin, x_exciting_section_fermi_surface,\
x_exciting_section_atoms_group
re_float = r'[-+]?\d+\.\d*(?:[Ee][-+]\d+)?'
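# For reference: re_float matches signed decimals with an optional signed exponent,
# e.g. '-1.2345', '3.0' or '6.02E+23'; a bare integer such as '42' is not matched
# because the decimal point is required.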
class GWInfoParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
def str_to_frequency(val_in):
val = [v.split() for v in val_in.split('\n')]
val = np.transpose(np.array([v for v in val if len(v) == 3], float))
return dict(
number=np.array(val[0], dtype=int), values=val[1] * ureg.hartree,
weights=val[2])
# TODO Read also input parameters here if input_GW.xml does not exist
self._quantities.append(
Quantity(
'frequency_data', r'frequency list:\s*\<\s*#\s*freqs\s*weight\s*>\s*([\d\.Ee\s\-]+)',
str_operation=str_to_frequency, repeats=False)
)
self._quantities.append(
Quantity(
'fermi_energy', r'\-\s*G0W0.+\-\s*\-+\s*[\s\S]*?Fermi [Ee]nergy\s*[:=](\s*-?[\d\.]+)\s',
unit=ureg.hartree, repeats=False)
)
self._quantities.append(
Quantity(
'direct_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Direct BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
self._quantities.append(
Quantity(
'fundamental_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Fundamental BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
self._quantities.append(
Quantity(
'optical_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Optical BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
class ExcitingEvalqpParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
def str_to_eigenvalue(val_in):
val = val_in.strip().split('\n')
kpts = np.array(val[0].split(), dtype=float)
keys = val[1].split()
eigs = np.transpose(np.array([v.split() for v in val[2:]], dtype=float))
eigs = {keys[i]: eigs[i] for i in range(len(keys))}
return [kpts, eigs]
self._quantities.append(
Quantity(
'kpoints_eigenvalues', r'\s*k\-point \#\s*\d+:\s*([\d\s\.\-]+)([ \w\(\)]+\n)([\s\d\.\-Ee]+)',
str_operation=str_to_eigenvalue, repeats=True))
class BandstructureDatParser(DataTextParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
        # TODO make a parent class for bandstructure dat and xml
self._nspin = None
self._nkpts_segment = None
self._neigs_segment = None
self._vertices = None
self._distances = None
self._band_energies = None
self._band_k_points = None
@property
def band_energies(self):
if self._band_energies is None:
if self.data is None:
return
data = np.transpose(self.data)
n_kpoints = int(max(data[1]))
bands = data[6:]
bands = np.reshape(bands, (
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues, n_kpoints))
self._band_energies = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([np.transpose(band)[start:end] for band in bands])
if self._energy_unit:
band_energy = band_energy * self._energy_unit
self._band_energies.append(band_energy)
start = end
return self._band_energies
@property
def band_k_points(self):
if self._band_k_points is None:
data = np.transpose(self.data)
self._band_k_points = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
self._band_k_points.append(
np.transpose(data[2:5])[start:end])
start = end
return self._band_k_points
@property
def distances(self):
if self._distances is None:
data = np.transpose(self.data)
self._distances = data[5][:int(max(data[1]))]
return self._distances
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = np.shape(np.transpose(self.data))[0] - 6
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
data = np.transpose(self.data)
self._neigs_segment = int(max(data[0]))
return self._neigs_segment
class BandOutParser(DataTextParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
self._nspin = None
self._distances = None
self._band_energies = None
self._neigs_segment = None
self._nkpts_segment = None
@property
def band_energies(self):
if self._band_energies is None:
data = np.transpose(self.data)
n_kpoints = np.where(data[0] == data[0][0])[0][1]
bands = data[1:]
bands = np.reshape(bands, (
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues, n_kpoints))
self._band_energies = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([np.transpose(band)[start:end] for band in bands])
if self._energy_unit:
band_energy = band_energy * self._energy_unit
self._band_energies.append(band_energy)
start = end
return self._band_energies
@property
def distances(self):
if self._distances is None:
dist = np.transpose(self.data)[0]
n_k_points = np.where(dist == dist[0])[0][1]
self._distances = dist[:n_k_points]
return self._distances
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = np.shape(np.transpose(self.data)[1:])[0]
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
data = np.transpose(self.data)[0]
self._neigs_segment = len(np.where(data == data[0])[0])
return self._neigs_segment
class BandstructureXMLParser(XMLParser):
def __init__(self, **kwargs):
# TODO make a parent class for dos and bandstructure
super().__init__(None)
self._distance_key = 'distance'
self._coord_key = 'coord'
self._energy_key = 'eval'
self._vertex_key = 'vertex'
self._band_key = 'band'
self._atom_key = 'atom'
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
self._nspin = None
self._nkpts_segment = None
self._neigs_segment = None
self._bands = None
self._vertices = None
self._distances = None
self._species = None
@property
def distances(self):
if self._distances is None:
if not self.bands:
return
self._distances = [
point.attrib.get(self._distance_key) for point in self.bands[0][0]]
self._distances = np.array(self._distances, dtype=float)
return self._distances
@property
def bands(self):
if self._bands is None:
bands = self.root.findall('./%s' % self._band_key)
self._bands = []
if bands:
self._bands.append(bands)
# add atom-resolved
bands_atom = self.root.findall('./*/%s' % self._atom_key)
for band in bands_atom:
self._bands.append(band.findall('./%s' % self._band_key))
return self._bands
@property
def vertices(self):
if self._vertices is None:
self._vertices = self.root.findall('./%s' % self._vertex_key)
return self._vertices
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = 1
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
                    self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
self._neigs_segment = len(self.bands[0]) // self.number_of_spin_channels
return self._neigs_segment
def parse(self, key):
if self._results is None:
self._results = dict()
if not self.bands:
return
if key == 'band_energies':
# TODO I am not certain about the format for the spin polarized case
# I cannot find an example bandstructure file
# atom-resolved bandstructure are added as separate section_k_band
res = []
for n in range(len(self.bands)):
res_n = []
start = 0
band_energies = np.zeros((
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues,
len(self.distances)), dtype=float)
for i in range(len(self.bands[n])):
band_energies[i % self.number_of_spin_channels][i] = np.array(
[e.attrib.get(self._energy_key) for e in self.bands[n][i]])
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([
np.transpose(energy)[start:end] for energy in band_energies])
if self._energy_unit is not None:
band_energy = band_energy * self._energy_unit
res_n.append(band_energy)
start = end
res.append(res_n)
elif key == 'band_k_points':
res = []
for i in range(len(self.number_of_k_points_per_segment)):
start = np.array(
self.vertices[i].attrib.get(self._coord_key).split(), dtype=float)
end = np.array(
self.vertices[i + 1].attrib.get(self._coord_key).split(), dtype=float)
res.append(np.linspace(start, end, self.number_of_k_points_per_segment[i]))
elif key == 'band_segm_labels':
res = []
for i in range(len(self.vertices) - 1):
start = self.vertices[i].attrib.get('label')
end = self.vertices[i + 1].attrib.get('label')
res.append([
'\u0393' if start.lower() == 'gamma' else start,
'\u0393' if end.lower() == 'gamma' else end])
elif key == 'band_segm_start_end':
res = []
for i in range(len(self.number_of_k_points_per_segment)):
start = self.vertices[i].attrib.get(self._coord_key).split()
end = self.vertices[i + 1].attrib.get(self._coord_key).split()
res.append([start, end])
else:
res = None
self._results[key] = res
class DOSXMLParser(XMLParser):
def __init__(self, **kwargs):
super().__init__(None)
self._nspin_key = 'nspin'
self._totaldos_key = 'totaldos'
self._partialdos_key = 'partialdos'
self._diagram_key = 'diagram'
self._l_key = 'l'
self._m_key = 'm'
self._energy_key = 'e'
self._dos_key = 'dos'
self._unit_key = 'unit'
self._energy_unit = kwargs.get('energy_unit', None)
self._units_mapping = dict(hartree=ureg.hartree)
def init_parameters(self):
self._ndos = None
self._natoms = None
self._nspin = None
self._nlm = None
self._energies = None
self._total_dos = None
self._partial_dos = None
@property
def energy_unit(self):
if self._energy_unit is None:
axis = self.root.find('./axis')
if axis is None:
return
self._energy_unit = self._units_mapping.get(axis.attrib.get(self._unit_key).lower(), 1)
return self._energy_unit
@property
def number_of_spin_channels(self):
if self._nspin is None:
if not self.total_dos:
return
self._nspin = len(self.total_dos)
return self._nspin
@property
def number_of_atoms(self):
if self._natoms is None:
partial_dos = self.root.findall('./%s' % self._partialdos_key)
self._natoms = len(partial_dos)
return self._natoms
@property
def number_of_dos(self):
if self._ndos is None:
total_dos = self.root.find('./%s/%s' % (self._totaldos_key, self._diagram_key))
self._ndos = len(total_dos)
return self._ndos
@property
def number_of_lm(self):
if self._nlm is None:
if self.partial_dos is None:
return
self._nlm = 0
l_list = set([int(e.attrib.get(self._l_key)) for e in self.partial_dos])
for li in l_list:
self._nlm += 2 * li + 1
return self._nlm
@property
def total_dos(self):
if self._total_dos is None:
self._total_dos = self.root.findall('./%s/%s' % (self._totaldos_key, self._diagram_key))
return self._total_dos
@property
def partial_dos(self):
if self._partial_dos is None:
self._partial_dos = self.root.findall('./%s/%s' % (self._partialdos_key, self._diagram_key))
return self._partial_dos
@property
def energies(self):
if self._energies is None:
if self.total_dos is None:
return
self._energies = np.array(
[float(point.attrib.get(self._energy_key)) for point in self.total_dos[0]])
if self.energy_unit is not None:
self._energies = self._energies * self.energy_unit
return self._energies
def _get_dos(self, diagram):
dos = np.array(
[point.attrib.get(self._dos_key) for point in diagram], dtype=float)
return dos
def parse(self, key):
if self._results is None:
self._results = dict()
if 'total' in key:
if not self.total_dos:
return
res = np.zeros((self.number_of_spin_channels, self.number_of_dos))
for i in range(len(self.total_dos)):
spin = self.total_dos[i].attrib.get(self._nspin_key, i)
res[i] = self._get_dos(self._total_dos[i])
if self.energy_unit is not None:
res = res * (1 / self.energy_unit)
elif 'partial' in key:
if not self.partial_dos:
return
res = np.zeros((
self.number_of_lm, self.number_of_spin_channels, self.number_of_atoms, self.number_of_dos))
for i in range(len(self.partial_dos)):
spin = self.partial_dos[i].attrib.get(self._nspin_key, None)
if spin is None:
spin = (i % (self.number_of_spin_channels * self.number_of_lm)) // self.number_of_lm
else:
spin = int(spin) - 1
val_l = self.partial_dos[i].attrib.get(self._l_key, None)
val_m = self.partial_dos[i].attrib.get(self._m_key, None)
if val_l is None or val_m is None:
lm = i % self.number_of_lm
else:
lm = int(val_l) ** 2 + int(val_m) + int(val_l)
atom = i // (self.number_of_lm * self.number_of_spin_channels)
res[lm][spin][atom] = self._get_dos(self.partial_dos[i])
if self.energy_unit is not None:
res = res * (1 / self.energy_unit)
elif key == 'energies':
return self.energies
else:
res = None
self._results[key] = res
class ExcitingFermiSurfaceBxsfParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
self._quantities.append(
Quantity(
'fermi_energy', r'Fermi Energy:\s*([\d\.]+)\s*', unit=ureg.hartree, repeats=False))
def str_to_band_parameters(val_in):
val = val_in.strip().split('\n')
nbands = int(val[0])
mesh = np.array(val[1].split(), dtype=int)
origin = np.array(val[2].split(), dtype=float)
vector = np.array([v.split() for v in val[3:6]], dtype=float)
return [nbands, mesh, origin, vector]
self._quantities.append(
Quantity(
'band_parameters', r'BANDGRID_3D_BANDS\s*([\d\.\-Ee\s]+)',
str_operation=str_to_band_parameters, repeats=False))
self._quantities.append(
Quantity(
'fermi_surface', r'BAND:\s*\d+\s*([\d\-\+\.Ee\s]+)\n *E*', unit=ureg.hartree,
repeats=True))
class ExcitingEigenvalueParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
self._quantities.append(
Quantity(
'k_points', r'\s*\d+\s*([\d\.Ee\- ]+):\s*k\-point', repeats=True))
def str_to_eigenvalues(val_in):
val = val_in[:val_in.rfind('\n \n')].strip()
val = np.array([v.split() for v in val.split('\n')], dtype=float)
val = np.transpose(val)
occs = val[-1]
eigs = val[-2]
nspin = 2 if occs[0] == 1. else 1
data = dict()
data['occupancies'] = np.reshape(occs, (nspin, len(occs) // nspin))
data['eigenvalues'] = np.reshape(eigs, (nspin, len(eigs) // nspin))
return data
self._quantities.append(
Quantity(
'eigenvalues_occupancies', r'\(state\, eigenvalue and occupancy below\)\s*([\d\.Ee\-\s]+?(?:\n *\n))',
str_operation=str_to_eigenvalues, repeats=True))
class ExcitingGWOutParser(TextParser):
def __init__(self, mainfile, logger):
super().__init__(mainfile, logger=logger)
def init_quantities(self):
self._quantities = []
class ExcitingInfoParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
re_symbol = re.compile(r'([A-Z][a-z]?)')
def str_to_array(val_in):
val = [v.split(':')[-1].split() for v in val_in.strip().split('\n')]
val = val[0] if len(val) == 1 else val
return np.array(val, dtype=float)
def str_to_atom_properties_dict(val_in):
unit = None
if 'charge' in val_in:
unit = ureg.elementary_charge
elif 'moment' in val_in:
unit = ureg.elementary_charge * ureg.bohr
val = val_in.strip().split('\n')
properties = dict()
atom_resolved = []
species = None
for v in val:
v = v.strip().split(':')
if len(v) < 2:
continue
elif v[0].startswith('species'):
species = re.search(re_symbol, v[-1]).group(1)
elif v[0].startswith('atom'):
v[0] = v[0].split()
v[1] = [float(vi) for vi in v[1].split()]
v[1] = v[1][0] if len(v[1]) == 1 else v[1]
if species is None:
species = v[0][2]
atom_resolved.append(((species, v[1] * unit)))
else:
vi = [float(vii) for vii in v[1].split()]
vi = vi[0] if len(vi) == 1 else vi
properties[v[0].strip()] = vi * unit
properties['atom_resolved'] = atom_resolved
return properties
def str_to_quantity_tolerances(val_in):
return val_in.strip().replace('(', '').replace(')', '').split()
def str_to_energy_dict(val_in):
val = val_in.strip().split('\n')
energies = dict()
for v in val:
v = v.split(':')
if len(v) < 2:
continue
energies[v[0].strip()] = float(v[1]) * ureg.hartree
return energies
self._quantities = [Quantity(
'program_version', r'\s*EXCITING\s*([\w\-\(\)\. ]+)\s*started', repeats=False,
dtype=str, flatten=False)]
initialization_quantities = [
Quantity(
'lattice_vectors',
r'Lattice vectors\s*[\(cartesian\)]*\s*:\s*([\-0-9\.\s]+)\n',
str_operation=str_to_array, unit=ureg.bohr, repeats=False, convert=False),
Quantity(
'lattice_vectors_reciprocal',
r'Reciprocal lattice vectors\s*[\(cartesian\)]*\s*:\s*([\-0-9\.\s]+)\n',
str_operation=str_to_array, unit=1 / ureg.bohr, repeats=False, convert=False),
]
self._system_keys_mapping = {
'x_exciting_unit_cell_volume': ('Unit cell volume', ureg.bohr ** 3),
'x_exciting_brillouin_zone_volume': ('Brillouin zone volume', 1 / ureg.bohr ** 3),
'x_exciting_number_of_atoms': ('Total number of atoms per unit cell', None),
'x_exciting_spin_treatment': ('Spin treatment', None),
'x_exciting_number_of_bravais_lattice_symmetries': ('Number of Bravais lattice symmetries', None),
'x_exciting_number_of_crystal_symmetries': ('Number of crystal symmetries', None),
'x_exciting_kpoint_grid': (r'k\-point grid', None),
'x_exciting_kpoint_offset': (r'k\-point offset', None),
'x_exciting_number_kpoints': (r'Total number of k\-points', None),
'x_exciting_rgkmax': (r'R\^MT\_min \* \|G\+k\|\_max \(rgkmax\)', None),
'x_exciting_species_rtmin': (r'Species with R\^MT\_min', None),
'x_exciting_gkmax': (r'Maximum \|G\+k\| for APW functions', 1 / ureg.bohr),
'x_exciting_gmaxvr': (r'Maximum \|G\| for potential and density', 1 / ureg.bohr),
'x_exciting_gvector_size': (r'G\-vector grid sizes', None),
'x_exciting_gvector_total': (r'Total number of G\-vectors', None),
'x_exciting_lmaxapw': (r' APW functions', None),
'x_exciting_nuclear_charge': ('Total nuclear charge', ureg.elementary_charge),
'x_exciting_electronic_charge': ('Total electronic charge', ureg.elementary_charge),
'x_exciting_core_charge_initial': ('Total core charge', ureg.elementary_charge),
'x_exciting_valence_charge_initial': ('Total valence charge', ureg.elementary_charge),
'x_exciting_wigner_radius': (r'Effective Wigner radius, r\_s', ureg.bohr),
'x_exciting_empty_states': ('Number of empty states', None),
'x_exciting_valence_states': ('Total number of valence states', None),
'x_exciting_hamiltonian_size': ('Maximum Hamiltonian size', None),
'x_exciting_pw': (r'Maximum number of plane\-waves', None),
'x_exciting_lo': (r'Total number of local\-orbitals', None)}
self._method_keys_mapping = {
'smearing_kind': ('Smearing scheme', None),
'smearing_width': ('Smearing width', None)}
for name, key_unit in self._system_keys_mapping.items():
initialization_quantities.append(
Quantity(
name, r'%s\s*:\s*([\s\S]*?)\n' % key_unit[0], unit=key_unit[1], repeats=False)
)
for name, key_unit in self._method_keys_mapping.items():
initialization_quantities.append(
Quantity(
name, r'%s\s*:\s*([\s\S]*?)\n' % key_unit[0], unit=key_unit[1], repeats=False)
)
initialization_quantities.append(Quantity(
'species',
rf'(Species : *\d+ *\(\w+\)[\s\S]+?{re_float} *{re_float} *{re_float}\n\s*\n)',
repeats=True, sub_parser=TextParser(quantities=[
Quantity('number', r'Species : *(\d+)', dtype=np.int32),
Quantity('symbol', r'\((\w+)\)'),
Quantity('file', r'parameters loaded from *: *(.+)'),
Quantity('name', r'name *: *(.+)'),
Quantity('nuclear_charge', rf'nuclear charge *: *({re_float})', dtype=np.float64, unit=ureg.elementary_charge),
Quantity('electronic_charge', rf'electronic charge *: *({re_float})', dtype=np.float64, unit=ureg.elementary_charge),
Quantity('atomic_mass', rf'atomic mass *: *({re_float})', dtype=np.float64, unit=ureg.electron_mass),
Quantity('muffin_tin_radius', rf'muffin-tin radius *: *({re_float})', dtype=np.float64, unit=ureg.bohr),
Quantity('radial_points', rf'radial points in muffin-tin *: *({re_float})', dtype=np.int32),
Quantity('positions_format', r'atomic positions \((.+?)\)', flatten=False),
Quantity(
'positions',
rf'\d+ : *({re_float}) *({re_float}) *({re_float})',
repeats=True, dtype=np.dtype(np.float64))])))
initialization_quantities.append(Quantity(
'potential_mixing', r'Using ([\w ]+) potential mixing', repeats=False, flatten=False)
)
initialization_quantities.append(Quantity(
'xc_functional', r'(Exchange-correlation type[\s\S]+?\n *\n)',
sub_parser=TextParser(quantities=[
Quantity('type', r'Exchange-correlation type +: +(\S+)'),
Quantity(
'name_reference',
r'\n *(.+?,.+)',
str_operation=lambda x: [v.strip() for v in x.split(':')]),
Quantity(
'parameters',
r'\n *(.+?:.+)', repeats=True,
str_operation=lambda x: [v.strip() for v in x.split(':')])]))
)
self._quantities.append(Quantity(
'initialization',
r'(?:All units are atomic|Starting initialization)([\s\S]+?)(?:Using|Ending initialization)', repeats=False,
sub_parser=TextParser(quantities=initialization_quantities))
)
scf_quantities = [
Quantity(
'energy_total', r'[Tt]*otal energy\s*:\s*([\-\d\.Ee]+)', repeats=False,
dtype=float, unit=ureg.hartree),
Quantity(
'energy_contributions', r'(?:Energies|_)([\+\-\s\w\.\:]+?)\n *(?:DOS|Density)',
str_operation=str_to_energy_dict, repeats=False, convert=False),
Quantity(
'x_exciting_dos_fermi',
r'DOS at Fermi energy \(states\/Ha\/cell\)\s*:\s*([\-\d\.Ee]+)',
repeats=False, dtype=float, unit=1 / ureg.hartree),
Quantity(
'charge_contributions',
r'(?:Charges|Electron charges\s*\:*\s*)([\-\s\w\.\:\(\)]+?)\n *[A-Z\+]',
str_operation=str_to_atom_properties_dict, repeats=False, convert=False),
Quantity(
'moment_contributions',
r'(?:Moments\s*\:*\s*)([\-\s\w\.\:\(\)]+?)\n *[A-Z\+]',
str_operation=str_to_atom_properties_dict, repeats=False, convert=False)]
self._miscellaneous_keys_mapping = {
'x_exciting_gap': (r'Estimated fundamental gap', ureg.hartree),
'time': (r'Wall time \(seconds\)', ureg.s)}
for name, key_unit in self._miscellaneous_keys_mapping.items():
scf_quantities.append(Quantity(
name, r'%s\s*\:*\s*([\-\d\.Ee]+)' % key_unit[0], repeats=False,
unit=key_unit[1]))
self._convergence_keys_mapping = {
'x_exciting_effective_potential_convergence': (
r'RMS change in effective potential \(target\)', ureg.hartree),
'x_exciting_energy_convergence': (
r'Absolute change in total energy\s*\(target\)', ureg.hartree),
'x_exciting_charge_convergence': (
r'Charge distance\s*\(target\)', ureg.elementary_charge),
'x_exciting_IBS_force_convergence': (
r'Abs\. change in max\-nonIBS\-force\s*\(target\)', ureg.hartree / ureg.bohr)}
for name, key_unit in self._convergence_keys_mapping.items():
scf_quantities.append(Quantity(
name, r'%s\s*\:*\s*([\(\)\d\.\-\+Ee ]+)' % key_unit[0],
str_operation=str_to_quantity_tolerances, unit=key_unit[1], repeats=False))
module_quantities = [
Quantity(
'scf_iteration', r'(?:I| i)teration number :([\s\S]+?)(?:\n *\n\+{10}|\+\-{10})',
sub_parser=TextParser(quantities=scf_quantities), repeats=True),
Quantity(
'final',
r'(?:Convergence targets achieved\. Performing final SCF iteration|Reached self-consistent loops maximum)([\s\S]+?)(\n *\n\+{10})',
sub_parser=TextParser(quantities=scf_quantities), repeats=False),
Quantity(
'atomic_positions',
r'(Atomic positions\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'Atomic positions\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces', r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Atomic',
repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.hartree / ureg.bohr)
]
self._quantities.append(Quantity(
'groundstate',
r'(?:Self\-consistent loop started|Groundstate module started)([\s\S]+?)Groundstate module stopped',
sub_parser=TextParser(quantities=module_quantities), repeats=False))
optimization_quantities = [
Quantity(
'atomic_positions',
r'(Atomic positions at this step\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'Atomic positions at this step\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces',
r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Time',
repeats=False, str_operation=str_to_array, convert=False, unit=ureg.hartree / ureg.bohr),
Quantity(
'step', r'Optimization step\s*(\d+)', repeats=False, dtype=int),
Quantity(
'method', r'method\s*=\s*(\w+)', repeats=False, dtype=str),
Quantity(
'n_scf_iterations',
r'Number of (?:total)* scf iterations\s*\:\s*(\d+)', repeats=False, dtype=int),
Quantity(
'force_convergence',
r'Maximum force magnitude\s*\(target\)\s*\:(\s*[\(\)\d\.\-\+Ee ]+)',
str_operation=str_to_quantity_tolerances, unit=ureg.hartree / ureg.bohr, repeats=False,
dtype=float),
Quantity(
'energy_total', r'Total energy at this optimization step\s*\:\s*([\-\d\.Ee]+)',
unit=ureg.hartree, repeats=False, dtype=float),
Quantity(
'time', r'Time spent in this optimization step\s*\:\s*([\-\d\.Ee]+)\s*seconds',
unit=ureg.s, repeats=False, dtype=float)
]
self._quantities.append(Quantity(
'structure_optimization',
r'Structure\-optimization module started([\s\S]+?)Structure\-optimization module stopped',
sub_parser=TextParser(quantities=[
Quantity(
'optimization_step',
r'(Optimization step\s*\d+[\s\S]+?(?:\n *\n\-{10}|Time spent in this optimization step\s*:\s*[\d\.]+ seconds))',
sub_parser=TextParser(quantities=optimization_quantities),
repeats=True),
Quantity(
'final',
r'Force convergence target achieved([\s\S]+?Opt)',
sub_parser=TextParser(quantities=scf_quantities),
repeats=False),
Quantity(
'atomic_positions',
r'(imized atomic positions\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'imized atomic positions\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces',
r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Atomic',
repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.hartree / ureg.bohr),
]), repeats=False))
self._quantities.append(Quantity(
'hybrids',
r'Hybrids module started([\s\S]+?)Hybrids module stopped',
sub_parser=TextParser(quantities=module_quantities)
))
def get_atom_labels(self, section):
labels = section.get('symbols')
if labels is None:
# we get it by concatenating species symbols
species = self.get('initialization', {}).get('species', [])
labels = []
for specie in species:
labels += [specie.get('symbol')] * len(specie.get('positions'))
return labels
def get_positions_format(self, section):
positions_format = section.get('positions_format')
if positions_format is None:
species = self.get_initialization_parameter('species', [])
for specie in species:
positions_format = specie.get('positions_format', None)
if positions_format is not None:
break
return positions_format
def get_atom_positions(self, section={}, positions=None, positions_format=None):
positions = positions if positions is not None else section.get('positions')
if positions is None:
species = self.get_initialization_parameter('species', [])
if species:
positions = np.vstack([s.get('positions') for s in species])
if positions is None:
return
positions = np.array(positions)
positions_format = positions_format if positions_format is not None else self.get_positions_format(section)
if positions_format == 'lattice':
cell = self.get_initialization_parameter('lattice_vectors')
if cell is None:
return
positions = np.dot(positions, cell.magnitude)
return positions * ureg.bohr
def get_scf_threshold(self, name):
reference = self.get('groundstate', self.get('hybrids', {}))
return reference.get('scf_iteration', [{}])[-1].get(
name, [None, None])[-1]
def get_scf_quantity(self, name):
n_scf = len(self.get('energy_total_scf_iteration', []))
quantity = self.get('%s_scf_iteration' % name)
if quantity is None:
return
        # this is really problematic if some scf steps don't have the quantity
# the only thing that we can do is to assume that the first steps are the
# ones with the missing quantity
if len(quantity) < n_scf:
quantity = [None] * (n_scf - len(quantity)) + quantity
return quantity
def get_xc_functional_name(self):
# TODO expand list to include other xcf
xc_functional_map = {
2: ['LDA_C_PZ', 'LDA_X_PZ'],
3: ['LDA_C_PW', 'LDA_X_PZ'],
4: ['LDA_C_XALPHA'],
5: ['LDA_C_VBH'],
20: ['GGA_C_PBE', 'GGA_X_PBE'],
21: ['GGA_C_PBE', 'GGA_X_PBE_R'],
22: ['GGA_C_PBE_SOL', 'GGA_X_PBE_SOL'],
26: ['GGA_C_PBE', 'GGA_X_WC'],
30: ['GGA_C_AM05', 'GGA_C_AM05'],
300: ['GGA_C_BGCP', 'GGA_X_PBE'],
406: ['HYB_GGA_XC_PBEH'],
408: ['HYB_GGA_XC_HSE03']}
xc_functional = self.get('initialization', {}).get('xc_functional', None)
if xc_functional is None:
return []
name = xc_functional_map.get(xc_functional.type, [])
return name
@property
def n_optimization_steps(self):
return len(self.get('structure_optimization', {}).get('optimization_step', []))
def get_number_of_spin_channels(self):
spin_treatment = self.get('initialization', {}).get(
'x_exciting_spin_treatment', 'spin-unpolarised')
n_spin = 1 if spin_treatment.lower() == 'spin-unpolarised' else 2
return n_spin
def get_unit_cell_volume(self):
return self.get('initialization', {}).get('x_exciting_unit_cell_volume', 1.0 * ureg.bohr ** 3)
def get_initialization_parameter(self, key, default=None):
return self.get('initialization', {}).get(key, default)
class ExcitingParser:
def __init__(self):
self.info_parser = ExcitingInfoParser()
self.dos_parser = DOSXMLParser(energy_unit=ureg.hartree)
self.bandstructure_parser = BandstructureXMLParser(energy_unit=ureg.hartree)
self.eigval_parser = ExcitingEigenvalueParser()
self.fermisurf_parser = ExcitingFermiSurfaceBxsfParser()
self.evalqp_parser = ExcitingEvalqpParser()
self.dos_out_parser = DataTextParser()
self.bandstructure_dat_parser = BandstructureDatParser(energy_unit=ureg.hartree)
self.band_out_parser = BandOutParser(energy_unit=ureg.hartree)
self.info_gw_parser = GWInfoParser()
self.input_xml_parser = XMLParser()
self.data_xs_parser = DataTextParser()
self.data_clathrate_parser = DataTextParser(dtype=str)
# different names for different versions of exciting
self._energy_keys_mapping = {
'energy_total': ['Total energy', 'total energy'],
'x_exciting_fermi_energy': ['Fermi energy', 'Fermi'],
'energy_kinetic_electronic': ['Kinetic energy', 'electronic kinetic'],
'energy_coulomb': ['Coulomb energy', 'Coulomb'],
'x_exciting_coulomb_energy': ['Coulomb energy', 'Coulomb'],
'energy_exchange': ['Exchange energy', 'exchange'],
'x_exciting_exchange_energy': ['Exchange energy', 'exchange'],
'energy_correlation': ['Correlation energy', 'correlation'],
'x_exciting_correlation_energy': ['Correlation energy', 'correlation'],
'energy_sum_eigenvalues': ['Sum of eigenvalues', 'sum of eigenvalues'],
'x_exciting_effective_potential_energy': ['Effective potential energy'],
'x_exciting_coulomb_potential_energy': ['Coulomb potential energy', 'Coulomb potential'],
'energy_xc_potential': ['xc potential energy', 'xc potential'],
'energy_electrostatic': ['Hartree energy', 'Hartree'],
'x_exciting_hartree_energy': ['Hartree energy', 'Hartree'],
'x_exciting_electron_nuclear_energy': ['Electron-nuclear energy', 'electron-nuclear '],
'x_exciting_nuclear_nuclear_energy': ['Nuclear-nuclear energy', 'nuclear-nuclear'],
'x_exciting_madelung_energy': ['Madelung energy', 'Madelung'],
'x_exciting_core_electron_kinetic_energy': ['Core-electron kinetic energy', 'core electron kinetic'],
'x_exciting_dft_d2_dispersion_correction': ['DFT-D2 dispersion correction']
}
self._electron_charge_keys_mapping = {
'x_exciting_core_charge': ['core'],
'x_exciting_core_leakage': ['core leakage'],
'x_exciting_valence_charge': ['valence'],
'x_exciting_interstitial_charge': ['interstitial'],
'x_exciting_total_MT_charge': ['total charge in muffin-tins', 'total in muffin-tins'],
'charge_total': ['total charge'],
'x_exciting_section_MT_charge_atom': ['atom_resolved']
}
self._moment_keys_mapping = {
'x_exciting_interstitial_moment': ['interstitial'],
'x_exciting_total_MT_moment': ['total moment in muffin-tins'],
'x_exciting_total_moment': ['total moment'],
'x_exciting_section_MT_moment_atom': ['atom_resolved']
}
def get_exciting_files(self, default):
mainfile = os.path.basename(self.info_parser.mainfile)
suffix = mainfile.strip('INFO.OUT')
target = default.rsplit('.', 1)
filename = '%s%s' % (target[0], suffix)
if target[1:]:
filename = '%s.%s' % (filename, target[1])
filename = os.path.join(self.info_parser.maindir, filename)
if os.path.isfile(filename):
return [filename]
filename = os.path.join(self.info_parser.maindir, default)
if not os.path.isfile(filename):
file_ext = default.split('.')[-1]
mainfile_base = mainfile.rsplit('.', 1)[0].replace('INFO', '')
options = [
f for f in os.listdir(
self.info_parser.maindir) if target[0] in f and mainfile_base in f]
options = [f for f in options if f.endswith(file_ext)]
options.sort()
filenames = [os.path.join(self.info_parser.maindir, f) for f in options]
else:
filenames = [filename]
filenames = [f for f in filenames if os.access(f, os.F_OK)]
return filenames
def file_exists(self, filename):
"""Checks if a the given filename exists and is accessible in the same
folder where the mainfile is stored.
"""
mainfile = os.path.basename(self.info_parser.mainfile)
suffix = mainfile.strip('INFO.OUT')
target = filename.rsplit('.', 1)
filepath = '%s%s' % (target[0], suffix)
if target[1:]:
filepath = '%s.%s' % (filepath, target[1])
filepath = os.path.join(self.info_parser.maindir, filepath)
if os.path.isfile(filepath) and os.access(filepath, os.F_OK):
return True
return False
def _parse_dos(self, sec_scc):
if self.dos_parser.get('totaldos', None) is None:
return
# Get fermi energy: it is used to un-shift the DOS to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
sec_dos.n_energies = self.dos_parser.number_of_dos
sec_dos.energies = self.dos_parser.energies + energy_fermi
volume = self.info_parser.get_unit_cell_volume()
totaldos = self.dos_parser.get('totaldos') * volume.to('m**3').magnitude
for spin in range(len(totaldos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = totaldos[spin]
partialdos = self.dos_parser.get('partialdos')
if partialdos is None:
return
partialdos = partialdos.to('1/joule').magnitude
lm_values = np.column_stack((np.arange(len(partialdos)), np.zeros(len(partialdos), dtype=np.int32)))
for lm in range(len(partialdos)):
for spin in range(len(partialdos[lm])):
for atom in range(len(partialdos[lm][spin])):
sec_dos_values = sec_dos.m_create(DosValues, Dos.atom_projected)
sec_dos_values.m_kind = 'spherical'
sec_dos_values.lm = lm_values[lm]
sec_dos_values.spin = spin
sec_dos_values.atom_index = atom
sec_dos_values.value = partialdos[lm][spin][atom]
def _parse_bandstructure(self, sec_scc):
# we need to set nspin again as this is overwritten when setting mainfile
self.bandstructure_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.bandstructure_parser.get('band_energies', [])
for n in range(len(band_energies)):
# Get fermi energy: it is used to un-shift the band structure to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
continue
energy_fermi = energy_fermi.to("hartree")
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
band_k_points = self.bandstructure_parser.get('band_k_points')
nkpts_segment = self.bandstructure_parser.number_of_k_points_per_segment
band_seg_labels = self.bandstructure_parser.get('band_segm_labels')
for nb in range(len(band_energies[n])):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.kpoints = band_k_points[nb]
sec_k_band_segment.endpoints_labels = band_seg_labels[nb]
sec_k_band_segment.energies = band_energies[n][nb] + energy_fermi
def _parse_eigenvalues(self, sec_scc):
if self.eigval_parser.get('eigenvalues_occupancies', None) is None:
return
nspin = self.info_parser.get_number_of_spin_channels()
def get_data(key):
data = self.eigval_parser.get('eigenvalues_occupancies')
# reshaping is not necessary as this is done in parser, however nspin is
# determined from occupancies which is problematic sometimes
res = np.hstack([np.reshape(v[key], (nspin, np.size(v[key]) // nspin)) for v in data])
res = res.reshape((len(res), len(data), len(res[0]) // len(data)))
if key == 'eigenvalues':
res = res * ureg.hartree
return res
sec_eigenvalues = sec_scc.m_create(BandEnergies)
sec_eigenvalues.kpoints = self.eigval_parser.get('k_points')
sec_eigenvalues.occupations = get_data('occupancies')
sec_eigenvalues.energies = get_data('eigenvalues')
def _parse_fermisurface(self, sec_scc):
fermi_surface = self.fermisurf_parser.get('fermi_surface', [None])[0]
if fermi_surface is None:
return
sec_fermisurface = sec_scc.m_create(x_exciting_section_fermi_surface)
band_parameters = self.fermisurf_parser.get('band_parameters', None)
if band_parameters is not None:
sec_fermisurface.x_exciting_number_of_bands_fermi_surface = band_parameters[0]
sec_fermisurface.x_exciting_number_of_mesh_points_fermi_surface = np.product(band_parameters[1])
sec_fermisurface.x_exciting_grid_fermi_surface = band_parameters[1]
sec_fermisurface.x_exciting_origin_fermi_surface = band_parameters[2]
sec_fermisurface.x_exciting_vectors_fermi_surface = band_parameters[3]
fermi_energy = self.fermisurf_parser.get('fermi_energy', None)
if fermi_energy is not None:
sec_fermisurface.x_exciting_fermi_energy_fermi_surface = fermi_energy
sec_fermisurface.x_exciting_values_fermi_surface = fermi_surface
def _parse_evalqp(self, sec_scc):
data = self.evalqp_parser.get('kpoints_eigenvalues')
if data is None:
return
def get_data(key):
if key == 'k_points':
return np.array([d[0][:3] for d in data])
elif key == 'Znk':
return np.array([d[1].get(key, None) for d in data])
else:
energy = np.array([d[1].get(key, None) for d in data])
if None in energy:
return energy
return np.array([d[1].get(key) for d in data]) * ureg.hartree
eigs_gw = get_data('E_GW')
if eigs_gw[0] is None:
return
nspin = self.info_parser.get_number_of_spin_channels()
def reshape(data):
if data[0] is None:
return
return np.reshape(data, (nspin, len(data) // nspin, len(data[0])))
sec_gw_eigenvalues = sec_scc.m_create(BandEnergies)
sec_gw_eigenvalues.qp_linearization_prefactor = reshape(get_data('Znk'))
sec_gw_eigenvalues.n_bands = len(eigs_gw[0])
sec_gw_eigenvalues.n_kpoints = len(eigs_gw)
sec_gw_eigenvalues.kpoints = get_data('k_points')
sec_gw_eigenvalues.energies = reshape(eigs_gw)
sec_gw_eigenvalues.value_exchange = reshape(get_data('Sx'))
eigs_gw_C = reshape(get_data('Sc'))
if eigs_gw_C is None:
eigs_gw_C = reshape(get_data('Re(Sc)'))
sec_gw_eigenvalues.value_correlation = eigs_gw_C
sec_gw_eigenvalues.value_xc_potential = reshape(get_data('Vxc'))
def _parse_dos_out(self, sec_scc):
data = self.dos_out_parser.data
if data is None:
return
# Get fermi energy: it is used to un-shift the DOS to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
# TODO I am not sure about format for spin-polarized case! I assume it is
# energy dos_up dos_down
nspin = self.info_parser.get_number_of_spin_channels()
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
sec_dos.n_energies = len(data) // nspin
data = np.reshape(data, (nspin, len(data) // nspin, 2))
data = np.transpose(data, axes=(2, 0, 1))
sec_dos.energies = data[0][0] * ureg.hartree + energy_fermi
volume = self.info_parser.get_unit_cell_volume()
dos = data[1] * (1 / ureg.hartree) * volume.to('m**3').magnitude
for spin in range(len(dos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = dos[spin]
# TODO add PDOS
def _parse_bandstructure_dat(self, sec_scc):
self.bandstructure_dat_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.bandstructure_dat_parser.band_energies
if band_energies is None:
return
# Get fermi energy: it is used to un-shift the band structure to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
band_k_points = self.bandstructure_dat_parser.band_k_points
nkpts_segment = self.bandstructure_dat_parser.number_of_k_points_per_segment
for nb in range(len(band_energies)):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.kpoints = band_k_points[nb]
sec_k_band_segment.energies = band_energies[nb] + energy_fermi
def _parse_band_out(self, sec_scc):
self.band_out_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.band_out_parser.band_energies
if band_energies is None:
return
# Get the Fermi energy: it is used to un-shift the band structure back to
# the original scale in which the other energies are also reported.
energy_fermi = 0.0 * ureg.hartree
if sec_scc.energy is not None:
energy_fermi = sec_scc.energy.fermi
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
nkpts_segment = self.band_out_parser.number_of_k_points_per_segment
for nb in range(len(band_energies)):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.value = band_energies[nb] + energy_fermi
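# Dispatch a named output file to the matching sub-parser and parsing function.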
def parse_file(self, name, section):
# TODO add support for info.xml, wannier.out
if name.startswith('dos') and name.endswith('xml'):
parser = self.dos_parser
parser_function = self._parse_dos
elif name.startswith('bandstructure') and name.endswith('xml'):
parser = self.bandstructure_parser
parser_function = self._parse_bandstructure
elif name.startswith('EIGVAL') and name.endswith('OUT'):
parser = self.eigval_parser
parser_function = self._parse_eigenvalues
elif (name.startswith('FERMISURF') or name.startswith('FS')) and name.endswith('bxsf'):
parser = self.fermisurf_parser
parser_function = self._parse_fermisurface
elif name.startswith('EVALQP') and (name.endswith('DAT') or name.endswith('TXT')):
parser = self.evalqp_parser
parser_function = self._parse_evalqp
elif name.startswith('TDOS') and name.endswith('OUT'):
parser = self.dos_out_parser
parser_function = self._parse_dos_out
elif name.startswith('bandstructure') and name.endswith('dat'):
parser = self.bandstructure_dat_parser
parser_function = self._parse_bandstructure_dat
elif name.startswith('BAND') and name.endswith('OUT'):
parser = self.band_out_parser
parser_function = self._parse_band_out
elif name.startswith('input') and name.endswith('xml'):
parser = self.input_xml_parser
if self._calculation_type == 'gw':
parser_function = self._parse_input_gw
elif self._calculation_type == 'xs':
parser_function = self._parse_input_xs
else:
# TODO implement reading of parameters from input.xml for normal calculations
# in addition to INFO.OUT
return
else:
return
files = self.get_exciting_files(name)
if len(files) > 1:
self.logger.warn('Found multiple files. Will read all!', data=dict(file=name))
for n in range(len(files)):
parser.mainfile = files[n]
parser_function(section)
# free up memory
parser.mainfile = None
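# Map the xs (excited states) element of input.xml, including its screening, BSE and tddft
# sub-elements, onto the method metainfo.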
def _parse_input_xs(self, sec_method):
xstype = self.input_xml_parser.get('xs/xstype', None)
if xstype is not None:
sec_method.x_exciting_xs_xstype = xstype
sec_method.x_exciting_electronic_structure_method = xstype
sec_method.x_exciting_xs_broadening = self.input_xml_parser.get(
'xs/broad', 0.01, 'hartree')
sec_method.x_exciting_xs_gqmax = self.input_xml_parser.get(
'xs/gqmax', 0.0, '1/bohr')
sec_method.x_exciting_xs_lmaxapw = self.input_xml_parser.get('xs/lmaxapw', 10)
sec_method.x_exciting_xs_number_of_empty_states = self.input_xml_parser.get(
'xs/nempty', 5)
sec_method.x_exciting_xs_ngridq = self.input_xml_parser.get('xs/ngridq', [1, 1, 1])
sec_method.x_exciting_xs_ngridk = self.input_xml_parser.get('xs/ngridk', [1, 1, 1])
rgkmax = self.input_xml_parser.get('xs/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0.)
sec_method.x_exciting_xs_rgkmax = rgkmax
sec_method.x_exciting_xs_scissor = self.input_xml_parser.get('xs/scissor', 0.0)
sec_method.x_exciting_xs_vkloff = self.input_xml_parser.get('xs/vkloff', [0., 0., 0.])
# TODO I am not certain if screening/BSE are children of xs
if self.input_xml_parser.get('xs/screening') is not None:
sec_method.x_exciting_xs_screening_number_of_empty_states = self.input_xml_parser.get(
'xs/screening/nempty', 0)
sec_method.x_exciting_xs_screening_ngridk = self.input_xml_parser.get(
'xs/screening/ngridk', [0, 0, 0])
rgkmax = self.input_xml_parser.get('xs/screening/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0.)
sec_method.x_exciting_xs_screening_rgkmax = rgkmax
sec_method.x_exciting_xs_screening_type = self.input_xml_parser.get(
'xs/screening/screentype', 'full')
if self.input_xml_parser.get('xs/BSE') is not None:
sec_method.x_exciting_xs_bse_antiresonant = self.input_xml_parser.get(
'xs/BSE/aresbse', True)
sec_method.x_exciting_xs_bse_angular_momentum_cutoff = self.input_xml_parser.get(
'xs/BSE/lmaxdielt', 14)
rgkmax = self.input_xml_parser.get('xs/BSE/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0)
sec_method.x_exciting_xs_bse_rgkmax = rgkmax
sec_method.x_exciting_xs_bse_sciavbd = self.input_xml_parser.get(
'xs/BSE/sciavbd', True)
sec_method.x_exciting_xs_bse_sciavqbd = self.input_xml_parser.get(
'xs/BSE/sciavqbd', False)
sec_method.x_exciting_xs_bse_sciavqhd = self.input_xml_parser.get(
'xs/BSE/sciavqhd', False)
sec_method.x_exciting_xs_bse_sciavqwg = self.input_xml_parser.get(
'xs/BSE/sciavqwg', False)
sec_method.x_exciting_xs_bse_sciavtype = self.input_xml_parser.get(
'xs/BSE/sciavtype', 'spherical')
sec_method.x_exciting_xs_bse_xas = self.input_xml_parser.get(
'xs/BSE/xas', False)
sec_method.x_exciting_xs_bse_number_of_bands = self.input_xml_parser.get(
'xs/BSE/nstlbse', [0, 0, 0, 0])
if sec_method.x_exciting_xs_bse_xas:
sec_method.x_exciting_xs_bse_xasatom = self.input_xml_parser.get(
'xs/BSE/xasatom', 0)
sec_method.x_exciting_xs_bse_xasedge = self.input_xml_parser.get(
'xs/BSE/xasedge', 'K')
sec_method.x_exciting_xs_bse_xasspecies = self.input_xml_parser.get(
'xs/BSE/xasspecies', 0)
sec_method.x_exciting_xs_bse_xas_number_of_bands = self.input_xml_parser.get(
'xs/BSE/nstlxas', [0, 0])
if self.input_xml_parser.get('xs/tddft') is not None:
sec_method.x_exciting_xs_tddft_analytic_continuation = self.input_xml_parser.get(
'xs/tddft/acont', False)
sec_method.x_exciting_xs_tddft_anomalous_Hall_conductivity = self.input_xml_parser.get(
'xs/tddft/ahc', False)
sec_method.x_exciting_xs_tddft_anti_resonant_dielectric = self.input_xml_parser.get(
'xs/tddft/aresdf', False)
sec_method.x_exciting_xs_tddft_anti_resonant_xc_kernel = self.input_xml_parser.get(
'xs/tddft/aresfxc', True)
sec_method.x_exciting_xs_tddft_drude = self.input_xml_parser.get(
'xs/tddft/drude', [0., 0.])
sec_method.x_exciting_xs_tddft_split_parameter = self.input_xml_parser.get(
'xs/tddft/fxcbsesplit', 0.00001, 'hartree')
sec_method.x_exciting_xs_tddft_xc_kernel = self.input_xml_parser.get(
'xs/tddft/fxctype', 'RPA')
sec_method.x_exciting_xs_tddft_finite_q_intraband_contribution = self.input_xml_parser.get(
'xs/tddft/intraband', False)
sec_method.x_exciting_xs_tddft_diagonal_xc_kernel = self.input_xml_parser.get(
'xs/tddft/kerndiag', False)
sec_method.x_exciting_xs_tddft_lmax_alda = self.input_xml_parser.get(
'xs/tddft/lmaxalda', 3)
sec_method.x_exciting_xs_tddft_macroscopic_dielectric_function_q_treatment = self.input_xml_parser.get(
'xs/tddft/mdfqtype', 0)
sec_method.x_exciting_xs_tddft_analytic_continuation_number_of_intervals = self.input_xml_parser.get(
'xs/tddft/nwacont', 0)
sec_method.x_exciting_xs_tetra = self.input_xml_parser.get(
'xs/tetra/tetradf', False)
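# Collect BSE spectra (EXCITON, EPSILON, SIGMA, LOSS) for every BSE type and screening type;
# one Calculation section is created per EXCITON dataset and the matching spectra are attached to it.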
def _parse_xs_bse(self):
sec_run = self.archive.run[-1]
# TODO read from xml file
def get_files(name):
bse_types = ['IP', 'singlet', 'triplet', 'RPA']
scr_types = ['full', 'diag', 'noinvdiag', 'longrange']
bse_files = []
for bse_type in bse_types:
for scr_type in scr_types:
files = self.get_exciting_files(
'%s_BSE%s_SCR%s.OUT' % (name, bse_type, scr_type))
bse_files.append(files)
return bse_files
def get_data(files):
data = []
for f in files:
self.data_xs_parser.mainfile = f
if self.data_xs_parser.data is None:
continue
data.append(self.data_xs_parser.data)
return data
def parse_exciton(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
sec_scc.x_exciting_xs_bse_number_of_components = n_components
n_excitons = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_number_of_excitons = n_excitons
sec_scc.x_exciting_xs_bse_exciton_energies = np.reshape(
data[1], (n_components, n_excitons)) * ureg.hartree
sec_scc.x_exciting_xs_bse_exciton_binding_energies = np.reshape(
data[2], (n_components, n_excitons)) * ureg.hartree
sec_scc.x_exciting_xs_bse_exciton_oscillator_strength = np.reshape(
data[3], (n_components, n_excitons))
sec_scc.x_exciting_xs_bse_exciton_amplitude_re = np.reshape(
data[4], (n_components, n_excitons))
sec_scc.x_exciting_xs_bse_exciton_amplitude_im = np.reshape(
data[5], (n_components, n_excitons))
def parse_epsilon(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_epsilon = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_number_of_energy_points = n_epsilon
sec_scc.x_exciting_xs_bse_epsilon_energies = np.reshape(
data[0], (n_components, n_epsilon)) * ureg.hartree
sec_scc.x_exciting_xs_bse_epsilon_re = np.reshape(
data[1], (n_components, n_epsilon))
sec_scc.x_exciting_xs_bse_epsilon_im = np.reshape(
data[2], (n_components, n_epsilon))
def parse_sigma(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_sigma = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_sigma_energies = np.reshape(
data[0], (n_components, n_sigma)) * ureg.hartree
sec_scc.x_exciting_xs_bse_sigma_re = np.reshape(
data[1], (n_components, n_sigma))
sec_scc.x_exciting_xs_bse_sigma_im = np.reshape(
data[2], (n_components, n_sigma))
def parse_loss(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_loss = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_loss_energies = np.reshape(
data[0], (n_components, n_loss)) * ureg.hartree
sec_scc.x_exciting_xs_bse_loss = np.reshape(
data[1], (n_components, n_loss))
# TODO check if the format of the files is really correct, i.e. whether the columns are
# what they are supposed to be. What is the fourth column in epsilon, which is not parsed?
sccs = []
for quantity in ['EXCITON', 'EPSILON', 'SIGMA', 'LOSS']:
files = get_files(quantity)
for i in range(len(files)):
data = get_data(files[i])
if not data:
sccs.append(None)
continue
if quantity == 'EXCITON':
sec_scc = sec_run.m_create(Calculation)
sccs.append(sec_scc)
else:
sec_scc = sccs[i]
if sec_scc is None:
# This is the case when there is a mismatch between files
self.logger.warn(
'Mismatch in EXCITON and file type', data=dict(file=quantity))
sec_scc = sec_run.m_create(Calculation)
if quantity == 'EXCITON':
parse_function = parse_exciton
elif quantity == 'EPSILON':
parse_function = parse_epsilon
elif quantity == 'SIGMA':
parse_function = parse_sigma
elif quantity == 'LOSS':
parse_function = parse_loss
else:
continue
try:
parse_function(data, sec_scc)
except Exception:
self.logger.error('Error setting xs data', data=dict(file=quantity))
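# Collect TDDFT spectra (dielectric function, loss function, sigma) for each q-point of the
# qpointset, both with (FXC) and without (NLF_FXC) local-field effects.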
def _parse_xs_tddft(self):
sec_run = self.archive.run[-1]
fxctype = self.input_xml_parser.get('xs/tddft/fxctype', 'RPA')
tetradf = self.input_xml_parser.get('xs/tetra/tetradf', None)
nwacont = self.input_xml_parser.get('xs/tddft/nwacont', None)
aresdf = self.input_xml_parser.get('xs/tddft/aresdf', True)
file_ext_list = [
'TET' if tetradf else None, 'AC' if nwacont else None, 'NAR' if not aresdf else None]
file_ext = '_'.join([e for e in file_ext_list if e])
# read q points
qpoints = self.input_xml_parser.get('xs/qpointset/qpoint')
def get_data(quantity, ext):
# all files related to quantity at all qpoints
files = self.get_exciting_files('%s_%s%s%s.OUT' % (quantity, file_ext, ext, fxctype))
data = [[], [], []]
for i in range(len(qpoints)):
data_q = []
files_q = [f for f in files if f.endswith('QMT%s.OUT' % str(i + 1).rjust(3, '0'))]
for f in files_q:
self.data_xs_parser.mainfile = f
if self.data_xs_parser.data is None:
continue
data_q.append(self.data_xs_parser.data)
if not data_q:
continue
data_q = np.transpose(data_q, axes=(2, 0, 1))
for j in range(len(data)):
data[j].append(data_q[j])
return data
for quantity in ['EPSILON', 'LOSS', 'SIGMA']:
for ext in ['FXC', 'NLF_FXC']:
data = get_data(quantity, ext)
if not data[0]:
continue
if quantity == 'EPSILON' and ext == 'FXC':
sec_scc = sec_run.m_create(Calculation)
sec_scc.x_exciting_xs_tddft_number_of_epsilon_values = len(data[0][0][0])
sec_scc.x_exciting_xs_tddft_epsilon_energies = data[0][0][0] * ureg.hartree
sec_scc.x_exciting_xs_tddft_dielectric_function_local_field = data[1:]
elif quantity == 'EPSILON' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_dielectric_function_no_local_field = data[1:3]
elif quantity == 'LOSS' and ext == 'FXC':
sec_scc.x_exciting_xs_tddft_loss_function_local_field = data[1]
elif quantity == 'LOSS' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_loss_function_no_local_field = data[1]
elif quantity == 'SIGMA' and ext == 'FXC':
sec_scc.x_exciting_xs_tddft_sigma_local_field = data[1:3]
elif quantity == 'SIGMA' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_sigma_no_local_field = data[1:3]
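# Entry point for excited-states (xs) runs: only active if INFOXS.OUT is present; reads
# input.xml and dispatches to the BSE or TDDFT routines depending on xstype.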
def parse_xs(self):
sec_run = self.archive.run[-1]
xs_info_files = self.get_exciting_files('INFOXS.OUT')
if not xs_info_files:
return
self._calculation_type = 'xs'
# inconsistency in the naming convention for xs input xml file
sec_method = sec_run.m_create(Method)
sec_method_ref = self.archive.run[-1].method[0]
sec_method.starting_method_ref = sec_method_ref
sec_method.methods_ref = [sec_method_ref]
self.parse_file('input.xml', sec_method)
# parse properties
input_file = self.get_exciting_files('input.xml')
if not input_file:
return
self.input_xml_parser.mainfile = input_file[0]
xstype = self.input_xml_parser.get('xs/xstype', '')
if xstype.lower() == 'bse':
self._parse_xs_bse()
elif xstype.lower() == 'tddft':
self._parse_xs_tddft()
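# Map the gw element of input.xml onto the GW (G0W0) method section: frequency grid,
# self-energy, mixed basis and bare/screened Coulomb potential settings.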
def _parse_input_gw(self, sec_method):
sec_gw = sec_method.m_create(GWMethod)
sec_gw.type = 'G0W0'
gmaxvr = self.info_parser.get_initialization_parameter('x_exciting_gmaxvr', 0)
sec_gw.core_treatment = self.input_xml_parser.get(
'gw/coreflag', 'all')
sec_gw.polarizability_number_of_empty_states = int(
self.input_xml_parser.get('gw/nempty', 0))
sec_gw.ngridq = self.input_xml_parser.get('gw/ngridq', [1, 1, 1])
sec_gw.basis_set = 'mixed'
sec_gw.qp_equation_treatment = 'linearization'
sec_gw.max_frequency = self.input_xml_parser.get(
'gw/freqgrid/freqmax', 1.0)
sec_gw.frequency_grid_type = self.input_xml_parser.get(
'gw/freqgrid/fgrid', 'gaule2')
sec_gw.number_of_frequencies = int(self.input_xml_parser.get(
'gw/freqgrid/nomeg', 16))
sec_gw.self_energy_c_number_of_poles = int(self.input_xml_parser.get(
'gw/selfenergy/npol', 0))
sec_gw.self_energy_c_number_of_empty_states = int(self.input_xml_parser.get(
'gw/selfenergy/nempty', 0))
sec_gw.self_energy_singularity_treatment = self.input_xml_parser.get(
'gw/selfenergy/singularity', 'mpd')
sec_gw.self_energy_c_analytical_continuation = self.input_xml_parser.get(
'gw/selfenergy/actype', 'pade')
sec_gw.mixed_basis_lmax = int(self.input_xml_parser.get(
'gw/mixbasis/lmaxmb', 3))
sec_gw.mixed_basis_tolerance = self.input_xml_parser.get(
'gw/mixbasis/epsmb', 0.0001)
gmb = self.input_xml_parser.get('gw/mixbasis/gmb', 1.0)
sec_gw.mixed_basis_gmax = gmb * gmaxvr
pwm = self.input_xml_parser.get('gw/barecoul/pwm', 2.0)
sec_gw.bare_coulomb_gmax = pwm * gmb * gmaxvr
sec_gw.bare_coulomb_cutofftype = self.input_xml_parser.get(
'gw/barecoul/cutofftype', 'none')
sec_gw.screened_coulomb_volume_average = self.input_xml_parser.get(
'gw/scrcoul/sciavtype', 'isotropic')
sec_gw.screened_Coulomb = self.input_xml_parser.get(
'gw/scrcoul/scrtype', 'rpa')
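# Entry point for GW runs: detects GW_INFO.OUT/GWINFO.OUT, creates a Method and Calculation
# referencing the DFT ones, and parses quasiparticle energies, DOS and band structure.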
def parse_gw(self):
sec_run = self.archive.run[-1]
# two versions of gw info files
gw_info_files = ['GW_INFO.OUT', 'GWINFO.OUT']
for f in gw_info_files:
if self.get_exciting_files(f):
self._calculation_type = 'gw'
gw_info_file = f
break
if self._calculation_type != 'gw':
return
sec_method = sec_run.m_create(Method)
sec_method_ref = self.archive.run[-1].method[0]
sec_method.starting_method_ref = sec_method_ref
sec_method.methods_ref = [sec_method_ref]
# parse the input xml file; there seem to be two versions, input_gw.xml and input-gw.xml
for f in ['input_gw.xml', 'input-gw.xml', 'input.xml']:
self.parse_file(f, sec_method)
xc_functional_name = ' '.join(self.info_parser.get_xc_functional_name())
sec_method.gw.starting_point = xc_functional_name
sec_scc = sec_run.m_create(Calculation)
sec_scc.method_ref = sec_method
if sec_run.system:
sec_scc.system_ref = sec_run.system[-1]
sec_scc_ref = sec_run.calculation[0]
sec_scc.starting_calculation_ref = sec_scc_ref
sec_scc.calculations_ref = [sec_scc_ref]
# parse properties
gw_info_files = self.get_exciting_files(gw_info_file)
if len(gw_info_files) > 1:
self.logger.warn('Found multiple GW info files, will read only first!')
self.info_gw_parser.mainfile = gw_info_files[0]
fermi_energy = self.info_gw_parser.get('fermi_energy', None)
if fermi_energy is not None:
sec_scc.energy = Energy(fermi=fermi_energy)
gw_files = ['EVALQP.DAT', 'EVALQP.TXT', 'TDOS-QP.OUT']
# Parse GW band structure from one of the files:
bs_files = ['bandstructure-qp.dat', 'BAND-QP.OUT']
for fname in bs_files:
if self.file_exists(fname):
gw_files.append(fname)
break
for f in gw_files:
self.parse_file(f, sec_scc)
frequency_data = self.info_gw_parser.get('frequency_data', None)
if frequency_data is not None:
number = frequency_data.get('number')
sec_method.gw.number_of_frequencies = len(number)
sec_method.gw.frequency_number = number
sec_method.gw.frequency_values = frequency_data.get('values')
sec_method.gw.frequency_weights = frequency_data.get('weights')
fundamental_band_gap = self.info_gw_parser.get('direct_band_gap', None)
if fundamental_band_gap is None:
fundamental_band_gap = self.info_gw_parser.get('fundamental_band_gap', None)
sec_gap = sec_scc.eigenvalues[-1].m_create(BandGap)
if fundamental_band_gap is not None:
sec_gap.value_fundamental = fundamental_band_gap
optical_band_gap = self.info_gw_parser.get('optical_band_gap', None)
if optical_band_gap is not None:
sec_gap.value_optical = optical_band_gap
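# Set up the Workflow section: single point by default, geometry optimization (with its
# force convergence threshold) when a structure optimization is present.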
def parse_miscellaneous(self):
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = 'single_point'
structure_optimization = self.info_parser.get('structure_optimization')
if structure_optimization is not None:
sec_workflow.type = 'geometry_optimization'
sec_geometry_opt = sec_workflow.m_create(GeometryOptimization)
threshold_force = structure_optimization.get(
'optimization_step', [{}])[0].get('force_convergence', [0., 0.])[-1]
sec_geometry_opt.input_force_maximum_tolerance = threshold_force
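# Populate the DFT Method section: (L)APW+lo basis set, smearing, SCF convergence thresholds
# and the exchange-correlation functionals (from INFO.OUT or, as a fallback, input.xml).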
def parse_method(self):
sec_run = self.archive.run[-1]
sec_method = sec_run.m_create(Method)
sec_method.basis_set.append(BasisSet(type='(L)APW+lo'))
sec_dft = sec_method.m_create(DFT)
sec_electronic = sec_method.m_create(Electronic)
sec_electronic.method = 'DFT'
smearing_kind_map = {
'Gaussian': 'gaussian', 'Methfessel-Paxton': 'methfessel-paxton',
'Fermi-Dirac': 'fermi', 'Extended': 'tetrahedra'}
sec_smearing = sec_electronic.m_create(Smearing)
smearing_kind = self.info_parser.get_initialization_parameter('smearing_kind')
if smearing_kind is not None:
if not isinstance(smearing_kind, str):
smearing_kind = smearing_kind[0]
smearing_kind = smearing_kind_map[smearing_kind]
sec_smearing.kind = smearing_kind
smearing_width = self.info_parser.get_initialization_parameter('smearing_width')
if smearing_width is not None:
smearing_width = (smearing_width * ureg.hartree).to('joule')
# TODO smearing width should have units of energy
sec_smearing.width = smearing_width.magnitude
for name in self.info_parser._convergence_keys_mapping.keys():
threshold = self.info_parser.get_scf_threshold(name)
if threshold is None:
continue
metainfo_name = 'x_exciting_scf_threshold_%s_change' % name.split('_')[-2]
setattr(sec_method, metainfo_name, threshold)
# additionally, set threshold to global metainfo. This is killing me!
if metainfo_name == 'x_exciting_scf_threshold_energy_change':
sec_method.scf = Scf(threshold_energy_change=threshold)
xc_functional_names = self.info_parser.get_xc_functional_name()
if not xc_functional_names:
# get it from input.xml
input_file = self.get_exciting_files('input.xml')
for f in input_file:
self.input_xml_parser.mainfile = f
correlation = self.input_xml_parser.get('libxc/correlation', None)
xc_functional_names.append(correlation)
exchange = self.input_xml_parser.get('libxc/exchange', None)
xc_functional_names.append(exchange)
sec_xc_functional = sec_dft.m_create(XCFunctional)
for name in xc_functional_names:
if name is None:
continue
if '_X_' in name:
sec_xc_functional.exchange.append(Functional(name=name))
elif '_C_' in name:
sec_xc_functional.correlation.append(Functional(name=name))
elif 'HYB' in name:
sec_xc_functional.hybrid.append(Functional(name=name))
else:
sec_xc_functional.contributions.append(Functional(name=name))
if not xc_functional_names:
# simply write parameters
xc_functional = self.info_parser.get('initialization', {}).get('xc_functional')
if xc_functional is not None:
sec_xc_functional.name = xc_functional.get('name_reference', [None, None])[0]
sec_xc_functional.reference = xc_functional.get('name_reference', [None, None])[1]
sec_electronic.n_spin_channels = self.info_parser.get_number_of_spin_channels()
if self._calculation_type == 'volume_optimization':
sec_method.x_exciting_volume_optimization = True
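# Build a Calculation section from a groundstate, hybrids or optimization block of INFO.OUT:
# total energy, energy/charge/moment contributions, forces and the individual SCF iterations.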
def parse_scc(self, section):
sec_run = self.archive.run[-1]
final = section if section.get('energy_total') is not None else section.get('final')
if final is None:
# get it from last scf_iteration or optimization_step
final = section.get('scf_iteration', [None])[-1]
final = section.get('optimization_step', [None])[-1] if final is None else final
if final is None:
return
sec_scc = sec_run.m_create(Calculation)
def parse_scf(iteration, msection):
energy_total = iteration.get('energy_total')
sec_energy = msection.m_create(Energy)
if energy_total is not None:
sec_energy.total = EnergyEntry(value=energy_total)
x_exciting_dos_fermi = iteration.get('x_exciting_dos_fermi')
if x_exciting_dos_fermi is not None:
setattr(msection, 'x_exciting_dos_fermi', x_exciting_dos_fermi)
# energy contributions
energy_contributions = iteration.get('energy_contributions', {})
for key, names in self._energy_keys_mapping.items():
val = None
for name in names:
val = energy_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key.startswith('energy_'):
sec_energy.m_add_sub_section(getattr(
Energy, key.replace('energy_', '')), EnergyEntry(value=val))
else:
setattr(msection, key, val)
if key == 'x_exciting_fermi_energy':
sec_energy.fermi = val
# charge contributions
charge_contributions = iteration.get('charge_contributions', {})
for key, names in self._electron_charge_keys_mapping.items():
val = None
for name in names:
val = charge_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key == 'x_exciting_section_MT_charge_atom':
for n in range(len(val)):
sec_mt_charge_atom = msection.m_create(x_exciting_section_MT_charge_atom)
sec_mt_charge_atom.x_exciting_MT_charge_atom_index = n + 1
sec_mt_charge_atom.x_exciting_MT_charge_atom_symbol = val[n][0]
sec_mt_charge_atom.x_exciting_MT_charge_atom_value = val[n][1]
sec_charges = msection.m_create(Charges)
sec_charges.value = [
val[n][1].magnitude for n in range(len(val))] * val[0][1].units
sec_charges.total = charge_contributions.get('total charge')
elif key == 'charge_total':
pass
else:
setattr(msection, key, val)
# moment contributions
moment_contributions = iteration.get('moment_contributions', {})
for key, names in self._moment_keys_mapping.items():
val = None
for name in names:
val = moment_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key == 'x_exciting_section_MT_moment_atom':
for n in range(len(val)):
sec_mt_moment_atom = msection.m_create(x_exciting_section_MT_moment_atom)
sec_mt_moment_atom.x_exciting_MT_moment_atom_index = n + 1
sec_mt_moment_atom.x_exciting_MT_moment_atom_symbol = val[n][0]
sec_mt_moment_atom.x_exciting_MT_moment_atom_value = val[n][1]
else:
setattr(msection, key, val)
# convergence values
for name in self.info_parser._convergence_keys_mapping.keys():
val = iteration.get(name)
if val is None:
continue
setattr(msection, name, val)
# other metainfo
for name in self.info_parser._miscellaneous_keys_mapping.keys():
val = iteration.get(name)
if val is None:
continue
if name == 'time':
msection.time_calculation = val
else:
setattr(msection, name, val)
# energy, moment, charge contributions
parse_scf(final, sec_scc)
# forces
forces = section.get('forces')
if forces is not None:
sec_forces = sec_scc.m_create(Forces)
sec_forces.total = ForcesEntry(value=forces)
# scf iterations
scf_iterations = section.get('scf_iteration', [])
for scf_iteration in scf_iterations:
sec_scf_iteration = sec_scc.m_create(ScfIteration)
parse_scf(scf_iteration, sec_scf_iteration)
return sec_scc
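# Build a System section: atomic positions and labels (from INFO.OUT or, as a fallback,
# input.xml), lattice vectors, per-species muffin-tin data and clathrate coordinates if present.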
def parse_system(self, section):
sec_run = self.archive.run[-1]
positions = self.info_parser.get_atom_positions(section.get('atomic_positions', {}))
lattice_vectors = self.info_parser.get_initialization_parameter('lattice_vectors')
atom_labels = self.info_parser.get_atom_labels(section.get('atomic_positions', {}))
input_file = self.get_exciting_files('input.xml')
if positions is None:
# get it from input.xml
for f in input_file:
self.input_xml_parser.mainfile = f
positions = self.input_xml_parser.get('structure/species/atom/coord')
lattice_vectors = self.input_xml_parser.get(
'structure/crystal/basevect', np.eye(3))
species = self.input_xml_parser.get('structure/species/speciesfile')
if positions is None or lattice_vectors is None or species is None:
continue
lattice_vectors = np.array(lattice_vectors, dtype=float)
lattice_vectors *= self.input_xml_parser.get('structure/crystal/scale', 1.0)
positions = np.dot(positions, lattice_vectors) * ureg.bohr
lattice_vectors = lattice_vectors * ureg.bohr
atoms = self.input_xml_parser.get('structure/species/atom')
atom_labels = []
for n in range(len(atoms)):
atom_labels.extend([species[n].split('.')[0]] * len(atoms[n]))
if positions is None or atom_labels is None:
return
sec_system = sec_run.m_create(System)
sec_atoms = sec_system.m_create(Atoms)
sec_atoms.positions = positions
sec_atoms.labels = atom_labels
sec_atoms.periodic = [True] * 3
# TODO confirm no cell optimization in exciting
sec_atoms.lattice_vectors = lattice_vectors
lattice_vectors_reciprocal = self.info_parser.get_initialization_parameter(
'lattice_vectors_reciprocal')
sec_atoms.lattice_vectors_reciprocal = lattice_vectors_reciprocal
if len(sec_run.system) > 1:
return sec_system
for name in self.info_parser._system_keys_mapping.keys():
val = self.info_parser.get_initialization_parameter(name)
if val is None:
continue
if name == 'x_exciting_spin_treatment':
sub_sec = sec_system.m_create(x_exciting_section_spin)
sub_sec.x_exciting_spin_treatment = val
elif name == 'x_exciting_species_rtmin':
setattr(sec_system, name, ' '.join([str(v) for v in val]))
else:
try:
setattr(sec_system, name, val)
except Exception:
self.logger.warn('Error setting metainfo.')
# species
species = self.info_parser.get_initialization_parameter('species', [])
for specie in species:
sec_atoms_group = sec_system.m_create(x_exciting_section_atoms_group)
sec_atoms_group.x_exciting_geometry_atom_labels = specie.get('symbol')
sec_atoms_group.x_exciting_geometry_atom_number = str(specie.get('number'))
sec_atoms_group.x_exciting_muffin_tin_points = specie.get('radial_points')
sec_atoms_group.x_exciting_muffin_tin_radius = specie.get('muffin_tin_radius')
positions_format = specie.get('positions_format')
sec_atoms_group.x_exciting_atom_position_format = positions_format
positions = specie.get('positions')
positions = self.info_parser.get_atom_positions(
positions=positions, positions_format=positions_format).to('m')
sec_atoms_group.x_exciting_geometry_atom_positions = positions.magnitude
# clathrate info
clathrate_file = self.get_exciting_files('str.out')
if clathrate_file:
sec_system.x_exciting_clathrates = True
self.data_clathrate_parser.mainfile = clathrate_file[0]
if self.data_clathrate_parser.data:
data = np.transpose(self.data_clathrate_parser.data)
sec_system.x_exciting_clathrates_atom_coordinates = np.transpose(
np.array(data[:3], dtype=float))
sec_system.x_exciting_clathrates_atom_labels = list(data[3])
else:
sec_system.x_exciting_clathrates = False
potential_mixing = self.info_parser.get_initialization_parameter('potential_mixing')
if potential_mixing is not None:
sec_system.x_exciting_potential_mixing = potential_mixing
return sec_system
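# Parse all configurations of the run: the groundstate and hybrids modules, every structure
# optimization step and any linked volume-optimization runs (run_dirXX/INFO.OUT).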
def parse_configurations(self):
sec_run = self.archive.run[-1]
def parse_configuration(section):
if not section:
return
sec_scc = self.parse_scc(section)
if sec_scc is None:
return
sec_system = self.parse_system(section)
if sec_system is not None:
sec_scc.system_ref = sec_system
sec_scc.method_ref = sec_run.method[-1]
return sec_scc
# groundstate and hybrids calculation
for module in ['groundstate', 'hybrids']:
sec_scc = parse_configuration(self.info_parser.get(module))
if sec_scc is None:
continue
# add data to scc
# TODO add support for more output files and properties
exciting_files = ['EIGVAL.OUT', 'FERMISURF.bxsf', 'FS.bxsf']
# Parse DFT DOS from one of the files
bs_files = ['dos.xml', 'TDOS.OUT']
for fname in bs_files:
if self.file_exists(fname):
exciting_files.append(fname)
break
# Parse DFT band structure from one of the files
bs_files = ['bandstructure.xml', 'BAND.OUT', 'bandstructure.dat']
for fname in bs_files:
if self.file_exists(fname):
exciting_files.append(fname)
break
for f in exciting_files:
self.parse_file(f, sec_scc)
# structure optimization
structure_optimization = self.info_parser.get('structure_optimization', {})
for optimization_step in structure_optimization.get('optimization_step', []):
sec_scc = parse_configuration(optimization_step)
if optimization_step.get('method') is not None:
sec_scc.x_exciting_geometry_optimization_method = optimization_step.get('method')
if optimization_step.get('step') is not None:
sec_scc.x_exciting_geometry_optimization_step = optimization_step.get('step')
force_convergence = optimization_step.get('force_convergence')
if force_convergence is not None:
sec_scc.x_exciting_maximum_force_magnitude = force_convergence[0]
sec_scc.x_exciting_geometry_optimization_threshold_force = force_convergence[1]
sec_scc = parse_configuration(structure_optimization)
if sec_scc is None:
return
# volume optimizations
volume_index = 1
while True:
info_volume = self.get_exciting_files('run_dir%s/INFO.OUT' % str(volume_index).rjust(2, '0'))
if not info_volume:
break
sec_scc.calculations_path.append(info_volume[0])
# advance to the next volume-optimization directory, otherwise the loop never terminates
volume_index += 1
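# Point the INFO.OUT parser at the main file and propagate the logger to all sub-parsers.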
def init_parser(self):
self.info_parser.mainfile = self.filepath
self.info_parser.logger = self.logger
self.dos_parser.logger = self.logger
self.bandstructure_parser.logger = self.logger
self.eigval_parser.logger = self.logger
self.fermisurf_parser.logger = self.logger
self.evalqp_parser.logger = self.logger
self.dos_out_parser.logger = self.logger
self.bandstructure_dat_parser.logger = self.logger
self.band_out_parser.logger = self.logger
self.info_gw_parser.logger = self.logger
self.input_xml_parser.logger = self.logger
self.data_xs_parser.logger = self.logger
self.data_clathrate_parser.logger = self.logger
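# Re-use the quantity definitions from an already initialized parser instance instead of
# recreating them.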
def reuse_parser(self, parser):
self.info_parser.quantities = parser.info_parser.quantities
self.eigval_parser.quantities = parser.eigval_parser.quantities
self.fermisurf_parser.quantities = parser.fermisurf_parser.quantities
self.evalqp_parser.quantities = parser.evalqp_parser.quantities
self.info_gw_parser.quantities = parser.info_gw_parser.quantities
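# Main entry point: initialize the sub-parsers, create the Run and Program sections, then
# parse the DFT method and configurations followed by the GW, xs and workflow information.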
def parse(self, filepath, archive, logger):
self.filepath = filepath
self.archive = archive
self.logger = logger if logger is not None else logging
self._calculation_type = None
self.init_parser()
sec_run = self.archive.m_create(Run)
sec_run.program = Program(
name='exciting', version=self.info_parser.get('program_version', '').strip())
# method goes first since reference needed for sec_scc
self.parse_method()
self.parse_configurations()
self.parse_gw()
self.parse_xs()
self.parse_miscellaneous()
|
[
"numpy.product",
"nomad.datamodel.metainfo.simulation.method.Scf",
"re.compile",
"numpy.array",
"nomad.parsing.file_parser.XMLParser",
"nomad.parsing.file_parser.TextParser",
"re.search",
"os.listdir",
"numpy.reshape",
"nomad.datamodel.metainfo.simulation.method.Functional",
"numpy.where",
"nomad.datamodel.metainfo.simulation.method.BasisSet",
"numpy.dot",
"numpy.linspace",
"numpy.vstack",
"numpy.dtype",
"numpy.eye",
"numpy.size",
"os.access",
"os.path.isfile",
"nomad.datamodel.metainfo.simulation.calculation.Energy",
"nomad.parsing.file_parser.Quantity",
"numpy.transpose",
"nomad.datamodel.metainfo.simulation.calculation.ForcesEntry",
"os.path.join",
"nomad.datamodel.metainfo.simulation.calculation.EnergyEntry",
"numpy.zeros",
"os.path.basename",
"nomad.parsing.file_parser.DataTextParser"
] |
[((22188, 22215), 're.compile', 're.compile', (['"""([A-Z][a-z]?)"""'], {}), "('([A-Z][a-z]?)')\n", (22198, 22215), False, 'import re\n'), ((39102, 39121), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (39110, 39121), True, 'import numpy as np\n'), ((42197, 42213), 'nomad.parsing.file_parser.DataTextParser', 'DataTextParser', ([], {}), '()\n', (42211, 42213), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((42451, 42462), 'nomad.parsing.file_parser.XMLParser', 'XMLParser', ([], {}), '()\n', (42460, 42462), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((42493, 42509), 'nomad.parsing.file_parser.DataTextParser', 'DataTextParser', ([], {}), '()\n', (42507, 42509), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((42547, 42572), 'nomad.parsing.file_parser.DataTextParser', 'DataTextParser', ([], {'dtype': 'str'}), '(dtype=str)\n', (42561, 42572), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((45150, 45193), 'os.path.basename', 'os.path.basename', (['self.info_parser.mainfile'], {}), '(self.info_parser.mainfile)\n', (45166, 45193), False, 'import os\n'), ((45423, 45471), 'os.path.join', 'os.path.join', (['self.info_parser.maindir', 'filename'], {}), '(self.info_parser.maindir, filename)\n', (45435, 45471), False, 'import os\n'), ((45484, 45508), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (45498, 45508), False, 'import os\n'), ((45560, 45607), 'os.path.join', 'os.path.join', (['self.info_parser.maindir', 'default'], {}), '(self.info_parser.maindir, default)\n', (45572, 45607), False, 'import os\n'), ((46437, 46480), 'os.path.basename', 'os.path.basename', (['self.info_parser.mainfile'], {}), '(self.info_parser.mainfile)\n', (46453, 46480), False, 'import os\n'), ((46711, 46759), 'os.path.join', 'os.path.join', (['self.info_parser.maindir', 'filepath'], {}), '(self.info_parser.maindir, filepath)\n', (46723, 46759), False, 'import os\n'), ((54545, 54579), 'numpy.transpose', 'np.transpose', (['data'], {'axes': '(2, 0, 1)'}), '(data, axes=(2, 0, 1))\n', (54557, 54579), True, 'import numpy as np\n'), ((2249, 2409), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""frequency_data"""', '"""frequency list:\\\\s*\\\\<\\\\s*#\\\\s*freqs\\\\s*weight\\\\s*>\\\\s*([\\\\d\\\\.Ee\\\\s\\\\-]+)"""'], {'str_operation': 'str_to_frequency', 'repeats': '(False)'}), "('frequency_data',\n 'frequency list:\\\\s*\\\\<\\\\s*#\\\\s*freqs\\\\s*weight\\\\s*>\\\\s*([\\\\d\\\\.Ee\\\\s\\\\-]+)'\n , str_operation=str_to_frequency, repeats=False)\n", (2257, 2409), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((2480, 2632), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""fermi_energy"""', '"""\\\\-\\\\s*G0W0.+\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Fermi [Ee]nergy\\\\s*[:=](\\\\s*-?[\\\\d\\\\.]+)\\\\s"""'], {'unit': 'ureg.hartree', 'repeats': '(False)'}), "('fermi_energy',\n '\\\\-\\\\s*G0W0.+\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Fermi [Ee]nergy\\\\s*[:=](\\\\s*-?[\\\\d\\\\.]+)\\\\s'\n , unit=ureg.hartree, repeats=False)\n", (2488, 2632), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((2701, 2861), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""direct_band_gap"""', '"""\\\\-\\\\s*G0W0\\\\s*\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Direct 
BandGap\\\\s*\\\\((?P<__unit>\\\\w+)\\\\)\\\\s*\\\\:(\\\\s*[\\\\d\\\\.]+)\\\\s"""'], {'repeats': '(False)'}), "('direct_band_gap',\n '\\\\-\\\\s*G0W0\\\\s*\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Direct BandGap\\\\s*\\\\((?P<__unit>\\\\w+)\\\\)\\\\s*\\\\:(\\\\s*[\\\\d\\\\.]+)\\\\s'\n , repeats=False)\n", (2709, 2861), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((2924, 3094), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""fundamental_band_gap"""', '"""\\\\-\\\\s*G0W0\\\\s*\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Fundamental BandGap\\\\s*\\\\((?P<__unit>\\\\w+)\\\\)\\\\s*\\\\:(\\\\s*[\\\\d\\\\.]+)\\\\s"""'], {'repeats': '(False)'}), "('fundamental_band_gap',\n '\\\\-\\\\s*G0W0\\\\s*\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Fundamental BandGap\\\\s*\\\\((?P<__unit>\\\\w+)\\\\)\\\\s*\\\\:(\\\\s*[\\\\d\\\\.]+)\\\\s'\n , repeats=False)\n", (2932, 3094), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((3157, 3319), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""optical_band_gap"""', '"""\\\\-\\\\s*G0W0\\\\s*\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Optical BandGap\\\\s*\\\\((?P<__unit>\\\\w+)\\\\)\\\\s*\\\\:(\\\\s*[\\\\d\\\\.]+)\\\\s"""'], {'repeats': '(False)'}), "('optical_band_gap',\n '\\\\-\\\\s*G0W0\\\\s*\\\\-\\\\s*\\\\-+\\\\s*[\\\\s\\\\S]*?Optical BandGap\\\\s*\\\\((?P<__unit>\\\\w+)\\\\)\\\\s*\\\\:(\\\\s*[\\\\d\\\\.]+)\\\\s'\n , repeats=False)\n", (3165, 3319), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((3898, 4073), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""kpoints_eigenvalues"""', '"""\\\\s*k\\\\-point \\\\#\\\\s*\\\\d+:\\\\s*([\\\\d\\\\s\\\\.\\\\-]+)([ \\\\w\\\\(\\\\)]+\\\\n)([\\\\s\\\\d\\\\.\\\\-Ee]+)"""'], {'str_operation': 'str_to_eigenvalue', 'repeats': '(True)'}), "('kpoints_eigenvalues',\n '\\\\s*k\\\\-point \\\\#\\\\s*\\\\d+:\\\\s*([\\\\d\\\\s\\\\.\\\\-]+)([ \\\\w\\\\(\\\\)]+\\\\n)([\\\\s\\\\d\\\\.\\\\-Ee]+)'\n , str_operation=str_to_eigenvalue, repeats=True)\n", (3906, 4073), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((4792, 4815), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (4804, 4815), True, 'import numpy as np\n'), ((4907, 5013), 'numpy.reshape', 'np.reshape', (['bands', '(self.number_of_spin_channels, self.number_of_band_segment_eigenvalues,\n n_kpoints)'], {}), '(bands, (self.number_of_spin_channels, self.\n number_of_band_segment_eigenvalues, n_kpoints))\n', (4917, 5013), True, 'import numpy as np\n'), ((5617, 5640), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (5629, 5640), True, 'import numpy as np\n'), ((6073, 6096), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (6085, 6096), True, 'import numpy as np\n'), ((6985, 7008), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (6997, 7008), True, 'import numpy as np\n'), ((7610, 7633), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (7622, 7633), True, 'import numpy as np\n'), ((7745, 7851), 'numpy.reshape', 'np.reshape', (['bands', '(self.number_of_spin_channels, self.number_of_band_segment_eigenvalues,\n n_kpoints)'], {}), '(bands, (self.number_of_spin_channels, self.\n number_of_band_segment_eigenvalues, n_kpoints))\n', (7755, 7851), True, 'import numpy as np\n'), ((10547, 10585), 'numpy.array', 'np.array', (['self._distances'], {'dtype': 'float'}), 
'(self._distances, dtype=float)\n', (10555, 10585), True, 'import numpy as np\n'), ((18088, 18148), 'numpy.zeros', 'np.zeros', (['(self.number_of_spin_channels, self.number_of_dos)'], {}), '((self.number_of_spin_channels, self.number_of_dos))\n', (18096, 18148), True, 'import numpy as np\n'), ((19927, 20026), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""fermi_energy"""', '"""Fermi Energy:\\\\s*([\\\\d\\\\.]+)\\\\s*"""'], {'unit': 'ureg.hartree', 'repeats': '(False)'}), "('fermi_energy', 'Fermi Energy:\\\\s*([\\\\d\\\\.]+)\\\\s*', unit=ureg.\n hartree, repeats=False)\n", (19935, 20026), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((20446, 20574), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""band_parameters"""', '"""BANDGRID_3D_BANDS\\\\s*([\\\\d\\\\.\\\\-Ee\\\\s]+)"""'], {'str_operation': 'str_to_band_parameters', 'repeats': '(False)'}), "('band_parameters', 'BANDGRID_3D_BANDS\\\\s*([\\\\d\\\\.\\\\-Ee\\\\s]+)',\n str_operation=str_to_band_parameters, repeats=False)\n", (20454, 20574), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((20647, 20759), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""fermi_surface"""', '"""BAND:\\\\s*\\\\d+\\\\s*([\\\\d\\\\-\\\\+\\\\.Ee\\\\s]+)\\\\n *E*"""'], {'unit': 'ureg.hartree', 'repeats': '(True)'}), "('fermi_surface', 'BAND:\\\\s*\\\\d+\\\\s*([\\\\d\\\\-\\\\+\\\\.Ee\\\\s]+)\\\\n *E*',\n unit=ureg.hartree, repeats=True)\n", (20655, 20759), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((20990, 21076), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""k_points"""', '"""\\\\s*\\\\d+\\\\s*([\\\\d\\\\.Ee\\\\- ]+):\\\\s*k\\\\-point"""'], {'repeats': '(True)'}), "('k_points', '\\\\s*\\\\d+\\\\s*([\\\\d\\\\.Ee\\\\- ]+):\\\\s*k\\\\-point', repeats\n =True)\n", (20998, 21076), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((21277, 21294), 'numpy.transpose', 'np.transpose', (['val'], {}), '(val)\n', (21289, 21294), True, 'import numpy as np\n'), ((21652, 21829), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""eigenvalues_occupancies"""', '"""\\\\(state\\\\, eigenvalue and occupancy below\\\\)\\\\s*([\\\\d\\\\.Ee\\\\-\\\\s]+?(?:\\\\n *\\\\n))"""'], {'str_operation': 'str_to_eigenvalues', 'repeats': '(True)'}), "('eigenvalues_occupancies',\n '\\\\(state\\\\, eigenvalue and occupancy below\\\\)\\\\s*([\\\\d\\\\.Ee\\\\-\\\\s]+?(?:\\\\n *\\\\n))'\n , str_operation=str_to_eigenvalues, repeats=True)\n", (21660, 21829), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((22403, 22429), 'numpy.array', 'np.array', (['val'], {'dtype': 'float'}), '(val, dtype=float)\n', (22411, 22429), True, 'import numpy as np\n'), ((24191, 24319), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""program_version"""', '"""\\\\s*EXCITING\\\\s*([\\\\w\\\\-\\\\(\\\\)\\\\. ]+)\\\\s*started"""'], {'repeats': '(False)', 'dtype': 'str', 'flatten': '(False)'}), "('program_version',\n '\\\\s*EXCITING\\\\s*([\\\\w\\\\-\\\\(\\\\)\\\\. 
]+)\\\\s*started', repeats=False,\n dtype=str, flatten=False)\n", (24199, 24319), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((24382, 24561), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""lattice_vectors"""', '"""Lattice vectors\\\\s*[\\\\(cartesian\\\\)]*\\\\s*:\\\\s*([\\\\-0-9\\\\.\\\\s]+)\\\\n"""'], {'str_operation': 'str_to_array', 'unit': 'ureg.bohr', 'repeats': '(False)', 'convert': '(False)'}), "('lattice_vectors',\n 'Lattice vectors\\\\s*[\\\\(cartesian\\\\)]*\\\\s*:\\\\s*([\\\\-0-9\\\\.\\\\s]+)\\\\n',\n str_operation=str_to_array, unit=ureg.bohr, repeats=False, convert=False)\n", (24390, 24561), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((24608, 24818), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""lattice_vectors_reciprocal"""', '"""Reciprocal lattice vectors\\\\s*[\\\\(cartesian\\\\)]*\\\\s*:\\\\s*([\\\\-0-9\\\\.\\\\s]+)\\\\n"""'], {'str_operation': 'str_to_array', 'unit': '(1 / ureg.bohr)', 'repeats': '(False)', 'convert': '(False)'}), "('lattice_vectors_reciprocal',\n 'Reciprocal lattice vectors\\\\s*[\\\\(cartesian\\\\)]*\\\\s*:\\\\s*([\\\\-0-9\\\\.\\\\s]+)\\\\n'\n , str_operation=str_to_array, unit=1 / ureg.bohr, repeats=False,\n convert=False)\n", (24616, 24818), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((29117, 29216), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""potential_mixing"""', '"""Using ([\\\\w ]+) potential mixing"""'], {'repeats': '(False)', 'flatten': '(False)'}), "('potential_mixing', 'Using ([\\\\w ]+) potential mixing', repeats=\n False, flatten=False)\n", (29125, 29216), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((30184, 30304), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""energy_total"""', '"""[Tt]*otal energy\\\\s*:\\\\s*([\\\\-\\\\d\\\\.Ee]+)"""'], {'repeats': '(False)', 'dtype': 'float', 'unit': 'ureg.hartree'}), "('energy_total', '[Tt]*otal energy\\\\s*:\\\\s*([\\\\-\\\\d\\\\.Ee]+)',\n repeats=False, dtype=float, unit=ureg.hartree)\n", (30192, 30304), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((30343, 30509), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""energy_contributions"""', '"""(?:Energies|_)([\\\\+\\\\-\\\\s\\\\w\\\\.\\\\:]+?)\\\\n *(?:DOS|Density)"""'], {'str_operation': 'str_to_energy_dict', 'repeats': '(False)', 'convert': '(False)'}), "('energy_contributions',\n '(?:Energies|_)([\\\\+\\\\-\\\\s\\\\w\\\\.\\\\:]+?)\\\\n *(?:DOS|Density)',\n str_operation=str_to_energy_dict, repeats=False, convert=False)\n", (30351, 30509), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((30542, 30706), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""x_exciting_dos_fermi"""', '"""DOS at Fermi energy \\\\(states\\\\/Ha\\\\/cell\\\\)\\\\s*:\\\\s*([\\\\-\\\\d\\\\.Ee]+)"""'], {'repeats': '(False)', 'dtype': 'float', 'unit': '(1 / ureg.hartree)'}), "('x_exciting_dos_fermi',\n 'DOS at Fermi energy \\\\(states\\\\/Ha\\\\/cell\\\\)\\\\s*:\\\\s*([\\\\-\\\\d\\\\.Ee]+)',\n repeats=False, dtype=float, unit=1 / ureg.hartree)\n", (30550, 30706), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((30753, 30951), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""charge_contributions"""', '"""(?:Charges|Electron 
charges\\\\s*\\\\:*\\\\s*)([\\\\-\\\\s\\\\w\\\\.\\\\:\\\\(\\\\)]+?)\\\\n *[A-Z\\\\+]"""'], {'str_operation': 'str_to_atom_properties_dict', 'repeats': '(False)', 'convert': '(False)'}), "('charge_contributions',\n '(?:Charges|Electron charges\\\\s*\\\\:*\\\\s*)([\\\\-\\\\s\\\\w\\\\.\\\\:\\\\(\\\\)]+?)\\\\n *[A-Z\\\\+]'\n , str_operation=str_to_atom_properties_dict, repeats=False, convert=False)\n", (30761, 30951), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((30994, 31174), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""moment_contributions"""', '"""(?:Moments\\\\s*\\\\:*\\\\s*)([\\\\-\\\\s\\\\w\\\\.\\\\:\\\\(\\\\)]+?)\\\\n *[A-Z\\\\+]"""'], {'str_operation': 'str_to_atom_properties_dict', 'repeats': '(False)', 'convert': '(False)'}), "('moment_contributions',\n '(?:Moments\\\\s*\\\\:*\\\\s*)([\\\\-\\\\s\\\\w\\\\.\\\\:\\\\(\\\\)]+?)\\\\n *[A-Z\\\\+]',\n str_operation=str_to_atom_properties_dict, repeats=False, convert=False)\n", (31002, 31174), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((33552, 33766), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""forces"""', '"""Total atomic forces including IBS \\\\(\\\\w+\\\\)\\\\s*\\\\:(\\\\s*atom[\\\\-\\\\s\\\\w\\\\.\\\\:]*?)\\\\n *Atomic"""'], {'repeats': '(False)', 'str_operation': 'str_to_array', 'dtype': 'float', 'unit': '(ureg.hartree / ureg.bohr)'}), "('forces',\n 'Total atomic forces including IBS \\\\(\\\\w+\\\\)\\\\s*\\\\:(\\\\s*atom[\\\\-\\\\s\\\\w\\\\.\\\\:]*?)\\\\n *Atomic'\n , repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.\n hartree / ureg.bohr)\n", (33560, 33766), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((34683, 34897), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""forces"""', '"""Total atomic forces including IBS \\\\(\\\\w+\\\\)\\\\s*\\\\:(\\\\s*atom[\\\\-\\\\s\\\\w\\\\.\\\\:]*?)\\\\n *Time"""'], {'repeats': '(False)', 'str_operation': 'str_to_array', 'convert': '(False)', 'unit': '(ureg.hartree / ureg.bohr)'}), "('forces',\n 'Total atomic forces including IBS \\\\(\\\\w+\\\\)\\\\s*\\\\:(\\\\s*atom[\\\\-\\\\s\\\\w\\\\.\\\\:]*?)\\\\n *Time'\n , repeats=False, str_operation=str_to_array, convert=False, unit=ureg.\n hartree / ureg.bohr)\n", (34691, 34897), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((34935, 35008), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""step"""', '"""Optimization step\\\\s*(\\\\d+)"""'], {'repeats': '(False)', 'dtype': 'int'}), "('step', 'Optimization step\\\\s*(\\\\d+)', repeats=False, dtype=int)\n", (34943, 35008), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((35038, 35107), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""method"""', '"""method\\\\s*=\\\\s*(\\\\w+)"""'], {'repeats': '(False)', 'dtype': 'str'}), "('method', 'method\\\\s*=\\\\s*(\\\\w+)', repeats=False, dtype=str)\n", (35046, 35107), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((35136, 35254), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""n_scf_iterations"""', '"""Number of (?:total)* scf iterations\\\\s*\\\\:\\\\s*(\\\\d+)"""'], {'repeats': '(False)', 'dtype': 'int'}), "('n_scf_iterations',\n 'Number of (?:total)* scf iterations\\\\s*\\\\:\\\\s*(\\\\d+)', repeats=False,\n dtype=int)\n", (35144, 35254), False, 'from nomad.parsing.file_parser 
import TextParser, Quantity, XMLParser, DataTextParser\n'), ((35290, 35514), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""force_convergence"""', '"""Maximum force magnitude\\\\s*\\\\(target\\\\)\\\\s*\\\\:(\\\\s*[\\\\(\\\\)\\\\d\\\\.\\\\-\\\\+Ee ]+)"""'], {'str_operation': 'str_to_quantity_tolerances', 'unit': '(ureg.hartree / ureg.bohr)', 'repeats': '(False)', 'dtype': 'float'}), "('force_convergence',\n 'Maximum force magnitude\\\\s*\\\\(target\\\\)\\\\s*\\\\:(\\\\s*[\\\\(\\\\)\\\\d\\\\.\\\\-\\\\+Ee ]+)'\n , str_operation=str_to_quantity_tolerances, unit=ureg.hartree / ureg.\n bohr, repeats=False, dtype=float)\n", (35298, 35514), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((35568, 35716), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""energy_total"""', '"""Total energy at this optimization step\\\\s*\\\\:\\\\s*([\\\\-\\\\d\\\\.Ee]+)"""'], {'unit': 'ureg.hartree', 'repeats': '(False)', 'dtype': 'float'}), "('energy_total',\n 'Total energy at this optimization step\\\\s*\\\\:\\\\s*([\\\\-\\\\d\\\\.Ee]+)',\n unit=ureg.hartree, repeats=False, dtype=float)\n", (35576, 35716), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((35750, 35894), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""time"""', '"""Time spent in this optimization step\\\\s*\\\\:\\\\s*([\\\\-\\\\d\\\\.Ee]+)\\\\s*seconds"""'], {'unit': 'ureg.s', 'repeats': '(False)', 'dtype': 'float'}), "('time',\n 'Time spent in this optimization step\\\\s*\\\\:\\\\s*([\\\\-\\\\d\\\\.Ee]+)\\\\s*seconds'\n , unit=ureg.s, repeats=False, dtype=float)\n", (35758, 35894), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((39429, 39462), 'numpy.dot', 'np.dot', (['positions', 'cell.magnitude'], {}), '(positions, cell.magnitude)\n', (39435, 39462), True, 'import numpy as np\n'), ((45623, 45647), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (45637, 45647), False, 'import os\n'), ((46772, 46796), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (46786, 46796), False, 'import os\n'), ((46801, 46829), 'os.access', 'os.access', (['filepath', 'os.F_OK'], {}), '(filepath, os.F_OK)\n', (46810, 46829), False, 'import os\n'), ((51601, 51631), 'numpy.product', 'np.product', (['band_parameters[1]'], {}), '(band_parameters[1])\n', (51611, 51631), True, 'import numpy as np\n'), ((66982, 67029), 'numpy.reshape', 'np.reshape', (['data[3]', '(n_components, n_excitons)'], {}), '(data[3], (n_components, n_excitons))\n', (66992, 67029), True, 'import numpy as np\n'), ((67108, 67155), 'numpy.reshape', 'np.reshape', (['data[4]', '(n_components, n_excitons)'], {}), '(data[4], (n_components, n_excitons))\n', (67118, 67155), True, 'import numpy as np\n'), ((67234, 67281), 'numpy.reshape', 'np.reshape', (['data[5]', '(n_components, n_excitons)'], {}), '(data[5], (n_components, n_excitons))\n', (67244, 67281), True, 'import numpy as np\n'), ((67743, 67789), 'numpy.reshape', 'np.reshape', (['data[1]', '(n_components, n_epsilon)'], {}), '(data[1], (n_components, n_epsilon))\n', (67753, 67789), True, 'import numpy as np\n'), ((67858, 67904), 'numpy.reshape', 'np.reshape', (['data[2]', '(n_components, n_epsilon)'], {}), '(data[2], (n_components, n_epsilon))\n', (67868, 67904), True, 'import numpy as np\n'), ((68282, 68326), 'numpy.reshape', 'np.reshape', (['data[1]', '(n_components, n_sigma)'], {}), '(data[1], (n_components, n_sigma))\n', (68292, 
68326), True, 'import numpy as np\n'), ((68393, 68437), 'numpy.reshape', 'np.reshape', (['data[2]', '(n_components, n_sigma)'], {}), '(data[2], (n_components, n_sigma))\n', (68403, 68437), True, 'import numpy as np\n'), ((68807, 68850), 'numpy.reshape', 'np.reshape', (['data[1]', '(n_components, n_loss)'], {}), '(data[1], (n_components, n_loss))\n', (68817, 68850), True, 'import numpy as np\n'), ((78099, 78125), 'nomad.datamodel.metainfo.simulation.calculation.Energy', 'Energy', ([], {'fermi': 'fermi_energy'}), '(fermi=fermi_energy)\n', (78105, 78125), False, 'from nomad.datamodel.metainfo.simulation.calculation import Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges, Forces, ForcesEntry, ScfIteration, BandGap\n'), ((80248, 80274), 'nomad.datamodel.metainfo.simulation.method.BasisSet', 'BasisSet', ([], {'type': '"""(L)APW+lo"""'}), "(type='(L)APW+lo')\n", (80256, 80274), False, 'from nomad.datamodel.metainfo.simulation.method import Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet\n'), ((88548, 88573), 'nomad.datamodel.metainfo.simulation.calculation.ForcesEntry', 'ForcesEntry', ([], {'value': 'forces'}), '(value=forces)\n', (88559, 88573), False, 'from nomad.datamodel.metainfo.simulation.calculation import Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges, Forces, ForcesEntry, ScfIteration, BandGap\n'), ((8447, 8470), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (8459, 8470), True, 'import numpy as np\n'), ((9409, 9432), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (9421, 9432), True, 'import numpy as np\n'), ((18538, 18644), 'numpy.zeros', 'np.zeros', (['(self.number_of_lm, self.number_of_spin_channels, self.number_of_atoms,\n self.number_of_dos)'], {}), '((self.number_of_lm, self.number_of_spin_channels, self.\n number_of_atoms, self.number_of_dos))\n', (18546, 18644), True, 'import numpy as np\n'), ((27328, 27423), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['name', "('%s\\\\s*:\\\\s*([\\\\s\\\\S]*?)\\\\n' % key_unit[0])"], {'unit': 'key_unit[1]', 'repeats': '(False)'}), "(name, '%s\\\\s*:\\\\s*([\\\\s\\\\S]*?)\\\\n' % key_unit[0], unit=key_unit[1],\n repeats=False)\n", (27336, 27423), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((27579, 27674), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['name', "('%s\\\\s*:\\\\s*([\\\\s\\\\S]*?)\\\\n' % key_unit[0])"], {'unit': 'key_unit[1]', 'repeats': '(False)'}), "(name, '%s\\\\s*:\\\\s*([\\\\s\\\\S]*?)\\\\n' % key_unit[0], unit=key_unit[1],\n repeats=False)\n", (27587, 27674), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((31491, 31591), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['name', "('%s\\\\s*\\\\:*\\\\s*([\\\\-\\\\d\\\\.Ee]+)' % key_unit[0])"], {'repeats': '(False)', 'unit': 'key_unit[1]'}), "(name, '%s\\\\s*\\\\:*\\\\s*([\\\\-\\\\d\\\\.Ee]+)' % key_unit[0], repeats=\n False, unit=key_unit[1])\n", (31499, 31591), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((32298, 32449), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['name', "('%s\\\\s*\\\\:*\\\\s*([\\\\(\\\\)\\\\d\\\\.\\\\-\\\\+Ee ]+)' % key_unit[0])"], {'str_operation': 'str_to_quantity_tolerances', 'unit': 'key_unit[1]', 'repeats': '(False)'}), "(name, '%s\\\\s*\\\\:*\\\\s*([\\\\(\\\\)\\\\d\\\\.\\\\-\\\\+Ee ]+)' % 
key_unit[0],\n str_operation=str_to_quantity_tolerances, unit=key_unit[1], repeats=False)\n", (32306, 32449), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((46041, 46082), 'os.path.join', 'os.path.join', (['self.info_parser.maindir', 'f'], {}), '(self.info_parser.maindir, f)\n', (46053, 46082), False, 'import os\n'), ((46196, 46217), 'os.access', 'os.access', (['f', 'os.F_OK'], {}), '(f, os.F_OK)\n', (46205, 46217), False, 'import os\n'), ((52371, 52405), 'numpy.array', 'np.array', (['[d[0][:3] for d in data]'], {}), '([d[0][:3] for d in data])\n', (52379, 52405), True, 'import numpy as np\n'), ((66416, 66431), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (66425, 66431), True, 'import numpy as np\n'), ((66689, 66736), 'numpy.reshape', 'np.reshape', (['data[1]', '(n_components, n_excitons)'], {}), '(data[1], (n_components, n_excitons))\n', (66699, 66736), True, 'import numpy as np\n'), ((66834, 66881), 'numpy.reshape', 'np.reshape', (['data[2]', '(n_components, n_excitons)'], {}), '(data[2], (n_components, n_excitons))\n', (66844, 66881), True, 'import numpy as np\n'), ((67411, 67426), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (67420, 67426), True, 'import numpy as np\n'), ((67613, 67659), 'numpy.reshape', 'np.reshape', (['data[0]', '(n_components, n_epsilon)'], {}), '(data[0], (n_components, n_epsilon))\n', (67623, 67659), True, 'import numpy as np\n'), ((68032, 68047), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (68041, 68047), True, 'import numpy as np\n'), ((68156, 68200), 'numpy.reshape', 'np.reshape', (['data[0]', '(n_components, n_sigma)'], {}), '(data[0], (n_components, n_sigma))\n', (68166, 68200), True, 'import numpy as np\n'), ((68564, 68579), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (68573, 68579), True, 'import numpy as np\n'), ((68686, 68729), 'numpy.reshape', 'np.reshape', (['data[0]', '(n_components, n_loss)'], {}), '(data[0], (n_components, n_loss))\n', (68696, 68729), True, 'import numpy as np\n'), ((71830, 71866), 'numpy.transpose', 'np.transpose', (['data_q'], {'axes': '(2, 0, 1)'}), '(data_q, axes=(2, 0, 1))\n', (71842, 71866), True, 'import numpy as np\n'), ((81826, 81864), 'nomad.datamodel.metainfo.simulation.method.Scf', 'Scf', ([], {'threshold_energy_change': 'threshold'}), '(threshold_energy_change=threshold)\n', (81829, 81864), False, 'from nomad.datamodel.metainfo.simulation.method import Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet\n'), ((84341, 84372), 'nomad.datamodel.metainfo.simulation.calculation.EnergyEntry', 'EnergyEntry', ([], {'value': 'energy_total'}), '(value=energy_total)\n', (84352, 84372), False, 'from nomad.datamodel.metainfo.simulation.calculation import Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges, Forces, ForcesEntry, ScfIteration, BandGap\n'), ((89850, 89888), 'numpy.array', 'np.array', (['lattice_vectors'], {'dtype': 'float'}), '(lattice_vectors, dtype=float)\n', (89858, 89888), True, 'import numpy as np\n'), ((92984, 93029), 'numpy.transpose', 'np.transpose', (['self.data_clathrate_parser.data'], {}), '(self.data_clathrate_parser.data)\n', (92996, 93029), True, 'import numpy as np\n'), ((2033, 2060), 'numpy.array', 'np.array', (['val[0]'], {'dtype': 'int'}), '(val[0], dtype=int)\n', (2041, 2060), True, 'import numpy as np\n'), ((7658, 7689), 'numpy.where', 'np.where', (['(data[0] == data[0][0])'], {}), '(data[0] == data[0][0])\n', (7666, 7689), 
True, 'import numpy as np\n'), ((8499, 8524), 'numpy.where', 'np.where', (['(dist == dist[0])'], {}), '(dist == dist[0])\n', (8507, 8524), True, 'import numpy as np\n'), ((9474, 9499), 'numpy.where', 'np.where', (['(data == data[0])'], {}), '(data == data[0])\n', (9482, 9499), True, 'import numpy as np\n'), ((30084, 30132), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'initialization_quantities'}), '(quantities=initialization_quantities)\n', (30094, 30132), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((32650, 32687), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'scf_quantities'}), '(quantities=scf_quantities)\n', (32660, 32687), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((32926, 32963), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'scf_quantities'}), '(quantities=scf_quantities)\n', (32936, 32963), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((33991, 34031), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'module_quantities'}), '(quantities=module_quantities)\n', (34001, 34031), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((37777, 37817), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'module_quantities'}), '(quantities=module_quantities)\n', (37787, 37817), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((45821, 45857), 'os.listdir', 'os.listdir', (['self.info_parser.maindir'], {}), '(self.info_parser.maindir)\n', (45831, 45857), False, 'import os\n'), ((82660, 82681), 'nomad.datamodel.metainfo.simulation.method.Functional', 'Functional', ([], {'name': 'name'}), '(name=name)\n', (82670, 82681), False, 'from nomad.datamodel.metainfo.simulation.method import Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet\n'), ((89606, 89615), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (89612, 89615), True, 'import numpy as np\n'), ((90010, 90044), 'numpy.dot', 'np.dot', (['positions', 'lattice_vectors'], {}), '(positions, lattice_vectors)\n', (90016, 90044), True, 'import numpy as np\n'), ((93132, 93163), 'numpy.array', 'np.array', (['data[:3]'], {'dtype': 'float'}), '(data[:3], dtype=float)\n', (93140, 93163), True, 'import numpy as np\n'), ((5878, 5901), 'numpy.transpose', 'np.transpose', (['data[2:5]'], {}), '(data[2:5])\n', (5890, 5901), True, 'import numpy as np\n'), ((6308, 6331), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (6320, 6331), True, 'import numpy as np\n'), ((8732, 8755), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (8744, 8755), True, 'import numpy as np\n'), ((13936, 13999), 'numpy.linspace', 'np.linspace', (['start', 'end', 'self.number_of_k_points_per_segment[i]'], {}), '(start, end, self.number_of_k_points_per_segment[i])\n', (13947, 13999), True, 'import numpy as np\n'), ((82768, 82789), 'nomad.datamodel.metainfo.simulation.method.Functional', 'Functional', ([], {'name': 'name'}), '(name=name)\n', (82778, 82789), False, 'from nomad.datamodel.metainfo.simulation.method import Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet\n'), ((85175, 85197), 'nomad.datamodel.metainfo.simulation.calculation.EnergyEntry', 'EnergyEntry', ([], 
{'value': 'val'}), '(value=val)\n', (85186, 85197), False, 'from nomad.datamodel.metainfo.simulation.calculation import Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges, Forces, ForcesEntry, ScfIteration, BandGap\n'), ((5240, 5258), 'numpy.transpose', 'np.transpose', (['band'], {}), '(band)\n', (5252, 5258), True, 'import numpy as np\n'), ((8078, 8096), 'numpy.transpose', 'np.transpose', (['band'], {}), '(band)\n', (8090, 8096), True, 'import numpy as np\n'), ((82871, 82892), 'nomad.datamodel.metainfo.simulation.method.Functional', 'Functional', ([], {'name': 'name'}), '(name=name)\n', (82881, 82892), False, 'from nomad.datamodel.metainfo.simulation.method import Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet\n'), ((82967, 82988), 'nomad.datamodel.metainfo.simulation.method.Functional', 'Functional', ([], {'name': 'name'}), '(name=name)\n', (82977, 82988), False, 'from nomad.datamodel.metainfo.simulation.method import Method, DFT, Electronic, Smearing, XCFunctional, Functional, GW as GWMethod, Scf, BasisSet\n'), ((13237, 13257), 'numpy.transpose', 'np.transpose', (['energy'], {}), '(energy)\n', (13249, 13257), True, 'import numpy as np\n'), ((23024, 23051), 're.search', 're.search', (['re_symbol', 'v[-1]'], {}), '(re_symbol, v[-1])\n', (23033, 23051), False, 'import re\n'), ((27946, 28001), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""number"""', '"""Species : *(\\\\d+)"""'], {'dtype': 'np.int32'}), "('number', 'Species : *(\\\\d+)', dtype=np.int32)\n", (27954, 28001), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28019, 28053), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""symbol"""', '"""\\\\((\\\\w+)\\\\)"""'], {}), "('symbol', '\\\\((\\\\w+)\\\\)')\n", (28027, 28053), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28069, 28120), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""file"""', '"""parameters loaded from *: *(.+)"""'], {}), "('file', 'parameters loaded from *: *(.+)')\n", (28077, 28120), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28139, 28172), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""name"""', '"""name *: *(.+)"""'], {}), "('name', 'name *: *(.+)')\n", (28147, 28172), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28191, 28305), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""nuclear_charge"""', 'f"""nuclear charge *: *({re_float})"""'], {'dtype': 'np.float64', 'unit': 'ureg.elementary_charge'}), "('nuclear_charge', f'nuclear charge *: *({re_float})', dtype=np.\n float64, unit=ureg.elementary_charge)\n", (28199, 28305), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28319, 28439), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""electronic_charge"""', 'f"""electronic charge *: *({re_float})"""'], {'dtype': 'np.float64', 'unit': 'ureg.elementary_charge'}), "('electronic_charge', f'electronic charge *: *({re_float})', dtype=\n np.float64, unit=ureg.elementary_charge)\n", (28327, 28439), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28453, 28556), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""atomic_mass"""', 'f"""atomic mass *: *({re_float})"""'], {'dtype': 'np.float64', 'unit': 'ureg.electron_mass'}), 
"('atomic_mass', f'atomic mass *: *({re_float})', dtype=np.float64,\n unit=ureg.electron_mass)\n", (28461, 28556), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28571, 28678), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""muffin_tin_radius"""', 'f"""muffin-tin radius *: *({re_float})"""'], {'dtype': 'np.float64', 'unit': 'ureg.bohr'}), "('muffin_tin_radius', f'muffin-tin radius *: *({re_float})', dtype=\n np.float64, unit=ureg.bohr)\n", (28579, 28678), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28692, 28786), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""radial_points"""', 'f"""radial points in muffin-tin *: *({re_float})"""'], {'dtype': 'np.int32'}), "('radial_points', f'radial points in muffin-tin *: *({re_float})',\n dtype=np.int32)\n", (28700, 28786), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((28801, 28876), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions_format"""', '"""atomic positions \\\\((.+?)\\\\)"""'], {'flatten': '(False)'}), "('positions_format', 'atomic positions \\\\((.+?)\\\\)', flatten=False)\n", (28809, 28876), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((29425, 29481), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""type"""', '"""Exchange-correlation type +: +(\\\\S+)"""'], {}), "('type', 'Exchange-correlation type +: +(\\\\S+)')\n", (29433, 29481), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((33166, 33232), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions_format"""', '"""Atomic positions\\\\s*\\\\(([a-z]+)\\\\)"""'], {}), "('positions_format', 'Atomic positions\\\\s*\\\\(([a-z]+)\\\\)')\n", (33174, 33232), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((33277, 33347), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""symbols"""', '"""atom\\\\s*\\\\d+\\\\s*(\\\\w+)"""'], {'repeats': '(True)', 'dtype': 'str'}), "('symbols', 'atom\\\\s*\\\\d+\\\\s*(\\\\w+)', repeats=True, dtype=str)\n", (33285, 33347), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((33391, 33506), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions"""', '"""\\\\s*:\\\\s*([\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+)"""'], {'repeats': '(True)', 'dtype': 'float'}), "('positions',\n '\\\\s*:\\\\s*([\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+)', repeats=True,\n dtype=float)\n", (33399, 33506), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((34284, 34363), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions_format"""', '"""Atomic positions at this step\\\\s*\\\\(([a-z]+)\\\\)"""'], {}), "('positions_format', 'Atomic positions at this step\\\\s*\\\\(([a-z]+)\\\\)')\n", (34292, 34363), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((34408, 34478), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""symbols"""', '"""atom\\\\s*\\\\d+\\\\s*(\\\\w+)"""'], {'repeats': '(True)', 'dtype': 'str'}), "('symbols', 'atom\\\\s*\\\\d+\\\\s*(\\\\w+)', repeats=True, dtype=str)\n", (34416, 34478), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), 
((34522, 34637), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions"""', '"""\\\\s*:\\\\s*([\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+)"""'], {'repeats': '(True)', 'dtype': 'float'}), "('positions',\n '\\\\s*:\\\\s*([\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+)', repeats=True,\n dtype=float)\n", (34530, 34637), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((37333, 37547), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""forces"""', '"""Total atomic forces including IBS \\\\(\\\\w+\\\\)\\\\s*\\\\:(\\\\s*atom[\\\\-\\\\s\\\\w\\\\.\\\\:]*?)\\\\n *Atomic"""'], {'repeats': '(False)', 'str_operation': 'str_to_array', 'dtype': 'float', 'unit': '(ureg.hartree / ureg.bohr)'}), "('forces',\n 'Total atomic forces including IBS \\\\(\\\\w+\\\\)\\\\s*\\\\:(\\\\s*atom[\\\\-\\\\s\\\\w\\\\.\\\\:]*?)\\\\n *Atomic'\n , repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.\n hartree / ureg.bohr)\n", (37341, 37547), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((50587, 50602), 'numpy.size', 'np.size', (['v[key]'], {}), '(v[key])\n', (50594, 50602), True, 'import numpy as np\n'), ((29049, 29069), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (29057, 29069), True, 'import numpy as np\n'), ((36385, 36431), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'optimization_quantities'}), '(quantities=optimization_quantities)\n', (36395, 36431), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((36625, 36662), 'nomad.parsing.file_parser.TextParser', 'TextParser', ([], {'quantities': 'scf_quantities'}), '(quantities=scf_quantities)\n', (36635, 36662), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((36912, 36985), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions_format"""', '"""imized atomic positions\\\\s*\\\\(([a-z]+)\\\\)"""'], {}), "('positions_format', 'imized atomic positions\\\\s*\\\\(([a-z]+)\\\\)')\n", (36920, 36985), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((37038, 37108), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""symbols"""', '"""atom\\\\s*\\\\d+\\\\s*(\\\\w+)"""'], {'repeats': '(True)', 'dtype': 'str'}), "('symbols', 'atom\\\\s*\\\\d+\\\\s*(\\\\w+)', repeats=True, dtype=str)\n", (37046, 37108), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n'), ((37160, 37275), 'nomad.parsing.file_parser.Quantity', 'Quantity', (['"""positions"""', '"""\\\\s*:\\\\s*([\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+)"""'], {'repeats': '(True)', 'dtype': 'float'}), "('positions',\n '\\\\s*:\\\\s*([\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+\\\\s*[\\\\d\\\\.\\\\-]+)', repeats=True,\n dtype=float)\n", (37168, 37275), False, 'from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser\n')]
|
"""
Plot up surface or bottom (or any fixed level) errors from a profile object
with no z_dim (vertical dimension). Provide an array of netcdf files and
mess with the options to get a figure you like.
You can define how many rows and columns the plot will have. This script will
plot the provided list of netcdf datasets from left to right and top to bottom.
A colorbar will be placed right of the figure.
"""
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append("/Users/dbyrne/code/COAsT")
import coast
import pandas as pd
#%% File settings
run_name = "test"
# List of analysis output files. Profiles from each will be plotted
# on each axis of the plot
fn_list = [
"~/transfer/test_grid.nc",
"~/transfer/test_grid.nc",
]
# Filename for the output
fn_out = "/Users/dbyrne/transfer/surface_gridded_errors_{0}.png".format(run_name)
#%% General Plot Settings
var_name = "abs_diff_temperature" # Variable name in analysis file to plot
# If you used a modified variable name when making the gridded data,
# then this is where to select the season etc.
save_plot = False
# Masking out grid cells that don't contain many points
min_points_in_average = 5
name_of_count_variable = "grid_N"
# Subplot axes settings
n_r = 2 # Number of subplot rows
n_c = 2 # Number of subplot columns
figsize = (10, 5) # Figure size
lonbounds = [-15, 9.5] # Longitude bounds
latbounds = [45, 64] # Latitude bounds
subplot_padding = 0.5 # Amount of vertical and horizontal padding between plots
fig_pad = (0.075, 0.075, 0.1, 0.1) # Figure padding (left, top, right, bottom)
# Leave some space on right for colorbar
# Plot opts
marker_size = 3  # Marker size (not used by the pcolormesh plot below)
cmap = "bwr" # Colormap for normal points
clim = (-1, 1) # Color limits for normal points
discrete_cmap = True # Discretize colormap
cmap_levels = 14
# Labels and Titles
fig_title = "SST Errors" # Whole figure title
title_fontsize = 13 # Fontsize of title
title_fontweight = "bold" # Fontweight to use for title
dataset_names = ["CO9p0", "CO9p0", "CO9p0"] # Names to use for labelling plots
subtitle_fontsize = 11 # Fontsize for dataset subtitles
subtitle_fontweight = "normal" # Fontweight for dataset subtitles
# PLOT SEASONS. Make sure n_r = 2 and n_c = 2
# If this option is true, only the first dataset will be plotted, with seasonal
# variables on each subplot. The season_suffixes will be added to var_name
# for each subplot panel.
plot_seasons = True
season_suffixes = ["DJF", "MAM", "JJA", "SON"]
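# Naming sketch: with plot_seasons = True the variable read for each panel is
# var_name plus a season suffix, e.g. "abs_diff_temperature_DJF", and the count
# variable becomes e.g. "grid_N_DJF" (this mirrors the string formatting used in
# the plotting loop below).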
#%% Read and plot data
# Read all datasets into list
ds_list = [xr.open_dataset(dd) for dd in fn_list]
n_ds = len(ds_list)
n_ax = n_r * n_c
# Create plot and flatten axis array
f, a = coast.plot_util.create_geo_subplots(lonbounds, latbounds, n_r, n_c, figsize=figsize)
a_flat = a.flatten()
# Discretize colormap maybe
if discrete_cmap:
cmap = plt.cm.get_cmap(cmap, cmap_levels)
# Determine if we will extend the colorbar or not
extend_cbar = []
# Loop over dataset
for ii in range(n_ax):
ur_index = np.unravel_index(ii, (n_r, n_c))
# Select season if required
if plot_seasons:
ds = ds_list[0]
var_ii = var_name + "_{0}".format(season_suffixes[ii])
N_var = "{0}_{1}".format(name_of_count_variable, season_suffixes[ii])
a_flat[ii].text(0.05, 1.02, season_suffixes[ii], transform=a_flat[ii].transAxes, fontweight="bold")
else:
ds = ds_list[ii]
var_ii = var_name
a_flat[ii].set_title(dataset_names[ii], fontsize=subtitle_fontsize, fontweight=subtitle_fontweight)
N_var = name_of_count_variable
data = ds[var_ii].values
count_var = ds[N_var]
data[count_var < min_points_in_average] = np.nan
    # Plot the gridded data with pcolormesh
pc = a_flat[ii].pcolormesh(
ds.longitude,
ds.latitude,
data,
cmap=cmap,
vmin=clim[0],
vmax=clim[1],
)
# Will we extend the colorbar for this dataset?
extend_cbar.append(coast.plot_util.determine_colorbar_extension(data, clim[0], clim[1]))
# Set Figure title
f.suptitle(fig_title, fontsize=title_fontsize, fontweight=title_fontweight)
# Set tight figure layout
f.tight_layout(w_pad=subplot_padding, h_pad=subplot_padding)
f.subplots_adjust(left=(fig_pad[0]), bottom=(fig_pad[1]), right=(1 - fig_pad[2]), top=(1 - fig_pad[3]))
# Handle colorbar -- will we extend it?
if "both" in extend_cbar:
extend = "both"
elif "max" in extend_cbar and "min" in extend_cbar:
extend = "both"
elif "max" in extend_cbar:
extend = "max"
elif "min" in extend_cbar:
extend = "min"
else:
extend = "neither"
cbar_ax = f.add_axes([(1 - fig_pad[2] + fig_pad[2] * 0.15), 0.15, 0.025, 0.7])
f.colorbar(pc, cax=cbar_ax, extend=extend)
# Save plot maybe
if save_plot:
f.savefig(fn_out)
|
[
"coast.plot_util.determine_colorbar_extension",
"coast.plot_util.create_geo_subplots",
"numpy.unravel_index",
"matplotlib.pyplot.cm.get_cmap",
"xarray.open_dataset",
"sys.path.append"
] |
[((497, 540), 'sys.path.append', 'sys.path.append', (['"""/Users/dbyrne/code/COAsT"""'], {}), "('/Users/dbyrne/code/COAsT')\n", (512, 540), False, 'import sys\n'), ((2685, 2774), 'coast.plot_util.create_geo_subplots', 'coast.plot_util.create_geo_subplots', (['lonbounds', 'latbounds', 'n_r', 'n_c'], {'figsize': 'figsize'}), '(lonbounds, latbounds, n_r, n_c, figsize\n =figsize)\n', (2720, 2774), False, 'import coast\n'), ((2564, 2583), 'xarray.open_dataset', 'xr.open_dataset', (['dd'], {}), '(dd)\n', (2579, 2583), True, 'import xarray as xr\n'), ((2848, 2882), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap', 'cmap_levels'], {}), '(cmap, cmap_levels)\n', (2863, 2882), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3042), 'numpy.unravel_index', 'np.unravel_index', (['ii', '(n_r, n_c)'], {}), '(ii, (n_r, n_c))\n', (3026, 3042), True, 'import numpy as np\n'), ((3950, 4018), 'coast.plot_util.determine_colorbar_extension', 'coast.plot_util.determine_colorbar_extension', (['data', 'clim[0]', 'clim[1]'], {}), '(data, clim[0], clim[1])\n', (3994, 4018), False, 'import coast\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tqdm
import torch
import pickle
import resource
import numpy as np
import matplotlib.pyplot as plt
from args import parse_args
from modelSummary import model_dict
from pytorchtools import load_from_file
from torch.utils.data import DataLoader
from helperfunctions import mypause, stackall_Dict
from loss import get_seg2ptLoss
from utils import get_nparams, get_predictions
from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048*10, rlimit[1]))
#%%
if __name__ == '__main__':
args = parse_args()
device=torch.device("cuda")
torch.cuda.manual_seed(12)
if torch.cuda.device_count() > 1:
print('Moving to a multiGPU setup.')
args.useMultiGPU = True
else:
args.useMultiGPU = False
torch.backends.cudnn.deterministic=False
if args.model not in model_dict:
print("Model not found.")
print("valid models are: {}".format(list(model_dict.keys())))
exit(1)
LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname)
path2model = os.path.join(LOGDIR, 'weights')
path2checkpoint = os.path.join(LOGDIR, 'checkpoints')
path2writer = os.path.join(LOGDIR, 'TB.lock')
path2op = os.path.join(os.getcwd(), 'op', str(args.curObj))
os.makedirs(LOGDIR, exist_ok=True)
os.makedirs(path2model, exist_ok=True)
os.makedirs(path2checkpoint, exist_ok=True)
os.makedirs(path2writer, exist_ok=True)
os.makedirs(path2op, exist_ok=True)
model = model_dict[args.model]
netDict = load_from_file([args.loadfile,
os.path.join(path2checkpoint, 'checkpoint.pt')])
startEp = netDict['epoch'] if 'epoch' in netDict.keys() else 0
if 'state_dict' in netDict.keys():
model.load_state_dict(netDict['state_dict'])
print('Parameters: {}'.format(get_nparams(model)))
model = model if not args.useMultiGPU else torch.nn.DataParallel(model)
model = model.to(device).to(args.prec)
f = open(os.path.join('curObjects',
'baseline',
'cond_'+str(args.curObj)+'.pkl'), 'rb')
_, _, testObj = pickle.load(f)
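    # The pickled condition file above holds three dataset objects (presumably
    # train/valid/test); only the last one, the test split, is used here.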
testObj.path2data = os.path.join(args.path2data, 'Datasets', 'All')
testObj.augFlag = False
testloader = DataLoader(testObj,
batch_size=args.batchsize,
shuffle=False,
num_workers=args.workers,
drop_last=False)
if args.disp:
fig, axs = plt.subplots(nrows=1, ncols=1)
#%%
accLoss = 0.0
imCounter = 0
ious = []
dists_pupil_latent = []
dists_pupil_seg = []
dists_iris_latent = []
dists_iris_seg = []
model.eval()
opDict = {'id':[], 'archNum': [], 'archName': [], 'code': [],
'scores':{'iou':[], 'lat_dst':[], 'seg_dst':[]},
'pred':{'pup_latent_c':[],
'pup_seg_c':[],
'iri_latent_c':[],
'iri_seg_c':[],
'mask':[]},
'gt':{'pup_c':[], 'mask':[]}}
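    # opDict collects, per test image: its id and architecture, the latent code,
    # the predicted pupil/iris centres (latent- and segmentation-based), the IoU
    # and centre-distance scores, and optionally the predicted/GT masks; it is
    # pickled to disk at the end of the run.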
with torch.no_grad():
for bt, batchdata in enumerate(tqdm.tqdm(testloader)):
img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
out_tup = model(img.to(device).to(args.prec),
labels.to(device).long(),
pupil_center.to(device).to(args.prec),
elNorm.to(device).to(args.prec),
spatialWeights.to(device).to(args.prec),
distMap.to(device).to(args.prec),
cond.to(device).to(args.prec),
imInfo[:, 2].to(device).to(torch.long),
0.5)
output, elOut, latent, loss = out_tup
latent_pupil_center = elOut[:, 0:2].detach().cpu().numpy()
latent_iris_center = elOut[:, 5:7].detach().cpu().numpy()
_, seg_pupil_center = get_seg2ptLoss(output[:, 2, ...].cpu(), pupil_center, temperature=4)
_, seg_iris_center = get_seg2ptLoss(-output[:, 0, ...].cpu(), iris_center, temperature=4)
loss = loss if args.useMultiGPU else loss.mean()
accLoss += loss.detach().cpu().item()
predict = get_predictions(output)
iou, iou_bySample = getSeg_metrics(labels.numpy(),
predict.numpy(),
cond[:, 1].numpy())[1:]
latent_pupil_dist, latent_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
latent_pupil_center,
cond[:,0].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_pupil_dist, seg_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
seg_pupil_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
latent_iris_dist, latent_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
latent_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_iris_dist, seg_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
seg_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
dists_pupil_latent.append(latent_pupil_dist)
dists_iris_latent.append(latent_iris_dist)
dists_pupil_seg.append(seg_pupil_dist)
dists_iris_seg.append(seg_iris_dist)
ious.append(iou)
pup_latent_c = unnormPts(latent_pupil_center,
img.shape[2:])
pup_seg_c = unnormPts(seg_pupil_center,
img.shape[2:])
iri_latent_c = unnormPts(latent_iris_center,
img.shape[2:])
iri_seg_c = unnormPts(seg_iris_center,
img.shape[2:])
dispI = generateImageGrid(img.numpy().squeeze(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_seg_c,
cond.numpy(),
override=True,
heatmaps=False)
for i in range(0, img.shape[0]):
archNum = testObj.imList[imCounter, 1]
opDict['id'].append(testObj.imList[imCounter, 0])
opDict['code'].append(latent[i,...].detach().cpu().numpy())
opDict['archNum'].append(archNum)
opDict['archName'].append(testObj.arch[archNum])
opDict['pred']['pup_latent_c'].append(pup_latent_c[i, :])
opDict['pred']['pup_seg_c'].append(pup_seg_c[i, :])
opDict['pred']['iri_latent_c'].append(iri_latent_c[i, :])
opDict['pred']['iri_seg_c'].append(iri_seg_c[i, :])
if args.test_save_op_masks:
opDict['pred']['mask'].append(predict[i,...].numpy().astype(np.uint8))
opDict['scores']['iou'].append(iou_bySample[i, ...])
opDict['scores']['lat_dst'].append(latent_pupil_dist_bySample[i, ...])
opDict['scores']['seg_dst'].append(seg_pupil_dist_bySample[i, ...])
opDict['gt']['pup_c'].append(pupil_center[i,...].numpy())
if args.test_save_op_masks:
opDict['gt']['mask'].append(labels[i,...].numpy().astype(np.uint8))
imCounter+=1
if args.disp:
if bt == 0:
h_im = plt.imshow(dispI.permute(1, 2, 0))
plt.pause(0.01)
else:
h_im.set_data(dispI.permute(1, 2, 0))
mypause(0.01)
opDict = stackall_Dict(opDict)
ious = np.stack(ious, axis=0)
ious = np.nanmean(ious, axis=0)
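        # `ious` now holds the IoU values averaged over batches (NaNs ignored);
        # np.mean over it in the print below gives the reported mIoU.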
print('mIoU: {}. IoUs: {}'.format(np.mean(ious), ious))
print('Latent space PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_latent),
np.nanstd(dists_pupil_latent)))
print('Segmentation PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_seg),
np.nanstd(dists_pupil_seg)))
print('Latent space IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_latent),
np.nanstd(dists_iris_latent)))
print('Segmentation IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_seg),
np.nanstd(dists_iris_seg)))
print('--- Saving output directory ---')
f = open(os.path.join(path2op, 'opDict.pkl'), 'wb')
pickle.dump(opDict, f)
f.close()
|
[
"modelSummary.model_dict.keys",
"torch.cuda.device_count",
"utils.get_predictions",
"numpy.nanmean",
"utils.unnormPts",
"numpy.mean",
"helperfunctions.mypause",
"numpy.stack",
"resource.setrlimit",
"helperfunctions.stackall_Dict",
"numpy.nanstd",
"pickle.load",
"utils.get_nparams",
"matplotlib.pyplot.pause",
"args.parse_args",
"torch.device",
"pickle.dump",
"os.makedirs",
"numpy.nanmedian",
"resource.getrlimit",
"tqdm.tqdm",
"os.path.join",
"torch.nn.DataParallel",
"os.getcwd",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed",
"matplotlib.pyplot.subplots"
] |
[((615, 657), 'resource.getrlimit', 'resource.getrlimit', (['resource.RLIMIT_NOFILE'], {}), '(resource.RLIMIT_NOFILE)\n', (633, 657), False, 'import resource\n'), ((658, 724), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(2048 * 10, rlimit[1])'], {}), '(resource.RLIMIT_NOFILE, (2048 * 10, rlimit[1]))\n', (676, 724), False, 'import resource\n'), ((767, 779), 'args.parse_args', 'parse_args', ([], {}), '()\n', (777, 779), False, 'from args import parse_args\n'), ((792, 812), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (804, 812), False, 'import torch\n'), ((817, 843), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(12)'], {}), '(12)\n', (839, 843), False, 'import torch\n'), ((1296, 1327), 'os.path.join', 'os.path.join', (['LOGDIR', '"""weights"""'], {}), "(LOGDIR, 'weights')\n", (1308, 1327), False, 'import os\n'), ((1350, 1385), 'os.path.join', 'os.path.join', (['LOGDIR', '"""checkpoints"""'], {}), "(LOGDIR, 'checkpoints')\n", (1362, 1385), False, 'import os\n'), ((1404, 1435), 'os.path.join', 'os.path.join', (['LOGDIR', '"""TB.lock"""'], {}), "(LOGDIR, 'TB.lock')\n", (1416, 1435), False, 'import os\n'), ((1505, 1539), 'os.makedirs', 'os.makedirs', (['LOGDIR'], {'exist_ok': '(True)'}), '(LOGDIR, exist_ok=True)\n', (1516, 1539), False, 'import os\n'), ((1544, 1582), 'os.makedirs', 'os.makedirs', (['path2model'], {'exist_ok': '(True)'}), '(path2model, exist_ok=True)\n', (1555, 1582), False, 'import os\n'), ((1587, 1630), 'os.makedirs', 'os.makedirs', (['path2checkpoint'], {'exist_ok': '(True)'}), '(path2checkpoint, exist_ok=True)\n', (1598, 1630), False, 'import os\n'), ((1635, 1674), 'os.makedirs', 'os.makedirs', (['path2writer'], {'exist_ok': '(True)'}), '(path2writer, exist_ok=True)\n', (1646, 1674), False, 'import os\n'), ((1679, 1714), 'os.makedirs', 'os.makedirs', (['path2op'], {'exist_ok': '(True)'}), '(path2op, exist_ok=True)\n', (1690, 1714), False, 'import os\n'), ((2376, 2390), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2387, 2390), False, 'import pickle\n'), ((2415, 2462), 'os.path.join', 'os.path.join', (['args.path2data', '"""Datasets"""', '"""All"""'], {}), "(args.path2data, 'Datasets', 'All')\n", (2427, 2462), False, 'import os\n'), ((2509, 2618), 'torch.utils.data.DataLoader', 'DataLoader', (['testObj'], {'batch_size': 'args.batchsize', 'shuffle': '(False)', 'num_workers': 'args.workers', 'drop_last': '(False)'}), '(testObj, batch_size=args.batchsize, shuffle=False, num_workers=\n args.workers, drop_last=False)\n', (2519, 2618), False, 'from torch.utils.data import DataLoader\n'), ((851, 876), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (874, 876), False, 'import torch\n'), ((1232, 1243), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1241, 1243), False, 'import os\n'), ((1463, 1474), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1472, 1474), False, 'import os\n'), ((2138, 2166), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (2159, 2166), False, 'import torch\n'), ((2764, 2794), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (2776, 2794), True, 'import matplotlib.pyplot as plt\n'), ((3354, 3369), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3367, 3369), False, 'import torch\n'), ((9323, 9344), 'helperfunctions.stackall_Dict', 'stackall_Dict', (['opDict'], {}), '(opDict)\n', (9336, 9344), False, 'from helperfunctions import mypause, stackall_Dict\n'), ((9360, 9382), 'numpy.stack', 
'np.stack', (['ious'], {'axis': '(0)'}), '(ious, axis=0)\n', (9368, 9382), True, 'import numpy as np\n'), ((9398, 9422), 'numpy.nanmean', 'np.nanmean', (['ious'], {'axis': '(0)'}), '(ious, axis=0)\n', (9408, 9422), True, 'import numpy as np\n'), ((10349, 10371), 'pickle.dump', 'pickle.dump', (['opDict', 'f'], {}), '(opDict, f)\n', (10360, 10371), False, 'import pickle\n'), ((579, 590), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (588, 590), False, 'import os\n'), ((1827, 1873), 'os.path.join', 'os.path.join', (['path2checkpoint', '"""checkpoint.pt"""'], {}), "(path2checkpoint, 'checkpoint.pt')\n", (1839, 1873), False, 'import os\n'), ((2070, 2088), 'utils.get_nparams', 'get_nparams', (['model'], {}), '(model)\n', (2081, 2088), False, 'from utils import get_nparams, get_predictions\n'), ((3410, 3431), 'tqdm.tqdm', 'tqdm.tqdm', (['testloader'], {}), '(testloader)\n', (3419, 3431), False, 'import tqdm\n'), ((4611, 4634), 'utils.get_predictions', 'get_predictions', (['output'], {}), '(output)\n', (4626, 4634), False, 'from utils import get_nparams, get_predictions\n'), ((6975, 7020), 'utils.unnormPts', 'unnormPts', (['latent_pupil_center', 'img.shape[2:]'], {}), '(latent_pupil_center, img.shape[2:])\n', (6984, 7020), False, 'from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts\n'), ((7082, 7124), 'utils.unnormPts', 'unnormPts', (['seg_pupil_center', 'img.shape[2:]'], {}), '(seg_pupil_center, img.shape[2:])\n', (7091, 7124), False, 'from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts\n'), ((7186, 7230), 'utils.unnormPts', 'unnormPts', (['latent_iris_center', 'img.shape[2:]'], {}), '(latent_iris_center, img.shape[2:])\n', (7195, 7230), False, 'from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts\n'), ((7292, 7333), 'utils.unnormPts', 'unnormPts', (['seg_iris_center', 'img.shape[2:]'], {}), '(seg_iris_center, img.shape[2:])\n', (7301, 7333), False, 'from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts\n'), ((10298, 10333), 'os.path.join', 'os.path.join', (['path2op', '"""opDict.pkl"""'], {}), "(path2op, 'opDict.pkl')\n", (10310, 10333), False, 'import os\n'), ((9465, 9478), 'numpy.mean', 'np.mean', (['ious'], {}), '(ious)\n', (9472, 9478), True, 'import numpy as np\n'), ((9552, 9584), 'numpy.nanmedian', 'np.nanmedian', (['dists_pupil_latent'], {}), '(dists_pupil_latent)\n', (9564, 9584), True, 'import numpy as np\n'), ((9646, 9675), 'numpy.nanstd', 'np.nanstd', (['dists_pupil_latent'], {}), '(dists_pupil_latent)\n', (9655, 9675), True, 'import numpy as np\n'), ((9743, 9772), 'numpy.nanmedian', 'np.nanmedian', (['dists_pupil_seg'], {}), '(dists_pupil_seg)\n', (9755, 9772), True, 'import numpy as np\n'), ((9834, 9860), 'numpy.nanstd', 'np.nanstd', (['dists_pupil_seg'], {}), '(dists_pupil_seg)\n', (9843, 9860), True, 'import numpy as np\n'), ((9927, 9958), 'numpy.nanmedian', 'np.nanmedian', (['dists_iris_latent'], {}), '(dists_iris_latent)\n', (9939, 9958), True, 'import numpy as np\n'), ((10019, 10047), 'numpy.nanstd', 'np.nanstd', (['dists_iris_latent'], {}), '(dists_iris_latent)\n', (10028, 10047), True, 'import numpy as np\n'), ((10114, 10142), 'numpy.nanmedian', 'np.nanmedian', (['dists_iris_seg'], {}), '(dists_iris_seg)\n', (10126, 10142), True, 'import numpy as np\n'), ((10203, 10228), 'numpy.nanstd', 'np.nanstd', (['dists_iris_seg'], {}), '(dists_iris_seg)\n', (10212, 10228), True, 'import numpy as np\n'), ((1168, 1185), 'modelSummary.model_dict.keys', 'model_dict.keys', ([], {}), 
'()\n', (1183, 1185), False, 'from modelSummary import model_dict\n'), ((9175, 9190), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (9184, 9190), True, 'import matplotlib.pyplot as plt\n'), ((9291, 9304), 'helperfunctions.mypause', 'mypause', (['(0.01)'], {}), '(0.01)\n', (9298, 9304), False, 'from helperfunctions import mypause, stackall_Dict\n')]
|
import torch
import lib.modeling.resnet as resnet
import lib.modeling.semseg_heads as snet
import torch.nn as nn
import torch.optim as optim
import utils.resnet_weights_helper as resnet_utils
from torch.autograd import Variable
from roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch, collate_minibatch_semseg
from datasets.roidb import combined_roidb_for_training, combined_roidb_for_training_semseg
import os
import numpy as np
import nn as mynn
import cv2
from modeling.model_builder_3DSD import Generalized_3DSD
from modeling.model_builder_PSP3D import DispSeg
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
#load net
class load_net(nn.Module):
def __init__(self):
super(load_net, self).__init__()
build=snet.ModelBuilder()
fc_dim = 2048
self.encoder = build.build_encoder(
arch= 'resnet50_dilated8',
fc_dim=fc_dim)
self.decoder = build.build_decoder(
arch = 'ppm_bilinear',
num_class=19,
fc_dim=fc_dim,
use_softmax=False)
def _init_modules(self):
resnet_utils.load_pretrained_imagenet_weights(self)
def forward(self, data):
pred=self.decoder(self.encoder(data, return_feature_maps=True))
pred = nn.functional.interpolate(
pred, size=[128,128],
mode='bilinear', align_corners=False)
pred = nn.functional.log_softmax(pred, dim=1)
return pred
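# Note: load_net.forward() returns per-pixel log-probabilities (log_softmax),
# i.e. the form expected by the nn.NLLLoss criterion defined further below.
# In the training loop itself the wrapped DispSeg model computes its losses
# internally, so this pairing is only a sketch of the intended usage.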
def dataloader(bs, gpus):
inputs = {}
inputs['data'] = Variable(torch.randn(2*bs, 3, 128, 128)).to('cuda')
inputs['semseg_label_0'] = Variable(torch.LongTensor(
np.random.randint(0, 19, (bs, 128//8, 128//8), dtype=np.long))).to('cuda')
inputs['disp_label_0'] = Variable(torch.rand(bs, 128//8, 128//8)).to('cuda')
inputs['disp_scans'] = Variable(torch.arange(0,
cfg.DISP.MAX_DISPLACEMENT).float().view(1,cfg.DISP.MAX_DISPLACEMENT,1,1).repeat(bs,1,1,1)).to('cuda')
inputs['semseg_scans'] = Variable(torch.arange(0,
cfg.MODEL.NUM_CLASSES).float().view(1, cfg.MODEL.NUM_CLASSES, 1, 1).repeat(bs,1,1,1)).to('cuda')
return inputs
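# Shape sketch for the tensors built above: 'disp_scans' has shape
# (bs, cfg.DISP.MAX_DISPLACEMENT, 1, 1) and 'semseg_scans' has shape
# (bs, cfg.MODEL.NUM_CLASSES, 1, 1) -- one channel per candidate disparity/class.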
cfg_file = 'e2e_segdisp-R-50_3Dpool_1x.yaml'
cfg_from_file(cfg_file)
print (cfg.SEM)
print (cfg.DISP)
#cfg_from_list(cfg_file)
#assert_and_infer_cfg()
devices_ids=[5]
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(ids) for ids in devices_ids])
torch.backends.cudnn.benchmark=True
#torch.cuda.set_device(3)
len_gpus = len(devices_ids)
batch_size = 2 * len_gpus
#net = mynn.DataParallel(load_net().to('cuda'), minibatch=True)
net = mynn.DataParallel(DispSeg().to('cuda'), minibatch=True)
optimizer = optim.SGD(net.parameters(), lr=0.000875, momentum=0.9)
criterion = nn.NLLLoss(ignore_index=255)
#dataloader= dataloader(batch_size, len_gpus)
for i in range(10):
#for i, inputs in zip(range(1000), dataloader):
inputs = dataloader(batch_size, len_gpus)
for key in inputs:
inputs[key] = torch.chunk(inputs[key], chunks=len_gpus, dim=0)
optimizer.zero_grad()
loss=net(**inputs)
optimizer.step()
for k in loss['losses'].keys():
print (loss['losses'][k].item())
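# Note (an observation, not a fix): as written the loop above only runs forward
# passes; for actual training one would backpropagate the summed losses, e.g.
#   total = sum(loss['losses'].values()); total.backward()
# before calling optimizer.step().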
|
[
"core.config.cfg_from_file",
"numpy.random.randint",
"torch.chunk",
"torch.nn.NLLLoss",
"modeling.model_builder_PSP3D.DispSeg",
"torch.nn.functional.interpolate",
"torch.nn.functional.log_softmax",
"utils.resnet_weights_helper.load_pretrained_imagenet_weights",
"torch.arange",
"lib.modeling.semseg_heads.ModelBuilder",
"torch.rand",
"torch.randn"
] |
[((2264, 2287), 'core.config.cfg_from_file', 'cfg_from_file', (['cfg_file'], {}), '(cfg_file)\n', (2277, 2287), False, 'from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\n'), ((2788, 2816), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {'ignore_index': '(255)'}), '(ignore_index=255)\n', (2798, 2816), True, 'import torch.nn as nn\n'), ((786, 805), 'lib.modeling.semseg_heads.ModelBuilder', 'snet.ModelBuilder', ([], {}), '()\n', (803, 805), True, 'import lib.modeling.semseg_heads as snet\n'), ((1162, 1213), 'utils.resnet_weights_helper.load_pretrained_imagenet_weights', 'resnet_utils.load_pretrained_imagenet_weights', (['self'], {}), '(self)\n', (1207, 1213), True, 'import utils.resnet_weights_helper as resnet_utils\n'), ((1340, 1430), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['pred'], {'size': '[128, 128]', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(pred, size=[128, 128], mode='bilinear',\n align_corners=False)\n", (1365, 1430), True, 'import torch.nn as nn\n'), ((1474, 1512), 'torch.nn.functional.log_softmax', 'nn.functional.log_softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (1499, 1512), True, 'import torch.nn as nn\n'), ((3022, 3070), 'torch.chunk', 'torch.chunk', (['inputs[key]'], {'chunks': 'len_gpus', 'dim': '(0)'}), '(inputs[key], chunks=len_gpus, dim=0)\n', (3033, 3070), False, 'import torch\n'), ((2671, 2680), 'modeling.model_builder_PSP3D.DispSeg', 'DispSeg', ([], {}), '()\n', (2678, 2680), False, 'from modeling.model_builder_PSP3D import DispSeg\n'), ((1605, 1637), 'torch.randn', 'torch.randn', (['(2 * bs)', '(3)', '(128)', '(128)'], {}), '(2 * bs, 3, 128, 128)\n', (1616, 1637), False, 'import torch\n'), ((1827, 1861), 'torch.rand', 'torch.rand', (['bs', '(128 // 8)', '(128 // 8)'], {}), '(bs, 128 // 8, 128 // 8)\n', (1837, 1861), False, 'import torch\n'), ((1714, 1779), 'numpy.random.randint', 'np.random.randint', (['(0)', '(19)', '(bs, 128 // 8, 128 // 8)'], {'dtype': 'np.long'}), '(0, 19, (bs, 128 // 8, 128 // 8), dtype=np.long)\n', (1731, 1779), True, 'import numpy as np\n'), ((1907, 1949), 'torch.arange', 'torch.arange', (['(0)', 'cfg.DISP.MAX_DISPLACEMENT'], {}), '(0, cfg.DISP.MAX_DISPLACEMENT)\n', (1919, 1949), False, 'import torch\n'), ((2075, 2113), 'torch.arange', 'torch.arange', (['(0)', 'cfg.MODEL.NUM_CLASSES'], {}), '(0, cfg.MODEL.NUM_CLASSES)\n', (2087, 2113), False, 'import torch\n')]
|
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
import pandas as pd
from .utils import get_factors_rev
def calc_plot_size(domain_x, domain_y, plot_goal, house_goal):
f1 = sorted(get_factors_rev(domain_x))
f2 = sorted(get_factors_rev(domain_y))
plot_x, plot_y = None, None
for x in f1:
for y in f2:
if x * y - house_goal >= 0 and plot_goal - x * y >= 0:
if not plot_x and not plot_y:
plot_x, plot_y = x, y
if (plot_goal - x * y) < (plot_goal - plot_x * plot_y):
plot_x, plot_y = x, y
elif ((plot_goal - x * y) == (plot_goal - plot_x * plot_y)) and ((x - y) < (plot_x - plot_y)):
plot_x, plot_y = x, y
return plot_x, plot_y
def calc_plot_sizes(
domain_x, domain_y, plot_footprint, house_footprint, plot_ratio, dx, dy, full_domain, x_spread=None, y_spread=None
):
x_spread = x_spread if x_spread is not None else (-round(domain_x / 15), 0)
y_spread = (
y_spread if y_spread is not None else (-round(domain_y / 20), min(full_domain - domain_y, round(domain_y / 10)))
)
goal = plot_footprint / (dx * dy)
house_goal = house_footprint / (dx * dy)
dom_x = range(domain_x + x_spread[0], domain_x + x_spread[1] + 1)
dom_y = range(domain_y + y_spread[0], domain_y + y_spread[1] + 1)
plots = []
for d_x in dom_x:
for d_y in dom_y:
trimmed_d_y = int(d_y * plot_ratio)
plot_x, plot_y = calc_plot_size(d_x, trimmed_d_y, goal, house_goal)
if plot_x is not None and plot_y is not None:
plots.append((plot_x, plot_y, d_x, d_y, trimmed_d_y))
return plots
def get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy):
goal = plot_footprint / (dx * dy)
tmp = pd.DataFrame(plots, columns=["px", "py", "domx", "domy", "trimmed_dy"])
tmp["plt_area"] = tmp["px"] * tmp["py"]
tmp["goal_diff"] = goal - tmp.plt_area
tmp["domain_y_diff"] = tmp.domy * plot_ratio - tmp.trimmed_dy
tmp["trimmed_area"] = tmp["domx"] * tmp["trimmed_dy"]
tmp["full_domain"] = tmp["domx"] * tmp["domy"]
tmp["ratio_diff"] = abs((((tmp.trimmed_area + round(tmp.domain_y_diff * tmp.domx))) / tmp.full_domain - plot_ratio))
normalized_ratio_diff = (tmp.ratio_diff + plot_ratio) / plot_ratio
normalized_goal_diff = (tmp.goal_diff + goal) / goal
tmp["weighted_sorter"] = (tmp.px + tmp.py) ** (normalized_ratio_diff * normalized_goal_diff)
# tmp["ratio_diff"] = abs(((tmp.trimmed_area) / tmp.full_domain - plot_ratio))
tmp = tmp.sort_values(
by=["weighted_sorter", "goal_diff", "ratio_diff", "domain_y_diff", "trimmed_area"],
ascending=[True, True, True, True, False],
)
# tmp = tmp.sort_values(by=["goal_diff", "domain_y_diff", "trimmed_area"], ascending=[True, True, False])
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = tmp[["px", "py", "domx", "domy", "trimmed_dy"]].iloc[0]
return tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y
def calc_house_size(plot_x, plot_y, house_footprint, dx, dy):
goal = house_footprint / (dx * dy)
f1 = range(1, plot_x + 1)
f2 = range(1, plot_y + 1)
true_x, true_y = f1[0], f2[0]
for x in f1:
for y in f2:
padded_x, padded_y = x - 0, y - 0
nums = sorted([padded_x, padded_y])
if nums[0] * 2 < nums[1]:
continue
if abs(goal - padded_x * padded_y) < abs(goal - true_x * true_y):
true_x, true_y = padded_x, padded_y
elif (abs(goal - padded_x * padded_y) == abs(goal - true_x * true_y)) and (
abs(padded_x - padded_y) < abs(true_x - true_y)
):
true_x, true_y = padded_x, padded_y
return true_x, true_y
class BaseDomainArea(ABC):
subplot: Optional["BaseDomainArea"]
x: int
y: int
z: Optional[int]
matrix: np.ndarray
def __str__(self) -> str:
string = ""
for row in self.matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
@abstractmethod
def get_matrix(self) -> np.ndarray:
"""Get the numpy matrix representation of the domain area"""
def _validate_matrix_size(self, subplot):
for value in ["x", "y"]:
cell_val = getattr(self, value)
subplot_val = getattr(subplot, value)
if subplot_val and cell_val < subplot_val:
raise ValueError(
f"The {value} ({cell_val}) value of {self.__class__.__name__}"
f" must be larger than the house ({subplot_val}) going on it!"
)
def save_matrix(self, filename: str, matrix_name: str = None) -> None:
matrix = self.matrix if matrix_name is None else getattr(self, matrix_name)
np.savetxt(filename, matrix, delimiter=",")
class House(BaseDomainArea):
def __init__(self, x: int, y: int, z: int) -> None:
self.x = x
self.y = y
self.z = z
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
house = np.full((self.x, self.y), self.z)
return house
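# Example (a sketch): House(x=2, y=3, z=10).matrix is a 2x3 array filled with 10,
# i.e. the building footprint expressed as its height in grid cells.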
class Cell(BaseDomainArea):
def __init__(self, subplot: House, x: int, y: int) -> None:
self.subplot = subplot
self.x = x
self.y = y
self._validate_matrix_size(subplot=self.subplot)
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
left = (self.x - self.subplot.x) // 2
top = (self.y - self.subplot.y) // 2
plot = np.zeros((self.x, self.y), dtype=int)
plot[left : left + self.subplot.x, top : top + self.subplot.y] = self.subplot.matrix
return plot
class Domain(BaseDomainArea):
def __init__(self, subplot: Cell, tdomain_x, tdomain_y, full_x, full_y, trimmed_y, plot_ratio, stack_height) -> None:
self.subplot = subplot
self.temp_x = tdomain_x
self.temp_y = tdomain_y
self.full_x = full_x
self.full_y = full_y
self.trimmed_y = trimmed_y
self.plot_ratio = plot_ratio
self.stack_height = stack_height
# self._validate_matrix_size(subplot=self.subplot)
self.matrix, self.trees_matrix = self.get_matrix()
def print_tree_matrix(self) -> str:
string = ""
for row in self.trees_matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
def get_matrix(self) -> np.ndarray:
houses_row = np.tile(
self.subplot.matrix,
(
self.temp_x // self.subplot.x,
1,
),
)
number_of_house_rows = self.trimmed_y // self.subplot.y
number_of_full_tree_rows = self.temp_y - self.trimmed_y - 1
mixed_row_ratio = self.temp_y * self.plot_ratio - self.trimmed_y
tree_row = np.full((self.temp_x, 1), -1)
mixed_row = np.array(
[-1 if i <= mixed_row_ratio * self.temp_x else 0 for i in range(1, self.temp_x + 1)]
).reshape(self.temp_x, 1)
rows = [[houses_row.copy()] for _ in range(number_of_house_rows)]
trees = [tree_row.copy() for _ in range(number_of_full_tree_rows)]
trees.insert(number_of_house_rows // 2, mixed_row)
while trees:
for row in rows:
if not trees:
break
row.append(trees.pop())
domain_with_trees = np.concatenate([np.concatenate(row, axis=1) for row in rows], axis=1)
dwtx = domain_with_trees.shape[0]
dwty = domain_with_trees.shape[1]
xs = int(np.floor((self.full_x - dwtx) / 2)), int(np.ceil((self.full_x - dwtx) / 2))
full_domain = np.pad(domain_with_trees, (xs, (self.full_y - dwty, 0)))
mid_x = self.full_x // 2
full_domain[mid_x - 2:mid_x + 2, :1] = self.stack_height # stack for surface scalar to come out of
domain = np.where(full_domain != -1, full_domain, 0)
trees = np.where(full_domain == -1, full_domain, 0)
return domain.T, trees.T
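    # get_matrix() returns two transposed grids: `matrix` holds the building
    # heights (with the 4-cell-wide stack written in near the domain edge),
    # while `trees_matrix` keeps only the cells that were marked -1 for trees.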
@classmethod
def from_domain_config(cls, house, config):
cell = Cell(house, tree_domain_fraction=config["trees"]["domain_fraction"], **config["plot_size"])
x = config["domain"]["x"]
y = config["domain"]["y"]
return cls(subplot=cell, x=x, y=y)
@classmethod
def from_plot_size(cls, house, config, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, stack_height):
cell = Cell(house, x=tplot_x, y=tplot_y)
# x = config["domain"]["x"]
# y = config["domain"]["y"]
return cls(cell, tdomain_x, tdomain_y, config["domain"]["x"], config["domain"]["y"], trimmed_y, plot_ratio, stack_height)
def setup_domain(cfg):
domain_x, domain_y = cfg["domain"]["x"], (round(cfg["domain"]["y"] * cfg["domain"]["urban_ratio"]))
plot_footprint, plot_ratio, dx, dy = (
cfg["plot"]["plot_footprint"],
cfg["plot"]["plot_ratio"],
cfg["domain"]["dx"],
cfg["domain"]["dy"],
)
plots = calc_plot_sizes(
domain_x,
domain_y,
plot_footprint,
cfg["house"]["footprint"],
plot_ratio,
dx,
dy,
cfg["domain"]["y"],
)
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy)
house_x, house_y = calc_house_size(tplot_x, tplot_y, cfg["house"]["footprint"], dx, dy)
house = House(house_x, house_y, cfg["house"]["height"])
return Domain.from_plot_size(house, cfg, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, cfg["domain"]["stack_height"])
if __name__ == "__main__":
from .load_wrapper_config import get_wrapper_config
config = get_wrapper_config()
domain = setup_domain(config)
domain
|
[
"numpy.tile",
"numpy.ceil",
"numpy.where",
"numpy.floor",
"numpy.zeros",
"numpy.savetxt",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.full",
"numpy.pad"
] |
[((1874, 1945), 'pandas.DataFrame', 'pd.DataFrame', (['plots'], {'columns': "['px', 'py', 'domx', 'domy', 'trimmed_dy']"}), "(plots, columns=['px', 'py', 'domx', 'domy', 'trimmed_dy'])\n", (1886, 1945), True, 'import pandas as pd\n'), ((4922, 4965), 'numpy.savetxt', 'np.savetxt', (['filename', 'matrix'], {'delimiter': '""","""'}), "(filename, matrix, delimiter=',')\n", (4932, 4965), True, 'import numpy as np\n'), ((5207, 5240), 'numpy.full', 'np.full', (['(self.x, self.y)', 'self.z'], {}), '((self.x, self.y), self.z)\n', (5214, 5240), True, 'import numpy as np\n'), ((5669, 5706), 'numpy.zeros', 'np.zeros', (['(self.x, self.y)'], {'dtype': 'int'}), '((self.x, self.y), dtype=int)\n', (5677, 5706), True, 'import numpy as np\n'), ((6614, 6678), 'numpy.tile', 'np.tile', (['self.subplot.matrix', '(self.temp_x // self.subplot.x, 1)'], {}), '(self.subplot.matrix, (self.temp_x // self.subplot.x, 1))\n', (6621, 6678), True, 'import numpy as np\n'), ((6986, 7015), 'numpy.full', 'np.full', (['(self.temp_x, 1)', '(-1)'], {}), '((self.temp_x, 1), -1)\n', (6993, 7015), True, 'import numpy as np\n'), ((7831, 7887), 'numpy.pad', 'np.pad', (['domain_with_trees', '(xs, (self.full_y - dwty, 0))'], {}), '(domain_with_trees, (xs, (self.full_y - dwty, 0)))\n', (7837, 7887), True, 'import numpy as np\n'), ((8047, 8090), 'numpy.where', 'np.where', (['(full_domain != -1)', 'full_domain', '(0)'], {}), '(full_domain != -1, full_domain, 0)\n', (8055, 8090), True, 'import numpy as np\n'), ((8107, 8150), 'numpy.where', 'np.where', (['(full_domain == -1)', 'full_domain', '(0)'], {}), '(full_domain == -1, full_domain, 0)\n', (8115, 8150), True, 'import numpy as np\n'), ((7577, 7604), 'numpy.concatenate', 'np.concatenate', (['row'], {'axis': '(1)'}), '(row, axis=1)\n', (7591, 7604), True, 'import numpy as np\n'), ((7733, 7767), 'numpy.floor', 'np.floor', (['((self.full_x - dwtx) / 2)'], {}), '((self.full_x - dwtx) / 2)\n', (7741, 7767), True, 'import numpy as np\n'), ((7774, 7807), 'numpy.ceil', 'np.ceil', (['((self.full_x - dwtx) / 2)'], {}), '((self.full_x - dwtx) / 2)\n', (7781, 7807), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import mujoco_py
import numpy as np
import os.path as osp
from init_args_serializer import Serializable
from typing import Optional
import pyrado
from pyrado.environments.barrett_wam import (
goal_pos_init_sim_4dof,
goal_pos_init_sim_7dof,
init_qpos_des_4dof,
init_qpos_des_7dof,
act_space_bic_4dof,
act_space_bic_7dof,
wam_q_limits_up_7dof,
wam_q_limits_lo_7dof,
torque_space_wam_4dof,
torque_space_wam_7dof,
wam_pgains_7dof,
wam_dgains_7dof,
wam_pgains_4dof,
wam_dgains_4dof,
)
from pyrado.environments.mujoco.base import MujocoSimEnv
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.spaces.singular import SingularStateSpace
from pyrado.tasks.base import Task
from pyrado.tasks.condition_only import ConditionOnlyTask
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode
from pyrado.tasks.goalless import GoallessTask
from pyrado.tasks.masked import MaskedTask
from pyrado.tasks.parallel import ParallelTasks
from pyrado.tasks.reward_functions import ZeroPerStepRewFcn, ExpQuadrErrRewFcn, QuadrErrRewFcn
from pyrado.tasks.sequential import SequentialTasks
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import print_cbt
class WAMBallInCupSim(MujocoSimEnv, Serializable):
"""
WAM robotic arm from Barrett technologies for the ball-in-the-cup task, controlled by a PD controller.
.. note::
When using the `reset()` function, always pass a meaningful `init_state`
.. seealso::
[1] https://github.com/psclklnk/self-paced-rl/tree/master/sprl/envs/ball_in_a_cup.py
"""
name: str = "wam-bic"
def __init__(
self,
num_dof: int,
frame_skip: int = 4,
dt: Optional[float] = None,
max_steps: int = pyrado.inf,
fixed_init_state: bool = True,
stop_on_collision: bool = True,
observe_ball: bool = False,
observe_cup: bool = False,
task_args: Optional[dict] = None,
):
"""
Constructor
        :param num_dof: number of degrees of freedom (4 or 7), depending on which Barrett WAM setup is being used
:param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
the time step size `dt`
:param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
                   overwritten. A possible use case is if you know that you recorded a trajectory with a specific `dt`.
:param max_steps: max number of simulation time steps
:param fixed_init_state: enables/disables deterministic, fixed initial state
:param stop_on_collision: set the `failed` flag in the `dict` returned by `_mujoco_step()` to true, if the ball
                                  collides with something other than the desired parts of the cup. This causes the
episode to end. Keep in mind that in case of a negative step reward and no final
cost on failing, this might result in undesired behavior.
:param observe_ball: if `True`, include the 2-dim (x-z plane) cartesian ball position into the observation
:param observe_cup: if `True`, include the 2-dim (x-z plane) cartesian cup position into the observation
:param task_args: arguments for the task construction
"""
Serializable._init(self, locals())
self.fixed_init_state = fixed_init_state
self.observe_ball = observe_ball
self.observe_cup = observe_cup
# Initialize num DoF specific variables
self._num_dof = num_dof
if num_dof == 4:
graph_file_name = "wam_4dof_bic.xml"
self.qpos_des_init = init_qpos_des_4dof
self.p_gains = wam_pgains_4dof
self.d_gains = wam_dgains_4dof
init_ball_pos = np.array([0.723, 0.0, 1.168])
init_cup_goal = goal_pos_init_sim_4dof
elif num_dof == 7:
graph_file_name = "wam_7dof_bic.xml"
self.qpos_des_init = init_qpos_des_7dof
self.p_gains = wam_pgains_7dof
self.d_gains = wam_dgains_7dof
init_ball_pos = np.array([0.828, 0.0, 1.131])
init_cup_goal = goal_pos_init_sim_7dof
else:
raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7")
model_path = osp.join(pyrado.MUJOCO_ASSETS_DIR, graph_file_name)
super().__init__(model_path, frame_skip, dt, max_steps, task_args)
# Actual initial joint position (when the WAM moved to the home position)
if num_dof == 4:
self.init_qpos[:4] = np.array([0.0, 0.63, 0.0, 1.27])
self.init_qpos[4] = -0.34 # angle of the first rope segment relative to the cup bottom plate
else:
self.init_qpos[:7] = np.array([0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57])
self.init_qpos[7] = -0.21 # angle of the first rope segment relative to the cup bottom plate
# Set the actual stable initial position. This position would be reached after some time using the internal
# PD controller to stabilize at self._qpos_des_init.
# The initial position of the ball in cartesian coordinates
self._init_state = np.concatenate([self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal])
if self.fixed_init_state:
self._init_space = SingularStateSpace(self._init_state)
else:
# Add plus/minus one degree to each motor joint and the first rope segment joint
init_state_up = self._init_state.copy()
init_state_up[: self._num_dof] += np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof]
init_state_lo = self._init_state.copy()
init_state_lo[: self._num_dof] -= np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof]
self._init_space = BoxSpace(init_state_lo, init_state_up)
        # Bodies to check for collision
self._collision_bodies = [
"wam/base_link",
"wam/shoulder_yaw_link",
"wam/shoulder_pitch_link",
"wam/upper_arm_link",
"wam/forearm_link",
"wrist_palm_link",
"wam/wrist_pitch_link",
"wam/wrist_yaw_link",
]
if self._num_dof == 4:
self._collision_bodies = self._collision_bodies[:6]
# We access a private attribute since a method like 'model.geom_names[geom_id]' cannot be used because
# not every geom has a name
self._collision_geom_ids = [self.model._geom_name2id[name] for name in ["cup_geom1", "cup_geom2"]]
self.stop_on_collision = stop_on_collision
self.camera_config = dict(
distance=2.7,
trackbodyid=0, # id of the body to track
elevation=-30, # camera rotation around the axis in the plane
azimuth=-90, # camera rotation around the camera's vertical axis
)
@property
def num_dof(self) -> int:
""" Get the number of degrees of freedom. """
return self._num_dof
@property
def torque_space(self) -> Space:
""" Get the space of joint torques. """
return torque_space_wam_7dof if self._num_dof == 7 else torque_space_wam_4dof
@property
def state_space(self) -> Space:
# The state space has the same shape as the init space (including ball and cup)
state_shape = np.concatenate([self.init_qpos, self.init_qvel, np.empty(3), np.empty(3)]).shape
state_lo, state_up = np.full(state_shape, -pyrado.inf), np.full(state_shape, pyrado.inf)
# Ensure that joint limits of the arm are not reached (5 deg safety margin)
state_lo[: self._num_dof] = wam_q_limits_lo_7dof[: self._num_dof]
state_up[: self._num_dof] = wam_q_limits_up_7dof[: self._num_dof]
return BoxSpace(state_lo, state_up)
@property
def obs_space(self) -> Space:
# Observing the normalized time and optionally the cup and ball position
obs_lo, obs_up, labels = [0.0], [1.0], ["t"]
if self.observe_ball:
obs_lo.extend([-3.0, -3.0])
obs_up.extend([3.0, 3.0])
labels.extend(["ball_x", "ball_z"])
if self.observe_cup:
obs_lo.extend([-3.0, -3.0])
obs_up.extend([3.0, 3.0])
labels.extend(["cup_x", "cup_z"])
return BoxSpace(obs_lo, obs_up, labels=labels)
@property
def act_space(self) -> Space:
# Running a PD controller on joint positions and velocities
return act_space_bic_7dof if self._num_dof == 7 else act_space_bic_4dof
@classmethod
def get_nominal_domain_param(cls, num_dof: int = 7) -> dict:
if num_dof == 7:
return dict(
cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65)
rope_length=0.41, # length of the rope [m]
ball_mass=0.024, # mass of the ball [kg]
joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_5_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_6_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_7_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-]
joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-]
joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-]
joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-]
joint_5_dryfriction=0.4, # dry friction coefficient of motor joint 5 [-]
joint_6_dryfriction=0.4, # dry friction coefficient of motor joint 6 [-]
joint_7_dryfriction=0.4, # dry friction coefficient of motor joint 7 [-]
rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6)
)
elif num_dof == 4:
return dict(
cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65)
rope_length=0.41, # length of the rope [m]
ball_mass=0.024, # mass of the ball [kg]
joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small)
joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-]
joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-]
joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-]
joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-]
rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6)
)
else:
raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7")
def _create_task(self, task_args: dict) -> Task:
if task_args.get("sparse_rew_fcn", False):
# Create a task with binary reward
return self._create_main_task(task_args)
else:
            # Create two (or three) parallel running tasks.
# 1.) Main task: Desired state task for the cartesian ball distance
# 2.) Deviation task: Desired state task for the cartesian- and joint deviation from the init position
            # 3.) Binary Bonus: Adds a binary bonus when the ball is caught [inactive by default]
return ParallelTasks(
[
self._create_main_task(task_args),
self._create_deviation_task(task_args),
self._create_main_task(
dict(
sparse_rew_fcn=True,
success_bonus=task_args.get("success_bonus", 0),
)
),
]
)
def _create_main_task(self, task_args: dict) -> Task:
# Create a DesStateTask that masks everything but the ball position
idcs = list(range(self.state_space.flat_dim - 6, self.state_space.flat_dim - 3)) # Cartesian ball position
spec = EnvSpec(
self.spec.obs_space,
self.spec.act_space,
self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)),
)
# If we do not use copy(), state_des coming from MuJoCo is a reference and updates automatically at each step.
# Note: sim.forward() + get_body_xpos() results in wrong output for state_des, as sim has not been updated to
# init_space.sample(), which is first called in reset()
if task_args.get("sparse_rew_fcn", False):
factor = task_args.get("success_bonus", 1)
# Binary final reward task
main_task = FinalRewTask(
ConditionOnlyTask(
spec,
condition_fcn=self.check_ball_in_cup,
is_success_condition=True,
),
mode=FinalRewMode(always_positive=True),
factor=factor,
)
            # Yield -1 on fail after the main task is done (successfully or not)
dont_fail_after_succ_task = FinalRewTask(
GoallessTask(spec, ZeroPerStepRewFcn()),
mode=FinalRewMode(always_negative=True),
factor=factor,
)
# Augment the binary task with an endless dummy task, to avoid early stopping
task = SequentialTasks((main_task, dont_fail_after_succ_task))
return MaskedTask(self.spec, task, idcs)
else:
state_des = self.sim.data.get_site_xpos("cup_goal") # this is a reference
# state_des_ball = self.sim.data.get_site_xpos("cup_goal") # this is a reference
# state_des_cup = np.array([0.82521, 0, 1.4469]) if self._num_dof == 7 else np.array([0.758, 0, 1.5])
# state_des = np.concatenate([state_des_ball, state_des_cup])
R_default = np.diag([0, 0, 1, 1e-2, 1e-2, 1e-1]) if self._num_dof == 7 else np.diag([0, 0, 1e-2, 1e-2])
rew_fcn = ExpQuadrErrRewFcn(
Q=task_args.get("Q", np.diag([2e1, 1e-4, 2e1])), # distance ball - cup; shouldn't move in y-direction
R=task_args.get("R", R_default), # last joint is really unreliable for 7 dof, thus punish more
)
task = DesStateTask(spec, state_des, rew_fcn)
# Wrap the masked DesStateTask to add a bonus for the best state in the rollout
return BestStateFinalRewTask(
MaskedTask(self.spec, task, idcs),
factor=task_args.get("final_factor", 0.05 * self.max_steps),
)
def _create_deviation_task(self, task_args: dict) -> Task:
idcs = list(range(self.state_space.flat_dim - 3, self.state_space.flat_dim)) # Cartesian cup goal position
spec = EnvSpec(
self.spec.obs_space,
self.spec.act_space,
self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)),
)
# init cup goal position
state_des = goal_pos_init_sim_7dof if self._num_dof == 7 else goal_pos_init_sim_4dof
rew_fcn = QuadrErrRewFcn(
Q=task_args.get("Q_dev", np.diag([2e-1, 1e-6, 5e0])), # Cartesian distance from init cup position
R=task_args.get(
"R_dev", np.zeros((self.act_space.shape[0], self.act_space.shape[0]))
), # joint space distance from init pose, interferes with R_default from _create_main_task
)
task = DesStateTask(spec, state_des, rew_fcn)
return MaskedTask(self.spec, task, idcs)
def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str:
# First replace special domain parameters
cup_scale = domain_param.pop("cup_scale", None)
rope_length = domain_param.pop("rope_length", None)
if cup_scale is not None:
# See [1, l.93-96]
xml_model = xml_model.replace("[scale_mesh]", str(cup_scale * 0.001))
xml_model = xml_model.replace("[pos_mesh]", str(0.055 - (cup_scale - 1.0) * 0.023))
xml_model = xml_model.replace("[pos_goal]", str(0.1165 + (cup_scale - 1.0) * 0.0385))
xml_model = xml_model.replace("[size_cup]", str(cup_scale * 0.038))
xml_model = xml_model.replace("[size_cup_inner]", str(cup_scale * 0.03))
if rope_length is not None:
# The rope consists of 30 capsules
xml_model = xml_model.replace("[pos_capsule]", str(rope_length / 30))
# Each joint is at the top of each capsule (therefore negative direction from center)
xml_model = xml_model.replace("[pos_capsule_joint]", str(-rope_length / 60))
# Pure visualization component
xml_model = xml_model.replace("[size_capsule_geom]", str(rope_length / 72))
# Resolve mesh directory and replace the remaining domain parameters
return super()._adapt_model_file(xml_model, domain_param)
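    # Worked example (added for illustration, not in the original module): with the nominal
    # rope_length of 0.41 m, consecutive rope capsules are spaced 0.41 / 30 ~ 0.0137 m apart,
    # each capsule joint is offset 0.41 / 60 ~ 0.0068 m from its capsule's center, and the
    # purely visual capsule geom uses a size of 0.41 / 72 ~ 0.0057 m.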
def _mujoco_step(self, act: np.ndarray) -> dict:
assert self.act_space.contains(act, verbose=True)
# Get the desired positions and velocities for the selected joints
qpos_des = self.qpos_des_init.copy() # the desired trajectory is relative to self._qpos_des_init
qvel_des = np.zeros_like(qpos_des)
if self._num_dof == 4:
np.add.at(qpos_des, [1, 3], act[:2])
np.add.at(qvel_des, [1, 3], act[2:])
elif self._num_dof == 7:
np.add.at(qpos_des, [1, 3, 5], act[:3])
np.add.at(qvel_des, [1, 3, 5], act[3:])
# Compute the position and velocity errors
err_pos = qpos_des - self.state[: self._num_dof]
err_vel = qvel_des - self.state[self.model.nq : self.model.nq + self._num_dof]
# Compute the torques for the PD controller and clip them to their max values
torque = self.p_gains * err_pos + self.d_gains * err_vel
torque = self.torque_space.project_to(torque)
# Apply the torques to the robot
self.sim.data.qfrc_applied[: self._num_dof] = torque
# Call MuJoCo
try:
self.sim.step()
mjsim_crashed = False
except mujoco_py.builder.MujocoException:
            # When MuJoCo recognizes instabilities in the simulation, it simply kills it.
# Instead, we want the episode to end with a failure.
mjsim_crashed = True
qpos, qvel = self.sim.data.qpos.copy(), self.sim.data.qvel.copy()
ball_pos = self.sim.data.get_body_xpos("ball").copy()
cup_goal = self.sim.data.get_site_xpos("cup_goal").copy()
self.state = np.concatenate([qpos, qvel, ball_pos, cup_goal])
# If desired, check for collisions of the ball with the robot
ball_collided = self.check_ball_collisions() if self.stop_on_collision else False
# If state is out of bounds (this is normally checked by the task, but does not work because of the mask)
state_oob = False if self.state_space.contains(self.state) else True
return dict(
qpos_des=qpos_des,
qvel_des=qvel_des,
qpos=qpos[: self._num_dof],
qvel=qvel[: self._num_dof],
ball_pos=ball_pos,
cup_pos=cup_goal,
failed=mjsim_crashed or ball_collided or state_oob,
)
def check_ball_collisions(self, verbose: bool = False) -> bool:
"""
Check if an undesired collision with the ball occurs.
:param verbose: print messages on collision
        :return: `True` if the ball collides with something other than the central parts of the cup
"""
for i in range(self.sim.data.ncon):
# Get current contact object
contact = self.sim.data.contact[i]
# Extract body-id and body-name of both contact geoms
body1 = self.model.geom_bodyid[contact.geom1]
body1_name = self.model.body_names[body1]
body2 = self.model.geom_bodyid[contact.geom2]
body2_name = self.model.body_names[body2]
# Evaluate if the ball collides with part of the WAM (collision bodies)
# or the connection of WAM and cup (geom_ids)
c1 = body1_name == "ball" and (
body2_name in self._collision_bodies or contact.geom2 in self._collision_geom_ids
)
c2 = body2_name == "ball" and (
body1_name in self._collision_bodies or contact.geom1 in self._collision_geom_ids
)
if c1 or c2:
if verbose:
print_cbt(
f"Undesired collision of {body1_name} and {body2_name} detected!",
"y",
)
return True
return False
def check_ball_in_cup(self, *args, verbose: bool = False):
"""
Check if the ball is in the cup.
:param verbose: print messages when ball is in the cup
:return: `True` if the ball is in the cup
"""
for i in range(self.sim.data.ncon):
# Get current contact object
contact = self.sim.data.contact[i]
# Extract body-id and body-name of both contact geoms
body1 = self.model.geom_bodyid[contact.geom1]
body1_name = self.model.body_names[body1]
body2 = self.model.geom_bodyid[contact.geom2]
body2_name = self.model.body_names[body2]
            # Evaluate if the ball is in contact with the inner part of the cup (cup_inner geom)
cup_inner_id = self.model._geom_name2id["cup_inner"]
c1 = body1_name == "ball" and contact.geom2 == cup_inner_id
c2 = body2_name == "ball" and contact.geom1 == cup_inner_id
if c1 or c2:
if verbose:
print_cbt(f"The ball is in the cup at time step {self.curr_step}.", "y")
return True
return False
def observe(self, state: np.ndarray) -> np.ndarray:
# TODO: Debug print-outs, should be removed in future...
# if self._curr_step == 0:
# print_cbt(f'cup xpos: {self.sim.data.get_body_xpos("cup").copy()}', 'b') # center of frame
# print_cbt(f'cup xipos: {self.sim.data.get_body_xipos("cup").copy()}', 'b') # center of mass
# Observe the normalized time
obs = [self._curr_step / self.max_steps]
# Extract the (x, z) cartesian position of cup and ball (the robot operates in the x-z plane).
# Note: the cup_goal is the mujoco site object marking the goal position for the ball. It is not identical
# to the coordinate system origin of the rigid body object 'cup'
if self.observe_ball:
obs.extend([state[-3], state[-1]])
if self.observe_cup:
obs.extend([state[-6], state[-4]])
return np.array(obs)
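# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Construct the 7-DoF environment and command zero deviations from the initial desired joint
# trajectory for a few steps. Assumes the pyrado MuJoCo assets and mujoco_py are installed;
# the argument values below are only examples.
if __name__ == "__main__":
    env = WAMBallInCupSim(num_dof=7, max_steps=200, fixed_init_state=True)
    env.reset()
    zero_act = np.zeros(env.act_space.flat_dim)  # no deviation from the initial desired pose
    for _ in range(20):
        env.step(zero_act)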
|
[
"pyrado.tasks.sequential.SequentialTasks",
"numpy.array",
"pyrado.tasks.final_reward.FinalRewMode",
"numpy.add.at",
"pyrado.tasks.condition_only.ConditionOnlyTask",
"numpy.empty",
"numpy.concatenate",
"pyrado.ValueErr",
"pyrado.tasks.masked.MaskedTask",
"pyrado.spaces.box.BoxSpace",
"pyrado.spaces.singular.SingularStateSpace",
"pyrado.utils.input_output.print_cbt",
"os.path.join",
"pyrado.tasks.desired_state.DesStateTask",
"numpy.diag",
"numpy.zeros",
"pyrado.tasks.reward_functions.ZeroPerStepRewFcn",
"numpy.full",
"numpy.zeros_like"
] |
[((6388, 6439), 'os.path.join', 'osp.join', (['pyrado.MUJOCO_ASSETS_DIR', 'graph_file_name'], {}), '(pyrado.MUJOCO_ASSETS_DIR, graph_file_name)\n', (6396, 6439), True, 'import os.path as osp\n'), ((7273, 7351), 'numpy.concatenate', 'np.concatenate', (['[self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal]'], {}), '([self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal])\n', (7287, 7351), True, 'import numpy as np\n'), ((9917, 9945), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['state_lo', 'state_up'], {}), '(state_lo, state_up)\n', (9925, 9945), False, 'from pyrado.spaces.box import BoxSpace\n'), ((10453, 10492), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['obs_lo', 'obs_up'], {'labels': 'labels'}), '(obs_lo, obs_up, labels=labels)\n', (10461, 10492), False, 'from pyrado.spaces.box import BoxSpace\n'), ((18421, 18459), 'pyrado.tasks.desired_state.DesStateTask', 'DesStateTask', (['spec', 'state_des', 'rew_fcn'], {}), '(spec, state_des, rew_fcn)\n', (18433, 18459), False, 'from pyrado.tasks.desired_state import DesStateTask\n'), ((18476, 18509), 'pyrado.tasks.masked.MaskedTask', 'MaskedTask', (['self.spec', 'task', 'idcs'], {}), '(self.spec, task, idcs)\n', (18486, 18509), False, 'from pyrado.tasks.masked import MaskedTask\n'), ((20201, 20224), 'numpy.zeros_like', 'np.zeros_like', (['qpos_des'], {}), '(qpos_des)\n', (20214, 20224), True, 'import numpy as np\n'), ((21557, 21605), 'numpy.concatenate', 'np.concatenate', (['[qpos, qvel, ball_pos, cup_goal]'], {}), '([qpos, qvel, ball_pos, cup_goal])\n', (21571, 21605), True, 'import numpy as np\n'), ((25854, 25867), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (25862, 25867), True, 'import numpy as np\n'), ((5875, 5904), 'numpy.array', 'np.array', (['[0.723, 0.0, 1.168]'], {}), '([0.723, 0.0, 1.168])\n', (5883, 5904), True, 'import numpy as np\n'), ((6656, 6688), 'numpy.array', 'np.array', (['[0.0, 0.63, 0.0, 1.27]'], {}), '([0.0, 0.63, 0.0, 1.27])\n', (6664, 6688), True, 'import numpy as np\n'), ((6842, 6893), 'numpy.array', 'np.array', (['[0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57]'], {}), '([0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57])\n', (6850, 6893), True, 'import numpy as np\n'), ((7417, 7453), 'pyrado.spaces.singular.SingularStateSpace', 'SingularStateSpace', (['self._init_state'], {}), '(self._init_state)\n', (7435, 7453), False, 'from pyrado.spaces.singular import SingularStateSpace\n'), ((7938, 7976), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['init_state_lo', 'init_state_up'], {}), '(init_state_lo, init_state_up)\n', (7946, 7976), False, 'from pyrado.spaces.box import BoxSpace\n'), ((9600, 9633), 'numpy.full', 'np.full', (['state_shape', '(-pyrado.inf)'], {}), '(state_shape, -pyrado.inf)\n', (9607, 9633), True, 'import numpy as np\n'), ((9635, 9667), 'numpy.full', 'np.full', (['state_shape', 'pyrado.inf'], {}), '(state_shape, pyrado.inf)\n', (9642, 9667), True, 'import numpy as np\n'), ((16310, 16365), 'pyrado.tasks.sequential.SequentialTasks', 'SequentialTasks', (['(main_task, dont_fail_after_succ_task)'], {}), '((main_task, dont_fail_after_succ_task))\n', (16325, 16365), False, 'from pyrado.tasks.sequential import SequentialTasks\n'), ((16386, 16419), 'pyrado.tasks.masked.MaskedTask', 'MaskedTask', (['self.spec', 'task', 'idcs'], {}), '(self.spec, task, idcs)\n', (16396, 16419), False, 'from pyrado.tasks.masked import MaskedTask\n'), ((17225, 17263), 'pyrado.tasks.desired_state.DesStateTask', 'DesStateTask', (['spec', 'state_des', 'rew_fcn'], {}), '(spec, state_des, rew_fcn)\n', (17237, 17263), False, 'from 
pyrado.tasks.desired_state import DesStateTask\n'), ((20268, 20304), 'numpy.add.at', 'np.add.at', (['qpos_des', '[1, 3]', 'act[:2]'], {}), '(qpos_des, [1, 3], act[:2])\n', (20277, 20304), True, 'import numpy as np\n'), ((20317, 20353), 'numpy.add.at', 'np.add.at', (['qvel_des', '[1, 3]', 'act[2:]'], {}), '(qvel_des, [1, 3], act[2:])\n', (20326, 20353), True, 'import numpy as np\n'), ((6198, 6227), 'numpy.array', 'np.array', (['[0.828, 0.0, 1.131]'], {}), '([0.828, 0.0, 1.131])\n', (6206, 6227), True, 'import numpy as np\n'), ((6311, 6365), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'given': 'num_dof', 'eq_constraint': '"""4 or 7"""'}), "(given=num_dof, eq_constraint='4 or 7')\n", (6326, 6365), False, 'import pyrado\n'), ((13619, 13673), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'given': 'num_dof', 'eq_constraint': '"""4 or 7"""'}), "(given=num_dof, eq_constraint='4 or 7')\n", (13634, 13673), False, 'import pyrado\n'), ((15634, 15726), 'pyrado.tasks.condition_only.ConditionOnlyTask', 'ConditionOnlyTask', (['spec'], {'condition_fcn': 'self.check_ball_in_cup', 'is_success_condition': '(True)'}), '(spec, condition_fcn=self.check_ball_in_cup,\n is_success_condition=True)\n', (15651, 15726), False, 'from pyrado.tasks.condition_only import ConditionOnlyTask\n'), ((16828, 16863), 'numpy.diag', 'np.diag', (['[0, 0, 1, 0.01, 0.01, 0.1]'], {}), '([0, 0, 1, 0.01, 0.01, 0.1])\n', (16835, 16863), True, 'import numpy as np\n'), ((16892, 16919), 'numpy.diag', 'np.diag', (['[0, 0, 0.01, 0.01]'], {}), '([0, 0, 0.01, 0.01])\n', (16899, 16919), True, 'import numpy as np\n'), ((17415, 17448), 'pyrado.tasks.masked.MaskedTask', 'MaskedTask', (['self.spec', 'task', 'idcs'], {}), '(self.spec, task, idcs)\n', (17425, 17448), False, 'from pyrado.tasks.masked import MaskedTask\n'), ((20399, 20438), 'numpy.add.at', 'np.add.at', (['qpos_des', '[1, 3, 5]', 'act[:3]'], {}), '(qpos_des, [1, 3, 5], act[:3])\n', (20408, 20438), True, 'import numpy as np\n'), ((20451, 20490), 'numpy.add.at', 'np.add.at', (['qvel_des', '[1, 3, 5]', 'act[3:]'], {}), '(qvel_des, [1, 3, 5], act[3:])\n', (20460, 20490), True, 'import numpy as np\n'), ((7673, 7716), 'numpy.array', 'np.array', (['[0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0]'], {}), '([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])\n', (7681, 7716), True, 'import numpy as np\n'), ((7846, 7889), 'numpy.array', 'np.array', (['[0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0]'], {}), '([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])\n', (7854, 7889), True, 'import numpy as np\n'), ((9538, 9549), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (9546, 9549), True, 'import numpy as np\n'), ((9551, 9562), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (9559, 9562), True, 'import numpy as np\n'), ((15824, 15858), 'pyrado.tasks.final_reward.FinalRewMode', 'FinalRewMode', ([], {'always_positive': '(True)'}), '(always_positive=True)\n', (15836, 15858), False, 'from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode\n'), ((16076, 16095), 'pyrado.tasks.reward_functions.ZeroPerStepRewFcn', 'ZeroPerStepRewFcn', ([], {}), '()\n', (16093, 16095), False, 'from pyrado.tasks.reward_functions import ZeroPerStepRewFcn, ExpQuadrErrRewFcn, QuadrErrRewFcn\n'), ((16119, 16153), 'pyrado.tasks.final_reward.FinalRewMode', 'FinalRewMode', ([], {'always_negative': '(True)'}), '(always_negative=True)\n', (16131, 16153), False, 'from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode\n'), ((18103, 18129), 'numpy.diag', 'np.diag', (['[0.2, 1e-06, 5.0]'], {}), '([0.2, 1e-06, 5.0])\n', (18110, 
18129), True, 'import numpy as np\n'), ((18231, 18291), 'numpy.zeros', 'np.zeros', (['(self.act_space.shape[0], self.act_space.shape[0])'], {}), '((self.act_space.shape[0], self.act_space.shape[0]))\n', (18239, 18291), True, 'import numpy as np\n'), ((23516, 23601), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['f"""Undesired collision of {body1_name} and {body2_name} detected!"""', '"""y"""'], {}), "(f'Undesired collision of {body1_name} and {body2_name} detected!',\n 'y')\n", (23525, 23601), False, 'from pyrado.utils.input_output import print_cbt\n'), ((24810, 24882), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['f"""The ball is in the cup at time step {self.curr_step}."""', '"""y"""'], {}), "(f'The ball is in the cup at time step {self.curr_step}.', 'y')\n", (24819, 24882), False, 'from pyrado.utils.input_output import print_cbt\n'), ((16998, 17027), 'numpy.diag', 'np.diag', (['[20.0, 0.0001, 20.0]'], {}), '([20.0, 0.0001, 20.0])\n', (17005, 17027), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
clf = ARDRegression(fit_intercept=False, n_iter=1000)
clf.fit(X, y)
ols = LinearRegression(fit_intercept=False)
ols.fit(X, y)
from copy import deepcopy
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision
from sds.distributions.gamma import Gamma
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1, )),
betas=1e-6 * np.ones((1, )))
parameter_precision_prior = Gamma(dim=n_features, alphas=np.ones((n_features, )),
betas=1e-6 * np.ones((n_features, )))
likelihood_precision_posterior = deepcopy(likelihood_precision_prior)
parameter_precision_posterior = deepcopy(parameter_precision_prior)
parameter_posterior = None
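# Note added for clarity (not in the original script): the loop below alternates conjugate
# updates in the spirit of ARD -- (1) the Gaussian posterior over the weights given the
# current precision estimates, (2) the Gamma posterior over the observation-noise precision
# given the weights, and (3) the Gamma posterior over the per-weight (ARD) precisions.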
for i in range(100):
# parameter posterior
alphas = parameter_precision_posterior.mean()
parameter_prior = GaussianWithPrecision(dim=n_features,
mu=np.zeros((n_features, )),
lmbda=np.diag(alphas))
parameter_posterior = deepcopy(parameter_prior)
beta = likelihood_precision_posterior.mean()
likelihood_known_precision = SingleOutputLinearGaussianWithKnownPrecision(column_dim=n_features,
lmbda=beta,
affine=False)
stats = likelihood_known_precision.statistics(X, y)
parameter_posterior.nat_param = parameter_prior.nat_param + stats
# likelihood precision posterior
param = parameter_posterior.mean()
likelihood_known_mean = SingleOutputLinearGaussianWithKnownMean(column_dim=n_features,
W=param, affine=False)
stats = likelihood_known_mean.statistics(X, y)
likelihood_precision_posterior.nat_param = likelihood_precision_prior.nat_param + stats
# parameter precision posterior
parameter_likelihood = GaussianWithKnownMeanAndDiagonalPrecision(dim=n_features)
param = parameter_posterior.mean()
stats = parameter_likelihood.statistics(param)
parameter_precision_posterior.nat_param = parameter_precision_prior.nat_param + stats
our_ard = parameter_posterior.mode()
from sds.distributions.composite import MatrixNormalGamma
from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision
M = np.zeros((1, n_features))
K = 1e-16 * np.eye(n_features)
alphas = 1e-16 * np.ones((1, ))
betas = 1e-16 * np.ones((1, ))
prior = MatrixNormalGamma(column_dim=n_features, row_dim=1,
M=M, K=K, alphas=alphas, betas=betas)
posterior = deepcopy(prior)
likelihood = LinearGaussianWithDiagonalPrecision(column_dim=n_features,
row_dim=1,
affine=False)
stats = likelihood.statistics(X, np.atleast_2d(y).T)
posterior.nat_param = prior.nat_param + stats
our_ols = posterior.mode()[0]
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="Sklearn ARD")
plt.plot(our_ard, color='red', linestyle='-', linewidth=2, label="Our ARD")
# plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="Sklearn OLS")
# plt.plot(our_ols.flatten(), color='cyan', linestyle='-', linewidth=2, label="Our OLS")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
|
[
"numpy.sqrt",
"sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownPrecision",
"matplotlib.pyplot.ylabel",
"sklearn.linear_model.ARDRegression",
"copy.deepcopy",
"numpy.atleast_2d",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sds.distributions.gaussian.GaussianWithKnownMeanAndDiagonalPrecision",
"numpy.dot",
"numpy.random.seed",
"numpy.eye",
"numpy.ones",
"sds.distributions.composite.MatrixNormalGamma",
"sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownMean",
"matplotlib.pyplot.title",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.diag",
"numpy.zeros",
"numpy.random.randint",
"sds.distributions.lingauss.LinearGaussianWithDiagonalPrecision",
"matplotlib.pyplot.figure"
] |
[((170, 187), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (184, 187), True, 'import numpy as np\n'), ((248, 286), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (263, 286), True, 'import numpy as np\n'), ((352, 372), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (360, 372), True, 'import numpy as np\n'), ((428, 464), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_features', '(10)'], {}), '(0, n_features, 10)\n', (445, 464), True, 'import numpy as np\n'), ((740, 787), 'sklearn.linear_model.ARDRegression', 'ARDRegression', ([], {'fit_intercept': '(False)', 'n_iter': '(1000)'}), '(fit_intercept=False, n_iter=1000)\n', (753, 787), False, 'from sklearn.linear_model import ARDRegression, LinearRegression\n'), ((809, 846), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (825, 846), False, 'from sklearn.linear_model import ARDRegression, LinearRegression\n'), ((1555, 1591), 'copy.deepcopy', 'deepcopy', (['likelihood_precision_prior'], {}), '(likelihood_precision_prior)\n', (1563, 1591), False, 'from copy import deepcopy\n'), ((1624, 1659), 'copy.deepcopy', 'deepcopy', (['parameter_precision_prior'], {}), '(parameter_precision_prior)\n', (1632, 1659), False, 'from copy import deepcopy\n'), ((3380, 3405), 'numpy.zeros', 'np.zeros', (['(1, n_features)'], {}), '((1, n_features))\n', (3388, 3405), True, 'import numpy as np\n'), ((3509, 3602), 'sds.distributions.composite.MatrixNormalGamma', 'MatrixNormalGamma', ([], {'column_dim': 'n_features', 'row_dim': '(1)', 'M': 'M', 'K': 'K', 'alphas': 'alphas', 'betas': 'betas'}), '(column_dim=n_features, row_dim=1, M=M, K=K, alphas=alphas,\n betas=betas)\n', (3526, 3602), False, 'from sds.distributions.composite import MatrixNormalGamma\n'), ((3638, 3653), 'copy.deepcopy', 'deepcopy', (['prior'], {}), '(prior)\n', (3646, 3653), False, 'from copy import deepcopy\n'), ((3667, 3754), 'sds.distributions.lingauss.LinearGaussianWithDiagonalPrecision', 'LinearGaussianWithDiagonalPrecision', ([], {'column_dim': 'n_features', 'row_dim': '(1)', 'affine': '(False)'}), '(column_dim=n_features, row_dim=1,\n affine=False)\n', (3702, 3754), False, 'from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision\n'), ((3980, 4006), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (3990, 4006), True, 'import matplotlib.pyplot as plt\n'), ((4007, 4040), 'matplotlib.pyplot.title', 'plt.title', (['"""Weights of the model"""'], {}), "('Weights of the model')\n", (4016, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4041, 4118), 'matplotlib.pyplot.plot', 'plt.plot', (['w'], {'color': '"""orange"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'label': '"""Ground truth"""'}), "(w, color='orange', linestyle='-', linewidth=2, label='Ground truth')\n", (4049, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4210), 'matplotlib.pyplot.plot', 'plt.plot', (['clf.coef_'], {'color': '"""darkblue"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'label': '"""Sklearn ARD"""'}), "(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label=\n 'Sklearn ARD')\n", (4127, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4281), 'matplotlib.pyplot.plot', 'plt.plot', (['our_ard'], {'color': '"""red"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'label': '"""Our ARD"""'}), "(our_ard, color='red', linestyle='-', linewidth=2, label='Our 
ARD')\n", (4214, 4281), True, 'import matplotlib.pyplot as plt\n'), ((4464, 4486), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Features"""'], {}), "('Features')\n", (4474, 4486), True, 'import matplotlib.pyplot as plt\n'), ((4487, 4522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Values of the weights"""'], {}), "('Values of the weights')\n", (4497, 4522), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4540), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (4533, 4540), True, 'import matplotlib.pyplot as plt\n'), ((4542, 4552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4550, 4552), True, 'import matplotlib.pyplot as plt\n'), ((712, 724), 'numpy.dot', 'np.dot', (['X', 'w'], {}), '(X, w)\n', (718, 724), True, 'import numpy as np\n'), ((2011, 2036), 'copy.deepcopy', 'deepcopy', (['parameter_prior'], {}), '(parameter_prior)\n', (2019, 2036), False, 'from copy import deepcopy\n'), ((2120, 2218), 'sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownPrecision', 'SingleOutputLinearGaussianWithKnownPrecision', ([], {'column_dim': 'n_features', 'lmbda': 'beta', 'affine': '(False)'}), '(column_dim=n_features, lmbda=\n beta, affine=False)\n', (2164, 2218), False, 'from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision\n'), ((2602, 2691), 'sds.distributions.lingauss.SingleOutputLinearGaussianWithKnownMean', 'SingleOutputLinearGaussianWithKnownMean', ([], {'column_dim': 'n_features', 'W': 'param', 'affine': '(False)'}), '(column_dim=n_features, W=param,\n affine=False)\n', (2641, 2691), False, 'from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean\n'), ((2964, 3021), 'sds.distributions.gaussian.GaussianWithKnownMeanAndDiagonalPrecision', 'GaussianWithKnownMeanAndDiagonalPrecision', ([], {'dim': 'n_features'}), '(dim=n_features)\n', (3005, 3021), False, 'from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision\n'), ((3418, 3436), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (3424, 3436), True, 'import numpy as np\n'), ((3454, 3467), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (3461, 3467), True, 'import numpy as np\n'), ((3485, 3498), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (3492, 3498), True, 'import numpy as np\n'), ((1286, 1299), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1293, 1299), True, 'import numpy as np\n'), ((1424, 1446), 'numpy.ones', 'np.ones', (['(n_features,)'], {}), '((n_features,))\n', (1431, 1446), True, 'import numpy as np\n'), ((3883, 3899), 'numpy.atleast_2d', 'np.atleast_2d', (['y'], {}), '(y)\n', (3896, 3899), True, 'import numpy as np\n'), ((654, 669), 'numpy.sqrt', 'np.sqrt', (['alpha_'], {}), '(alpha_)\n', (661, 669), True, 'import numpy as np\n'), ((1350, 1363), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1357, 1363), True, 'import numpy as np\n'), ((1496, 1518), 'numpy.ones', 'np.ones', (['(n_features,)'], {}), '((n_features,))\n', (1503, 1518), True, 'import numpy as np\n'), ((1892, 1915), 'numpy.zeros', 'np.zeros', (['(n_features,)'], {}), '((n_features,))\n', (1900, 1915), True, 'import numpy as np\n'), ((1968, 1983), 'numpy.diag', 'np.diag', (['alphas'], {}), '(alphas)\n', (1975, 1983), True, 'import numpy as np\n'), ((537, 553), 'numpy.sqrt', 'np.sqrt', (['lambda_'], {}), '(lambda_)\n', (544, 553), True, 'import numpy as np\n')]
|
import numpy as np
import pybullet as p
import itertools
from robot import Robot
class World():
def __init__(self):
# create the physics simulator
self.physicsClient = p.connect(p.GUI)
p.setGravity(0,0,-9.81)
self.max_communication_distance = 2.0
# We will integrate every 4ms (250Hz update)
self.dt = 1./250.
p.setPhysicsEngineParameter(self.dt, numSubSteps=1)
# Create the plane.
self.planeId = p.loadURDF("../models/plane.urdf")
p.changeDynamics(self.planeId, -1, lateralFriction=5., rollingFriction=0)
self.goalId = p.loadURDF("../models/goal.urdf")
self.goalId = p.loadURDF("../models/goal2.urdf")
# the balls
self.ball1 = p.loadURDF("../models/ball1.urdf")
p.resetBasePositionAndOrientation(self.ball1, [2., 4., 0.5], (0., 0., 0.5, 0.5))
self.ball2 = p.loadURDF("../models/ball2.urdf")
p.resetBasePositionAndOrientation(self.ball2, [4., 2., 0.5], (0., 0., 0.5, 0.5))
p.resetDebugVisualizerCamera(7.0,90.0, -43.0, (1., 1., 0.0))
# Add objects
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [0., -1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [0., 1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [3., -1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [3., 1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [1., 2., 0], (0., 0., 0., 1.))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [2., -2., 0], (0., 0., 0., 1.))
# tube
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-1., 5., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-1., 6., 0], (0., 0., 0., 1.))
# #arena
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2, 4., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 7., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 9., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 11., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 13., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-3., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-5., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-7., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8, 4., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 6., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 8., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 10., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 12., 0], (0., 0., 0.5, 0.5))
# create 6 robots
self.robots = []
for (i,j) in itertools.product(range(3), range(2)):
self.robots.append(Robot([1. * i + 0.5, 1. * j - 0.5, 0.3], 2*i+j, self.dt))
p.stepSimulation()
self.time = 0.0
self.stepSimulation()
self.stepSimulation()
def reset(self):
"""
Resets the position of all the robots
"""
for r in self.robots:
r.reset()
p.stepSimulation()
def stepSimulation(self):
"""
Simulates one step simulation
"""
# for each robot construct list of neighbors
for r in self.robots:
r.neighbors = [] #reset neighbors
r.messages_received = [] #reset message received
pos1, or1 = r.get_pos_and_orientation()
for j,r2 in enumerate(self.robots):
if(r.id != r2.id):
pos2, or2 = r2.get_pos_and_orientation()
if(np.linalg.norm(pos1-pos2) < self.max_communication_distance):
r.neighbors.append(j)
# for each robot send and receive messages
for i,r in enumerate(self.robots):
for msg in r.messages_to_send:
if msg[0] in r.neighbors: #then we can send the message
self.robots[msg[0]].messages_received.append([i, msg[1]]) #add the sender id
r.messages_to_send = []
# update the controllers
if self.time > 1.0:
for r in self.robots:
r.compute_controller()
# do one simulation step
p.stepSimulation()
self.time += self.dt
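# --- Hypothetical usage sketch (added for illustration, not part of the original file) ---
# Run the simulated world for roughly five seconds of simulated time. Assumes the URDF/SDF
# assets referenced above are available relative to the working directory and that a GUI
# connection to pybullet can be opened.
if __name__ == "__main__":
    world = World()
    for _ in range(int(5.0 / world.dt)):
        world.stepSimulation()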
|
[
"pybullet.resetDebugVisualizerCamera",
"robot.Robot",
"pybullet.loadSDF",
"pybullet.connect",
"pybullet.setGravity",
"pybullet.setPhysicsEngineParameter",
"pybullet.changeDynamics",
"numpy.linalg.norm",
"pybullet.stepSimulation",
"pybullet.resetBasePositionAndOrientation",
"pybullet.loadURDF"
] |
[((194, 210), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (203, 210), True, 'import pybullet as p\n'), ((219, 244), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (231, 244), True, 'import pybullet as p\n'), ((386, 437), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', (['self.dt'], {'numSubSteps': '(1)'}), '(self.dt, numSubSteps=1)\n', (413, 437), True, 'import pybullet as p\n'), ((490, 524), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/plane.urdf"""'], {}), "('../models/plane.urdf')\n", (500, 524), True, 'import pybullet as p\n'), ((533, 607), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.planeId', '(-1)'], {'lateralFriction': '(5.0)', 'rollingFriction': '(0)'}), '(self.planeId, -1, lateralFriction=5.0, rollingFriction=0)\n', (549, 607), True, 'import pybullet as p\n'), ((630, 663), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/goal.urdf"""'], {}), "('../models/goal.urdf')\n", (640, 663), True, 'import pybullet as p\n'), ((686, 720), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/goal2.urdf"""'], {}), "('../models/goal2.urdf')\n", (696, 720), True, 'import pybullet as p\n'), ((771, 805), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/ball1.urdf"""'], {}), "('../models/ball1.urdf')\n", (781, 805), True, 'import pybullet as p\n'), ((814, 903), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.ball1', '[2.0, 4.0, 0.5]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(self.ball1, [2.0, 4.0, 0.5], (0.0, 0.0, \n 0.5, 0.5))\n', (847, 903), True, 'import pybullet as p\n'), ((916, 950), 'pybullet.loadURDF', 'p.loadURDF', (['"""../models/ball2.urdf"""'], {}), "('../models/ball2.urdf')\n", (926, 950), True, 'import pybullet as p\n'), ((959, 1048), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.ball2', '[4.0, 2.0, 0.5]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(self.ball2, [4.0, 2.0, 0.5], (0.0, 0.0, \n 0.5, 0.5))\n', (992, 1048), True, 'import pybullet as p\n'), ((1049, 1112), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', (['(7.0)', '(90.0)', '(-43.0)', '(1.0, 1.0, 0.0)'], {}), '(7.0, 90.0, -43.0, (1.0, 1.0, 0.0))\n', (1077, 1112), True, 'import pybullet as p\n'), ((1202, 1281), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[0.0, -1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [0.0, -1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1235, 1281), True, 'import pybullet as p\n'), ((1339, 1417), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[0.0, 1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [0.0, 1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1372, 1417), True, 'import pybullet as p\n'), ((1475, 1554), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[3.0, -1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [3.0, -1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1508, 1554), True, 'import pybullet as p\n'), ((1612, 1690), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[3.0, 1.0, 0]', '(0.0, 0.0, 0.5, 0.5)'], {}), '(wallId, [3.0, 1.0, 0], (0.0, 0.0, 0.5, 0.5))\n', (1645, 1690), True, 'import pybullet as p\n'), ((1748, 1826), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[1.0, 2.0, 0]', '(0.0, 0.0, 0.0, 1.0)'], {}), '(wallId, [1.0, 2.0, 0], (0.0, 0.0, 0.0, 1.0))\n', (1781, 1826), True, 'import pybullet as p\n'), 
((1882, 1961), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['wallId', '[2.0, -2.0, 0]', '(0.0, 0.0, 0.0, 1.0)'], {}), '(wallId, [2.0, -2.0, 0], (0.0, 0.0, 0.0, 1.0))\n', (1915, 1961), True, 'import pybullet as p\n'), ((4600, 4618), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4616, 4618), True, 'import pybullet as p\n'), ((5790, 5808), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (5806, 5808), True, 'import pybullet as p\n'), ((1158, 1190), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1167, 1190), True, 'import pybullet as p\n'), ((1295, 1327), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1304, 1327), True, 'import pybullet as p\n'), ((1431, 1463), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1440, 1463), True, 'import pybullet as p\n'), ((1568, 1600), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1577, 1600), True, 'import pybullet as p\n'), ((1704, 1736), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1713, 1736), True, 'import pybullet as p\n'), ((1838, 1870), 'pybullet.loadSDF', 'p.loadSDF', (['"""../models/walls.sdf"""'], {}), "('../models/walls.sdf')\n", (1847, 1870), True, 'import pybullet as p\n'), ((4327, 4345), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4343, 4345), True, 'import pybullet as p\n'), ((4257, 4319), 'robot.Robot', 'Robot', (['[1.0 * i + 0.5, 1.0 * j - 0.5, 0.3]', '(2 * i + j)', 'self.dt'], {}), '([1.0 * i + 0.5, 1.0 * j - 0.5, 0.3], 2 * i + j, self.dt)\n', (4262, 4319), False, 'from robot import Robot\n'), ((5138, 5165), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos1 - pos2)'], {}), '(pos1 - pos2)\n', (5152, 5165), True, 'import numpy as np\n')]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
graph = ops_lib.Graph()
with graph.as_default():
importer.import_graph_def(graph_def, input_map={}, name="")
with session.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
tf_logging.info("Tensors have {0} different values ({1}%), with mean"
" difference {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100,
mean_difference, mean_abs_difference))
return False
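# Illustrative example (added for clarity, not part of the original tests): comparing
# np.array([1.0, 2.0]) with np.array([1.0, 2.5]) returns False at tolerance 0.1 (one value
# differs by 0.5) and True at tolerance 1.0.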
def get_top_value(input_values):
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
    if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
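# Illustrative example (added for clarity, not part of the original tests):
# get_top_value(np.array([0.1, 0.7, 0.2])) returns (1, 0.7), i.e. the flat index and value
# of the largest entry.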
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
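    # Quantizing a negative-valued constant should still yield all four
    # result nodes.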
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
    # These tests are added to test the degenerate case where
    # min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1",
value=[.5, .6, .7, .8, .9],
dtype=dtypes.float32,
shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = graph_pb2.GraphDef()
g.node.extend([
input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
weight_2_node, matmul_2_node
])
# Test the graph
test_graph(g, {}, ["matmul_2"])
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
g, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(1, ops.count("QuantizedReshape"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
    # Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
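    # Concat over int32 inputs is not quantizable, so the rewritten graph
    # should still run and match the float results unchanged.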
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
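    # node_name_from_input strips the control-dependency marker (^) and the
    # output port suffix (:2) to recover the bare node name.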
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
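    # remove_training_nodes should drop the CheckNumerics ops while keeping
    # the control edge on the NoOp that feeds a_identity.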
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
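    """Rewrites the graph with and without a quantized input range and
    verifies both rewritten graphs match the float results."""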
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
quantized_input_map = {}
for k, v in input_map.items():
      arr = [
          int(round((n - input_range[0]) * 255 /
                    (input_range[1] - input_range[0])))
          for n in v.flat
      ]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
# Verify there is only one Quantize, one Requantize op, and no
# RequantizationRange op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
def test_remove_redundant_quantization(self):
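    # A Dequantize feeding straight into a QuantizeV2 is redundant; the
    # rewriter should wire the QuantizedMatMul directly to the original
    # quantized constants instead.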
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
test.main()
(['avg_pool_node', '"""strides"""', '[1, 1, 1, 1]'], {}), "(avg_pool_node, 'strides', [1, 1, 1, 1])\n", (24744, 24784), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((24789, 24854), 'tensorflow.tools.quantization.quantize_graph.set_attr_string', 'quantize_graph.set_attr_string', (['avg_pool_node', '"""padding"""', "b'SAME'"], {}), "(avg_pool_node, 'padding', b'SAME')\n", (24819, 24854), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((25069, 25089), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (25087, 25089), False, 'from tensorflow.core.framework import graph_pb2\n'), ((25111, 25260), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['input_constant_name'], {'value': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]', 'dtype': 'dtypes.float32', 'shape': '[1, 2, 6, 1]'}), '(input_constant_name, value=[1, 2, 3, 4,\n 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1])\n', (25146, 25260), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((25356, 25424), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Relu"""', 'relu_name', '[input_constant_name]'], {}), "('Relu', relu_name, [input_constant_name])\n", (25382, 25424), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((25472, 25533), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['relu_node', '"""T"""', 'dtypes.float32'], {}), "(relu_node, 'T', dtypes.float32)\n", (25501, 25533), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((25697, 25834), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""input"""'], {'value': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]', 'dtype': 'dtypes.float32', 'shape': '[1, 2, 6, 1]'}), "('input', value=[1, 2, 3, 4, 5, 6, 7, 8,\n 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1])\n", (25732, 25834), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((25880, 25941), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Relu"""', '"""relu"""', '[input_node.name]'], {}), "('Relu', 'relu', [input_node.name])\n", (25906, 25941), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((25946, 26007), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['relu_node', '"""T"""', 'dtypes.float32'], {}), "(relu_node, 'T', dtypes.float32)\n", (25975, 26007), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((26024, 26121), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""min_bias_add"""'], {'value': '(0)', 'dtype': 'dtypes.float32', 'shape': '[]'}), "('min_bias_add', value=0, dtype=dtypes.\n float32, shape=[])\n", (26059, 26121), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((26141, 26239), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""max_bias_add"""'], {'value': '(12)', 'dtype': 'dtypes.float32', 'shape': '[]'}), "('max_bias_add', value=12, dtype=dtypes.\n float32, shape=[])\n", (26176, 26239), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((26266, 26386), 'tensorflow.tools.quantization.quantize_graph.create_node', 
'quantize_graph.create_node', (['"""FakeQuantWithMinMaxVars"""', '"""fake_quant"""', '[relu_node.name, min_node.name, max_node.name]'], {}), "('FakeQuantWithMinMaxVars', 'fake_quant', [\n relu_node.name, min_node.name, max_node.name])\n", (26292, 26386), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((26422, 26442), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (26440, 26442), False, 'from tensorflow.core.framework import graph_pb2\n'), ((26710, 26799), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['float_graph_def', '"""eightbit"""'], {'quantized_input_range': 'None'}), "(float_graph_def, 'eightbit',\n quantized_input_range=None)\n", (26738, 26799), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((27284, 27304), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (27302, 27304), False, 'from tensorflow.core.framework import graph_pb2\n'), ((27326, 27475), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['input_constant_name'], {'value': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]', 'dtype': 'dtypes.float32', 'shape': '[1, 2, 6, 1]'}), '(input_constant_name, value=[1, 2, 3, 4,\n 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1])\n', (27361, 27475), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((27572, 27642), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Relu6"""', 'relu6_name', '[input_constant_name]'], {}), "('Relu6', relu6_name, [input_constant_name])\n", (27598, 27642), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((27691, 27753), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['relu6_node', '"""T"""', 'dtypes.float32'], {}), "(relu6_node, 'T', dtypes.float32)\n", (27720, 27753), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((28019, 28039), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (28037, 28039), False, 'from tensorflow.core.framework import graph_pb2\n'), ((28061, 28210), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['input_constant_name'], {'value': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]', 'dtype': 'dtypes.float32', 'shape': '[1, 1, 2, 6]'}), '(input_constant_name, value=[1, 2, 3, 4,\n 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 1, 2, 6])\n', (28096, 28210), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((28312, 28432), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['offset_constant_name'], {'value': '[1, 2, 3, 4, 5, 6]', 'dtype': 'dtypes.float32', 'shape': '[6]'}), '(offset_constant_name, value=[1, 2, 3, 4,\n 5, 6], dtype=dtypes.float32, shape=[6])\n', (28347, 28432), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((28533, 28634), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""BiasAdd"""', 'bias_add_name', '[input_constant_name, offset_constant_name]'], {}), "('BiasAdd', bias_add_name, [input_constant_name,\n offset_constant_name])\n", (28559, 28634), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((28644, 28709), 
'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['bias_add_node', '"""T"""', 'dtypes.float32'], {}), "(bias_add_node, 'T', dtypes.float32)\n", (28673, 28709), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29277, 29331), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Placeholder"""', '"""input"""', '[]'], {}), "('Placeholder', 'input', [])\n", (29303, 29331), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29336, 29399), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['input_n', '"""dtype"""', 'dtypes.float32'], {}), "(input_n, 'dtype', dtypes.float32)\n", (29365, 29399), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29404, 29464), 'tensorflow.tools.quantization.quantize_graph.set_attr_shape', 'quantize_graph.set_attr_shape', (['input_n', '"""shape"""', 'input_shape'], {}), "(input_n, 'shape', input_shape)\n", (29433, 29464), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29480, 29588), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""offset"""'], {'value': '[1, 2, 3, 4, 5, 6]', 'dtype': 'dtypes.float32', 'shape': '[6]'}), "('offset', value=[1, 2, 3, 4, 5, 6],\n dtype=dtypes.float32, shape=[6])\n", (29515, 29588), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29611, 29696), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""BiasAdd"""', '"""bias_add"""', '[input_n.name, offset_n.name]'], {}), "('BiasAdd', 'bias_add', [input_n.name, offset_n.name]\n )\n", (29637, 29696), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29740, 29802), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['bias_add_n', '"""T"""', 'dtypes.float32'], {}), "(bias_add_n, 'T', dtypes.float32)\n", (29769, 29802), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((29826, 29846), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (29844, 29846), False, 'from tensorflow.core.framework import graph_pb2\n'), ((30701, 30774), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""MatMul"""', '"""mat_mul"""', '[n.name for n in inputs]'], {}), "('MatMul', 'mat_mul', [n.name for n in inputs])\n", (30727, 30774), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((30825, 30889), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T"""', 'dtypes.float32'], {}), "(mat_mul_node, 'T', dtypes.float32)\n", (30854, 30889), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((30913, 30933), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (30931, 30933), False, 'from tensorflow.core.framework import graph_pb2\n'), ((32355, 32425), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['float_graph_def', '"""eightbit"""', 'input_range'], {}), "(float_graph_def, 'eightbit', input_range)\n", (32383, 32425), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((32955, 33044), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['float_graph_def', 
'"""eightbit"""'], {'quantized_input_range': 'None'}), "(float_graph_def, 'eightbit',\n quantized_input_range=None)\n", (32983, 33044), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((33552, 33681), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""input"""'], {'value': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]', 'dtype': 'dtypes.float32', 'shape': '[1, 1, 2, 5]'}), "('input', value=[1, 2, 3, 4, 5, 6, 7, 8,\n 9, 10], dtype=dtypes.float32, shape=[1, 1, 2, 5])\n", (33587, 33681), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((33729, 33835), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""offset"""'], {'value': '[1, 2, 3, 4, 5]', 'dtype': 'dtypes.float32', 'shape': '[5]'}), "('offset', value=[1, 2, 3, 4, 5], dtype=\n dtypes.float32, shape=[5])\n", (33764, 33835), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((33860, 33950), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""BiasAdd"""', '"""bias_add"""', '[input_node.name, offset_node.name]'], {}), "('BiasAdd', 'bias_add', [input_node.name,\n offset_node.name])\n", (33886, 33950), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((33960, 34025), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['bias_add_node', '"""T"""', 'dtypes.float32'], {}), "(bias_add_node, 'T', dtypes.float32)\n", (33989, 34025), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((34042, 34142), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""min_bias_add"""'], {'value': '(-0.5)', 'dtype': 'dtypes.float32', 'shape': '[]'}), "('min_bias_add', value=-0.5, dtype=\n dtypes.float32, shape=[])\n", (34077, 34142), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((34161, 34261), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""max_bias_add"""'], {'value': '(15.5)', 'dtype': 'dtypes.float32', 'shape': '[]'}), "('max_bias_add', value=15.5, dtype=\n dtypes.float32, shape=[])\n", (34196, 34261), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((34288, 34412), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""FakeQuantWithMinMaxVars"""', '"""fake_quant"""', '[bias_add_node.name, min_node.name, max_node.name]'], {}), "('FakeQuantWithMinMaxVars', 'fake_quant', [\n bias_add_node.name, min_node.name, max_node.name])\n", (34314, 34412), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((34448, 34468), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (34466, 34468), False, 'from tensorflow.core.framework import graph_pb2\n'), ((34901, 35031), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['float_graph_def', '"""eightbit"""'], {'quantized_input_range': 'None', 'fallback_quantization_range': '[-100, 100]'}), "(float_graph_def, 'eightbit',\n quantized_input_range=None, fallback_quantization_range=[-100, 100])\n", (34929, 35031), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((35765, 35894), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""input"""'], 
{'value': '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]', 'dtype': 'dtypes.float32', 'shape': '[1, 1, 2, 5]'}), "('input', value=[1, 2, 3, 4, 5, 6, 7, 8,\n 9, 10], dtype=dtypes.float32, shape=[1, 1, 2, 5])\n", (35800, 35894), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((35942, 36048), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['"""offset"""'], {'value': '[1, 2, 3, 4, 5]', 'dtype': 'dtypes.float32', 'shape': '[5]'}), "('offset', value=[1, 2, 3, 4, 5], dtype=\n dtypes.float32, shape=[5])\n", (35977, 36048), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((36073, 36163), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""BiasAdd"""', '"""bias_add"""', '[input_node.name, offset_node.name]'], {}), "('BiasAdd', 'bias_add', [input_node.name,\n offset_node.name])\n", (36099, 36163), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((36173, 36238), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['bias_add_node', '"""T"""', 'dtypes.float32'], {}), "(bias_add_node, 'T', dtypes.float32)\n", (36202, 36238), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((36262, 36282), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (36280, 36282), False, 'from tensorflow.core.framework import graph_pb2\n'), ((36553, 36684), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['float_graph_def', '"""eightbit"""'], {'quantized_input_range': 'None', 'fallback_quantization_range': '[-0.5, 15.5]'}), "(float_graph_def, 'eightbit',\n quantized_input_range=None, fallback_quantization_range=[-0.5, 15.5])\n", (36581, 36684), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((37914, 37934), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (37932, 37934), False, 'from tensorflow.core.framework import graph_pb2\n'), ((37952, 38052), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['a_constant_name'], {'value': '(0,)', 'dtype': 'dtypes.quint8', 'shape': '[]'}), '(a_constant_name, value=(0,), dtype=\n dtypes.quint8, shape=[])\n', (37987, 38052), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((38118, 38220), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['a_constant_min_name'], {'value': '(2)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(a_constant_min_name, value=2, dtype=\n dtypes.float32, shape=[])\n', (38153, 38220), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((38290, 38392), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['a_constant_max_name'], {'value': '(2)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(a_constant_max_name, value=2, dtype=\n dtypes.float32, shape=[])\n', (38325, 38392), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((38465, 38590), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Dequantize"""', 'a_dequantize_name', '[a_constant_name, a_constant_min_name, a_constant_max_name]'], {}), "('Dequantize', a_dequantize_name, [\n a_constant_name, a_constant_min_name, a_constant_max_name])\n", (38491, 38590), False, 'from 
tensorflow.tools.quantization import quantize_graph\n'), ((38607, 38674), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['a_dequantize_node', '"""T"""', 'dtypes.uint8'], {}), "(a_dequantize_node, 'T', dtypes.uint8)\n", (38636, 38674), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((38744, 38879), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""QuantizeV2"""', 'a_quantize_name', "[a_dequantize_name, a_dequantize_name + ':1', a_dequantize_name + ':2']"], {}), "('QuantizeV2', a_quantize_name, [\n a_dequantize_name, a_dequantize_name + ':1', a_dequantize_name + ':2'])\n", (38770, 38879), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((38896, 38961), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['a_quantize_node', '"""T"""', 'dtypes.uint8'], {}), "(a_quantize_node, 'T', dtypes.uint8)\n", (38925, 38961), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39024, 39124), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['b_constant_name'], {'value': '(0,)', 'dtype': 'dtypes.quint8', 'shape': '[]'}), '(b_constant_name, value=(0,), dtype=\n dtypes.quint8, shape=[])\n', (39059, 39124), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39190, 39292), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['b_constant_min_name'], {'value': '(3)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(b_constant_min_name, value=3, dtype=\n dtypes.float32, shape=[])\n', (39225, 39292), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39362, 39464), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['b_constant_max_name'], {'value': '(3)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(b_constant_max_name, value=3, dtype=\n dtypes.float32, shape=[])\n', (39397, 39464), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39537, 39662), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Dequantize"""', 'b_dequantize_name', '[b_constant_name, b_constant_min_name, b_constant_max_name]'], {}), "('Dequantize', b_dequantize_name, [\n b_constant_name, b_constant_min_name, b_constant_max_name])\n", (39563, 39662), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39679, 39746), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['b_dequantize_node', '"""T"""', 'dtypes.uint8'], {}), "(b_dequantize_node, 'T', dtypes.uint8)\n", (39708, 39746), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39816, 39951), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""QuantizeV2"""', 'b_quantize_name', "[b_dequantize_name, b_dequantize_name + ':1', b_dequantize_name + ':2']"], {}), "('QuantizeV2', b_quantize_name, [\n b_dequantize_name, b_dequantize_name + ':1', b_dequantize_name + ':2'])\n", (39842, 39951), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((39968, 40033), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['b_quantize_node', '"""T"""', 'dtypes.uint8'], {}), "(b_quantize_node, 'T', dtypes.uint8)\n", (39997, 40033), False, 'from 
tensorflow.tools.quantization import quantize_graph\n'), ((40098, 40299), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""QuantizedMatMul"""', 'mat_mul_name', "[a_quantize_name, b_quantize_name, a_quantize_name + ':1', a_quantize_name +\n ':2', b_quantize_name + ':1', b_quantize_name + ':2']"], {}), "('QuantizedMatMul', mat_mul_name, [\n a_quantize_name, b_quantize_name, a_quantize_name + ':1', \n a_quantize_name + ':2', b_quantize_name + ':1', b_quantize_name + ':2'])\n", (40124, 40299), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((40316, 40379), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T1"""', 'dtypes.uint8'], {}), "(mat_mul_node, 'T1', dtypes.uint8)\n", (40345, 40379), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((40384, 40447), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T2"""', 'dtypes.int32'], {}), "(mat_mul_node, 'T2', dtypes.int32)\n", (40413, 40447), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((40513, 40533), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (40531, 40533), False, 'from tensorflow.core.framework import graph_pb2\n'), ((40551, 40651), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['a_constant_name'], {'value': '(0,)', 'dtype': 'dtypes.quint8', 'shape': '[]'}), '(a_constant_name, value=(0,), dtype=\n dtypes.quint8, shape=[])\n', (40586, 40651), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((40723, 40825), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['a_constant_min_name'], {'value': '(2)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(a_constant_min_name, value=2, dtype=\n dtypes.float32, shape=[])\n', (40758, 40825), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((40901, 41003), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['a_constant_max_name'], {'value': '(2)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(a_constant_max_name, value=2, dtype=\n dtypes.float32, shape=[])\n', (40936, 41003), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41075, 41175), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['b_constant_name'], {'value': '(0,)', 'dtype': 'dtypes.quint8', 'shape': '[]'}), '(b_constant_name, value=(0,), dtype=\n dtypes.quint8, shape=[])\n', (41110, 41175), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41247, 41349), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['b_constant_min_name'], {'value': '(3)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(b_constant_min_name, value=3, dtype=\n dtypes.float32, shape=[])\n', (41282, 41349), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41425, 41527), 'tensorflow.tools.quantization.quantize_graph.create_constant_node', 'quantize_graph.create_constant_node', (['b_constant_max_name'], {'value': '(3)', 'dtype': 'dtypes.float32', 'shape': '[]'}), '(b_constant_max_name, value=3, dtype=\n dtypes.float32, shape=[])\n', (41460, 41527), False, 'from tensorflow.tools.quantization 
import quantize_graph\n'), ((41601, 41789), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""QuantizedMatMul"""', 'mat_mul_name', '[a_constant_name, b_constant_name, a_constant_min_name, a_constant_max_name,\n b_constant_min_name, b_constant_max_name]'], {}), "('QuantizedMatMul', mat_mul_name, [\n a_constant_name, b_constant_name, a_constant_min_name,\n a_constant_max_name, b_constant_min_name, b_constant_max_name])\n", (41627, 41789), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41807, 41870), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T1"""', 'dtypes.uint8'], {}), "(mat_mul_node, 'T1', dtypes.uint8)\n", (41836, 41870), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((41875, 41938), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['mat_mul_node', '"""T2"""', 'dtypes.int32'], {}), "(mat_mul_node, 'T2', dtypes.int32)\n", (41904, 41938), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((42117, 42204), 'tensorflow.tools.quantization.quantize_graph.GraphRewriter', 'quantize_graph.GraphRewriter', (['graph_def', '[mat_mul_name]'], {'quantized_input_range': 'None'}), '(graph_def, [mat_mul_name],\n quantized_input_range=None)\n', (42145, 42204), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((42295, 42347), 'tensorflow.python.framework.graph_util.extract_sub_graph', 'graph_util.extract_sub_graph', (['output', '[mat_mul_name]'], {}), '(output, [mat_mul_name])\n', (42323, 42347), False, 'from tensorflow.python.framework import graph_util\n'), ((9226, 9286), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""MatMul"""', 'name', '[a.name, b.name]'], {}), "('MatMul', name, [a.name, b.name])\n", (9252, 9286), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((9293, 9346), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['n', '"""T"""', 'dtypes.float32'], {}), "(n, 'T', dtypes.float32)\n", (9322, 9346), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((9353, 9406), 'tensorflow.tools.quantization.quantize_graph.set_attr_bool', 'quantize_graph.set_attr_bool', (['n', '"""transpose_a"""', '(False)'], {}), "(n, 'transpose_a', False)\n", (9381, 9406), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((9413, 9466), 'tensorflow.tools.quantization.quantize_graph.set_attr_bool', 'quantize_graph.set_attr_bool', (['n', '"""transpose_b"""', '(False)'], {}), "(n, 'transpose_b', False)\n", (9441, 9466), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((11436, 11448), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11444, 11448), True, 'import numpy as np\n'), ((11562, 11578), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (11570, 11578), True, 'import numpy as np\n'), ((17072, 17122), 'tensorflow.tools.quantization.quantize_graph.node_name_from_input', 'quantize_graph.node_name_from_input', (['"""^SomeName:2"""'], {}), "('^SomeName:2')\n", (17107, 17122), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((17241, 17298), 'tensorflow.tools.quantization.quantize_graph.unique_node_name_from_input', 'quantize_graph.unique_node_name_from_input', (['"""^SomeName:2"""'], {}), "('^SomeName:2')\n", (17283, 17298), False, 'from 
tensorflow.tools.quantization import quantize_graph\n'), ((29972, 30036), 'numpy.reshape', 'np.reshape', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]', 'input_shape'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)\n', (29982, 30036), True, 'import numpy as np\n'), ((30469, 30530), 'tensorflow.tools.quantization.quantize_graph.create_node', 'quantize_graph.create_node', (['"""Placeholder"""', "('input_%s' % i)", '[]'], {}), "('Placeholder', 'input_%s' % i, [])\n", (30495, 30530), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((30537, 30597), 'tensorflow.tools.quantization.quantize_graph.set_attr_dtype', 'quantize_graph.set_attr_dtype', (['node', '"""dtype"""', 'dtypes.float32'], {}), "(node, 'dtype', dtypes.float32)\n", (30566, 30597), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((30604, 30655), 'tensorflow.tools.quantization.quantize_graph.set_attr_shape', 'quantize_graph.set_attr_shape', (['node', '"""shape"""', 'shape'], {}), "(node, 'shape', shape)\n", (30633, 30655), False, 'from tensorflow.tools.quantization import quantize_graph\n'), ((31053, 31094), 'numpy.reshape', 'np.reshape', (['[1, 2, 3, 4, 5, 6]', 'shapes[0]'], {}), '([1, 2, 3, 4, 5, 6], shapes[0])\n', (31063, 31094), True, 'import numpy as np\n'), ((31139, 31202), 'numpy.reshape', 'np.reshape', (['[0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]', 'shapes[1]'], {}), '([0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1], shapes[1])\n', (31149, 31202), True, 'import numpy as np\n'), ((31971, 31994), 'numpy.array', 'np.array', (['arr', 'np.uint8'], {}), '(arr, np.uint8)\n', (31979, 31994), True, 'import numpy as np\n'), ((28957, 28977), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (28975, 28977), False, 'from tensorflow.core.framework import graph_pb2\n'), ((29139, 29159), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ([], {}), '()\n', (29157, 29159), False, 'from tensorflow.core.framework import graph_pb2\n'), ((11954, 11973), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (11962, 11973), True, 'import numpy as np\n'), ((12131, 12161), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5])\n', (12139, 12161), True, 'import numpy as np\n'), ((12248, 12282), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.75, 0.75]'], {}), '([0.25, 0.25, 0.75, 0.75])\n', (12256, 12282), True, 'import numpy as np\n'), ((12385, 12423), 'numpy.array', 'np.array', (['[[0.25, 0.25], [0.75, 0.75]]'], {}), '([[0.25, 0.25], [0.75, 0.75]])\n', (12393, 12423), True, 'import numpy as np\n')]
|
import numpy as np
class Board:
"""
0 - black
1 - white
"""
def __init__(self):
board = [
[0, 1] * 4,
[1, 0] * 4
] * 4
players_board = [
[0, 1] * 4, # player 1
[1, 0] * 4
] + [[0] * 8] * 4 + [ # 4 rows of nothing
[0, 2] * 4, # player 2
[2, 0] * 4
]
self.board = np.array(board)
self.players_board = np.array(players_board)
self.x_size = 8
self.y_size = 8
# def move(self, x, y, current_player):
# self.board[x, y] = current_player
# def are_same_and_non_zero(self, array):
# return np.unique(array).size == 1 and array[0] != 0
# def is_board_full(self):
# return not np.any(np.unique(self.board) == 0)
def is_finished(self):
"""is game finished"""
return True
# for i in range(0, self.x_size): # rows
# if self.are_same_and_non_zero(self.board[i, :]):
# self.player_who_won = self.board[i, 0]
# self.result = 'Won {} - row {}'.format(self.player(self.player_who_won), i)
# return True
# for i in range(0, self.y_size): # columns
# if self.are_same_and_non_zero(self.board[:, i]):
# self.player_who_won = self.board[0, i]
# self.result = 'Won {} - col {}'.format(self.player(self.player_who_won), i)
# return True
# if self.are_same_and_non_zero(np.diag(self.board)): # diagonal
# self.player_who_won = self.board[1, 1]
# self.result = 'Won {} - diagonal {}'.format(self.player(self.player_who_won), i)
# return True
        # if self.are_same_and_non_zero(np.diag(np.flipud(self.board))): # anti-diagonal
        #     self.player_who_won = self.board[1, 1]
        #     self.result = 'Won {} - anti-diagonal {}'.format(self.player(self.player_who_won), i)
# return True
# if self.is_board_full():
# self.player_who_won = 0 # nobody
# self.result = 'Draw'
# return True # draw
return False
def show(self):
# print(self.board)
# print(self.players_board)
return
# def player(self, player_no):
# if player_no == 1: return 'Player 1 (X)'
# if player_no == 2: return 'Player 2 (O)'
# def show_player_info(self, player_no):
# print("It's turn of ", self.player(player_no))
|
[
"numpy.array"
] |
[((331, 346), 'numpy.array', 'np.array', (['board'], {}), '(board)\n', (339, 346), True, 'import numpy as np\n'), ((372, 395), 'numpy.array', 'np.array', (['players_board'], {}), '(players_board)\n', (380, 395), True, 'import numpy as np\n')]
|
import numpy as np
from pysz import compress, decompress
def test_compress_decompress():
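    # Round-trip a 100x100x100 float32 ramp through pysz compress/decompress
    # and check that every value is recovered within the absolute tolerance.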
a = np.linspace(0, 100, num=1000000).reshape((100, 100, 100)).astype(np.float32)
tolerance = 0.0001
compressed = compress(a, tolerance=tolerance)
recovered = decompress(compressed, a.shape, a.dtype)
    assert a.shape == recovered.shape
    assert np.allclose(a, recovered, atol=tolerance)
test_compress_decompress()
|
[
"pysz.decompress",
"numpy.linspace",
"pysz.compress",
"numpy.allclose"
] |
[((216, 248), 'pysz.compress', 'compress', (['a'], {'tolerance': 'tolerance'}), '(a, tolerance=tolerance)\n', (224, 248), False, 'from pysz import compress, decompress\n'), ((266, 306), 'pysz.decompress', 'decompress', (['compressed', 'a.shape', 'a.dtype'], {}), '(compressed, a.shape, a.dtype)\n', (276, 306), False, 'from pysz import compress, decompress\n'), ((362, 403), 'numpy.allclose', 'np.allclose', (['a', 'recovered'], {'atol': 'tolerance'}), '(a, recovered, atol=tolerance)\n', (373, 403), True, 'import numpy as np\n'), ((99, 131), 'numpy.linspace', 'np.linspace', (['(0)', '(100)'], {'num': '(1000000)'}), '(0, 100, num=1000000)\n', (110, 131), True, 'import numpy as np\n')]
|
import subprocess
from .Genome_fasta import get_fasta
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import pysam
def run(parser):
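    # For every aligned read, count CG dinucleotides in the spanned reference
    # sequence ('CG' for forward-strand reads, 'GC' for reverse-strand reads)
    # and summarise the results in a two-panel figure.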
args = parser.parse_args()
bases,chrs = get_fasta(args.genome)
l={}
for c in chrs:
l[c]=len(bases[c])
chrs = set(chrs)
#p = subprocess.Popen('bamToBed -i '+args.bamfile,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
reads_num=0
reads_cg_num=[0,0,0] #CG,cg,Cg
cgnum_per_read=[]
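    # reads_cg_num tallies reads whose CG sites are all non-repeat (upper-case
    # genome sequence), all repeat-masked (lower-case), or a mix of the two;
    # cgnum_per_read records the CG count of every read with at least one site.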
with pysam.AlignmentFile(args.bamfile) as f:
for line in f:
#t = line.decode('utf-8').strip().split()
            chr = line.reference_name  # t[0]
            start = line.reference_start
            end = line.reference_end
            strand = not line.is_reverse  # True: + strand; False: - strand
            if chr not in chrs: continue
            end = min(end + 1, l[chr])
            reads_num += 1
            if strand:  # '+' strand
cg=[bases[chr].count('CG',start,end)+bases[chr].count('Cg',start,end),bases[chr].count('cG',start,end)+bases[chr].count('cg',start,end)]
else:
cg=[bases[chr].count('GC',start,end)+bases[chr].count('gC',start,end),bases[chr].count('Gc',start,end)+bases[chr].count('gc',start,end)]
#We need to consider strand specific situation.
#'+' strand we have CG but '-' we should count 'GC'.
#print cg
# for i in range(1,ls):
# r2=read[i]
# r1=read[i-1]
# if 'G'==r2 or 'g'==r2:
# if 'C'==r1: cg[0]+=1
# if 'c'==r1: cg[1]+=1
#count = int(cg[0]>0)+int(cg[1]>0)
if cg[0]+cg[1]==0: continue
#print cg
cgnum_per_read.append(sum(cg))
if cg[0]>0 and cg[1]>0:
reads_cg_num[2]+=1
continue
if cg[0]>0:
reads_cg_num[0]+=1
else:
reads_cg_num[1]+=1
#print reads_cg_num
#print reads_num
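    # Top panel: pie chart of reads by CG content category; bottom panel:
    # per-read CG-site counts as percentages (reads with 20+ sites pooled).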
plt.figure()
plt.subplot(211)
labels = ['noCG','NonRepeat CG','Repeat cg','CGcg mix']
colors = ['r','b','g','y']
explode=(0.05,0,0,0)
sizes=[reads_num-sum(reads_cg_num)]+reads_cg_num
    patches, l_text, p_text = plt.pie(sizes, explode=explode, labels=labels, colors=colors, labeldistance=1.1, autopct='%3.1f%%', shadow=False, startangle=90, pctdistance=0.6)
plt.axis('equal')
#plt.legend(loc=2,bbox_to_anchor=(0, 0))
ax=plt.subplot(212)
t=np.zeros(20)
for num in cgnum_per_read:
t[min(num-1,19)]+=1
labels = list(map(str,np.arange(1,20)))+['20+']
#print(t)
t = (np.array(t).astype(float)/sum(reads_cg_num))*100
plt.bar(np.arange(20),t)
ax.set_xticks(np.arange(20))
ax.set_xticklabels(labels)
ax.set_ylabel('Percentage of reads including CG')
ax.set_xlabel('CG number per read')
plt.text(4,max(t)+4,'All reads including CG site: '+str(sum(reads_cg_num)))
#print args.output+'.pdf'
plt.savefig(args.output+'.pdf')
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-b','--bamfile',help="bam file name", metavar="FILE")
parser.add_argument('-g','--genome',help="Genome fasta file path")
parser.add_argument('-o','--output',help="pie figure's filename")
run(parser)
|
[
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.use",
"matplotlib.pyplot.pie",
"pysam.AlignmentFile",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"numpy.arange"
] |
[((72, 93), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (86, 93), False, 'import matplotlib\n'), ((1984, 1996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1994, 1996), True, 'from matplotlib import pyplot as plt\n'), ((2001, 2017), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2012, 2017), True, 'from matplotlib import pyplot as plt\n'), ((2215, 2365), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'explode': 'explode', 'labels': 'labels', 'colors': 'colors', 'labeldistance': '(1.1)', 'autopct': '"""%3.1f%%"""', 'shadow': '(False)', 'startangle': '(90)', 'pctdistance': '(0.6)'}), "(sizes, explode=explode, labels=labels, colors=colors, labeldistance\n =1.1, autopct='%3.1f%%', shadow=False, startangle=90, pctdistance=0.6)\n", (2222, 2365), True, 'from matplotlib import pyplot as plt\n'), ((2370, 2387), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2378, 2387), True, 'from matplotlib import pyplot as plt\n'), ((2440, 2456), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2451, 2456), True, 'from matplotlib import pyplot as plt\n'), ((2463, 2475), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (2471, 2475), True, 'import numpy as np\n'), ((2960, 2993), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + '.pdf')"], {}), "(args.output + '.pdf')\n", (2971, 2993), True, 'from matplotlib import pyplot as plt\n'), ((3051, 3076), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3074, 3076), False, 'import argparse\n'), ((523, 556), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['args.bamfile'], {}), '(args.bamfile)\n', (542, 556), False, 'import pysam\n'), ((2671, 2684), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (2680, 2684), True, 'import numpy as np\n'), ((2706, 2719), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (2715, 2719), True, 'import numpy as np\n'), ((2561, 2577), 'numpy.arange', 'np.arange', (['(1)', '(20)'], {}), '(1, 20)\n', (2570, 2577), True, 'import numpy as np\n'), ((2610, 2621), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2618, 2621), True, 'import numpy as np\n')]
|
import numpy as np
from unittest import TestCase
import numpy.testing as npt
from distancematrix.util import diag_indices_of
from distancematrix.consumer.distance_matrix import DistanceMatrix
class TestContextualMatrixProfile(TestCase):
def setUp(self):
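        # 10x15 distance matrix used as the shared input for all tests.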
self.dist_matrix = np.array([
[8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09],
[4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19],
[0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94],
[0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15],
[9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38],
[7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67],
[2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64],
[6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.],
[4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92],
[1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]])
def mock_initialise(self, dm):
dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1])
def test_process_diagonal(self):
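        # Feeding every diagonal should reproduce the full distance matrix exactly.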
dm = DistanceMatrix()
self.mock_initialise(dm)
for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_diagonal_partial_calculation(self):
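        # Only every third diagonal is processed; entries not covered must remain NaN.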
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for diag in range(-8, self.dist_matrix.shape[1], 3):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
correct[diag_ind] = self.dist_matrix[diag_ind]
npt.assert_equal(dm.distance_matrix, correct)
def test_process_column(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for column in range(0, self.dist_matrix.shape[1]):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_column_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for column in [2, 3, 4, 5, 10, 11, 12]:
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
correct[:, column] = self.dist_matrix[:, column]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_column(self):
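        # Fill a 5x5 window column by column, then shift the query/series view
        # and verify that values still inside the window are kept.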
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0]))
dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1]))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[:2, 1] = self.dist_matrix[:2, 1]
npt.assert_equal(dm.distance_matrix, expected)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(1)
dm.shift_series(3)
correct = np.full((5, 5), np.nan)
correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5]
npt.assert_equal(dm.distance_matrix, correct)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8])
dm.shift_query(2)
dm.shift_series(1)
dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8]))
correct = np.full((5, 5), np.nan)
correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8]
correct[:, 4] = self.dist_matrix[3:8, 8]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_diagonal(self):
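        # Streaming variant of the previous test that feeds diagonals instead
        # of columns.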
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0]))
diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1)
dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind])))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[0, 1] = self.dist_matrix[0, 1]
expected[1, 2] = self.dist_matrix[1, 2]
npt.assert_equal(dm.distance_matrix, expected)
for diag in range(-4,5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(2)
dm.shift_series(1)
expected = self.dist_matrix[2:7, 1:6].copy()
expected[-2:, :] = np.nan
expected[:, -1:] = np.nan
npt.assert_equal(dm.distance_matrix, expected)
for diag in range(-4,5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
|
[
"numpy.atleast_2d",
"numpy.testing.assert_equal",
"numpy.full_like",
"distancematrix.util.diag_indices_of",
"numpy.array",
"distancematrix.consumer.distance_matrix.DistanceMatrix",
"numpy.full"
] |
[((289, 1251), 'numpy.array', 'np.array', (['[[8.67, 1.1, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, \n 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.1, 6.26, 9.4, 4.14, 5.53,\n 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, 7.01, \n 4.36, 5.24, 8.81, 7.9, 5.84, 8.9, 7.88, 3.37, 4.7, 6.94], [0.94, 8.7, \n 3.87, 6.29, 0.32, 1.79, 5.8, 2.61, 1.43, 6.32, 1.62, 0.2, 2.28, 7.11, \n 2.15], [9.9, 4.51, 2.11, 2.83, 5.52, 8.55, 6.9, 0.24, 1.58, 4.26, 8.75,\n 3.71, 9.93, 8.33, 0.38], [7.3, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56,\n 5.09, 7.07, 1.9, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28, 4.37,\n 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.4, 4.41, 7.64], [6.26, 0.29,\n 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39,\n 9.0], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03,\n 5.64, 5.1, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.5, 6.72, 5.11,\n 0.8, 9.3, 9.77, 4.71, 3.26, 7.29, 6.26]]'], {}), '([[8.67, 1.1, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41,\n 4.07, 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.1, 6.26, 9.4, 4.14,\n 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, \n 7.01, 4.36, 5.24, 8.81, 7.9, 5.84, 8.9, 7.88, 3.37, 4.7, 6.94], [0.94, \n 8.7, 3.87, 6.29, 0.32, 1.79, 5.8, 2.61, 1.43, 6.32, 1.62, 0.2, 2.28, \n 7.11, 2.15], [9.9, 4.51, 2.11, 2.83, 5.52, 8.55, 6.9, 0.24, 1.58, 4.26,\n 8.75, 3.71, 9.93, 8.33, 0.38], [7.3, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42,\n 5.56, 5.09, 7.07, 1.9, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28,\n 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.4, 4.41, 7.64], [6.26,\n 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01,\n 0.39, 9.0], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78,\n 0.03, 5.64, 5.1, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.5, 6.72,\n 5.11, 0.8, 9.3, 9.77, 4.71, 3.26, 7.29, 6.26]])\n', (297, 1251), True, 'import numpy as np\n'), ((1505, 1521), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (1519, 1521), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((1795, 1849), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix'], {}), '(dm.distance_matrix, self.dist_matrix)\n', (1811, 1849), True, 'import numpy.testing as npt\n'), ((1921, 1937), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (1935, 1937), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((1990, 2041), 'numpy.full_like', 'np.full_like', (['self.dist_matrix', 'np.nan'], {'dtype': 'float'}), '(self.dist_matrix, np.nan, dtype=float)\n', (2002, 2041), True, 'import numpy as np\n'), ((2316, 2361), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (2332, 2361), True, 'import numpy.testing as npt\n'), ((2411, 2427), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (2425, 2427), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((2612, 2666), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix'], {}), '(dm.distance_matrix, self.dist_matrix)\n', (2628, 2666), True, 'import numpy.testing as npt\n'), ((2736, 2752), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (2750, 2752), False, 'from 
distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((2805, 2856), 'numpy.full_like', 'np.full_like', (['self.dist_matrix', 'np.nan'], {'dtype': 'float'}), '(self.dist_matrix, np.nan, dtype=float)\n', (2817, 2856), True, 'import numpy as np\n'), ((3058, 3103), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (3074, 3103), True, 'import numpy.testing as npt\n'), ((3163, 3179), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (3177, 3179), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((3368, 3391), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (3375, 3391), True, 'import numpy as np\n'), ((3498, 3544), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'expected'], {}), '(dm.distance_matrix, expected)\n', (3514, 3544), True, 'import numpy.testing as npt\n'), ((3679, 3741), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[:5, :5]'], {}), '(dm.distance_matrix, self.dist_matrix[:5, :5])\n', (3695, 3741), True, 'import numpy.testing as npt\n'), ((3815, 3838), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (3822, 3838), True, 'import numpy as np\n'), ((3902, 3947), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (3918, 3947), True, 'import numpy.testing as npt\n'), ((4084, 4148), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[1:6, 3:8]'], {}), '(dm.distance_matrix, self.dist_matrix[1:6, 3:8])\n', (4100, 4148), True, 'import numpy.testing as npt\n'), ((4292, 4315), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (4299, 4315), True, 'import numpy as np\n'), ((4428, 4473), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'correct'], {}), '(dm.distance_matrix, correct)\n', (4444, 4473), True, 'import numpy.testing as npt\n'), ((4535, 4551), 'distancematrix.consumer.distance_matrix.DistanceMatrix', 'DistanceMatrix', ([], {}), '()\n', (4549, 4551), False, 'from distancematrix.consumer.distance_matrix import DistanceMatrix\n'), ((4673, 4717), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix[:3, :3]', '(1)'], {}), '(self.dist_matrix[:3, :3], 1)\n', (4688, 4717), False, 'from distancematrix.util import diag_indices_of\n'), ((4826, 4849), 'numpy.full', 'np.full', (['(5, 5)', 'np.nan'], {}), '((5, 5), np.nan)\n', (4833, 4849), True, 'import numpy as np\n'), ((5002, 5048), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'expected'], {}), '(dm.distance_matrix, expected)\n', (5018, 5048), True, 'import numpy.testing as npt\n'), ((5244, 5306), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[:5, :5]'], {}), '(dm.distance_matrix, self.dist_matrix[:5, :5])\n', (5260, 5306), True, 'import numpy.testing as npt\n'), ((5490, 5536), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'expected'], {}), '(dm.distance_matrix, expected)\n', (5506, 5536), True, 'import numpy.testing as npt\n'), ((5731, 5793), 'numpy.testing.assert_equal', 'npt.assert_equal', (['dm.distance_matrix', 'self.dist_matrix[:5, :5]'], {}), '(dm.distance_matrix, self.dist_matrix[:5, :5])\n', (5747, 5793), True, 'import numpy.testing as npt\n'), ((1665, 1704), 
'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix', 'diag'], {}), '(self.dist_matrix, diag)\n', (1680, 1704), False, 'from distancematrix.util import diag_indices_of\n'), ((2127, 2166), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix', 'diag'], {}), '(self.dist_matrix, diag)\n', (2142, 2166), False, 'from distancematrix.util import diag_indices_of\n'), ((3241, 3278), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[0, 0]'], {}), '(self.dist_matrix[0, 0])\n', (3254, 3278), True, 'import numpy as np\n'), ((3309, 3347), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:2, 1]'], {}), '(self.dist_matrix[:2, 1])\n', (3322, 3347), True, 'import numpy as np\n'), ((4232, 4271), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[3:8, 8]'], {}), '(self.dist_matrix[3:8, 8])\n', (4245, 4271), True, 'import numpy as np\n'), ((4615, 4652), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[0, 0]'], {}), '(self.dist_matrix[0, 0])\n', (4628, 4652), True, 'import numpy as np\n'), ((5106, 5153), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix[:5, :5]', 'diag'], {}), '(self.dist_matrix[:5, :5], diag)\n', (5121, 5153), False, 'from distancematrix.util import diag_indices_of\n'), ((5594, 5641), 'distancematrix.util.diag_indices_of', 'diag_indices_of', (['self.dist_matrix[:5, :5]', 'diag'], {}), '(self.dist_matrix[:5, :5], diag)\n', (5609, 5641), False, 'from distancematrix.util import diag_indices_of\n'), ((1743, 1784), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (1756, 1784), True, 'import numpy as np\n'), ((2205, 2246), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (2218, 2246), True, 'import numpy as np\n'), ((2559, 2601), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:, column]'], {}), '(self.dist_matrix[:, column])\n', (2572, 2601), True, 'import numpy as np\n'), ((2944, 2986), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:, column]'], {}), '(self.dist_matrix[:, column])\n', (2957, 2986), True, 'import numpy as np\n'), ((3619, 3669), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[:5, :5][:, column]'], {}), '(self.dist_matrix[:5, :5][:, column])\n', (3632, 3669), True, 'import numpy as np\n'), ((4022, 4074), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[1:6, 3:8][:, column]'], {}), '(self.dist_matrix[1:6, 3:8][:, column])\n', (4035, 4074), True, 'import numpy as np\n'), ((4763, 4804), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (4776, 4804), True, 'import numpy as np\n'), ((5192, 5233), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (5205, 5233), True, 'import numpy as np\n'), ((5680, 5721), 'numpy.atleast_2d', 'np.atleast_2d', (['self.dist_matrix[diag_ind]'], {}), '(self.dist_matrix[diag_ind])\n', (5693, 5721), True, 'import numpy as np\n')]
|
from unittest import TestCase
import numpy as np
from robustnessgym.cachedops.spacy import Spacy
from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation
from tests.testbeds import MockTestBedv0
class TestLengthSubpopulation(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
self.testbed.dataset = Spacy()(self.testbed.dataset, columns=["text"])
def test_score(self):
# Create the length subpopulation
length = LengthSubpopulation(intervals=[(1, 3), (4, 5)])
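        # One slice per length interval; every mock example scores 5 below, so all rows fall in the (4, 5) slice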
# Compute scores
scores = length.score(self.testbed.dataset[:], columns=["text"])
self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5])))
print(self.testbed.dataset.column_names)
print(Spacy.retrieve(self.testbed.dataset[:], ["text"]))
# Apply the subpopulation
slices, slice_matrix = length(self.testbed.dataset, columns=["text"])
# Check that the slice membership lines up
self.assertTrue(np.allclose(slice_matrix, np.array([[0, 1]] * 6)))
|
[
"tests.testbeds.MockTestBedv0",
"robustnessgym.cachedops.spacy.Spacy",
"numpy.array",
"robustnessgym.cachedops.spacy.Spacy.retrieve",
"robustnessgym.slicebuilders.subpopulations.length.LengthSubpopulation"
] |
[((309, 324), 'tests.testbeds.MockTestBedv0', 'MockTestBedv0', ([], {}), '()\n', (322, 324), False, 'from tests.testbeds import MockTestBedv0\n'), ((490, 537), 'robustnessgym.slicebuilders.subpopulations.length.LengthSubpopulation', 'LengthSubpopulation', ([], {'intervals': '[(1, 3), (4, 5)]'}), '(intervals=[(1, 3), (4, 5)])\n', (509, 537), False, 'from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation\n'), ((356, 363), 'robustnessgym.cachedops.spacy.Spacy', 'Spacy', ([], {}), '()\n', (361, 363), False, 'from robustnessgym.cachedops.spacy import Spacy\n'), ((776, 825), 'robustnessgym.cachedops.spacy.Spacy.retrieve', 'Spacy.retrieve', (['self.testbed.dataset[:]', "['text']"], {}), "(self.testbed.dataset[:], ['text'])\n", (790, 825), False, 'from robustnessgym.cachedops.spacy import Spacy\n'), ((681, 709), 'numpy.array', 'np.array', (['[5, 5, 5, 5, 5, 5]'], {}), '([5, 5, 5, 5, 5, 5])\n', (689, 709), True, 'import numpy as np\n'), ((1042, 1064), 'numpy.array', 'np.array', (['([[0, 1]] * 6)'], {}), '([[0, 1]] * 6)\n', (1050, 1064), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 30 17:15:27 2014
@author: Parke
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
import mypy.my_numpy as mnp
dpi = 100
fullwidth = 10.0
halfwidth = 5.0
# use these with line.set_dashes and iterate through more linestyles than come with matplotlib
# consider using a ::2 slice for fewer
dashes = [[],
[30, 10],
[20, 8],
[10, 5],
[3, 2],
[30, 5, 3, 5, 10, 5, 3, 5],
[15] + [5, 3]*3 + [5],
[15] + [5, 3]*2 + [5],
[15] + [5, 3] + [5]]
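# e.g. (sketch): line, = plt.plot(x, y); line.set_dashes(dashes[3])  # dashes[0] == [] draws a solid line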
def click_coords(fig=None, timeout=600.):
if fig is None:
fig = plt.gcf()
xy = []
def onclick(event):
if not event.inaxes:
fig.canvas.stop_event_loop()
else:
xy.append([event.xdata, event.ydata])
print("Gathering coordinates of mouse clicks. Click outside of the axes " \
"when done.")
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.start_event_loop(timeout=timeout)
fig.canvas.mpl_disconnect(cid)
return np.array(xy)
def common_axes(fig, pos=None):
if pos is None:
bigax = fig.add_subplot(111)
else:
bigax = fig.add_axes(pos)
[bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']]
bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off')
bigax.set_zorder(-10)
return bigax
def log_frac(x, frac):
l0, l1 = list(map(np.log10, x))
ld = l1 - l0
l = ld*frac + l0
return 10**l
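# e.g. log_frac((1, 100), 0.5) -> 10.0, the midpoint of 1 and 100 on a log axis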
def log2linear(x, errneg=None, errpos=None):
xl = 10**x
result = [xl]
if errneg is not None:
xn = xl - 10**(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = 10**(x + errpos) - xl
result.append(xp)
return result
def linear2log(x, errneg=None, errpos=None):
xl = np.log10(x)
    result = [xl]
if errneg is not None:
xn = xl - np.log10(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = np.log10(x + errpos) - xl
result.append(xp)
return result
def step(*args, **kwargs):
edges, values = args[0], args[1]
# deal with potentially gappy 2-column bin specifications
edges = np.asarray(edges)
if edges.ndim == 2:
if np.any(edges[1:,0] < edges[:-1,1]):
raise ValueError('Some bins overlap')
if np.any(edges[1:,0] < edges[:-1,0]):
raise ValueError('Bins must be in increasing order.')
gaps = edges[1:,0] > edges[:-1,1]
edges = np.unique(edges)
if np.any(gaps):
            values = np.insert(values, np.nonzero(gaps)[0] + 1, np.nan)
edges = mnp.lace(edges[:-1], edges[1:])
values = mnp.lace(values, values)
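    # edges/values are now interleaved so ax.plot draws flat steps with vertical risers at each bin edge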
args = list(args)
args[0], args[1] = edges, values
ax = kwargs.pop('ax', plt.gca())
return ax.plot(*args, **kwargs)
def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'):
if scale == 'log':
        xlbl_log = np.log10(xlbl) if xlbl is not None else None
        lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl_log)
return 10 ** lx, 10 ** ly
if xfrac is not None:
if xfrac == 0:
return x[0], y[0]
if xfrac == 1:
return x[-1], y[-1]
else:
d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))
d = np.insert(d, 0, 0)
f = d/d[-1]
xp, yp = [np.interp(xfrac, f, a) for a in [x,y]]
return xp, yp
if xlbl is not None:
return xlbl, np.interp(xlbl, x, y)
def textSize(ax_or_fig=None, coordinate='data'):
"""
Return x & y scale factors for converting text sizes in points to another coordinate. Useful for properly spacing
text labels and such when you need to know sizes before the text is made (otherwise you can use textBoxSize).
Coordinate can be 'data', 'axes', or 'figure'.
If data coordinates are requested and the data is plotted on a log scale, then the factor will be given in dex.
"""
if ax_or_fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
if isinstance(ax_or_fig, plt.Figure):
fig = ax_or_fig
ax = fig.gca()
elif isinstance(ax_or_fig, plt.Axes):
ax = ax_or_fig
fig = ax.get_figure()
else:
raise TypeError('ax_or_fig must be a Figure or Axes instance, if given.')
w_fig_in, h_fig_in = ax.get_figure().get_size_inches()
    if coordinate in ('fig', 'figure'):
return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)
w_ax_norm, h_ax_norm = ax.get_position().size
w_ax_in = w_ax_norm * w_fig_in
h_ax_in = h_ax_norm * h_fig_in
w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72
if coordinate == 'axes':
return 1.0/w_ax_pts, 1.0/h_ax_pts
if coordinate == 'data':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if ax.get_xscale() == 'log': xlim = np.log10(xlim)
if ax.get_yscale() == 'log': ylim = np.log10(ylim)
w_ax_data = xlim[1] - xlim[0]
h_ax_data = ylim[1] - ylim[0]
return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts
def tight_axis_limits(ax=None, xory='both', margin=0.05):
if ax is None: ax = plt.gca()
def newlim(oldlim):
delta = abs(oldlim[1] - oldlim[0])
pad = delta*margin
if oldlim[1] > oldlim[0]:
return (oldlim[0] - pad, oldlim[1] + pad)
else:
return (oldlim[0] + pad, oldlim[1] - pad)
def newlim_log(oldlim):
loglim = [np.log10(l) for l in oldlim]
newloglim = newlim(loglim)
return (10.0**newloglim[0], 10.0**newloglim[1])
def newlim_either(oldlim,axlim,scale):
if axlim[1] < axlim [0]: oldlim = oldlim[::-1]
if scale == 'linear':
return newlim(oldlim)
elif scale == 'log':
return newlim_log(oldlim)
elif scale == 'symlog':
            raise NotImplementedError('Past Parke to future Parke, you didn\'t write an implementation for symlog'
                                      ' scaled axes.')
if xory == 'x' or xory == 'both':
datalim = ax.dataLim.extents[[0,2]]
axlim = ax.get_xlim()
scale = ax.get_xscale()
ax.set_xlim(newlim_either(datalim,axlim,scale))
if xory == 'y' or xory == 'both':
datalim = ax.dataLim.extents[[1,3]]
axlim = ax.get_ylim()
scale = ax.get_yscale()
ax.set_ylim(newlim_either(datalim,axlim,scale))
#TODO: discard this function?
def standard_figure(app, slideAR=1.6, height=1.0):
"""Generate a figure of standard size for publishing.
implemented values for app (application) are:
'fullslide'
height is the fractional height of the figure relative to the "standard"
height. For slides the standard is the full height of a slide.
returns the figure object and default font size
"""
if app == 'fullslide':
fontsize = 20
figsize = [fullwidth, fullwidth/slideAR*height]
fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)
mplot.rcParams.update({'font.size': fontsize})
return fig, fontsize
def pcolor_reg(x, y, z, **kw):
"""
Similar to `pcolor`, but assume that the grid is uniform,
and do plotting with the (much faster) `imshow` function.
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should be 1-dimensional")
if z.ndim != 2 or z.shape != (y.size, x.size):
raise ValueError("z.shape should be (y.size, x.size)")
dx = np.diff(x)
dy = np.diff(y)
if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):
raise ValueError("The grid must be uniform")
if np.issubdtype(z.dtype, np.complexfloating):
zp = np.zeros(z.shape, float)
zp[...] = z[...]
z = zp
plt.imshow(z, origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest',
aspect='auto',
**kw)
plt.axis('tight')
def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw):
if ax is None: ax = plt.gca()
p = ax.plot(x, y, **kw) if fmt is None else ax.plot(x, y, fmt, **kw)
if len(yerr.shape) == 2:
ylo = y - yerr[0,:]
yhi = y + yerr[1,:]
else:
ylo, yhi = y - yerr, y + yerr
if ecolor is None: ecolor = p[0].get_color()
# deal with matplotlib sometimes not showing polygon when it extends beyond plot range
xlim = ax.get_xlim()
inrange = mnp.inranges(x, xlim)
if not np.all(inrange):
n = np.sum(inrange)
yends = np.interp(xlim, x, y)
yloends = np.interp(xlim, x, ylo)
yhiends = np.interp(xlim, x, yhi)
x = np.insert(x[inrange], [0, n], xlim)
y = np.insert(y[inrange], [0, n], yends)
ylo = np.insert(ylo[inrange], [0, n], yloends)
yhi = np.insert(yhi[inrange], [0, n], yhiends)
f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha)
return p[0],f
def onscreen_pres(mpl, screenwidth=1200):
"""
Set matplotlibrc values so that plots are readable as they are created
and maximized for an audience far from a screen.
Parameters
----------
mpl : module
Current matplotlib module. Use 'import matplotlib as mpl'.
    screenwidth : int
Width of the screen in question in pixels.
Returns
-------
None
"""
mpl.rcParams['lines.linewidth'] = 2
fontsize = round(14 / (800.0 / screenwidth))
mpl.rcParams['font.size'] = fontsize
def textBoxSize(txt, transformation=None, figure=None):
"""Get the width and height of a text object's bounding box transformed to the desired coordinates. Defaults to
figure coordinates if transformation is None."""
fig= txt.get_figure() if figure is None else figure
if transformation is None:
transformation = fig.transFigure
coordConvert = transformation.inverted().transform
bboxDisp = txt.get_window_extent(fig.canvas.renderer)
bboxConv = coordConvert(bboxDisp)
w = bboxConv[1,0] - bboxConv[0,0]
h = bboxConv[1,1] - bboxConv[0,1]
return w, h
def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):
"""
Make a 3D diagram of stars positions relative to the Sun, with
semi-accurate colors and distances as desired. Coordinates must be in
degrees. Distance is assumed to be in pc (for axes labels).
Meant to be used with only a handful of stars.
"""
from mayavi import mlab
from color.maps import true_temp
n = len(ra)
dec, ra = dec*np.pi/180.0, ra*np.pi/180.0
makearr = lambda v: np.array([v] * n) if np.isscalar(v) else v
T, r, labels = list(map(makearr, (T, r, labels)))
# add the sun
ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))
r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))
# get xyz coordinates
z = dist * np.sin(dec)
h = dist * np.cos(dec)
x = h * np.cos(ra)
y = h * np.sin(ra)
# make figure
fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)
# plot lines down to the dec=0 plane for all but the sun
lines = []
for x1, y1, z1 in list(zip(x, y, z))[:-1]:
xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]
line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,
figure=fig)
lines.append(line)
# plot spheres
r_factor = np.max(dist) / 30.0
pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',
scale_factor=r_factor, figure=fig, resolution=100)
pts.glyph.color_mode = 'color_by_scalar'
# center the glyphs on the data point
pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]
# set a temperature colormap
cmap = true_temp(T)
pts.module_manager.scalar_lut_manager.lut.table = cmap
# set the camera view
mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)
if view is not None:
mlab.view(*view, figure=fig)
## add labels
# unit vec to camera
view = mlab.view()
az, el = view[:2]
hc = np.sin(el * np.pi / 180.0)
xc = hc * np.cos(az * np.pi / 180.0)
yc = hc * np.sin(az * np.pi / 180.0)
zc = -np.cos(el * np.pi / 180.0)
# unit vec orthoganal to camera
if xc**2 + yc**2 == 0.0:
xoff = 1.0
yoff = 0.0
zoff = 0.0
else:
xoff = yc / np.sqrt(xc**2 + yc**2)
yoff = np.sqrt(1.0 - xoff**2)
zoff = 0.0
# xoff, yoff, zoff = xc, yc, zc
# scale orthogonal vec by sphere size
r_label = 1.0 * r_factor
xoff, yoff, zoff = [r_label * v for v in [xoff, yoff, zoff]]
# plot labels
size = r_factor * txt_scale * 0.75
for xx, yy, zz, label in zip(x, y, z, labels):
mlab.text3d(xx + xoff, yy + yoff, zz + zoff, label, figure=fig,
color=(1,1,1), scale=size)
## add translucent dec=0 surface
n = 101
t = np.linspace(0.0, 2*np.pi, n)
r = np.max(dist * np.cos(dec))
x, y = r*np.cos(t), r*np.sin(t)
z = np.zeros(n+1)
x, y = [np.insert(a, 0, 0.0) for a in [x,y]]
triangles = [(0, i, i + 1) for i in range(1, n)]
mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig)
## add ra=0 line
line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig)
rtxt = '{:.1f} pc'.format(r)
orientation=np.array([180.0, 180.0, 0.0])
mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation)
if view is not None:
mlab.view(*view, figure=fig)
return fig
|
[
"numpy.log10",
"numpy.sqrt",
"numpy.array",
"numpy.sin",
"mypy.my_numpy.inranges",
"mayavi.mlab.view",
"numpy.isscalar",
"numpy.asarray",
"numpy.diff",
"numpy.max",
"numpy.issubdtype",
"numpy.linspace",
"mayavi.mlab.quiver3d",
"matplotlib.pyplot.axis",
"numpy.abs",
"numpy.allclose",
"mypy.my_numpy.lace",
"matplotlib.rcParams.update",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"numpy.any",
"mayavi.mlab.text3d",
"numpy.cos",
"numpy.interp",
"numpy.nonzero",
"color.maps.true_temp",
"mayavi.mlab.triangular_mesh",
"numpy.insert",
"numpy.unique",
"mayavi.mlab.figure",
"numpy.sum",
"numpy.zeros",
"mayavi.mlab.plot3d",
"matplotlib.pyplot.figure",
"numpy.all"
] |
[((1184, 1196), 'numpy.array', 'np.array', (['xy'], {}), '(xy)\n', (1192, 1196), True, 'import numpy as np\n'), ((1998, 2009), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (2006, 2009), True, 'import numpy as np\n'), ((2378, 2395), 'numpy.asarray', 'np.asarray', (['edges'], {}), '(edges)\n', (2388, 2395), True, 'import numpy as np\n'), ((2808, 2839), 'mypy.my_numpy.lace', 'mnp.lace', (['edges[:-1]', 'edges[1:]'], {}), '(edges[:-1], edges[1:])\n', (2816, 2839), True, 'import mypy.my_numpy as mnp\n'), ((2853, 2877), 'mypy.my_numpy.lace', 'mnp.lace', (['values', 'values'], {}), '(values, values)\n', (2861, 2877), True, 'import mypy.my_numpy as mnp\n'), ((7155, 7201), 'matplotlib.rcParams.update', 'mplot.rcParams.update', (["{'font.size': fontsize}"], {}), "({'font.size': fontsize})\n", (7176, 7201), True, 'import matplotlib as mplot\n'), ((7677, 7687), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (7684, 7687), True, 'import numpy as np\n'), ((7697, 7707), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (7704, 7707), True, 'import numpy as np\n'), ((7846, 7888), 'numpy.issubdtype', 'np.issubdtype', (['z.dtype', 'np.complexfloating'], {}), '(z.dtype, np.complexfloating)\n', (7859, 7888), True, 'import numpy as np\n'), ((8158, 8175), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (8166, 8175), True, 'import matplotlib.pyplot as plt\n'), ((8675, 8696), 'mypy.my_numpy.inranges', 'mnp.inranges', (['x', 'xlim'], {}), '(x, xlim)\n', (8687, 8696), True, 'import mypy.my_numpy as mnp\n'), ((11251, 11311), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(0, 0, 0)', 'fgcolor': '(1, 1, 1)', 'size': 'size'}), '(bgcolor=(0, 0, 0), fgcolor=(1, 1, 1), size=size)\n', (11262, 11311), False, 'from mayavi import mlab\n'), ((11686, 11799), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['x', 'y', 'z', 'r', 'r', 'r'], {'scalars': 'T', 'mode': '"""sphere"""', 'scale_factor': 'r_factor', 'figure': 'fig', 'resolution': '(100)'}), "(x, y, z, r, r, r, scalars=T, mode='sphere', scale_factor=\n r_factor, figure=fig, resolution=100)\n", (11699, 11799), False, 'from mayavi import mlab\n'), ((12010, 12022), 'color.maps.true_temp', 'true_temp', (['T'], {}), '(T)\n', (12019, 12022), False, 'from color.maps import true_temp\n'), ((12113, 12162), 'mayavi.mlab.view', 'mlab.view', ([], {'focalpoint': '(0.0, 0.0, 0.0)', 'figure': 'fig'}), '(focalpoint=(0.0, 0.0, 0.0), figure=fig)\n', (12122, 12162), False, 'from mayavi import mlab\n'), ((12279, 12290), 'mayavi.mlab.view', 'mlab.view', ([], {}), '()\n', (12288, 12290), False, 'from mayavi import mlab\n'), ((12322, 12348), 'numpy.sin', 'np.sin', (['(el * np.pi / 180.0)'], {}), '(el * np.pi / 180.0)\n', (12328, 12348), True, 'import numpy as np\n'), ((13156, 13186), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', 'n'], {}), '(0.0, 2 * np.pi, n)\n', (13167, 13186), True, 'import numpy as np\n'), ((13264, 13279), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (13272, 13279), True, 'import numpy as np\n'), ((13384, 13470), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['x', 'y', 'z', 'triangles'], {'color': '(1, 1, 1)', 'opacity': '(0.3)', 'figure': 'fig'}), '(x, y, z, triangles, color=(1, 1, 1), opacity=0.3,\n figure=fig)\n', (13404, 13470), False, 'from mayavi import mlab\n'), ((13498, 13576), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, r]', '[0, 0]', '[0, 0]'], {'color': '(1, 1, 1)', 'line_width': '(1)', 'figure': 'fig'}), '([0, r], [0, 0], [0, 0], color=(1, 1, 1), line_width=1, figure=fig)\n', (13509, 13576), False, 
'from mayavi import mlab\n'), ((13624, 13653), 'numpy.array', 'np.array', (['[180.0, 180.0, 0.0]'], {}), '([180.0, 180.0, 0.0])\n', (13632, 13653), True, 'import numpy as np\n'), ((13658, 13769), 'mayavi.mlab.text3d', 'mlab.text3d', (['r', '(0)', '(0)', 'rtxt'], {'figure': 'fig', 'scale': '(size * 1.25)', 'orient_to_camera': '(False)', 'orientation': 'orientation'}), '(r, 0, 0, rtxt, figure=fig, scale=size * 1.25, orient_to_camera=\n False, orientation=orientation)\n', (13669, 13769), False, 'from mayavi import mlab\n'), ((739, 748), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (746, 748), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2467), 'numpy.any', 'np.any', (['(edges[1:, 0] < edges[:-1, 1])'], {}), '(edges[1:, 0] < edges[:-1, 1])\n', (2437, 2467), True, 'import numpy as np\n'), ((2528, 2564), 'numpy.any', 'np.any', (['(edges[1:, 0] < edges[:-1, 0])'], {}), '(edges[1:, 0] < edges[:-1, 0])\n', (2534, 2564), True, 'import numpy as np\n'), ((2688, 2704), 'numpy.unique', 'np.unique', (['edges'], {}), '(edges)\n', (2697, 2704), True, 'import numpy as np\n'), ((2716, 2728), 'numpy.any', 'np.any', (['gaps'], {}), '(gaps)\n', (2722, 2728), True, 'import numpy as np\n'), ((2963, 2972), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2970, 2972), True, 'import matplotlib.pyplot as plt\n'), ((4159, 4168), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4166, 4168), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5321), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5319, 5321), True, 'import matplotlib.pyplot as plt\n'), ((7104, 7149), 'matplotlib.pyplot.figure', 'mplot.pyplot.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (7123, 7149), True, 'import matplotlib as mplot\n'), ((7415, 7428), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7425, 7428), True, 'import numpy as np\n'), ((7430, 7443), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7440, 7443), True, 'import numpy as np\n'), ((7445, 7458), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (7455, 7458), True, 'import numpy as np\n'), ((7903, 7927), 'numpy.zeros', 'np.zeros', (['z.shape', 'float'], {}), '(z.shape, float)\n', (7911, 7927), True, 'import numpy as np\n'), ((8279, 8288), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8286, 8288), True, 'import matplotlib.pyplot as plt\n'), ((8708, 8723), 'numpy.all', 'np.all', (['inrange'], {}), '(inrange)\n', (8714, 8723), True, 'import numpy as np\n'), ((8737, 8752), 'numpy.sum', 'np.sum', (['inrange'], {}), '(inrange)\n', (8743, 8752), True, 'import numpy as np\n'), ((8769, 8790), 'numpy.interp', 'np.interp', (['xlim', 'x', 'y'], {}), '(xlim, x, y)\n', (8778, 8790), True, 'import numpy as np\n'), ((8809, 8832), 'numpy.interp', 'np.interp', (['xlim', 'x', 'ylo'], {}), '(xlim, x, ylo)\n', (8818, 8832), True, 'import numpy as np\n'), ((8851, 8874), 'numpy.interp', 'np.interp', (['xlim', 'x', 'yhi'], {}), '(xlim, x, yhi)\n', (8860, 8874), True, 'import numpy as np\n'), ((8887, 8922), 'numpy.insert', 'np.insert', (['x[inrange]', '[0, n]', 'xlim'], {}), '(x[inrange], [0, n], xlim)\n', (8896, 8922), True, 'import numpy as np\n'), ((8935, 8971), 'numpy.insert', 'np.insert', (['y[inrange]', '[0, n]', 'yends'], {}), '(y[inrange], [0, n], yends)\n', (8944, 8971), True, 'import numpy as np\n'), ((8986, 9026), 'numpy.insert', 'np.insert', (['ylo[inrange]', '[0, n]', 'yloends'], {}), '(ylo[inrange], [0, n], yloends)\n', (8995, 9026), True, 'import numpy as np\n'), ((9041, 9081), 'numpy.insert', 
'np.insert', (['yhi[inrange]', '[0, n]', 'yhiends'], {}), '(yhi[inrange], [0, n], yhiends)\n', (9050, 9081), True, 'import numpy as np\n'), ((11137, 11148), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (11143, 11148), True, 'import numpy as np\n'), ((11164, 11175), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (11170, 11175), True, 'import numpy as np\n'), ((11188, 11198), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (11194, 11198), True, 'import numpy as np\n'), ((11211, 11221), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (11217, 11221), True, 'import numpy as np\n'), ((11496, 11570), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['xx', 'yy', 'zz'], {'color': '(0.7, 0.7, 0.7)', 'line_width': '(0.5)', 'figure': 'fig'}), '(xx, yy, zz, color=(0.7, 0.7, 0.7), line_width=0.5, figure=fig)\n', (11507, 11570), False, 'from mayavi import mlab\n'), ((11656, 11668), 'numpy.max', 'np.max', (['dist'], {}), '(dist)\n', (11662, 11668), True, 'import numpy as np\n'), ((12195, 12223), 'mayavi.mlab.view', 'mlab.view', (['*view'], {'figure': 'fig'}), '(*view, figure=fig)\n', (12204, 12223), False, 'from mayavi import mlab\n'), ((12363, 12389), 'numpy.cos', 'np.cos', (['(az * np.pi / 180.0)'], {}), '(az * np.pi / 180.0)\n', (12369, 12389), True, 'import numpy as np\n'), ((12404, 12430), 'numpy.sin', 'np.sin', (['(az * np.pi / 180.0)'], {}), '(az * np.pi / 180.0)\n', (12410, 12430), True, 'import numpy as np\n'), ((12441, 12467), 'numpy.cos', 'np.cos', (['(el * np.pi / 180.0)'], {}), '(el * np.pi / 180.0)\n', (12447, 12467), True, 'import numpy as np\n'), ((12654, 12678), 'numpy.sqrt', 'np.sqrt', (['(1.0 - xoff ** 2)'], {}), '(1.0 - xoff ** 2)\n', (12661, 12678), True, 'import numpy as np\n'), ((12988, 13084), 'mayavi.mlab.text3d', 'mlab.text3d', (['(xx + xoff)', '(yy + yoff)', '(zz + zoff)', 'label'], {'figure': 'fig', 'color': '(1, 1, 1)', 'scale': 'size'}), '(xx + xoff, yy + yoff, zz + zoff, label, figure=fig, color=(1, 1,\n 1), scale=size)\n', (12999, 13084), False, 'from mayavi import mlab\n'), ((13290, 13310), 'numpy.insert', 'np.insert', (['a', '(0)', '(0.0)'], {}), '(a, 0, 0.0)\n', (13299, 13310), True, 'import numpy as np\n'), ((13796, 13824), 'mayavi.mlab.view', 'mlab.view', (['*view'], {'figure': 'fig'}), '(*view, figure=fig)\n', (13805, 13824), False, 'from mayavi import mlab\n'), ((2167, 2187), 'numpy.log10', 'np.log10', (['(x + errpos)'], {}), '(x + errpos)\n', (2175, 2187), True, 'import numpy as np\n'), ((3136, 3147), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (3144, 3147), True, 'import numpy as np\n'), ((3149, 3160), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (3157, 3160), True, 'import numpy as np\n'), ((3452, 3470), 'numpy.insert', 'np.insert', (['d', '(0)', '(0)'], {}), '(d, 0, 0)\n', (3461, 3470), True, 'import numpy as np\n'), ((3628, 3649), 'numpy.interp', 'np.interp', (['xlbl', 'x', 'y'], {}), '(xlbl, x, y)\n', (3637, 3649), True, 'import numpy as np\n'), ((5024, 5038), 'numpy.log10', 'np.log10', (['xlim'], {}), '(xlim)\n', (5032, 5038), True, 'import numpy as np\n'), ((5083, 5097), 'numpy.log10', 'np.log10', (['ylim'], {}), '(ylim)\n', (5091, 5097), True, 'import numpy as np\n'), ((5620, 5631), 'numpy.log10', 'np.log10', (['l'], {}), '(l)\n', (5628, 5631), True, 'import numpy as np\n'), ((7719, 7747), 'numpy.allclose', 'np.allclose', (['dx', 'dx[0]', '(0.01)'], {}), '(dx, dx[0], 0.01)\n', (7730, 7747), True, 'import numpy as np\n'), ((7755, 7783), 'numpy.allclose', 'np.allclose', (['dy', 'dy[0]', '(0.01)'], {}), '(dy, dy[0], 0.01)\n', (7766, 7783), True, 'import numpy 
as np\n'), ((10847, 10861), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (10858, 10861), True, 'import numpy as np\n'), ((10826, 10843), 'numpy.array', 'np.array', (['([v] * n)'], {}), '([v] * n)\n', (10834, 10843), True, 'import numpy as np\n'), ((12617, 12643), 'numpy.sqrt', 'np.sqrt', (['(xc ** 2 + yc ** 2)'], {}), '(xc ** 2 + yc ** 2)\n', (12624, 12643), True, 'import numpy as np\n'), ((13207, 13218), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (13213, 13218), True, 'import numpy as np\n'), ((13233, 13242), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (13239, 13242), True, 'import numpy as np\n'), ((13246, 13255), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (13252, 13255), True, 'import numpy as np\n'), ((2769, 2785), 'numpy.nonzero', 'np.nonzero', (['gaps'], {}), '(gaps)\n', (2779, 2785), True, 'import numpy as np\n'), ((3517, 3539), 'numpy.interp', 'np.interp', (['xfrac', 'f', 'a'], {}), '(xfrac, f, a)\n', (3526, 3539), True, 'import numpy as np\n'), ((1794, 1808), 'numpy.abs', 'np.abs', (['errneg'], {}), '(errneg)\n', (1800, 1808), True, 'import numpy as np\n'), ((2085, 2099), 'numpy.abs', 'np.abs', (['errneg'], {}), '(errneg)\n', (2091, 2099), True, 'import numpy as np\n'), ((3404, 3414), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (3411, 3414), True, 'import numpy as np\n'), ((3420, 3430), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3427, 3430), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
script for calculating gc skew
<NAME>
<EMAIL>
"""
# python modules
import os
import sys
import argparse
import numpy as np
from scipy import signal
from itertools import cycle, product
# plotting modules
from matplotlib import use as mplUse
mplUse('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# ctb
from ctbBio.fasta import iterate_fasta as parse_fasta
def plot_two(title, subtitle, A, B, labels, legend, vert = False):
"""
    plot with different y axes
title = title for chart
A = data for left axis [[x], [y]]
B = data for right axis
    labels = [left label, right label, x label]
legend = [[left legend], [right legend]]
"""
fig, ax1 = plt.subplots()
colors = ['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g']
a_colors = cycle(colors)
b_colors = cycle(colors[::-1])
a_label = cycle(legend[0])
b_label = cycle(legend[1])
# plot left axis and x - axis
for a in A:
x, y = a
ax1.set_ylabel(labels[0], labelpad = 3)
ax1.set_xlabel(labels[-1])
ax1.plot(x, y, c = next(a_colors), marker = 'o', ms = 4, label = next(a_label))
# add vertical lines
if vert is not False:
for i in vert:
x, c = i
ax1.axvline(x = x, c = c, label = next(a_label), linewidth = 2)
# plot right axis
ax2 = ax1.twinx()
for b in B:
x, y = b
ax2.set_ylabel(labels[1], labelpad = 8)
ax2.plot(x, y, c = next(b_colors), linewidth = 2, label = next(b_label))
    xmin = min([min(i[0]) for i in A] + [min(i[0]) for i in B])
xmax = max([max(i[0]) for i in A] + [max(i[0]) for i in B])
ax2.set_xlim(xmin, xmax)
# title
plt.suptitle(title, fontsize = 16)
plt.title(subtitle, fontsize = 10)
# legend
ax1.legend(loc = 'upper left', \
bbox_to_anchor=(0.55, -0.125), \
prop = {'size':8}, \
framealpha = 0.0
)
plt.legend(loc = 'upper right', \
bbox_to_anchor=(0.45, -0.125), \
prop = {'size':8}, \
framealpha = 0.0\
)
# save
pdf = PdfPages('%s.pdf' % title.replace(' ', '_'))
pdf.savefig(bbox_inches = 'tight')
plt.close()
pdf.close()
def check_peaks(peaks, length):
"""
select pair of min and max that are not too close or
too far apart and have greatest y distance between one another
"""
# if ori/ter peaks are too close or too far apart, they are probably wrong
closest, farthest = int(length * float(0.45)), int(length * float(0.55))
pairs = []
for pair in list(product(*peaks)):
### added this to make sure gets origin and ter right
tr, pk = sorted(list(pair), key = lambda x: x[1], reverse = False) # trough and peak
a = (tr[0] - pk[0]) % length
b = (pk[0] - tr[0]) % length
pt = abs(tr[1] - pk[1]) # distance between values
if (a <= farthest and a >= closest) or (b <=farthest and b >= closest):
pairs.append([pt, tr, pk])
if len(pairs) == 0:
return [False, False]
pt, tr, pk = sorted(pairs, reverse = True)[0]
return [tr[0], pk[0]]
def find_ori_ter(c_skew, length):
"""
find origin and terminus of replication based on
cumulative GC Skew
"""
# find origin and terminus of replication based on
# cumulative gc skew min and max peaks
c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist()
c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist()
# return False if no peaks were detected
    if len(c_skew_min) == 0 or len(c_skew_max) == 0:
return [False, False]
else:
c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min]
c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max]
ori, ter = check_peaks([c_skew_min, c_skew_max], length)
return ori, ter
def gc_skew(name, length, seq, window, slide, plot_skew):
"""
calculate gc skew and cumulative sum of gc skew over sequence windows
    gc skew = (G - C) / (G + C), computed over a sliding window
"""
# convert to G - C
replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0}
gmc = [] # G - C
for base in seq:
try:
gmc.append(replacements[base])
except:
gmc.append(0)
# convert to G + C
gpc = [abs(i) for i in gmc] # G + C
# calculate sliding windows for (G - C) and (G + C)
weights = np.ones(window)/window
gmc = [[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())]
gpc = [[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())]
    # calculate gc skew and cumulative gc skew sum
    skew = [[], []] # x and y for gc skew
    c_skew = [[], []] # x and y for gc skew cumulative sums
    cs = 0 # cumulative sum
# select windows to use based on slide
for i, m in gmc[0::slide]:
p = gpc[i][1]
if p == 0:
gcs = 0
else:
gcs = m/p
cs += gcs
skew[0].append(i)
c_skew[0].append(i)
skew[1].append(gcs)
c_skew[1].append(cs)
ori, ter = find_ori_ter(c_skew, length)
# plot data
if plot_skew is True:
title = '%s GC Skew' % (name)
subtitle = '(window = %s, slide = %s)' % (window, slide)
labels = ['GC Skew', 'Cumulative GC Skew', 'Position on Genome (bp)']
# remove some points for plotting (approx. 1,000 datapoints)
N = int(len(skew[0])/1000)
if N != 0:
skew = [skew[0][0::N], skew[1][0::N]]
if ori is False:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0]], [labels[1]]])
else:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \
'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \
vert = [(ori, 'r'), (ter, 'b')])
return ori, ter, skew, c_skew
def parse_genomes(fastas, single):
"""
generator for parsing fastas
if single is True, combine sequences in multifasta file
"""
if single is True:
for genome in fastas:
sequence = []
for seq in parse_fasta(genome):
sequence.extend(list(seq[1].upper()))
yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence)
else:
for genome in fastas:
for seq in parse_fasta(genome):
ID = seq[0].split('>', 1)[1].split()[0]
yield (ID, len(seq[1]), list(seq[1].upper()))
def open_files(files):
"""
open files in list, use stdin if first
item in list is '-'
"""
if files is None:
return files
if files[0] == '-':
        return [sys.stdin]
return (open(i) for i in files)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = \
'# calculate gc skew and find Ori and Ter of replication')
parser.add_argument(\
'-f', nargs = '*', action = 'store', required = True, \
help = 'fasta(s)')
parser.add_argument(\
'-l', default = False, type = int, \
help = 'minimum contig length (default = 10 x window)')
parser.add_argument(\
'-w', default = 1000, type = int, \
help = 'window length (default = 1000)')
parser.add_argument(\
'-s', default = 10, type = int, \
help = 'slide length (default = 10)')
parser.add_argument(\
'--single', action = 'store_true', \
help = 'combine multi-fasta sequences into single genome')
parser.add_argument(\
'--no-plot', action = 'store_false', \
help = 'do not generate plots, print GC Skew to stdout')
args = vars(parser.parse_args())
fastas = open_files(args['f'])
single, plot_skew = args['single'], args['no_plot']
window, slide = args['w'], args['s']
min_len = args['l']
if min_len is False:
min_len = 10 * window
for name, length, seq in parse_genomes(fastas, single):
if length < min_len:
print('%s: Too Short' % (name), file=sys.stderr)
continue
ori, ter, skew, c_skew = gc_skew(name, length, seq, window, slide, plot_skew)
        if ori is False:
ori, ter = 'n/a', 'n/a'
else:
ori, ter = '{:,}'.format(ori), '{:,}'.format(ter)
print('%s -> Origin: %s Terminus: %s' \
% (name, ori, ter), file=sys.stderr)
if plot_skew is False:
print('\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew']))
for i, pos in enumerate(skew[0]):
out = [name, pos, skew[1][i], c_skew[1][i]]
print('\t'.join([str(i) for i in out]))
|
[
"ctbBio.fasta.iterate_fasta",
"itertools.cycle",
"numpy.ones",
"argparse.ArgumentParser",
"matplotlib.use",
"itertools.product",
"numpy.asarray",
"scipy.signal.fftconvolve",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.close",
"matplotlib.rc",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] |
[((272, 285), 'matplotlib.use', 'mplUse', (['"""Agg"""'], {}), "('Agg')\n", (278, 285), True, 'from matplotlib import use as mplUse\n'), ((431, 498), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n", (433, 498), False, 'from matplotlib import rc\n'), ((872, 886), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (884, 886), True, 'import matplotlib.pyplot as plt\n'), ((959, 972), 'itertools.cycle', 'cycle', (['colors'], {}), '(colors)\n', (964, 972), False, 'from itertools import cycle, product\n'), ((988, 1007), 'itertools.cycle', 'cycle', (['colors[::-1]'], {}), '(colors[::-1])\n', (993, 1007), False, 'from itertools import cycle, product\n'), ((1022, 1038), 'itertools.cycle', 'cycle', (['legend[0]'], {}), '(legend[0])\n', (1027, 1038), False, 'from itertools import cycle, product\n'), ((1053, 1069), 'itertools.cycle', 'cycle', (['legend[1]'], {}), '(legend[1])\n', (1058, 1069), False, 'from itertools import cycle, product\n'), ((1858, 1890), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': '(16)'}), '(title, fontsize=16)\n', (1870, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1929), 'matplotlib.pyplot.title', 'plt.title', (['subtitle'], {'fontsize': '(10)'}), '(subtitle, fontsize=10)\n', (1906, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2118, 2217), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'bbox_to_anchor': '(0.45, -0.125)', 'prop': "{'size': 8}", 'framealpha': '(0.0)'}), "(loc='upper right', bbox_to_anchor=(0.45, -0.125), prop={'size': \n 8}, framealpha=0.0)\n", (2128, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2405), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2403, 2405), True, 'import matplotlib.pyplot as plt\n'), ((7128, 7227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""# calculate gc skew and find Ori and Ter of replication"""'}), "(description=\n '# calculate gc skew and find Ori and Ter of replication')\n", (7151, 7227), False, 'import argparse\n'), ((2787, 2802), 'itertools.product', 'product', (['*peaks'], {}), '(*peaks)\n', (2794, 2802), False, 'from itertools import cycle, product\n'), ((4690, 4705), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (4697, 4705), True, 'import numpy as np\n'), ((6496, 6515), 'ctbBio.fasta.iterate_fasta', 'parse_fasta', (['genome'], {}), '(genome)\n', (6507, 6515), True, 'from ctbBio.fasta import iterate_fasta as parse_fasta\n'), ((6710, 6729), 'ctbBio.fasta.iterate_fasta', 'parse_fasta', (['genome'], {}), '(genome)\n', (6721, 6729), True, 'from ctbBio.fasta import iterate_fasta as parse_fasta\n'), ((3606, 3627), 'numpy.asarray', 'np.asarray', (['c_skew[1]'], {}), '(c_skew[1])\n', (3616, 3627), True, 'import numpy as np\n'), ((3699, 3720), 'numpy.asarray', 'np.asarray', (['c_skew[1]'], {}), '(c_skew[1])\n', (3709, 3720), True, 'import numpy as np\n'), ((4753, 4793), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['gmc', 'weights', '"""same"""'], {}), "(gmc, weights, 'same')\n", (4771, 4793), False, 'from scipy import signal\n'), ((4845, 4885), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['gpc', 'weights', '"""same"""'], {}), "(gpc, weights, 'same')\n", (4863, 4885), False, 'from scipy import signal\n')]
|
# <NAME> (<EMAIL>)
# April 2018
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, '..'))
from datasets import *
from generate_outputs import *
from scipy.optimize import linear_sum_assignment
#import matplotlib.pyplot as plt
import numpy as np
def compute_all_keypoints(sess, net, data):
P = data.point_clouds
assert(P.shape[0] == data.n_data)
assert(P.shape[1] == data.n_points)
KP = data.keypoints
assert(KP.shape[0] == data.n_data)
assert(KP.shape[1] == data.n_labels)
A = predict_A(P, sess, net)
assert(A.shape[0] == data.n_data)
assert(A.shape[1] == data.n_points)
assert(A.shape[2] == net.K)
pred_KP = np.argmax(A, axis=1)
return P, KP, pred_KP
def evaluate_PCK(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
# NOTE:
# Skip if the keypoint does not exist.
labels = [i for i in range(n_labels) if KP[k,i] >= 0]
# Find the closest prediction (w/o matching).
for i, label in enumerate(labels):
all_dists = np.zeros(K)
idx_i = KP[k,label]
assert(idx_i < n_points)
p_i = P[k,idx_i]
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[j] = np.linalg.norm(p_i - p_j)
j = np.argmin(all_dists)
dists_info.append((k, i, j, all_dists[j]))
dists_info = np.array(dists_info)
return dists_info
def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# Find the best mapping from labels to bases.
all_dists = np.zeros((n_data, n_labels, K))
label_counts = np.zeros(n_labels)
for k in range(n_data):
for i in range(n_labels):
# NOTE:
# Skip if the keypoint does not exist.
if KP[k,i] < 0: continue
idx_i = KP[k,i]
assert(idx_i < n_points)
p_i = P[k,idx_i]
label_counts[i] += 1.
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[k,i,j] += np.linalg.norm(p_i - p_j)
mean_dists = np.sum(all_dists, axis=0) / \
np.expand_dims(label_counts, axis=-1)
row_ind, col_ind = linear_sum_assignment(mean_dists)
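    # Hungarian matching: row_ind[i] (a keypoint label) is paired one-to-one with
    # col_ind[i] (a predicted basis) so that the total mean distance is minimized.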
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
for (i, j) in zip(row_ind, col_ind):
if KP[k,i] < 0: continue
dists_info.append((k, i, j, all_dists[k,i,j]))
dists_info = np.array(dists_info)
return dists_info
def save_results(dists_info, out_dir, postfix=None):
# dists_info: (point_cloud_index, label, basis_index, distance)
dists = dists_info[:,3]
if postfix is not None:
out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix))
else:
out_file = os.path.join(out_dir, 'distances.npy')
np.save(out_file, dists)
print("Saved '{}'.".format(out_file))
'''
# Draw plot.
n_matches = dists.size
x_list = np.linspace(0.0, 0.1, 20 + 1)
counts = np.zeros(x_list.size, dtype=int)
for i in range(x_list.size):
counts[i] = np.sum(dists <= x_list[i])
y_list = counts.astype(x_list.dtype) / float(n_matches)
plt.clf()
plt.plot(x_list, y_list)
plt.ylim(0., 1.)
plt.yticks(np.linspace(0., 1., 10 + 1))
if postfix is not None:
out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix))
else:
out_file = os.path.join(out_dir, 'pck.png')
plt.savefig(out_file)
print("Saved '{}'.".format(out_file))
'''
def evaluate(sess, net, data, out_dir):
if not os.path.exists(out_dir): os.makedirs(out_dir)
P, KP, pred_KP = compute_all_keypoints(sess, net, data)
dists = evaluate_PCK(P, KP, pred_KP)
save_results(dists, out_dir)
dists_after_matching = evaluate_PCK_after_label_basis_matching(
P, KP, pred_KP)
save_results(dists_after_matching, out_dir, postfix='after_matching')
|
[
"os.path.exists",
"scipy.optimize.linear_sum_assignment",
"os.makedirs",
"os.path.join",
"numpy.argmax",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.expand_dims",
"numpy.argmin",
"os.path.abspath",
"numpy.save"
] |
[((159, 187), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""'], {}), "(BASE_DIR, '..')\n", (171, 187), False, 'import os, sys\n'), ((758, 778), 'numpy.argmax', 'np.argmax', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (767, 778), True, 'import numpy as np\n'), ((1732, 1752), 'numpy.array', 'np.array', (['dists_info'], {}), '(dists_info)\n', (1740, 1752), True, 'import numpy as np\n'), ((2008, 2039), 'numpy.zeros', 'np.zeros', (['(n_data, n_labels, K)'], {}), '((n_data, n_labels, K))\n', (2016, 2039), True, 'import numpy as np\n'), ((2059, 2077), 'numpy.zeros', 'np.zeros', (['n_labels'], {}), '(n_labels)\n', (2067, 2077), True, 'import numpy as np\n'), ((2707, 2740), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['mean_dists'], {}), '(mean_dists)\n', (2728, 2740), False, 'from scipy.optimize import linear_sum_assignment\n'), ((3019, 3039), 'numpy.array', 'np.array', (['dists_info'], {}), '(dists_info)\n', (3027, 3039), True, 'import numpy as np\n'), ((3393, 3417), 'numpy.save', 'np.save', (['out_file', 'dists'], {}), '(out_file, dists)\n', (3400, 3417), True, 'import numpy as np\n'), ((2604, 2629), 'numpy.sum', 'np.sum', (['all_dists'], {'axis': '(0)'}), '(all_dists, axis=0)\n', (2610, 2629), True, 'import numpy as np\n'), ((2646, 2683), 'numpy.expand_dims', 'np.expand_dims', (['label_counts'], {'axis': '(-1)'}), '(label_counts, axis=-1)\n', (2660, 2683), True, 'import numpy as np\n'), ((3349, 3387), 'os.path.join', 'os.path.join', (['out_dir', '"""distances.npy"""'], {}), "(out_dir, 'distances.npy')\n", (3361, 3387), False, 'import os, sys\n'), ((4146, 4169), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (4160, 4169), False, 'import os, sys\n'), ((4171, 4191), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4182, 4191), False, 'import os, sys\n'), ((114, 139), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'import os, sys\n'), ((1309, 1320), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1317, 1320), True, 'import numpy as np\n'), ((1638, 1658), 'numpy.argmin', 'np.argmin', (['all_dists'], {}), '(all_dists)\n', (1647, 1658), True, 'import numpy as np\n'), ((1595, 1620), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_i - p_j)'], {}), '(p_i - p_j)\n', (1609, 1620), True, 'import numpy as np\n'), ((2560, 2585), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_i - p_j)'], {}), '(p_i - p_j)\n', (2574, 2585), True, 'import numpy as np\n')]
|
from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline
from numpy import random, array, median, zeros, arange, hstack
from win32com.client import Dispatch
import math
myName = 'R_runmed_spline'
useMAD = True # use median absolute deviations instead of sum of squared residues
# -----------------------------------------------------------------------
def R_runmed_spline_MAIN(ARG3, Controller):
pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName]
# ARG3
x = ARG3[0][0]
y = ARG3[0][1]
sc = Dispatch("StatConnectorSrv.StatConnector")
sc.Init("R")
# get the best smoothing parameter
bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars)
# get the prediction error for this smoothing parameter
bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars)
# compare with original SSE
# is fit successful?
# return isSuccessfulFit, yFit, yEval, runMedData
SSE = sum(y ** 2)
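    # 1.4826 * MAD approximates the standard deviation of the residuals under Gaussian noise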
MAD = 1.4826 * median(abs(y))
if useMAD:
SSE = MAD
if bestPredErr < SSE:
isSuccessfulFit = True
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars)
yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars)
#
ppmArrs[ind] = [yFit, yEval]
else:
isSuccessfulFit = False
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit = zeros(len(x), 'd')
yEval = zeros(len(xEval), 'd')
#
ppmArrs[ind] = [yFit, yEval]
sc.Close()
return isSuccessfulFit, bestPredErr, ppmArrs
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars):
sparRange = array([float(i) for i in pars['spar range'].split(',')])
sparStepsNum = int(pars['spar steps number'])
sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5)
sparSet = arange(sparRange[0], sparRange[1], sparStep)
predErrSet = zeros(len(sparSet), 'd')
for i in range(len(sparSet)):
predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars)
predErrSet[i] = predErr
## p(zip(sparSet, predErrSet))
spar = sparSet[predErrSet == min(predErrSet)][-1] # take the last one (smoothest) if there are few
## print('spar ', spar)
return spar
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_predErr(x, y, **kwargs):
"""
just returns the prediction error
"""
K = int(kwargs['K'])
# --Related to K-fold CV---------------------------
L = len(x)
    N = L // K  # min length of pieces
W = list(range(L))
Z = list(range(1, K + 1))
Z = [N for j in Z]
R = L % K
Z[0:R] = [j + 1 for j in Z[0:R]] # length of the pieces
random.shuffle(W)
ind = 0
predErr = 0
allResiduals = array([])
SSE = sum(y ** 2) # VLAD. Why do I need this???
# ---running through K training/testings-------------
for val in Z:
j = math.floor(val)
# ---making training/testing subsets-------------
test = W[ind:ind + j]
test.sort()
train = W[0:ind] + W[ind + j:]
train.sort()
ind += j
# -----------------------------------------------
# ---fit runmed_spline here----------------------
yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs)
residualsTest = y[test] - yFit
predErr += sum(residualsTest ** 2)
allResiduals = hstack((allResiduals, residualsTest))
# -----------------------------------------------
if useMAD:
predErr = 1.4826 * median(abs(allResiduals))
return predErr
# -----------------------------------------------------------------------
if __name__ == '__main__':
from numpy import linspace, cos, lexsort, zeros, sin
from pylab import plot, show, subplot, savefig, clf, ylim
from pprint import pprint as p
from time import clock as c
x1 = linspace(0, 30, 300)
## y1 = cos(x1)
## y1 = zeros(len(x1),'d') #nice test
y1 = x1 * 0.03
y1 += random.normal(scale=0.2, size=y1.shape)
ind = lexsort(keys=(y1, x1))
x1 = x1[ind]
y1 = y1[ind]
t1 = c()
isSuccessfulFit, yFit, yEval, runMedData, predErr = \
R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1])
t2 = c()
print('done in %s seconds' % (t2 - t1))
subplot(211)
plot(x1, y1, 'bo')
plot(runMedData[0], runMedData[1], 'y^')
plot(x1, yEval, 'r+-')
ylim([-1.5, +1.5])
subplot(212)
plot(x1, y1 - yEval, 'go')
ylim([-1.5, +1.5])
show()
|
[
"numpy.random.normal",
"pylab.ylim",
"win32com.client.Dispatch",
"time.clock",
"pylab.subplot",
"math.floor",
"pylab.plot",
"pylab.show",
"numpy.hstack",
"aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline",
"numpy.array",
"numpy.linspace",
"numpy.lexsort",
"numpy.arange",
"numpy.random.shuffle"
] |
[((619, 661), 'win32com.client.Dispatch', 'Dispatch', (['"""StatConnectorSrv.StatConnector"""'], {}), "('StatConnectorSrv.StatConnector')\n", (627, 661), False, 'from win32com.client import Dispatch\n'), ((2552, 2596), 'numpy.arange', 'arange', (['sparRange[0]', 'sparRange[1]', 'sparStep'], {}), '(sparRange[0], sparRange[1], sparStep)\n', (2558, 2596), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((3556, 3573), 'numpy.random.shuffle', 'random.shuffle', (['W'], {}), '(W)\n', (3570, 3573), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((3624, 3633), 'numpy.array', 'array', (['[]'], {}), '([])\n', (3629, 3633), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((4806, 4826), 'numpy.linspace', 'linspace', (['(0)', '(30)', '(300)'], {}), '(0, 30, 300)\n', (4814, 4826), False, 'from numpy import linspace, cos, lexsort, zeros, sin\n'), ((4930, 4969), 'numpy.random.normal', 'random.normal', ([], {'scale': '(0.2)', 'size': 'y1.shape'}), '(scale=0.2, size=y1.shape)\n', (4943, 4969), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((4981, 5003), 'numpy.lexsort', 'lexsort', ([], {'keys': '(y1, x1)'}), '(keys=(y1, x1))\n', (4988, 5003), False, 'from numpy import linspace, cos, lexsort, zeros, sin\n'), ((5052, 5055), 'time.clock', 'c', ([], {}), '()\n', (5053, 5055), True, 'from time import clock as c\n'), ((5217, 5220), 'time.clock', 'c', ([], {}), '()\n', (5218, 5220), True, 'from time import clock as c\n'), ((5273, 5285), 'pylab.subplot', 'subplot', (['(211)'], {}), '(211)\n', (5280, 5285), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5291, 5309), 'pylab.plot', 'plot', (['x1', 'y1', '"""bo"""'], {}), "(x1, y1, 'bo')\n", (5295, 5309), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5315, 5355), 'pylab.plot', 'plot', (['runMedData[0]', 'runMedData[1]', '"""y^"""'], {}), "(runMedData[0], runMedData[1], 'y^')\n", (5319, 5355), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5361, 5383), 'pylab.plot', 'plot', (['x1', 'yEval', '"""r+-"""'], {}), "(x1, yEval, 'r+-')\n", (5365, 5383), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5389, 5407), 'pylab.ylim', 'ylim', (['[-1.5, +1.5]'], {}), '([-1.5, +1.5])\n', (5393, 5407), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5413, 5425), 'pylab.subplot', 'subplot', (['(212)'], {}), '(212)\n', (5420, 5425), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5431, 5457), 'pylab.plot', 'plot', (['x1', '(y1 - yEval)', '"""go"""'], {}), "(x1, y1 - yEval, 'go')\n", (5435, 5457), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5463, 5481), 'pylab.ylim', 'ylim', (['[-1.5, +1.5]'], {}), '([-1.5, +1.5])\n', (5467, 5481), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((5487, 5493), 'pylab.show', 'show', ([], {}), '()\n', (5491, 5493), False, 'from pylab import plot, show, subplot, savefig, clf, ylim\n'), ((3779, 3794), 'math.floor', 'math.floor', (['val'], {}), '(val)\n', (3789, 3794), False, 'import math\n'), ((4132, 4193), 'aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline', 'R_runmed_smooth_spline', (['x[train]', 'y[train]', 'x[test]'], {}), '(x[train], y[train], x[test], **kwargs)\n', (4154, 4193), False, 'from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import 
R_runmed_smooth_spline\n'), ((4302, 4339), 'numpy.hstack', 'hstack', (['(allResiduals, residualsTest)'], {}), '((allResiduals, residualsTest))\n', (4308, 4339), False, 'from numpy import random, array, median, zeros, arange, hstack\n'), ((1446, 1507), 'aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline', 'R_runmed_smooth_spline', (['x', 'y', 'x'], {'spar': 'bestSpar', 'sc': 'sc'}), '(x, y, x, spar=bestSpar, sc=sc, **pars)\n', (1468, 1507), False, 'from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline\n'), ((1541, 1606), 'aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit.R_runmed_smooth_spline', 'R_runmed_smooth_spline', (['x', 'y', 'xEval'], {'spar': 'bestSpar', 'sc': 'sc'}), '(x, y, xEval, spar=bestSpar, sc=sc, **pars)\n', (1563, 1606), False, 'from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline\n')]
|
# -*- coding: utf-8 -*-
import pickle
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem,DataStructs
def get_classes(path):
f = open(path, 'rb')
dict_ = pickle.load(f)
f.close()
classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True)
classes = [(x,y) for x,y in classes]
return classes
def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True):
  # Build Morgan fingerprints for the reactant and product SMILES separately and
  # return the product fingerprint concatenated with the (product - reactant)
  # difference fingerprint.
rsmi = rsmi.encode('utf-8')
psmi = psmi.encode('utf-8')
try:
mol = Chem.MolFromSmiles(rsmi)
except Exception as e:
print(e)
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(rxnfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build reactant fp due to {}".format(e))
return
rfp = fp
try:
mol = Chem.MolFromSmiles(psmi)
except Exception as e:
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(pfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build product fp due to {}".format(e))
return
pfp = fp
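  # The reaction fingerprint is the elementwise difference between the product
  # and reactant fingerprints; the returned feature vector concatenates the
  # product fingerprint with this difference.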
rxn_fp = pfp - rfp
final_fp = np.concatenate((pfp, rxn_fp))
return final_fp
|
[
"pickle.load",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"rdkit.Chem.DataStructs.ConvertToNumpyArray",
"numpy.empty",
"numpy.concatenate"
] |
[((196, 210), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (207, 210), False, 'import pickle\n'), ((1698, 1727), 'numpy.concatenate', 'np.concatenate', (['(pfp, rxn_fp)'], {}), '((pfp, rxn_fp))\n', (1712, 1727), True, 'import numpy as np\n'), ((694, 718), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['rsmi'], {}), '(rsmi)\n', (712, 718), False, 'from rdkit import Chem\n'), ((809, 938), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', ([], {'mol': 'mol', 'radius': '(2)', 'nBits': 'rxnfpsize', 'useFeatures': 'useFeatures', 'useChirality': 'useChirality'}), '(mol=mol, radius=2, nBits=rxnfpsize,\n useFeatures=useFeatures, useChirality=useChirality)\n', (846, 938), False, 'from rdkit.Chem import AllChem, DataStructs\n'), ((963, 999), 'numpy.empty', 'np.empty', (['rxnfpsize'], {'dtype': '"""float32"""'}), "(rxnfpsize, dtype='float32')\n", (971, 999), True, 'import numpy as np\n'), ((1009, 1052), 'rdkit.Chem.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fp_bit', 'fp'], {}), '(fp_bit, fp)\n', (1040, 1052), False, 'from rdkit.Chem import AllChem, DataStructs\n'), ((1201, 1225), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['psmi'], {}), '(psmi)\n', (1219, 1225), False, 'from rdkit import Chem\n'), ((1298, 1425), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', ([], {'mol': 'mol', 'radius': '(2)', 'nBits': 'pfpsize', 'useFeatures': 'useFeatures', 'useChirality': 'useChirality'}), '(mol=mol, radius=2, nBits=pfpsize,\n useFeatures=useFeatures, useChirality=useChirality)\n', (1335, 1425), False, 'from rdkit.Chem import AllChem, DataStructs\n'), ((1450, 1484), 'numpy.empty', 'np.empty', (['pfpsize'], {'dtype': '"""float32"""'}), "(pfpsize, dtype='float32')\n", (1458, 1484), True, 'import numpy as np\n'), ((1494, 1537), 'rdkit.Chem.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fp_bit', 'fp'], {}), '(fp_bit, fp)\n', (1525, 1537), False, 'from rdkit.Chem import AllChem, DataStructs\n')]
|
"""Core experiments for the dependency label prediction task."""
import collections
import copy
import logging
from typing import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type,
Union)
from ldp import datasets, learning
from ldp.models import probes, projections
from ldp.parse import ptb
from ldp.parse import representations as reps
from ldp.utils.typing import Device
import numpy
import torch
import wandb
UNK = 'unk'
class DLPIndexer:
"""Map pairs of words to their syntactic relationship, if any."""
def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK):
"""Map each relation label to an integer.
Args:
samples (Sequence[ptb.Sample]): The samples from which to determine
possible relations.
            unk (str): Label to use when an un-indexed dependency label is
                encountered.
"""
labels = {rel for sample in samples for rel in sample.relations}
self.indexer = {unk: 0}
for label in sorted(labels):
self.indexer[label] = len(self.indexer)
self.unk = unk
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to
the "unk" label, even if there is no relationship between
v and w.
"""
heads, relations = sample.heads, sample.relations
labels = torch.empty(len(heads), len(heads), dtype=torch.long)
labels.fill_(self.indexer[self.unk])
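        # The sentence root has head == -1; its label is stored on the
        # diagonal so that every word contributes exactly one labeled
        # (dependent, head) entry.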
for word, (head, rel) in enumerate(zip(heads, relations)):
if head == -1:
labels[word, word] = self.indexer[rel]
else:
label = self.indexer.get(rel, self.indexer[self.unk])
labels[word, head] = label
return labels
def __len__(self) -> int:
"""Return the number of unique labels for this task."""
return len(self.indexer)
class ControlDLPIndexer:
"""Map pairs of words to arbitrary syntactic relationships."""
def __init__(self,
samples: Sequence[ptb.Sample],
dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None):
"""Map each relation label to an arbitrary (integer) label.
We only do this for pairs of words which have a head-dependent
relationship in the original dataset.
Args:
samples (Sequence[ptb.Samples]): The samples from which to pull
possible word pairs.
dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A
distribution to use when sampling tags per word type.
                By default, it is computed from the list of samples.
"""
if dist is None:
counts: Dict[str, int] = collections.defaultdict(lambda: 0)
for sample in samples:
for relation in sample.relations:
counts[relation] += 1
dist = numpy.array([float(count) for count in counts.values()])
dist /= numpy.sum(dist)
assert dist is not None, 'uninitialized distribution?'
self.dist = dist
self.rels: Dict[Tuple[str, str], int] = {}
for sample in samples:
sentence = sample.sentence
heads = sample.heads
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sentence[dep], sentence[head])
if words not in self.rels:
# Add one so that 0 is reserved for "no relationship" tag.
rel = numpy.random.choice(len(dist), p=dist) + 1
self.rels[words] = rel
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
            torch.Tensor: For length W sentence, returns shape (W, W) matrix
                where element (v, w) is the index of the label describing
                the relationship between word v and w, if any. Defaults to
                0, the null label, when there is no head-dependent
                relationship between v and w.
"""
heads = sample.heads
labels = torch.zeros(len(heads), len(heads), dtype=torch.long)
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sample.sentence[dep], sample.sentence[head])
labels[dep, head] = self.rels.get(words, 0)
return labels
def __len__(self) -> int:
"""Return the number of relationships, including the null one."""
return len(self.dist) + 1
class DLPTaskDataset(datasets.TaskDataset):
"""Iterate over (word representation pair, dependency label) pairs."""
def __init__(
self,
representations: reps.RepresentationLayerDataset,
annotations: Sequence[ptb.Sample],
indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer,
**kwargs: Any,
):
"""Initialize dataset by mapping each dependency label to an index.
The kwargs are forwarded to indexer when it is instantiated.
Args:
representations (representations.RepresentationsLayerDataset): Word
representations corresponding to the words to be paired and
labeled.
annotations (Sequence[ptb.PTBSample]): The PTB annotations from
which to pull dependency labels.
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer
to use for mapping PTB dependency label annotations to integer
tensors. Instantiated with given annotations unless the
samples keyword is set in kwargs.
Raises:
ValueError: If number of representations/annotations do not match.
"""
if len(representations) != len(annotations):
raise ValueError(f'got {len(representations)} representations '
f'but {len(annotations)} annotations')
self.representations = representations
self.annotations = annotations
kwargs = kwargs.copy()
kwargs.setdefault('samples', annotations)
self.indexer = indexer(**kwargs)
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""Return (representations, integral POS tags) for index'th sentence.
Args:
index (int): Index of the sentence in the dataset.
Returns:
Tuple[torch.Tensor, torch.Tensor]: First tensor is shape
(sentence_length, representation_dimension) containing word
representations, and second is shape (sentence_length,)
containing integral POS tags.
"""
representations = self.representations[index]
annotations = self.annotations[index]
assert len(representations) == len(
annotations.sentence), 'diff sentence lengths?'
rels = self.indexer(annotations)
# Find all pairs of words sharing an edge.
indexes = set(range(len(representations)))
pairs = [(i, j) for i in indexes for j in indexes if rels[i, j]]
assert pairs and len(pairs) == len(representations), 'missing edges?'
# Stack everything before returning it.
bigrams = torch.stack([
torch.stack((representations[i], representations[j]))
for i, j in pairs
])
labels = torch.stack([rels[i, j] for i, j in pairs])
return bigrams, labels
def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]:
"""Yield all (sentence representations, sentence POS tags) samples."""
for index in range(len(self)):
yield self[index]
def __len__(self) -> int:
"""Return the number of sentences (batches) in the dataset."""
return len(self.annotations)
@property
def sample_representations_shape(self) -> Sequence[int]:
"""Return the dimensionality of the representation pairs."""
return (2, self.representations.dataset.dimension)
@property
def sample_features_shape(self) -> Sequence[int]:
"""Return the shape of each individual POS tag.
Since POS tags are integral scalars, there is no such shape!
"""
return ()
def count_samples(self) -> int:
"""Return the number of words in the dataset."""
return sum(
self.representations.dataset.length(index)
for index in range(len(self.representations)))
def count_unique_features(self) -> int:
"""Return number of unique POS seen in data."""
return len(self.indexer)
# Define the valid probe types for this task.
Probe = Union[probes.Linear, probes.MLP]
def train(train_dataset: datasets.TaskDataset,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
probe_t: Type[Probe] = probes.Linear,
project_to: Optional[int] = None,
share_projection: bool = False,
epochs: int = 25,
patience: int = 4,
lr: float = 1e-3,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Tuple[Probe, float]:
"""Train a probe on dependency label prediction.
Args:
train_dataset (TaskDataset): Training data for probe.
dev_dataset (TaskDataset): Validation data for probe, used for early
stopping.
test_dataset (TaskDataset): Test data for probe, used to compute
final accuracy after training.
probe_t (Type[Probe], optional): Probe type to train.
Defaults to probes.Linear.
project_to (Optional[int], optional): Project representations to this
dimensionality. Defaults to no projection.
share_projection (bool): If set, project the left and right components
of pairwise probes with the same projection. E.g. if the probe is
bilinear of the form xAy, we will always compute (Px)A(Py) as
opposed to (Px)A(Qy) for distinct projections P, Q. Defaults to NOT
shared.
epochs (int, optional): Maximum passes through the training dataset.
Defaults to 25.
patience (int, optional): Allow dev loss to not improve for this many
epochs, then stop training. Defaults to 4.
lr (float, optional): Learning rate for optimizer. Defaults to 1e-3.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
        also_log_to_wandb (bool, optional): If set, log
training data to wandb. By default, wandb is not used.
Returns:
Tuple[Probe, float]: The trained probe and its test accuracy.
"""
log = logging.getLogger(__name__)
device = device or 'cpu'
ndims = train_dataset.sample_representations_shape[-1]
log.info('representations have dimension %d', ndims)
ntags = train_dataset.count_unique_features()
assert ntags is not None, 'no label count, is dataset for different task?'
log.info('dependency labeling task has %d tags', ntags)
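    # When the projection is shared, one (ndims -> project_to) map is applied
    # to each word of the pair; otherwise a single map acts on the concatenated
    # pair, so its input and output sizes are doubled.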
if project_to is None or ndims == project_to:
logging.info('projection dim = reps dim, not projecting')
projection = None
elif share_projection:
projection = projections.Projection(ndims, project_to)
else:
projection = projections.Projection(2 * ndims, 2 * project_to)
probe = probe_t(2 * (project_to or ndims), ntags, project=projection)
learning.train(probe,
train_dataset,
dev_dataset=dev_dataset,
stopper=learning.EarlyStopping(patience=patience),
epochs=epochs,
lr=lr,
device=device,
also_log_to_wandb=also_log_to_wandb)
accuracy = learning.test(probe, test_dataset, device=device)
return probe, accuracy
# TODO(evandez): May as well commonize this, since it's shared with POS.
def axis_alignment(
probe: Probe,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]:
"""Measure whether the given probe is axis aligned.
Args:
probe (Probe): The probe to evaluate.
dev_dataset (datasets.TaskDataset): Data used to determine which axes
to cut.
test_dataset (datasets.TaskDataset): Data used to determine the effect
of cutting an axis.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (bool, optional): If set, log results to wandb.
Returns:
Sequence[Tuple[int, float]]: The ablated axes paired with optimal probe
accuracy after that axis is zeroed.
"""
log = logging.getLogger(__name__)
projection = probe.project
assert projection is not None, 'no projection?'
axes = set(range(projection.project.in_features))
ablated: Set[int] = set()
accuracies = []
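    # Greedily ablate one projection axis per iteration, always zeroing the
    # axis whose removal costs the least dev accuracy, and record test
    # accuracy after each cut.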
while axes:
best_model, best_axis, best_accuracy = probe, -1, -1.
for axis in axes:
model = copy.deepcopy(best_model).eval()
assert model.project is not None, 'no projection?'
model.project.project.weight.data[:, sorted(ablated | {axis})] = 0
accuracy = learning.test(model, dev_dataset, device=device)
if accuracy > best_accuracy:
best_model = model
best_axis = axis
best_accuracy = accuracy
accuracy = learning.test(best_model, test_dataset, device=device)
log.info('ablating axis %d, test accuracy %f', best_axis, accuracy)
if also_log_to_wandb:
wandb.log({
'axis': best_axis,
'dev accuracy': best_accuracy,
'test accuracy': accuracy,
})
axes.remove(best_axis)
ablated.add(best_axis)
accuracies.append((best_axis, accuracy))
return tuple(accuracies)
|
[
"logging.getLogger",
"wandb.log",
"torch.stack",
"ldp.learning.EarlyStopping",
"numpy.sum",
"collections.defaultdict",
"copy.deepcopy",
"ldp.models.projections.Projection",
"logging.info",
"ldp.learning.test"
] |
[((11285, 11312), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (11302, 11312), False, 'import logging\n'), ((12378, 12427), 'ldp.learning.test', 'learning.test', (['probe', 'test_dataset'], {'device': 'device'}), '(probe, test_dataset, device=device)\n', (12391, 12427), False, 'from ldp import datasets, learning\n'), ((13442, 13469), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (13459, 13469), False, 'import logging\n'), ((7924, 7967), 'torch.stack', 'torch.stack', (['[rels[i, j] for i, j in pairs]'], {}), '([rels[i, j] for i, j in pairs])\n', (7935, 7967), False, 'import torch\n'), ((11709, 11766), 'logging.info', 'logging.info', (['"""projection dim = reps dim, not projecting"""'], {}), "('projection dim = reps dim, not projecting')\n", (11721, 11766), False, 'import logging\n'), ((14199, 14253), 'ldp.learning.test', 'learning.test', (['best_model', 'test_dataset'], {'device': 'device'}), '(best_model, test_dataset, device=device)\n', (14212, 14253), False, 'from ldp import datasets, learning\n'), ((3130, 3165), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (3153, 3165), False, 'import collections\n'), ((3388, 3403), 'numpy.sum', 'numpy.sum', (['dist'], {}), '(dist)\n', (3397, 3403), False, 'import numpy\n'), ((11841, 11882), 'ldp.models.projections.Projection', 'projections.Projection', (['ndims', 'project_to'], {}), '(ndims, project_to)\n', (11863, 11882), False, 'from ldp.models import probes, projections\n'), ((11914, 11963), 'ldp.models.projections.Projection', 'projections.Projection', (['(2 * ndims)', '(2 * project_to)'], {}), '(2 * ndims, 2 * project_to)\n', (11936, 11963), False, 'from ldp.models import probes, projections\n'), ((12170, 12211), 'ldp.learning.EarlyStopping', 'learning.EarlyStopping', ([], {'patience': 'patience'}), '(patience=patience)\n', (12192, 12211), False, 'from ldp import datasets, learning\n'), ((13981, 14029), 'ldp.learning.test', 'learning.test', (['model', 'dev_dataset'], {'device': 'device'}), '(model, dev_dataset, device=device)\n', (13994, 14029), False, 'from ldp import datasets, learning\n'), ((14373, 14465), 'wandb.log', 'wandb.log', (["{'axis': best_axis, 'dev accuracy': best_accuracy, 'test accuracy': accuracy}"], {}), "({'axis': best_axis, 'dev accuracy': best_accuracy,\n 'test accuracy': accuracy})\n", (14382, 14465), False, 'import wandb\n'), ((7812, 7865), 'torch.stack', 'torch.stack', (['(representations[i], representations[j])'], {}), '((representations[i], representations[j]))\n', (7823, 7865), False, 'import torch\n'), ((13783, 13808), 'copy.deepcopy', 'copy.deepcopy', (['best_model'], {}), '(best_model)\n', (13796, 13808), False, 'import copy\n')]
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
def setup(self, *args):
self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]
def time_anderson_ksamp(self):
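        # anderson_ksamp can emit a UserWarning (e.g. when the p-value falls
        # outside its tabulated range); silence it to keep benchmark runs quiet.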
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
param_names = ['alternative']
params = [
['two-sided', 'less', 'greater']
]
def setup(self, mode):
a = np.random.rand(2,2) * 10
self.a = a
def time_fisher_exact(self, alternative):
oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)
class InferentialStats(Benchmark):
def setup(self):
np.random.seed(12345678)
self.a = stats.norm.rvs(loc=5, scale=10, size=500)
self.b = stats.norm.rvs(loc=8, scale=10, size=20)
self.c = stats.norm.rvs(loc=8, scale=20, size=20)
def time_ttest_ind_same_var(self):
        # test different sized samples with the same variance
stats.ttest_ind(self.a, self.b)
stats.ttest_ind(self.a, self.b, equal_var=False)
def time_ttest_ind_diff_var(self):
        # test different sized samples with different variances
stats.ttest_ind(self.a, self.c)
stats.ttest_ind(self.a, self.c, equal_var=False)
class Distribution(Benchmark):
param_names = ['distribution', 'properties']
params = [
['cauchy', 'gamma', 'beta'],
['pdf', 'cdf', 'rvs', 'fit']
]
def setup(self, distribution, properties):
np.random.seed(12345678)
self.x = np.random.rand(100)
def time_distribution(self, distribution, properties):
if distribution == 'gamma':
if properties == 'pdf':
stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
elif properties == 'cdf':
stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
elif properties == 'rvs':
stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
elif properties == 'fit':
stats.gamma.fit(self.x, loc=4, scale=10)
elif distribution == 'cauchy':
if properties == 'pdf':
stats.cauchy.pdf(self.x, loc=4, scale=10)
elif properties == 'cdf':
stats.cauchy.cdf(self.x, loc=4, scale=10)
elif properties == 'rvs':
stats.cauchy.rvs(size=1000, loc=4, scale=10)
elif properties == 'fit':
stats.cauchy.fit(self.x, loc=4, scale=10)
elif distribution == 'beta':
if properties == 'pdf':
stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'cdf':
stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'rvs':
stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
elif properties == 'fit':
stats.beta.fit(self.x, loc=4, scale=10)
# Retain old benchmark results (remove this if changing the benchmark)
time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
param_names = ['n_levels']
params = [
[10, 1000]
]
def setup(self, n_levels):
np.random.seed(12345678)
self.levels = np.random.randint(n_levels, size=(1000, 10))
def time_mode(self, n_levels):
stats.mode(self.levels, axis=0)
|
[
"scipy.stats.beta.rvs",
"scipy.stats.gamma.rvs",
"numpy.random.rand",
"scipy.stats.norm.rvs",
"scipy.stats.ttest_ind",
"scipy.stats.gamma.pdf",
"scipy.stats.cauchy.fit",
"numpy.random.seed",
"scipy.stats.beta.fit",
"warnings.simplefilter",
"numpy.random.normal",
"scipy.stats.gamma.cdf",
"scipy.stats.fisher_exact",
"scipy.stats.gamma.fit",
"scipy.stats.beta.cdf",
"scipy.stats.cauchy.cdf",
"scipy.stats.cauchy.rvs",
"scipy.stats.anderson_ksamp",
"scipy.stats.cauchy.pdf",
"scipy.stats.mode",
"warnings.catch_warnings",
"numpy.random.randint",
"scipy.stats.beta.pdf"
] |
[((813, 864), 'scipy.stats.fisher_exact', 'stats.fisher_exact', (['self.a'], {'alternative': 'alternative'}), '(self.a, alternative=alternative)\n', (831, 864), True, 'import scipy.stats as stats\n'), ((931, 955), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (945, 955), True, 'import numpy as np\n'), ((973, 1014), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(5)', 'scale': '(10)', 'size': '(500)'}), '(loc=5, scale=10, size=500)\n', (987, 1014), True, 'import scipy.stats as stats\n'), ((1032, 1072), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(8)', 'scale': '(10)', 'size': '(20)'}), '(loc=8, scale=10, size=20)\n', (1046, 1072), True, 'import scipy.stats as stats\n'), ((1090, 1130), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'loc': '(8)', 'scale': '(20)', 'size': '(20)'}), '(loc=8, scale=20, size=20)\n', (1104, 1130), True, 'import scipy.stats as stats\n'), ((1232, 1263), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (1247, 1263), True, 'import scipy.stats as stats\n'), ((1272, 1320), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.b'], {'equal_var': '(False)'}), '(self.a, self.b, equal_var=False)\n', (1287, 1320), True, 'import scipy.stats as stats\n'), ((1432, 1463), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.c'], {}), '(self.a, self.c)\n', (1447, 1463), True, 'import scipy.stats as stats\n'), ((1472, 1520), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['self.a', 'self.c'], {'equal_var': '(False)'}), '(self.a, self.c, equal_var=False)\n', (1487, 1520), True, 'import scipy.stats as stats\n'), ((1762, 1786), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (1776, 1786), True, 'import numpy as np\n'), ((1804, 1823), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (1818, 1823), True, 'import numpy as np\n'), ((3507, 3531), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (3521, 3531), True, 'import numpy as np\n'), ((3554, 3598), 'numpy.random.randint', 'np.random.randint', (['n_levels'], {'size': '(1000, 10)'}), '(n_levels, size=(1000, 10))\n', (3571, 3598), True, 'import numpy as np\n'), ((3643, 3674), 'scipy.stats.mode', 'stats.mode', (['self.levels'], {'axis': '(0)'}), '(self.levels, axis=0)\n', (3653, 3674), True, 'import scipy.stats as stats\n'), ((283, 317), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'i', 'size': '(1000)'}), '(loc=i, size=1000)\n', (299, 317), True, 'import numpy as np\n'), ((386, 411), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (409, 411), False, 'import warnings\n'), ((425, 469), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (446, 469), False, 'import warnings\n'), ((482, 513), 'scipy.stats.anderson_ksamp', 'stats.anderson_ksamp', (['self.rand'], {}), '(self.rand)\n', (502, 513), True, 'import scipy.stats as stats\n'), ((691, 711), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (705, 711), True, 'import numpy as np\n'), ((1972, 2017), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['self.x'], {'a': '(5)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, loc=4, scale=10)\n', (1987, 2017), True, 'import scipy.stats as stats\n'), ((2072, 2117), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['self.x'], {'a': '(5)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, loc=4, scale=10)\n', (2087, 2117), True, 'import scipy.stats as 
stats\n'), ((2407, 2448), 'scipy.stats.cauchy.pdf', 'stats.cauchy.pdf', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2423, 2448), True, 'import scipy.stats as stats\n'), ((2172, 2220), 'scipy.stats.gamma.rvs', 'stats.gamma.rvs', ([], {'size': '(1000)', 'a': '(5)', 'loc': '(4)', 'scale': '(10)'}), '(size=1000, a=5, loc=4, scale=10)\n', (2187, 2220), True, 'import scipy.stats as stats\n'), ((2503, 2544), 'scipy.stats.cauchy.cdf', 'stats.cauchy.cdf', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2519, 2544), True, 'import scipy.stats as stats\n'), ((2829, 2878), 'scipy.stats.beta.pdf', 'stats.beta.pdf', (['self.x'], {'a': '(5)', 'b': '(3)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, b=3, loc=4, scale=10)\n', (2843, 2878), True, 'import scipy.stats as stats\n'), ((2275, 2315), 'scipy.stats.gamma.fit', 'stats.gamma.fit', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2290, 2315), True, 'import scipy.stats as stats\n'), ((2599, 2643), 'scipy.stats.cauchy.rvs', 'stats.cauchy.rvs', ([], {'size': '(1000)', 'loc': '(4)', 'scale': '(10)'}), '(size=1000, loc=4, scale=10)\n', (2615, 2643), True, 'import scipy.stats as stats\n'), ((2933, 2982), 'scipy.stats.beta.cdf', 'stats.beta.cdf', (['self.x'], {'a': '(5)', 'b': '(3)', 'loc': '(4)', 'scale': '(10)'}), '(self.x, a=5, b=3, loc=4, scale=10)\n', (2947, 2982), True, 'import scipy.stats as stats\n'), ((2698, 2739), 'scipy.stats.cauchy.fit', 'stats.cauchy.fit', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (2714, 2739), True, 'import scipy.stats as stats\n'), ((3037, 3089), 'scipy.stats.beta.rvs', 'stats.beta.rvs', ([], {'size': '(1000)', 'a': '(5)', 'b': '(3)', 'loc': '(4)', 'scale': '(10)'}), '(size=1000, a=5, b=3, loc=4, scale=10)\n', (3051, 3089), True, 'import scipy.stats as stats\n'), ((3144, 3183), 'scipy.stats.beta.fit', 'stats.beta.fit', (['self.x'], {'loc': '(4)', 'scale': '(10)'}), '(self.x, loc=4, scale=10)\n', (3158, 3183), True, 'import scipy.stats as stats\n')]
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from shutil import copy
import subprocess
import math
import warnings
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
from finn.core.datatype import DataType
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
from . import templates
class StreamingFIFO(HLSCustomOp):
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.strm_fifo_wrapper = templates.strm_fifo_wrapper
def get_nodeattr_types(self):
my_attrs = {
# FIFO depth
"depth": ("i", True, 0),
# folded shape of input/output
"folded_shape": ("ints", True, []),
# FINN DataTypes for inputs/outputs
"dataType": ("s", True, ""),
# Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style": ("s", False, "rtl", {"rtl", "vivado"}),
# FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style": (
"s",
False,
"auto",
{"auto", "block", "distributed", "ultra"},
),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == tuple(exp_ishape), "Unexpected input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
def infer_node_datatype(self, model):
node = self.onnx_node
idt = model.get_tensor_datatype(node.input[0])
if idt != self.get_input_datatype():
warn_str = "inputDataType changing for %s: %s -> %s " % (
node.name,
str(self.get_input_datatype()),
str(idt),
)
warnings.warn(warn_str)
self.set_nodeattr("dataType", idt.name)
# data type stays the same
model.set_tensor_datatype(node.output[0], idt)
def verify_node(self):
pass
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s" % (node.name)
return prefixed_top_name
def code_generation_ipgen(self, model, fpgapart, clk):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
os.makedirs(verilog_dir)
# copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir = "/workspace/finn/finn-rtllib/memstream/hdl/"
Q_file = os.path.join(memstream_dir, "Q_srl.v")
copy(Q_file, verilog_dir)
# empty code gen dictionary for new entries
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
self.code_gen_dict["$LAYER_NAME$"] = [
"{}_{}".format(self.onnx_node.name, self.onnx_node.name)
]
# make instream width a multiple of 8 for axi interface
in_width = self.get_instream_width_padded()
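        # the occupancy counter must be wide enough to represent depth - 1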
count_width = int(self.get_nodeattr("depth") - 1).bit_length()
self.code_gen_dict["$COUNT_RANGE$"] = ["[{}:0]".format(count_width - 1)]
self.code_gen_dict["$IN_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$OUT_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$WIDTH$"] = [str(in_width)]
self.code_gen_dict["$DEPTH$"] = [str(self.get_nodeattr("depth"))]
template = self.strm_fifo_wrapper
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "{}.v".format(self.onnx_node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_singlenode_code(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
# prepare the IP packaging tcl template
template = templates.ip_package_tcl
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
# note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"] = ["."]
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "package_ip.tcl"), "w")
f.write(template)
f.close()
# create a shell script and call Vivado to invoke the IP pkg script
make_project_sh = verilog_dir + "/make_ip.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(verilog_dir))
f.write("vivado -mode batch -source package_ip.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
# set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path", verilog_dir)
self.set_nodeattr("ip_path", verilog_dir)
vlnv = "xilinx.com:hls:%s:1.0" % (self.onnx_node.name)
self.set_nodeattr("ip_vlnv", vlnv)
self.code_gen_dict.clear()
def get_normal_input_shape(self):
depth = self.get_nodeattr("depth")
# depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
assert depth >= 2, """Depth is too low"""
if depth > 256 and self.get_nodeattr("impl_style") == "rtl":
warnings.warn(
"Depth is high, set between 2 and 256 for efficient SRL implementation"
)
# derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
# so to achieve this the two inner dimensions are multiplied
# and together with all previous dimensions
# this gives the normal input shape
folded_shape = self.get_nodeattr("folded_shape")
# extract inner dimension
inner_dim = folded_shape[-1]
# multiply with the next inner dimension
folding_factor = folded_shape[-2] * inner_dim
normal_ishape = []
# create the normal_ishape
for i in range(len(folded_shape) - 2):
normal_ishape.append(folded_shape[i])
normal_ishape.append(folding_factor)
return normal_ishape
def get_normal_output_shape(self):
return self.get_normal_input_shape()
def get_folded_input_shape(self):
return self.get_nodeattr("folded_shape")
def get_folded_output_shape(self):
return self.get_nodeattr("folded_shape")
def get_instream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def get_outstream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
inp = context[node.input[0]]
exp_shape = self.get_normal_input_shape()
if mode == "cppsim":
output = inp
output = np.asarray([output], dtype=np.float32).reshape(*exp_shape)
context[node.output[0]] = output
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
# create a npy file for the input of the node
assert (
str(inp.dtype) == "float32"
), """Input datatype is
not float32 as expected."""
expected_inp_shape = self.get_folded_input_shape()
reshaped_input = inp.reshape(expected_inp_shape)
if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
# store bipolar activations as binary
reshaped_input = (reshaped_input + 1) / 2
export_idt = DataType.BINARY
else:
export_idt = DataType[self.get_nodeattr("dataType")]
# make copy before saving the array
reshaped_input = reshaped_input.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
sim = self.get_rtlsim()
nbits = self.get_instream_width()
inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
output = self.rtlsim(sim, inp)
odt = DataType[self.get_nodeattr("dataType")]
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
oshape = self.get_normal_output_shape()
output = np.asarray([output], dtype=np.float32).reshape(*oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
def get_number_output_values(self):
folded_oshape = self.get_folded_output_shape()
return np.prod(folded_oshape[:-1])
def global_includes(self):
pass
def defines(self, var):
pass
def read_npy_data(self):
pass
def strm_decl(self):
pass
def docompute(self):
pass
def dataoutstrm(self):
pass
def save_as_npy(self):
pass
def blackboxfunction(self):
pass
def pragmas(self):
pass
def code_generation_ipi(self):
impl_style = self.get_nodeattr("impl_style")
if impl_style == "rtl":
return super().code_generation_ipi()
elif impl_style == "vivado":
cmd = []
node_name = self.onnx_node.name
depth = self.get_nodeattr("depth")
ram_style = self.get_nodeattr("ram_style")
# create a hierarchy for this layer, with the same port names
clk_name = self.get_verilog_top_module_intf_names()["clk"][0]
rst_name = self.get_verilog_top_module_intf_names()["rst"][0]
dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0]
din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0]
cmd.append("create_bd_cell -type hier %s" % node_name)
cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name))
cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name))
cmd.append(
"create_bd_intf_pin -mode Master "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"
% (node_name, dout_name)
)
cmd.append(
"create_bd_intf_pin -mode Slave "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name)
)
# instantiate and configure DWC
cmd.append(
"create_bd_cell -type ip "
"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo" % node_name
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "
"[get_bd_cells /%s/fifo]" % (depth, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "
"[get_bd_cells /%s/fifo]" % (ram_style, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "
"[get_bd_cells /%s/fifo]"
% (np.ceil(self.get_outstream_width() / 8), node_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, dout_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, din_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aresetn]"
% (node_name, rst_name, node_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aclk]" % (node_name, clk_name, node_name)
)
return cmd
else:
raise Exception(
"FIFO implementation style %s not supported, please use rtl or vivado"
% impl_style
)
def bram_estimation(self):
"""Calculates resource estimation for BRAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "block"):
# Non-BRAM based implementation
return 0
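        # Each case below corresponds to one aspect ratio of a Xilinx 18Kb BRAM
        # primitive: 16K x 1, 8K x 2, 4K x 4, 2K x 9, 1K x 18 and 512 x 36.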
if W == 1:
return math.ceil(depth / 16384)
elif W == 2:
return math.ceil(depth / 8192)
elif W <= 4:
return (math.ceil(depth / 4096)) * (math.ceil(W / 4))
elif W <= 9:
return (math.ceil(depth / 2048)) * (math.ceil(W / 9))
elif W <= 18 or depth > 512:
return (math.ceil(depth / 1024)) * (math.ceil(W / 18))
else:
return (math.ceil(depth / 512)) * (math.ceil(W / 36))
def uram_estimation(self):
"""Calculates resource estimation for URAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"):
            # Non-URAM based implementation
return 0
else:
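            # an UltraScale+ URAM block is 4K deep x 72 bits wide (288 Kb)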
return (math.ceil(depth / 4096)) * (math.ceil(W / 72))
def bram_efficiency_estimation(self):
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
bram16_est = self.bram_estimation()
if bram16_est == 0:
return 1
wbits = W * depth
bram16_est_capacity = bram16_est * 36 * 512
return wbits / bram16_est_capacity
def lut_estimation(self):
"""Calculates resource estimations for LUTs"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
address_luts = 2 * math.ceil(math.log(depth, 2))
if impl == "rtl" or (impl == "vivado" and ram_type == "distributed"):
ram_luts = (math.ceil(depth / 32)) * (math.ceil(W / 2))
else:
ram_luts = 0
return int(address_luts + ram_luts)
def prepare_rtlsim(self):
assert self.get_nodeattr("impl_style") != "vivado", (
"StreamingFIFO impl_style "
"cannot be vivado for rtlsim. Only impl_style=rtl supported."
)
super().prepare_rtlsim()
|
[
"numpy.prod",
"finn.util.data_packing.rtlsim_output_to_npy",
"math.ceil",
"os.makedirs",
"subprocess.Popen",
"os.path.join",
"numpy.asarray",
"math.log",
"shutil.copy",
"warnings.warn",
"numpy.load",
"numpy.random.randn"
] |
[((4864, 4888), 'os.makedirs', 'os.makedirs', (['verilog_dir'], {}), '(verilog_dir)\n', (4875, 4888), False, 'import os\n'), ((5036, 5074), 'os.path.join', 'os.path.join', (['memstream_dir', '"""Q_srl.v"""'], {}), "(memstream_dir, 'Q_srl.v')\n", (5048, 5074), False, 'import os\n'), ((5083, 5108), 'shutil.copy', 'copy', (['Q_file', 'verilog_dir'], {}), '(Q_file, verilog_dir)\n', (5087, 5108), False, 'from shutil import copy\n'), ((7852, 7906), 'subprocess.Popen', 'subprocess.Popen', (['bash_command'], {'stdout': 'subprocess.PIPE'}), '(bash_command, stdout=subprocess.PIPE)\n', (7868, 7906), False, 'import subprocess\n'), ((12985, 13012), 'numpy.prod', 'np.prod', (['folded_oshape[:-1]'], {}), '(folded_oshape[:-1])\n', (12992, 13012), True, 'import numpy as np\n'), ((4195, 4218), 'warnings.warn', 'warnings.warn', (['warn_str'], {}), '(warn_str)\n', (4208, 4218), False, 'import warnings\n'), ((7260, 7303), 'os.path.join', 'os.path.join', (['verilog_dir', '"""package_ip.tcl"""'], {}), "(verilog_dir, 'package_ip.tcl')\n", (7272, 7303), False, 'import os\n'), ((8571, 8662), 'warnings.warn', 'warnings.warn', (['"""Depth is high, set between 2 and 256 for efficient SRL implementation"""'], {}), "(\n 'Depth is high, set between 2 and 256 for efficient SRL implementation')\n", (8584, 8662), False, 'import warnings\n'), ((16926, 16950), 'math.ceil', 'math.ceil', (['(depth / 16384)'], {}), '(depth / 16384)\n', (16935, 16950), False, 'import math\n'), ((3420, 3444), 'numpy.random.randn', 'np.random.randn', (['*oshape'], {}), '(*oshape)\n', (3435, 3444), True, 'import numpy as np\n'), ((12233, 12321), 'finn.util.data_packing.rtlsim_output_to_npy', 'rtlsim_output_to_npy', (['output', 'out_npy_path', 'odt', 'out_shape', 'packed_bits', 'target_bits'], {}), '(output, out_npy_path, odt, out_shape, packed_bits,\n target_bits)\n', (12253, 12321), False, 'from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy\n'), ((12407, 12428), 'numpy.load', 'np.load', (['out_npy_path'], {}), '(out_npy_path)\n', (12414, 12428), True, 'import numpy as np\n'), ((16991, 17014), 'math.ceil', 'math.ceil', (['(depth / 8192)'], {}), '(depth / 8192)\n', (17000, 17014), False, 'import math\n'), ((17810, 17833), 'math.ceil', 'math.ceil', (['(depth / 4096)'], {}), '(depth / 4096)\n', (17819, 17833), False, 'import math\n'), ((17838, 17855), 'math.ceil', 'math.ceil', (['(W / 72)'], {}), '(W / 72)\n', (17847, 17855), False, 'import math\n'), ((18497, 18515), 'math.log', 'math.log', (['depth', '(2)'], {}), '(depth, 2)\n', (18505, 18515), False, 'import math\n'), ((18620, 18641), 'math.ceil', 'math.ceil', (['(depth / 32)'], {}), '(depth / 32)\n', (18629, 18641), False, 'import math\n'), ((18646, 18662), 'math.ceil', 'math.ceil', (['(W / 2)'], {}), '(W / 2)\n', (18655, 18662), False, 'import math\n'), ((10598, 10636), 'numpy.asarray', 'np.asarray', (['[output]'], {'dtype': 'np.float32'}), '([output], dtype=np.float32)\n', (10608, 10636), True, 'import numpy as np\n'), ((11566, 11607), 'os.path.join', 'os.path.join', (['code_gen_dir', '"""input_0.npy"""'], {}), "(code_gen_dir, 'input_0.npy')\n", (11578, 11607), False, 'import os\n'), ((12502, 12540), 'numpy.asarray', 'np.asarray', (['[output]'], {'dtype': 'np.float32'}), '([output], dtype=np.float32)\n', (12512, 12540), True, 'import numpy as np\n'), ((17056, 17079), 'math.ceil', 'math.ceil', (['(depth / 4096)'], {}), '(depth / 4096)\n', (17065, 17079), False, 'import math\n'), ((17084, 17100), 'math.ceil', 'math.ceil', (['(W / 4)'], {}), '(W / 4)\n', (17093, 17100), 
False, 'import math\n'), ((17143, 17166), 'math.ceil', 'math.ceil', (['(depth / 2048)'], {}), '(depth / 2048)\n', (17152, 17166), False, 'import math\n'), ((17171, 17187), 'math.ceil', 'math.ceil', (['(W / 9)'], {}), '(W / 9)\n', (17180, 17187), False, 'import math\n'), ((17246, 17269), 'math.ceil', 'math.ceil', (['(depth / 1024)'], {}), '(depth / 1024)\n', (17255, 17269), False, 'import math\n'), ((17274, 17291), 'math.ceil', 'math.ceil', (['(W / 18)'], {}), '(W / 18)\n', (17283, 17291), False, 'import math\n'), ((17327, 17349), 'math.ceil', 'math.ceil', (['(depth / 512)'], {}), '(depth / 512)\n', (17336, 17349), False, 'import math\n'), ((17354, 17371), 'math.ceil', 'math.ceil', (['(W / 36)'], {}), '(W / 36)\n', (17363, 17371), False, 'import math\n')]
|
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import cv2
import numpy as np
from ...adapters import MTCNNPAdapter
def calibrate_predictions(previous_stage_predictions, out, threshold, outputs_mapping, iou_type=None):
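    # Keep only boxes whose face score exceeds 0.7, optionally apply NMS with
    # the given threshold, then refine the survivors with this stage's
    # bounding-box regression output.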
prob_out = outputs_mapping['probability_out']
if prob_out not in out[0]:
prob_out = prob_out + '/sink_port_0' if '/sink_port_0' not in prob_out else prob_out.replace('/sink_port_0', '')
score = out[0][prob_out][:, 1]
pass_t = np.where(score > 0.7)[0]
removed_boxes = [i for i in range(previous_stage_predictions[0].size) if i not in pass_t]
previous_stage_predictions[0].remove(removed_boxes)
previous_stage_predictions[0].scores = score[pass_t]
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
region_out = outputs_mapping['region_out']
if region_out not in out[0]:
region_out = (
region_out + '/sink_port_0' if '/sink_port_0' not in region_out else region_out.replace('/sink_port_0', '')
)
mv = out[0][region_out][pass_t]
if iou_type:
previous_stage_predictions[0], peek = nms(previous_stage_predictions[0], threshold, iou_type)
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = mv[np.sort(peek).astype(int)]
x_mins, y_mins, x_maxs, y_maxs, _ = bbreg(bboxes, mv.T).T
previous_stage_predictions[0].x_mins = x_mins
previous_stage_predictions[0].y_mins = y_mins
previous_stage_predictions[0].x_maxs = x_maxs
previous_stage_predictions[0].y_maxs = y_maxs
return previous_stage_predictions
def nms(prediction, threshold, iou_type):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
peek = MTCNNPAdapter.nms(bboxes, threshold, iou_type)
prediction.remove([i for i in range(prediction.size) if i not in peek])
return prediction, peek
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
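    # reg holds per-box offsets (dx1, dy1, dx2, dy2) expressed as fractions of
    # the box width and height.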
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
return boundingbox
def filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph):
mask = np.ones(len(tmph))
tmp_ys_len = (edy + 1) - dy
tmp_xs_len = (edx + 1) - dx
img_ys_len = (ey + 1) - y
img_xs_len = (ex + 1) - x
mask = np.logical_and(mask, np.logical_and(tmph > 0, tmpw > 0))
mask = np.logical_and(mask, np.logical_and(tmp_ys_len > 0, tmp_xs_len > 0))
mask = np.logical_and(mask, np.logical_and(img_xs_len > 0, img_ys_len > 0))
mask = np.logical_and(mask, np.logical_and(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len))
return dy[mask], edy[mask], dx[mask], edx[mask], y[mask], ey[mask], x[mask], ex[mask], tmpw[mask], tmph[mask], mask
def pad(boxesA, h, w):
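    # Clip each box to the image extent (h, w) and compute both the source
    # window in the image and the destination window in the cropped patch.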
boxes = boxesA.copy()
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy, dx = np.maximum(0, dy - 1), np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
ex = np.maximum(0, ex - 1)
return filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph)
def rerec(bboxA):
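    # Convert each box to a square by expanding the shorter side about the box
    # centre; the score column is left untouched.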
w = bboxA[:, 2] - bboxA[:, 0]
h = bboxA[:, 3] - bboxA[:, 1]
max_side = np.maximum(w, h).T
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - max_side * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - max_side * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([max_side], 2, axis=0).T
return bboxA
def cut_roi(image, prediction, dst_size, include_bound=True):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
img = image.data
bboxes = rerec(bboxes)
bboxes[:, 0:4] = np.fix(bboxes[:, 0:4])
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph, mask = pad(bboxes, *img.shape[:2])
bboxes = bboxes[mask]
numbox = bboxes.shape[0]
tempimg = np.zeros((numbox, dst_size, dst_size, 3))
for k in range(numbox):
tmp_k_h, tmp_k_w = int(tmph[k]) + int(include_bound), int(tmpw[k]) + int(include_bound)
tmp = np.zeros((tmp_k_h, tmp_k_w, 3))
tmp_ys, tmp_xs = slice(int(dy[k]), int(edy[k]) + 1), slice(int(dx[k]), int(edx[k]) + 1)
img_ys, img_xs = slice(int(y[k]), int(ey[k]) + 1), slice(int(x[k]), int(ex[k]) + 1)
tmp[tmp_ys, tmp_xs] = img[img_ys, img_xs]
tempimg[k, :, :, :] = cv2.resize(tmp, (dst_size, dst_size))
image.data = tempimg
return image
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
elif data.shape[0] <= i:
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
|
[
"numpy.ones_like",
"collections.OrderedDict",
"numpy.repeat",
"numpy.ones",
"numpy.logical_and",
"cv2.resize",
"numpy.where",
"numpy.fix",
"numpy.sort",
"numpy.array",
"numpy.zeros",
"numpy.expand_dims",
"numpy.maximum"
] |
[((4091, 4106), 'numpy.ones', 'np.ones', (['numbox'], {}), '(numbox)\n', (4098, 4106), True, 'import numpy as np\n'), ((4116, 4131), 'numpy.ones', 'np.ones', (['numbox'], {}), '(numbox)\n', (4123, 4131), True, 'import numpy as np\n'), ((4893, 4913), 'numpy.maximum', 'np.maximum', (['(0)', '(y - 1)'], {}), '(0, y - 1)\n', (4903, 4913), True, 'import numpy as np\n'), ((4922, 4942), 'numpy.maximum', 'np.maximum', (['(0)', '(x - 1)'], {}), '(0, x - 1)\n', (4932, 4942), True, 'import numpy as np\n'), ((4953, 4975), 'numpy.maximum', 'np.maximum', (['(0)', '(edy - 1)'], {}), '(0, edy - 1)\n', (4963, 4975), True, 'import numpy as np\n'), ((4986, 5008), 'numpy.maximum', 'np.maximum', (['(0)', '(edx - 1)'], {}), '(0, edx - 1)\n', (4996, 5008), True, 'import numpy as np\n'), ((5018, 5039), 'numpy.maximum', 'np.maximum', (['(0)', '(ey - 1)'], {}), '(0, ey - 1)\n', (5028, 5039), True, 'import numpy as np\n'), ((5049, 5070), 'numpy.maximum', 'np.maximum', (['(0)', '(ex - 1)'], {}), '(0, ex - 1)\n', (5059, 5070), True, 'import numpy as np\n'), ((5710, 5732), 'numpy.fix', 'np.fix', (['bboxes[:, 0:4]'], {}), '(bboxes[:, 0:4])\n', (5716, 5732), True, 'import numpy as np\n'), ((5885, 5926), 'numpy.zeros', 'np.zeros', (['(numbox, dst_size, dst_size, 3)'], {}), '((numbox, dst_size, dst_size, 3))\n', (5893, 5926), True, 'import numpy as np\n'), ((1038, 1059), 'numpy.where', 'np.where', (['(score > 0.7)'], {}), '(score > 0.7)\n', (1046, 1059), True, 'import numpy as np\n'), ((3199, 3229), 'numpy.array', 'np.array', (['[bb0, bb1, bb2, bb3]'], {}), '([bb0, bb1, bb2, bb3])\n', (3207, 3229), True, 'import numpy as np\n'), ((3505, 3539), 'numpy.logical_and', 'np.logical_and', (['(tmph > 0)', '(tmpw > 0)'], {}), '(tmph > 0, tmpw > 0)\n', (3519, 3539), True, 'import numpy as np\n'), ((3573, 3619), 'numpy.logical_and', 'np.logical_and', (['(tmp_ys_len > 0)', '(tmp_xs_len > 0)'], {}), '(tmp_ys_len > 0, tmp_xs_len > 0)\n', (3587, 3619), True, 'import numpy as np\n'), ((3653, 3699), 'numpy.logical_and', 'np.logical_and', (['(img_xs_len > 0)', '(img_ys_len > 0)'], {}), '(img_xs_len > 0, img_ys_len > 0)\n', (3667, 3699), True, 'import numpy as np\n'), ((3733, 3799), 'numpy.logical_and', 'np.logical_and', (['(tmp_xs_len == img_xs_len)', '(tmp_ys_len == img_ys_len)'], {}), '(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len)\n', (3747, 3799), True, 'import numpy as np\n'), ((4286, 4302), 'numpy.where', 'np.where', (['(ex > w)'], {}), '(ex > w)\n', (4294, 4302), True, 'import numpy as np\n'), ((4414, 4430), 'numpy.where', 'np.where', (['(ey > h)'], {}), '(ey > h)\n', (4422, 4430), True, 'import numpy as np\n'), ((4542, 4557), 'numpy.where', 'np.where', (['(x < 1)'], {}), '(x < 1)\n', (4550, 4557), True, 'import numpy as np\n'), ((4633, 4653), 'numpy.ones_like', 'np.ones_like', (['x[tmp]'], {}), '(x[tmp])\n', (4645, 4653), True, 'import numpy as np\n'), ((4664, 4679), 'numpy.where', 'np.where', (['(y < 1)'], {}), '(y < 1)\n', (4672, 4679), True, 'import numpy as np\n'), ((4755, 4775), 'numpy.ones_like', 'np.ones_like', (['y[tmp]'], {}), '(y[tmp])\n', (4767, 4775), True, 'import numpy as np\n'), ((4840, 4861), 'numpy.maximum', 'np.maximum', (['(0)', '(dy - 1)'], {}), '(0, dy - 1)\n', (4850, 4861), True, 'import numpy as np\n'), ((4863, 4884), 'numpy.maximum', 'np.maximum', (['(0)', '(dx - 1)'], {}), '(0, dx - 1)\n', (4873, 4884), True, 'import numpy as np\n'), ((5242, 5258), 'numpy.maximum', 'np.maximum', (['w', 'h'], {}), '(w, h)\n', (5252, 5258), True, 'import numpy as np\n'), ((6065, 6096), 'numpy.zeros', 'np.zeros', 
(['(tmp_k_h, tmp_k_w, 3)'], {}), '((tmp_k_h, tmp_k_w, 3))\n', (6073, 6096), True, 'import numpy as np\n'), ((6365, 6402), 'cv2.resize', 'cv2.resize', (['tmp', '(dst_size, dst_size)'], {}), '(tmp, (dst_size, dst_size))\n', (6375, 6402), False, 'import cv2\n'), ((6595, 6608), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6606, 6608), False, 'from collections import OrderedDict\n'), ((5411, 5443), 'numpy.repeat', 'np.repeat', (['[max_side]', '(2)'], {'axis': '(0)'}), '([max_side], 2, axis=0)\n', (5420, 5443), True, 'import numpy as np\n'), ((2172, 2185), 'numpy.sort', 'np.sort', (['peek'], {}), '(peek)\n', (2179, 2185), True, 'import numpy as np\n'), ((7011, 7042), 'numpy.expand_dims', 'np.expand_dims', (['data[i]'], {'axis': '(0)'}), '(data[i], axis=0)\n', (7025, 7042), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve, validation_curve
# Plot learning curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid(True)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Validation score")
plt.legend(loc="best")
plt.show()
return plt
# Plot validation curve
def plot_validation_curve(estimator, title, X, y, param_name, param_range, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    train_scores, test_scores = validation_curve(estimator, X, y, param_name=param_name, param_range=param_range, cv=cv)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='r', marker='o', markersize=5, label='Training score')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='r')
plt.plot(param_range, test_mean, color='g', linestyle='--', marker='s', markersize=5, label='Validation score')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='g')
plt.grid(True)
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel('Parameter')
plt.ylabel('Score')
    plt.ylim(ylim)
    plt.show()
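# Example usage (hypothetical choices): any scikit-learn estimator and dataset can
# be dropped in; iris and a RandomForest are used here only to exercise both plots.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X_demo, y_demo = load_iris(return_X_y=True)
    plot_learning_curve(RandomForestClassifier(n_estimators=50),
                        "Learning curve (RandomForest on iris)", X_demo, y_demo, cv=5)
    plot_validation_curve(RandomForestClassifier(), "Validation curve (n_estimators)",
                          X_demo, y_demo, param_name="n_estimators",
                          param_range=[10, 50, 100, 200], cv=5)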
|
[
"numpy.mean",
"matplotlib.pyplot.grid",
"sklearn.model_selection.learning_curve",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((282, 306), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (293, 306), True, 'import numpy as np\n'), ((312, 324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (322, 324), True, 'import matplotlib.pyplot as plt\n'), ((329, 345), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (338, 345), True, 'import matplotlib.pyplot as plt\n'), ((399, 430), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training examples"""'], {}), "('Training examples')\n", (409, 430), True, 'import matplotlib.pyplot as plt\n'), ((435, 454), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (445, 454), True, 'import matplotlib.pyplot as plt\n'), ((500, 578), 'sklearn.model_selection.learning_curve', 'learning_curve', (['estimator', 'X', 'y'], {'cv': 'cv', 'n_jobs': 'n_jobs', 'train_sizes': 'train_sizes'}), '(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n', (514, 578), False, 'from sklearn.model_selection import learning_curve\n'), ((612, 641), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (619, 641), True, 'import numpy as np\n'), ((665, 693), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (671, 693), True, 'import numpy as np\n'), ((717, 745), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (724, 745), True, 'import numpy as np\n'), ((768, 795), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (774, 795), True, 'import numpy as np\n'), ((800, 814), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (808, 814), True, 'import matplotlib.pyplot as plt\n'), ((820, 952), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', '(train_scores_mean - train_scores_std)', '(train_scores_mean + train_scores_std)'], {'alpha': '(0.1)', 'color': '"""r"""'}), "(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n", (836, 952), True, 'import matplotlib.pyplot as plt\n'), ((994, 1122), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', '(test_scores_mean - test_scores_std)', '(test_scores_mean + test_scores_std)'], {'alpha': '(0.1)', 'color': '"""g"""'}), "(train_sizes, test_scores_mean - test_scores_std, \n test_scores_mean + test_scores_std, alpha=0.1, color='g')\n", (1010, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1229), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'train_scores_mean', '"""o-"""'], {'color': '"""r"""', 'label': '"""Training score"""'}), "(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n", (1151, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1329), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'test_scores_mean', '"""o-"""'], {'color': '"""g"""', 'label': '"""Validation score"""'}), "(train_sizes, test_scores_mean, 'o-', color='g', label=\n 'Validation score')\n", (1250, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1365), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1353, 1365), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1380), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1378, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1586), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (1573, 1586), True, 'import numpy as 
np\n'), ((1700, 1729), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1707, 1729), True, 'import numpy as np\n'), ((1746, 1774), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1752, 1774), True, 'import numpy as np\n'), ((1791, 1819), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (1798, 1819), True, 'import numpy as np\n'), ((1835, 1862), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (1841, 1862), True, 'import numpy as np\n'), ((1867, 1965), 'matplotlib.pyplot.plot', 'plt.plot', (['param_range', 'train_mean'], {'color': '"""r"""', 'marker': '"""o"""', 'markersize': '(5)', 'label': '"""Training score"""'}), "(param_range, train_mean, color='r', marker='o', markersize=5,\n label='Training score')\n", (1875, 1965), True, 'import matplotlib.pyplot as plt\n'), ((1966, 2070), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['param_range', '(train_mean + train_std)', '(train_mean - train_std)'], {'alpha': '(0.15)', 'color': '"""r"""'}), "(param_range, train_mean + train_std, train_mean -\n train_std, alpha=0.15, color='r')\n", (1982, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2186), 'matplotlib.pyplot.plot', 'plt.plot', (['param_range', 'test_mean'], {'color': '"""g"""', 'linestyle': '"""--"""', 'marker': '"""s"""', 'markersize': '(5)', 'label': '"""Validation score"""'}), "(param_range, test_mean, color='g', linestyle='--', marker='s',\n markersize=5, label='Validation score')\n", (2079, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2287), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['param_range', '(test_mean + test_std)', '(test_mean - test_std)'], {'alpha': '(0.15)', 'color': '"""g"""'}), "(param_range, test_mean + test_std, test_mean - test_std,\n alpha=0.15, color='g')\n", (2203, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2302), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2296, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2325), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2318, 2325), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2352), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2340, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2358, 2381), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Parameter"""'], {}), "('Parameter')\n", (2368, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2406), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (2397, 2406), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2426), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (2420, 2426), True, 'import matplotlib.pyplot as plt\n'), ((379, 394), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (387, 394), True, 'import matplotlib.pyplot as plt\n')]
|
import os.path as op
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score
import mne
from pyriemann.tangentspace import TangentSpace
import config_drago as cfg
meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)
def proj_covs_common(covs, picks, scale=scale, rank=rank, reg=reg):
covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
covs = scale * np.array(covs)
n_sub, n_fb, n_ch, n_ch = covs.shape
# covs2 = covs.reshape(n_sub*n_fb, n_ch, n_ch)
# covs_avg = np.mean(covs2, axis=0)
covs_avg = covs.mean(axis=1).mean(axis=0)
d, V = np.linalg.eigh(covs_avg)
d = d[::-1]
V = V[:, ::-1]
proj_mat = V[:, :rank].T
covs_proj = np.zeros((n_sub, n_fb, rank, rank))
for sub in range(n_sub):
for fb in range(n_fb):
covs_proj[sub, fb] = proj_mat @ covs[sub, fb] @ proj_mat.T
covs_proj[sub, fb] += reg * np.eye(rank)
return covs_proj
def proj_covs_ts(covs):
n_sub, n_fb, p, _ = covs.shape
covs_ts = np.zeros((n_sub, n_fb, (p*(p+1))//2))
for fb in range(n_fb):
covs_ts[:, fb, :] = TangentSpace(metric="wasserstein").fit(
covs[:, fb, :, :]).transform(covs[:, fb, :, :])
return covs_ts
file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs) # (sub, fb, ch, ch)
info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)
covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)
info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]
ridge = make_pipeline(StandardScaler(),
RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
scoring="neg_mean_absolute_error", n_jobs=n_jobs,
verbose=True)
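# The scores above are positive mean absolute errors (the leading minus sign undoes
# scikit-learn's negated-loss convention); y holds ages, so the unit is years.
print('MAE: %0.2f +/- %0.2f years' % (score.mean(), score.std()))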
|
[
"numpy.linalg.eigh",
"numpy.eye",
"pyriemann.tangentspace.TangentSpace",
"mne.pick_types",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"mne.externals.h5io.read_hdf5",
"sklearn.model_selection.KFold",
"numpy.logspace",
"sklearn.model_selection.cross_val_score"
] |
[((414, 469), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_jobs', 'shuffle': '(True)', 'random_state': 'seed'}), '(n_splits=n_jobs, shuffle=True, random_state=seed)\n', (419, 469), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((1494, 1548), 'os.path.join', 'op.join', (['cfg.path_outputs', '"""covs_allch_oas.float32.h5"""'], {}), "(cfg.path_outputs, 'covs_allch_oas.float32.h5')\n", (1501, 1548), True, 'import os.path as op\n'), ((1562, 1601), 'mne.externals.h5io.read_hdf5', 'mne.externals.h5io.read_hdf5', (['file_covs'], {}), '(file_covs)\n', (1590, 1601), False, 'import mne\n'), ((1696, 1725), 'mne.pick_types', 'mne.pick_types', (['info'], {'meg': 'meg'}), '(info, meg=meg)\n', (1710, 1725), False, 'import mne\n'), ((842, 866), 'numpy.linalg.eigh', 'np.linalg.eigh', (['covs_avg'], {}), '(covs_avg)\n', (856, 866), True, 'import numpy as np\n'), ((948, 983), 'numpy.zeros', 'np.zeros', (['(n_sub, n_fb, rank, rank)'], {}), '((n_sub, n_fb, rank, rank))\n', (956, 983), True, 'import numpy as np\n'), ((1264, 1305), 'numpy.zeros', 'np.zeros', (['(n_sub, n_fb, p * (p + 1) // 2)'], {}), '((n_sub, n_fb, p * (p + 1) // 2))\n', (1272, 1305), True, 'import numpy as np\n'), ((1872, 1914), 'os.path.join', 'op.join', (['cfg.path_data', '"""participants.csv"""'], {}), "(cfg.path_data, 'participants.csv')\n", (1879, 1914), True, 'import os.path as op\n'), ((2056, 2072), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2070, 2072), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2147, 2250), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['ridge', 'X', 'y'], {'cv': 'cv', 'scoring': '"""neg_mean_absolute_error"""', 'n_jobs': 'n_jobs', 'verbose': '(True)'}), "(ridge, X, y, cv=cv, scoring='neg_mean_absolute_error',\n n_jobs=n_jobs, verbose=True)\n", (2162, 2250), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((637, 651), 'numpy.array', 'np.array', (['covs'], {}), '(covs)\n', (645, 651), True, 'import numpy as np\n'), ((1639, 1679), 'os.path.join', 'op.join', (['cfg.path_data', '"""info_allch.npy"""'], {}), "(cfg.path_data, 'info_allch.npy')\n", (1646, 1679), True, 'import os.path as op\n'), ((2111, 2134), 'numpy.logspace', 'np.logspace', (['(-3)', '(5)', '(100)'], {}), '(-3, 5, 100)\n', (2122, 2134), True, 'import numpy as np\n'), ((1155, 1167), 'numpy.eye', 'np.eye', (['rank'], {}), '(rank)\n', (1161, 1167), True, 'import numpy as np\n'), ((1357, 1391), 'pyriemann.tangentspace.TangentSpace', 'TangentSpace', ([], {'metric': '"""wasserstein"""'}), "(metric='wasserstein')\n", (1369, 1391), False, 'from pyriemann.tangentspace import TangentSpace\n')]
|
import numpy as np
import cv2
import os.path as osp
import json
from human_body_prior.tools.model_loader import load_vposer
import torch
vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'
def load_avakhitov_fits_vposer(vposer, part_path, dev_lbl):
poses = np.load(part_path + '/poses.npy')[:-1]
face_expressions = np.load(part_path + '/expressions.npy')[:-1] * 1e2
betas = np.load(part_path + '/betas.npy')
fid_lst = np.load(part_path + '/fid_lst.npy')
with open(part_path + '/config.json', 'r') as f:
config = json.load(f)
# do we use vposer embeddings
is_vposer = config['is_vposer']
# gender of a subject
is_male = config['is_male']
# id of a device (used to decode the rigid pose of the device)
assert len(fid_lst) == len(poses), f'{len(fid_lst)} != {len(poses)}'
assert len(fid_lst) == len(face_expressions), f'{len(fid_lst)} != {len(face_expressions)}'
n = len(poses)
frame_index2fit_index = {
fid_lst[i]: i
for i in range(n)
}
# load the device pose
dev_lst = config['dev_lst']
dev_id = 0
while dev_lst[dev_id] != dev_lbl:
dev_id += 1
dev_orient = None
dev_trans = None
if dev_id > 0:
dev_orient = np.load(part_path + '/dev_orient.npy')
dev_trans = np.load(part_path + '/dev_trans.npy')
rot = poses[:, -3:]
trans = poses[:, -6:-3]
if is_vposer:
pose_body_vp = torch.tensor(poses[:, 0:32])
# convert from vposer to rotation matrices
pose_body_list = []
for i in range(n):
pose_body_mats = vposer.decode(pose_body_vp[i]).reshape(-1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros(63)
            for j in range(0, pose_body_mats.shape[0]):
                rot_vec, jac = cv2.Rodrigues(pose_body_mats[j])
                pose_body[3 * j: 3 * j + 3] = rot_vec.reshape(-1)
pose_body_list.append(pose_body)
pose_body = np.array(pose_body_list)
pose_jaw = poses[:, 32:35]
pose_eye = poses[:, 35:41]
pose_hand = poses[:, 41:-6]
else:
pose_body = poses[:, 0:63]
pose_jaw = poses[:, 63:66]
pose_eye = poses[:, 66:72]
pose_hand = poses[:, 72:-6]
if dev_orient is not None:
for i in range(n):
rot_mat = cv2.Rodrigues(rot[i].reshape(3, 1))[0]
dev_mat = cv2.Rodrigues(dev_orient.reshape(3, 1))[0]
rot_mat = dev_mat @ rot_mat
rot[i] = cv2.Rodrigues(rot_mat)[0].reshape(-1)
trans[i] = (dev_mat @ trans[i].reshape(3, 1) + dev_trans.reshape(3, 1)).reshape(-1)
result = {
'global_rvec': rot,
'global_tvec': trans,
'body_pose': pose_body,
'hand_pose': pose_hand,
'jaw_pose': pose_jaw,
'eye_pose': pose_eye,
'face_expression': face_expressions,
'betas': betas,
'n': n,
'frame_index2fit_index': frame_index2fit_index,
'is_male': is_male,
'is_vposer': is_vposer
}
return result
def load_avakhitov_fits(dp, load_betas=True, load_body_poses=True, load_expressions=False, load_fid_lst=True):
result = dict()
for flag, k, fn_no_ext in [
[load_betas, 'betas', 'betas'],
[load_body_poses, 'body_poses', 'poses'],
[load_expressions, 'expressions', 'expressions'],
[load_fid_lst, 'fid_lst', 'fid_lst']
]:
if flag:
load_fp = osp.join(dp, f'{fn_no_ext}.npy')
try:
loaded = np.load(load_fp)
except:
print(load_fp)
raise Exception()
if fn_no_ext == 'poses':
#load the vposer model
if loaded.shape[1] == 69:
pose_body = loaded[:, 0:32]
else:
vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
vposer.eval()
pose_body_vp = torch.tensor(loaded[:, 0:32])
#convert from vposer to rotation matrices
pose_body_mats = vposer.decode(pose_body_vp).reshape(len(loaded), -1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros((pose_body_mats.shape[0], 63))
for i in range(0, pose_body_mats.shape[0]):
for j in range(0, pose_body_mats.shape[1]):
rot_vec, jac = cv2.Rodrigues(pose_body_mats[i,j])
pose_body[i, 3*j : 3*j+3] = rot_vec.reshape(-1)
result[k] = pose_body
result['global_rvecs'] = loaded[:, -3:]
result['global_tvecs'] = loaded[:, -6:-3]
result['n'] = len(loaded)
else:
result[k] = loaded
return result
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
def get_selected_ids(id_sel_set, req_ids):
ss_sort = np.argsort(id_sel_set)
req_sort = np.argsort(req_ids)
id_ss_srt = id_sel_set[ss_sort]
id_ss_pos = np.arange(0, len(id_sel_set))[ss_sort]
req_srt = req_ids[req_sort]
req_srt_pos = -1 * np.ones(len(req_srt), dtype=int)
i = 0
j = 0
while i < len(id_ss_srt) and j < len(req_srt):
if req_srt[j] == id_ss_srt[i]:
req_srt_pos[j] = id_ss_pos[i]
i += 1
j += 1
elif req_srt[j] < id_ss_srt[i]:
j += 1
elif id_ss_srt[i] < req_srt[j]:
i += 1
req_ids_ans = -1 * np.ones(len(req_srt), dtype=int)
req_ids_ans[req_sort] = req_srt_pos
return req_ids_ans
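# Quick sanity check for batch_rodrigues on synthetic input: a rotation of pi/2
# about the z-axis should map the x-axis onto the y-axis.
if __name__ == '__main__':
    rvec = torch.tensor([[0.0, 0.0, np.pi / 2.0]], dtype=torch.float32)
    R = batch_rodrigues(rvec)  # shape (1, 3, 3)
    print(R[0] @ torch.tensor([1.0, 0.0, 0.0]))  # approximately [0., 1., 0.]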
|
[
"torch.bmm",
"torch.split",
"human_body_prior.tools.model_loader.load_vposer",
"torch.eye",
"torch.sin",
"os.path.join",
"numpy.argsort",
"torch.tensor",
"torch.norm",
"numpy.array",
"torch.cos",
"numpy.zeros",
"cv2.Rodrigues",
"json.load",
"numpy.load",
"torch.zeros",
"torch.cat"
] |
[((451, 484), 'numpy.load', 'np.load', (["(part_path + '/betas.npy')"], {}), "(part_path + '/betas.npy')\n", (458, 484), True, 'import numpy as np\n'), ((499, 534), 'numpy.load', 'np.load', (["(part_path + '/fid_lst.npy')"], {}), "(part_path + '/fid_lst.npy')\n", (506, 534), True, 'import numpy as np\n'), ((5335, 5384), 'torch.norm', 'torch.norm', (['(rot_vecs + 1e-08)'], {'dim': '(1)', 'keepdim': '(True)'}), '(rot_vecs + 1e-08, dim=1, keepdim=True)\n', (5345, 5384), False, 'import torch\n'), ((5553, 5583), 'torch.split', 'torch.split', (['rot_dir', '(1)'], {'dim': '(1)'}), '(rot_dir, 1, dim=1)\n', (5564, 5583), False, 'import torch\n'), ((5592, 5651), 'torch.zeros', 'torch.zeros', (['(batch_size, 3, 3)'], {'dtype': 'dtype', 'device': 'device'}), '((batch_size, 3, 3), dtype=dtype, device=device)\n', (5603, 5651), False, 'import torch\n'), ((5665, 5721), 'torch.zeros', 'torch.zeros', (['(batch_size, 1)'], {'dtype': 'dtype', 'device': 'device'}), '((batch_size, 1), dtype=dtype, device=device)\n', (5676, 5721), False, 'import torch\n'), ((6042, 6064), 'numpy.argsort', 'np.argsort', (['id_sel_set'], {}), '(id_sel_set)\n', (6052, 6064), True, 'import numpy as np\n'), ((6080, 6099), 'numpy.argsort', 'np.argsort', (['req_ids'], {}), '(req_ids)\n', (6090, 6099), True, 'import numpy as np\n'), ((326, 359), 'numpy.load', 'np.load', (["(part_path + '/poses.npy')"], {}), "(part_path + '/poses.npy')\n", (333, 359), True, 'import numpy as np\n'), ((605, 617), 'json.load', 'json.load', (['f'], {}), '(f)\n', (614, 617), False, 'import json\n'), ((1302, 1340), 'numpy.load', 'np.load', (["(part_path + '/dev_orient.npy')"], {}), "(part_path + '/dev_orient.npy')\n", (1309, 1340), True, 'import numpy as np\n'), ((1361, 1398), 'numpy.load', 'np.load', (["(part_path + '/dev_trans.npy')"], {}), "(part_path + '/dev_trans.npy')\n", (1368, 1398), True, 'import numpy as np\n'), ((1494, 1522), 'torch.tensor', 'torch.tensor', (['poses[:, 0:32]'], {}), '(poses[:, 0:32])\n', (1506, 1522), False, 'import torch\n'), ((2018, 2042), 'numpy.array', 'np.array', (['pose_body_list'], {}), '(pose_body_list)\n', (2026, 2042), True, 'import numpy as np\n'), ((5442, 5458), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (5451, 5458), False, 'import torch\n'), ((5493, 5509), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (5502, 5509), False, 'import torch\n'), ((388, 427), 'numpy.load', 'np.load', (["(part_path + '/expressions.npy')"], {}), "(part_path + '/expressions.npy')\n", (395, 427), True, 'import numpy as np\n'), ((1754, 1766), 'numpy.zeros', 'np.zeros', (['(63)'], {}), '(63)\n', (1762, 1766), True, 'import numpy as np\n'), ((3505, 3537), 'os.path.join', 'osp.join', (['dp', 'f"""{fn_no_ext}.npy"""'], {}), "(dp, f'{fn_no_ext}.npy')\n", (3513, 3537), True, 'import os.path as osp\n'), ((5730, 5796), 'torch.cat', 'torch.cat', (['[zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros]'], {'dim': '(1)'}), '([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n', (5739, 5796), False, 'import torch\n'), ((5846, 5886), 'torch.eye', 'torch.eye', (['(3)'], {'dtype': 'dtype', 'device': 'device'}), '(3, dtype=dtype, device=device)\n', (5855, 5886), False, 'import torch\n'), ((5948, 5963), 'torch.bmm', 'torch.bmm', (['K', 'K'], {}), '(K, K)\n', (5957, 5963), False, 'import torch\n'), ((1854, 1886), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose_body_mats[i]'], {}), '(pose_body_mats[i])\n', (1867, 1886), False, 'import cv2\n'), ((3580, 3596), 'numpy.load', 'np.load', (['load_fp'], {}), '(load_fp)\n', (3587, 3596), True, 'import 
numpy as np\n'), ((3903, 3948), 'human_body_prior.tools.model_loader.load_vposer', 'load_vposer', (['vposer_ckpt'], {'vp_model': '"""snapshot"""'}), "(vposer_ckpt, vp_model='snapshot')\n", (3914, 3948), False, 'from human_body_prior.tools.model_loader import load_vposer\n'), ((4018, 4047), 'torch.tensor', 'torch.tensor', (['loaded[:, 0:32]'], {}), '(loaded[:, 0:32])\n', (4030, 4047), False, 'import torch\n'), ((4267, 4306), 'numpy.zeros', 'np.zeros', (['(pose_body_mats.shape[0], 63)'], {}), '((pose_body_mats.shape[0], 63))\n', (4275, 4306), True, 'import numpy as np\n'), ((2546, 2568), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot_mat'], {}), '(rot_mat)\n', (2559, 2568), False, 'import cv2\n'), ((4482, 4517), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose_body_mats[i, j]'], {}), '(pose_body_mats[i, j])\n', (4495, 4517), False, 'import cv2\n')]
|
import numpy as np
def smooth(a, WSZ):
# a: NumPy 1-D array containing the data to be smoothed
    # WSZ: smoothing window size, which must be an odd number,
# as in the original MATLAB implementation
if WSZ % 2 == 0:
WSZ = WSZ - 1
out0 = np.convolve(a, np.ones(WSZ, dtype=int), 'valid') / WSZ
r = np.arange(1, WSZ - 1, 2)
start = np.cumsum(a[:WSZ - 1])[::2] / r
stop = (np.cumsum(a[:-WSZ:-1])[::2] / r)[::-1]
return np.concatenate((start, out0, stop))
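# Example on synthetic data: like MATLAB's smooth(), the output keeps the same
# length as the input thanks to the shrinking windows at both edges.
if __name__ == '__main__':
    noisy = np.sin(np.linspace(0, 2 * np.pi, 101)) + 0.1 * np.random.randn(101)
    print(noisy.shape, smooth(noisy, 11).shape)  # (101,) (101,)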
|
[
"numpy.concatenate",
"numpy.cumsum",
"numpy.ones",
"numpy.arange"
] |
[((331, 355), 'numpy.arange', 'np.arange', (['(1)', '(WSZ - 1)', '(2)'], {}), '(1, WSZ - 1, 2)\n', (340, 355), True, 'import numpy as np\n'), ((462, 497), 'numpy.concatenate', 'np.concatenate', (['(start, out0, stop)'], {}), '((start, out0, stop))\n', (476, 497), True, 'import numpy as np\n'), ((283, 306), 'numpy.ones', 'np.ones', (['WSZ'], {'dtype': 'int'}), '(WSZ, dtype=int)\n', (290, 306), True, 'import numpy as np\n'), ((368, 390), 'numpy.cumsum', 'np.cumsum', (['a[:WSZ - 1]'], {}), '(a[:WSZ - 1])\n', (377, 390), True, 'import numpy as np\n'), ((412, 434), 'numpy.cumsum', 'np.cumsum', (['a[:-WSZ:-1]'], {}), '(a[:-WSZ:-1])\n', (421, 434), True, 'import numpy as np\n')]
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.platform import flags
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
FLAGS = flags.FLAGS
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def kl_divergence(mu, log_sigma):
"""KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_sigma: log(sigma) parameter of the distribution.
Returns:
the KL loss.
"""
return -.5 * tf.reduce_sum(1. + log_sigma - tf.square(mu) - tf.exp(log_sigma),
axis=1)
def construct_latent_tower(images):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent for every timestep would
be generated.
Args:
images: tensor of ground truth image sequences
Returns:
latent_mean: predicted latent mean
latent_std: predicted latent standard deviation
    latent_loss: loss of the latent tower
    samples: random samples sampled from standard gaussian
"""
with slim.arg_scope([slim.conv2d], reuse=False):
stacked_images = tf.concat(images, 3)
latent_enc1 = slim.conv2d(
stacked_images,
32, [3, 3],
stride=2,
scope='latent_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm1'})
latent_enc2 = slim.conv2d(
latent_enc1,
64, [3, 3],
stride=2,
scope='latent_conv2',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm2'})
latent_enc3 = slim.conv2d(
latent_enc2,
64, [3, 3],
stride=1,
scope='latent_conv3',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm3'})
latent_mean = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
activation_fn=None,
scope='latent_mean',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm_mean'})
latent_std = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
scope='latent_std',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_std_norm'})
latent_std += FLAGS.latent_std_min
divergence = kl_divergence(latent_mean, latent_std)
latent_loss = tf.reduce_mean(divergence)
if FLAGS.multi_latent:
# timestep x batch_size x latent_size
samples = tf.random_normal(
[FLAGS.sequence_length-1] + latent_mean.shape, 0, 1,
dtype=tf.float32)
else:
# batch_size x latent_size
samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32)
if FLAGS.inference_time:
# No latent tower at inference time, just standard gaussian.
return None, None, None, samples
else:
return latent_mean, latent_std, latent_loss, samples
def construct_model(images,
actions=None,
states=None,
iter_num=-1.0,
k=-1,
use_state=True,
num_masks=10,
stp=False,
cdna=True,
dna=False,
context_frames=2):
"""Build convolutional lstm video predictor using STP, CDNA, or DNA.
Args:
images: tensor of ground truth image sequences
actions: tensor of action sequences
states: tensor of ground truth state sequences
iter_num: tensor of the current training iteration (for sched. sampling)
k: constant used for scheduled sampling. -1 to feed in own prediction.
use_state: True to include state and action in prediction
num_masks: the number of different pixel motion predictions (and
the number of masks for each of those predictions)
stp: True to use Spatial Transformer Predictor (STP)
    cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
dna: True to use Dynamic Neural Advection (DNA)
context_frames: number of ground truth frames to pass in before
feeding in own predictions
Returns:
gen_images: predicted future image frames
gen_states: predicted future states
Raises:
ValueError: if more than one network option specified or more than 1 mask
specified for DNA model.
"""
# Each image is being used twice, in latent tower and main tower.
# This is to make sure we are using the *same* image for both, ...
# ... given how TF queues work.
images = [tf.identity(image) for image in images]
if stp + cdna + dna != 1:
raise ValueError('More than one, or no network option specified.')
batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
lstm_func = basic_conv_lstm_cell
# Generated robot states and images.
gen_states, gen_images = [], []
current_state = states[0]
if k == -1:
feedself = True
else:
# Scheduled sampling:
# Calculate number of ground-truth frames to pass in.
num_ground_truth = tf.to_int32(
tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
feedself = False
# LSTM state sizes and states.
lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
lstm_state5, lstm_state6, lstm_state7 = None, None, None
# Latent tower
latent_loss = 0.0
if FLAGS.stochastic_model:
latent_tower_outputs = construct_latent_tower(images)
latent_mean, latent_std, latent_loss, samples = latent_tower_outputs
# Main tower
  # enumerate() provides `timestep`, which the multi-latent branch below indexes into.
  for timestep, (image, action) in enumerate(zip(images[:-1], actions[:-1])):
# Reuse variables after the first timestep.
reuse = bool(gen_images)
done_warm_start = len(gen_images) > context_frames - 1
with slim.arg_scope(
[lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
tf_layers.layer_norm, slim.layers.conv2d_transpose],
reuse=reuse):
if feedself and done_warm_start:
# Feed in generated image.
prev_image = gen_images[-1]
elif done_warm_start:
# Scheduled sampling
prev_image = scheduled_sample(image, gen_images[-1], batch_size,
num_ground_truth)
else:
# Always feed in ground_truth
prev_image = image
# Predicted state is always fed back in
state_action = tf.concat(axis=1, values=[action, current_state])
enc0 = slim.layers.conv2d(
prev_image,
32, [5, 5],
stride=2,
scope='scale1_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1'})
hidden1, lstm_state1 = lstm_func(
enc0, lstm_state1, lstm_size[0], scope='state1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
hidden2, lstm_state2 = lstm_func(
hidden1, lstm_state2, lstm_size[1], scope='state2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
enc1 = slim.layers.conv2d(
hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')
hidden3, lstm_state3 = lstm_func(
enc1, lstm_state3, lstm_size[2], scope='state3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
hidden4, lstm_state4 = lstm_func(
hidden3, lstm_state4, lstm_size[3], scope='state4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
enc2 = slim.layers.conv2d(
hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')
# Pass in state and action.
smear = tf.reshape(
state_action,
[int(batch_size), 1, 1, int(state_action.get_shape()[1])])
smear = tf.tile(
smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
if use_state:
enc2 = tf.concat(axis=3, values=[enc2, smear])
# Setup latent
if FLAGS.stochastic_model:
latent = samples
if FLAGS.multi_latent:
latent = samples[timestep]
if not FLAGS.inference_time:
latent = tf.cond(iter_num < FLAGS.num_iterations_1st_stage,
lambda: tf.identity(latent),
lambda: latent_mean + tf.exp(latent_std / 2.0) * latent)
with tf.control_dependencies([latent]):
enc2 = tf.concat([enc2, latent], 3)
enc3 = slim.layers.conv2d(
enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')
hidden5, lstm_state5 = lstm_func(
enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
enc4 = slim.layers.conv2d_transpose(
hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
hidden6, lstm_state6 = lstm_func(
enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16
hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
# Skip connection.
hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16
enc5 = slim.layers.conv2d_transpose(
hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
hidden7, lstm_state7 = lstm_func(
enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32
hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
# Skip connection.
hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32
enc6 = slim.layers.conv2d_transpose(
hidden7,
hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm9'})
if dna:
# Using largest hidden state for predicting untied conv kernels.
enc7 = slim.layers.conv2d_transpose(
enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None)
else:
# Using largest hidden state for predicting a new image layer.
enc7 = slim.layers.conv2d_transpose(
enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
# This allows the network to also generate one image from scratch,
# which is useful when regions of the image become unoccluded.
transformed = [tf.nn.sigmoid(enc7)]
if stp:
stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
stp_input1 = slim.layers.fully_connected(
stp_input0, 100, scope='fc_stp')
transformed += stp_transformation(prev_image, stp_input1, num_masks)
elif cdna:
cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
transformed += cdna_transformation(prev_image, cdna_input, num_masks,
int(color_channels))
elif dna:
# Only one mask is supported (more should be unnecessary).
if num_masks != 1:
raise ValueError('Only one mask is supported for DNA model.')
transformed = [dna_transformation(prev_image, enc7)]
masks = slim.layers.conv2d_transpose(
enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None)
masks = tf.reshape(
tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
[int(batch_size), int(img_height), int(img_width), num_masks + 1])
mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
output = mask_list[0] * prev_image
for layer, mask in zip(transformed, mask_list[1:]):
output += layer * mask
gen_images.append(output)
current_state = slim.layers.fully_connected(
state_action,
int(current_state.get_shape()[1]),
scope='state_pred',
activation_fn=None)
gen_states.append(current_state)
return gen_images, gen_states, latent_loss
## Utility functions
def stp_transformation(prev_image, stp_input, num_masks):
"""Apply spatial transformer predictor (STP) to previous image.
Args:
prev_image: previous image to be transformed.
stp_input: hidden layer to be used for computing STN parameters.
num_masks: number of masks and hence the number of STP transformations.
Returns:
List of images transformed by the predicted STP parameters.
"""
# Only import spatial transformer if needed.
from spatial_transformer import transformer
identity_params = tf.convert_to_tensor(
np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
transformed = []
for i in range(num_masks - 1):
params = slim.layers.fully_connected(
stp_input, 6, scope='stp_params' + str(i),
activation_fn=None) + identity_params
transformed.append(transformer(prev_image, params))
return transformed
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
    cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: the number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = int(cdna_input.get_shape()[0])
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = slim.layers.fully_connected(
cdna_input,
DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,
scope='cdna_params',
activation_fn=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
# Transpose the dimensions to where they belong.
transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
def dna_transformation(prev_image, dna_input):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
    dna_input: hidden layer to be used for computing DNA transformation.
  Returns:
    List of images transformed by the predicted DNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(DNA_KERN_SIZE):
for ykern in range(DNA_KERN_SIZE):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT
kernel = tf.expand_dims(
kernel / tf.reduce_sum(
kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
"""Sample batch with specified mix of ground truth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
num_ground_truth: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
idx = tf.random_shuffle(tf.range(int(batch_size)))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
return tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
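# Numerical check for kl_divergence under the module's TF1-style session API:
# the KL divergence of N(0, 1) from N(0, 1) is exactly zero.
if __name__ == '__main__':
  with tf.Session() as sess:
    print(sess.run(kl_divergence(tf.zeros([2, 8]), tf.zeros([2, 8]))))  # [0. 0.]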
|
[
"tensorflow.unstack",
"tensorflow.pad",
"tensorflow.transpose",
"tensorflow.contrib.slim.arg_scope",
"spatial_transformer.transformer",
"tensorflow.reduce_sum",
"tensorflow.split",
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.contrib.slim.layers.conv2d",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.random_normal",
"tensorflow.dynamic_stitch",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.square",
"tensorflow.range",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.contrib.slim.layers.conv2d_transpose",
"tensorflow.contrib.slim.layers.fully_connected",
"tensorflow.nn.relu",
"tensorflow.to_float",
"tensorflow.exp",
"tensorflow.identity",
"tensorflow.contrib.layers.python.layers.layer_norm",
"tensorflow.contrib.slim.conv2d"
] |
[((14752, 14879), 'tensorflow.contrib.slim.layers.fully_connected', 'slim.layers.fully_connected', (['cdna_input', '(DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks)'], {'scope': '"""cdna_params"""', 'activation_fn': 'None'}), "(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE *\n num_masks, scope='cdna_params', activation_fn=None)\n", (14779, 14879), True, 'import tensorflow.contrib.slim as slim\n'), ((14944, 15029), 'tensorflow.reshape', 'tf.reshape', (['cdna_kerns', '[batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks]'], {}), '(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks]\n )\n', (14954, 15029), True, 'import tensorflow as tf\n'), ((15112, 15164), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cdna_kerns', '[1, 2, 3]'], {'keep_dims': '(True)'}), '(cdna_kerns, [1, 2, 3], keep_dims=True)\n', (15125, 15164), True, 'import tensorflow as tf\n'), ((15475, 15516), 'tensorflow.transpose', 'tf.transpose', (['cdna_kerns', '[1, 2, 0, 4, 3]'], {}), '(cdna_kerns, [1, 2, 0, 4, 3])\n', (15487, 15516), True, 'import tensorflow as tf\n'), ((15532, 15609), 'tensorflow.reshape', 'tf.reshape', (['cdna_kerns', '[DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks]'], {}), '(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])\n', (15542, 15609), True, 'import tensorflow as tf\n'), ((15668, 15706), 'tensorflow.transpose', 'tf.transpose', (['prev_image', '[3, 1, 2, 0]'], {}), '(prev_image, [3, 1, 2, 0])\n', (15680, 15706), True, 'import tensorflow as tf\n'), ((15745, 15813), 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', (['prev_image', 'cdna_kerns', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')\n", (15767, 15813), True, 'import tensorflow as tf\n'), ((15882, 15961), 'tensorflow.reshape', 'tf.reshape', (['transformed', '[color_channels, height, width, batch_size, num_masks]'], {}), '(transformed, [color_channels, height, width, batch_size, num_masks])\n', (15892, 15961), True, 'import tensorflow as tf\n'), ((15978, 16020), 'tensorflow.transpose', 'tf.transpose', (['transformed', '[3, 1, 2, 0, 4]'], {}), '(transformed, [3, 1, 2, 0, 4])\n', (15990, 16020), True, 'import tensorflow as tf\n'), ((16037, 16069), 'tensorflow.unstack', 'tf.unstack', (['transformed'], {'axis': '(-1)'}), '(transformed, axis=-1)\n', (16047, 16069), True, 'import tensorflow as tf\n'), ((16458, 16510), 'tensorflow.pad', 'tf.pad', (['prev_image', '[[0, 0], [2, 2], [2, 2], [0, 0]]'], {}), '(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])\n', (16464, 16510), True, 'import tensorflow as tf\n'), ((16883, 16915), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': 'inputs'}), '(axis=3, values=inputs)\n', (16892, 16915), True, 'import tensorflow as tf\n'), ((17116, 17168), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(kernel * inputs)', '[3]'], {'keep_dims': '(False)'}), '(kernel * inputs, [3], keep_dims=False)\n', (17129, 17168), True, 'import tensorflow as tf\n'), ((17885, 17928), 'tensorflow.gather', 'tf.gather', (['ground_truth_x', 'ground_truth_idx'], {}), '(ground_truth_x, ground_truth_idx)\n', (17894, 17928), True, 'import tensorflow as tf\n'), ((17950, 17987), 'tensorflow.gather', 'tf.gather', (['generated_x', 'generated_idx'], {}), '(generated_x, generated_idx)\n', (17959, 17987), True, 'import tensorflow as tf\n'), ((17997, 18094), 'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['[ground_truth_idx, generated_idx]', '[ground_truth_examps, generated_examps]'], {}), '([ground_truth_idx, generated_idx], [ground_truth_examps,\n 
generated_examps])\n', (18014, 18094), True, 'import tensorflow as tf\n'), ((2297, 2339), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.conv2d]'], {'reuse': '(False)'}), '([slim.conv2d], reuse=False)\n', (2311, 2339), True, 'import tensorflow.contrib.slim as slim\n'), ((2362, 2382), 'tensorflow.concat', 'tf.concat', (['images', '(3)'], {}), '(images, 3)\n', (2371, 2382), True, 'import tensorflow as tf\n'), ((2402, 2562), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['stacked_images', '(32)', '[3, 3]'], {'stride': '(2)', 'scope': '"""latent_conv1"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm1'}"}), "(stacked_images, 32, [3, 3], stride=2, scope='latent_conv1',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'latent_norm1'})\n", (2413, 2562), True, 'import tensorflow.contrib.slim as slim\n'), ((2623, 2780), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc1', '(64)', '[3, 3]'], {'stride': '(2)', 'scope': '"""latent_conv2"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm2'}"}), "(latent_enc1, 64, [3, 3], stride=2, scope='latent_conv2',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'latent_norm2'})\n", (2634, 2780), True, 'import tensorflow.contrib.slim as slim\n'), ((2841, 2998), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc2', '(64)', '[3, 3]'], {'stride': '(1)', 'scope': '"""latent_conv3"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm3'}"}), "(latent_enc2, 64, [3, 3], stride=1, scope='latent_conv3',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'latent_norm3'})\n", (2852, 2998), True, 'import tensorflow.contrib.slim as slim\n'), ((3059, 3259), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc3', 'FLAGS.latent_channels', '[3, 3]'], {'stride': '(2)', 'activation_fn': 'None', 'scope': '"""latent_mean"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm_mean'}"}), "(latent_enc3, FLAGS.latent_channels, [3, 3], stride=2,\n activation_fn=None, scope='latent_mean', normalizer_fn=tf_layers.\n layer_norm, normalizer_params={'scope': 'latent_norm_mean'})\n", (3070, 3259), True, 'import tensorflow.contrib.slim as slim\n'), ((3326, 3505), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc3', 'FLAGS.latent_channels', '[3, 3]'], {'stride': '(2)', 'scope': '"""latent_std"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_std_norm'}"}), "(latent_enc3, FLAGS.latent_channels, [3, 3], stride=2, scope=\n 'latent_std', normalizer_fn=tf_layers.layer_norm, normalizer_params={\n 'scope': 'latent_std_norm'})\n", (3337, 3505), True, 'import tensorflow.contrib.slim as slim\n'), ((3660, 3686), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['divergence'], {}), '(divergence)\n', (3674, 3686), True, 'import tensorflow as tf\n'), ((3769, 3862), 'tensorflow.random_normal', 'tf.random_normal', (['([FLAGS.sequence_length - 1] + latent_mean.shape)', '(0)', '(1)'], {'dtype': 'tf.float32'}), '([FLAGS.sequence_length - 1] + latent_mean.shape, 0, 1,\n dtype=tf.float32)\n', (3785, 3862), True, 'import tensorflow as tf\n'), ((3927, 3986), 'tensorflow.random_normal', 'tf.random_normal', (['latent_mean.shape', '(0)', '(1)'], {'dtype': 'tf.float32'}), '(latent_mean.shape, 0, 1, dtype=tf.float32)\n', (3943, 3986), True, 'import tensorflow as tf\n'), ((5811, 5829), 'tensorflow.identity', 'tf.identity', 
(['image'], {}), '(image)\n', (5822, 5829), True, 'import tensorflow as tf\n'), ((6488, 6527), 'numpy.array', 'np.array', (['[32, 32, 64, 64, 128, 64, 32]'], {}), '([32, 32, 64, 64, 128, 64, 32])\n', (6496, 6527), True, 'import numpy as np\n'), ((13722, 13774), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]', 'np.float32'], {}), '([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32)\n', (13730, 13774), True, 'import numpy as np\n'), ((15047, 15082), 'tensorflow.nn.relu', 'tf.nn.relu', (['(cdna_kerns - RELU_SHIFT)'], {}), '(cdna_kerns - RELU_SHIFT)\n', (15057, 15082), True, 'import tensorflow as tf\n'), ((16957, 16991), 'tensorflow.nn.relu', 'tf.nn.relu', (['(dna_input - RELU_SHIFT)'], {}), '(dna_input - RELU_SHIFT)\n', (16967, 16991), True, 'import tensorflow as tf\n'), ((17754, 17780), 'tensorflow.range', 'tf.range', (['num_ground_truth'], {}), '(num_ground_truth)\n', (17762, 17780), True, 'import tensorflow as tf\n'), ((7085, 7230), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[lstm_func, slim.layers.conv2d, slim.layers.fully_connected, tf_layers.\n layer_norm, slim.layers.conv2d_transpose]'], {'reuse': 'reuse'}), '([lstm_func, slim.layers.conv2d, slim.layers.fully_connected,\n tf_layers.layer_norm, slim.layers.conv2d_transpose], reuse=reuse)\n', (7099, 7230), True, 'import tensorflow.contrib.slim as slim\n'), ((7696, 7745), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[action, current_state]'}), '(axis=1, values=[action, current_state])\n', (7705, 7745), True, 'import tensorflow as tf\n'), ((7760, 7922), 'tensorflow.contrib.slim.layers.conv2d', 'slim.layers.conv2d', (['prev_image', '(32)', '[5, 5]'], {'stride': '(2)', 'scope': '"""scale1_conv1"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'layer_norm1'}"}), "(prev_image, 32, [5, 5], stride=2, scope='scale1_conv1',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'layer_norm1'})\n", (7778, 7922), True, 'import tensorflow.contrib.slim as slim\n'), ((8092, 8142), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden1'], {'scope': '"""layer_norm2"""'}), "(hidden1, scope='layer_norm2')\n", (8112, 8142), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((8261, 8311), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden2'], {'scope': '"""layer_norm3"""'}), "(hidden2, scope='layer_norm3')\n", (8281, 8311), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((8537, 8587), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden3'], {'scope': '"""layer_norm4"""'}), "(hidden3, scope='layer_norm4')\n", (8557, 8587), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((8706, 8756), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden4'], {'scope': '"""layer_norm5"""'}), "(hidden4, scope='layer_norm5')\n", (8726, 8756), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((9935, 9985), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden5'], {'scope': '"""layer_norm6"""'}), "(hidden5, scope='layer_norm6')\n", (9955, 9985), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((10226, 10276), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden6'], {'scope': '"""layer_norm7"""'}), "(hidden6, scope='layer_norm7')\n", (10246, 10276), 
True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((10318, 10359), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[hidden6, enc1]'}), '(axis=3, values=[hidden6, enc1])\n', (10327, 10359), True, 'import tensorflow as tf\n'), ((10614, 10664), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden7'], {'scope': '"""layer_norm8"""'}), "(hidden7, scope='layer_norm8')\n", (10634, 10664), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((10707, 10748), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[hidden7, enc0]'}), '(axis=3, values=[hidden7, enc0])\n', (10716, 10748), True, 'import tensorflow as tf\n'), ((12360, 12463), 'tensorflow.contrib.slim.layers.conv2d_transpose', 'slim.layers.conv2d_transpose', (['enc6', '(num_masks + 1)', '(1)'], {'stride': '(1)', 'scope': '"""convt7"""', 'activation_fn': 'None'}), "(enc6, num_masks + 1, 1, stride=1, scope=\n 'convt7', activation_fn=None)\n", (12388, 12463), True, 'import tensorflow.contrib.slim as slim\n'), ((12656, 12719), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(num_masks + 1)', 'value': 'masks'}), '(axis=3, num_or_size_splits=num_masks + 1, value=masks)\n', (12664, 12719), True, 'import tensorflow as tf\n'), ((13990, 14021), 'spatial_transformer.transformer', 'transformer', (['prev_image', 'params'], {}), '(prev_image, params)\n', (14001, 14021), False, 'from spatial_transformer import transformer\n'), ((17047, 17089), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kernel', '[3]'], {'keep_dims': '(True)'}), '(kernel, [3], keep_dims=True)\n', (17060, 17089), True, 'import tensorflow as tf\n'), ((1452, 1469), 'tensorflow.exp', 'tf.exp', (['log_sigma'], {}), '(log_sigma)\n', (1458, 1469), True, 'import tensorflow as tf\n'), ((9155, 9194), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[enc2, smear]'}), '(axis=3, values=[enc2, smear])\n', (9164, 9194), True, 'import tensorflow as tf\n'), ((11112, 11220), 'tensorflow.contrib.slim.layers.conv2d_transpose', 'slim.layers.conv2d_transpose', (['enc6', '(DNA_KERN_SIZE ** 2)', '(1)'], {'stride': '(1)', 'scope': '"""convt4"""', 'activation_fn': 'None'}), "(enc6, DNA_KERN_SIZE ** 2, 1, stride=1, scope=\n 'convt4', activation_fn=None)\n", (11140, 11220), True, 'import tensorflow.contrib.slim as slim\n'), ((11325, 11429), 'tensorflow.contrib.slim.layers.conv2d_transpose', 'slim.layers.conv2d_transpose', (['enc6', 'color_channels', '(1)'], {'stride': '(1)', 'scope': '"""convt4"""', 'activation_fn': 'None'}), "(enc6, color_channels, 1, stride=1, scope=\n 'convt4', activation_fn=None)\n", (11353, 11429), True, 'import tensorflow.contrib.slim as slim\n'), ((11728, 11788), 'tensorflow.contrib.slim.layers.fully_connected', 'slim.layers.fully_connected', (['stp_input0', '(100)'], {'scope': '"""fc_stp"""'}), "(stp_input0, 100, scope='fc_stp')\n", (11755, 11788), True, 'import tensorflow.contrib.slim as slim\n'), ((1436, 1449), 'tensorflow.square', 'tf.square', (['mu'], {}), '(mu)\n', (1445, 1449), True, 'import tensorflow as tf\n'), ((6349, 6372), 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), '(batch_size)\n', (6360, 6372), True, 'import tensorflow as tf\n'), ((9611, 9644), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[latent]'], {}), '([latent])\n', (9634, 9644), True, 'import tensorflow as tf\n'), ((9663, 9691), 'tensorflow.concat', 'tf.concat', (['[enc2, latent]', '(3)'], {}), '([enc2, latent], 3)\n', (9672, 
9691), True, 'import tensorflow as tf\n'), ((11607, 11626), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['enc7'], {}), '(enc7)\n', (11620, 11626), True, 'import tensorflow as tf\n'), ((12520, 12558), 'tensorflow.reshape', 'tf.reshape', (['masks', '[-1, num_masks + 1]'], {}), '(masks, [-1, num_masks + 1])\n', (12530, 12558), True, 'import tensorflow as tf\n'), ((16758, 16845), 'tensorflow.slice', 'tf.slice', (['prev_image_pad', '[0, xkern, ykern, 0]', '[-1, image_height, image_width, -1]'], {}), '(prev_image_pad, [0, xkern, ykern, 0], [-1, image_height,\n image_width, -1])\n', (16766, 16845), True, 'import tensorflow as tf\n'), ((6385, 6405), 'tensorflow.exp', 'tf.exp', (['(iter_num / k)'], {}), '(iter_num / k)\n', (6391, 6405), True, 'import tensorflow as tf\n'), ((9493, 9512), 'tensorflow.identity', 'tf.identity', (['latent'], {}), '(latent)\n', (9504, 9512), True, 'import tensorflow as tf\n'), ((9563, 9587), 'tensorflow.exp', 'tf.exp', (['(latent_std / 2.0)'], {}), '(latent_std / 2.0)\n', (9569, 9587), True, 'import tensorflow as tf\n')]
|
# noqa: D100
from typing import Optional
import numpy as np
import xarray
from xclim.core.units import (
convert_units_to,
declare_units,
pint_multiply,
rate2amount,
units,
units2pint,
)
from xclim.core.utils import ensure_chunk_size
from ._multivariate import (
daily_temperature_range,
extreme_temperature_range,
precip_accumulation,
)
from ._simple import tg_mean
from .generic import select_resample_op
from .run_length import lazy_indexing
# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #
__all__ = [
"temperature_seasonality",
"precip_seasonality",
"tg_mean_warmcold_quarter",
"tg_mean_wetdry_quarter",
"prcptot_wetdry_quarter",
"prcptot_warmcold_quarter",
"prcptot",
"prcptot_wetdry_period",
"isothermality",
]
_xr_argops = {
"wettest": xarray.DataArray.argmax,
"warmest": xarray.DataArray.argmax,
"dryest": xarray.DataArray.argmin,
"driest": xarray.DataArray.argmin,
"coldest": xarray.DataArray.argmin,
}
_np_ops = {
"wettest": "max",
"warmest": "max",
"dryest": "min",
"driest": "min",
"coldest": "min",
}
@declare_units(tasmin="[temperature]", tasmax="[temperature]")
def isothermality(
tasmin: xarray.DataArray, tasmax: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray:
r"""Isothermality.
The mean diurnal range divided by the annual temperature range.
Parameters
----------
tasmin : xarray.DataArray
Average daily minimum temperature at daily, weekly, or monthly frequency.
tasmax : xarray.DataArray
Average daily maximum temperature at daily, weekly, or monthly frequency.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [%]
Isothermality
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the output with input data with daily frequency as well. As such weekly or monthly input values, if desired, should
be calculated prior to calling the function.
"""
dtr = daily_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
etr = extreme_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
with xarray.set_options(keep_attrs=True):
iso = dtr / etr * 100
iso.attrs["units"] = "%"
return iso
@declare_units(tas="[temperature]")
def temperature_seasonality(tas: xarray.DataArray) -> xarray.DataArray:
r"""ANUCLIM temperature seasonality (coefficient of variation).
The annual temperature coefficient of variation expressed in percent. Calculated as the standard deviation
of temperature values for a given year expressed as a percentage of the mean of those temperatures.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
Returns
-------
xarray.DataArray, [%]
Mean temperature coefficient of variation
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature seasonality:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file).tas
>>> tday_seasonality = xci.temperature_seasonality(t)
>>> t_weekly = xci.tg_mean(t, freq='7D')
>>> tweek_seasonality = xci.temperature_seasonality(t_weekly)
Notes
-----
For this calculation, the mean in degrees Kelvin is used. This avoids the possibility of having to
divide by zero, but it does mean that the values are usually quite small.
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired, should be
calculated prior to calling the function.
"""
tas = convert_units_to(tas, "K")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(tas)
seas.attrs["units"] = "%"
return seas
@declare_units(pr="[precipitation]")
def precip_seasonality(
pr: xarray.DataArray,
) -> xarray.DataArray:
r"""ANUCLIM Precipitation Seasonality (C of V).
The annual precipitation Coefficient of Variation (C of V) expressed in percent. Calculated as the standard deviation
of precipitation values for a given year expressed as a percentage of the mean of those values.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
Units need to be defined as a rate (e.g. mm d-1, mm week-1).
Returns
-------
xarray.DataArray, [%]
Precipitation coefficient of variation
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual precipitation seasonality:
>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> pday_seasonality = xci.precip_seasonality(p)
>>> p_weekly = xci.precip_accumulation(p, freq='7D')
# Input units need to be a rate
>>> p_weekly.attrs['units'] = "mm/week"
>>> pweek_seasonality = xci.precip_seasonality(p_weekly)
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
If input units are in mm s-1 (or equivalent) values are converted to mm/day to avoid potentially small denominator
values.
"""
# If units in mm/sec convert to mm/days to avoid potentially small denominator
if units2pint(pr) == units("mm / s"):
pr = convert_units_to(pr, "mm d-1")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(pr)
seas.attrs["units"] = "%"
return seas
@declare_units(tas="[temperature]")
def tg_mean_warmcold_quarter(
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the mean temperature of this period is
calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods,
otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : str {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate warmest quarter; 'coldest' calculate coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature
warmest quarter mean temperature:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file)
>>> t_warm_qrt = xci.tg_mean_warmcold_quarter(tas=t.tas, op='warmest', src_timestep='daily')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
out = _to_quarter(src_timestep, tas=tas)
oper = _np_ops[op]
out = select_resample_op(out, oper, freq)
out.attrs["units"] = tas.units
return out
@declare_units(tas="[temperature]", pr="[precipitation]")
def tg_mean_wetdry_quarter(
tas: xarray.DataArray,
pr: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the mean temperature of this period is calculated.
If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods, otherwise are 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform: 'wettest' calculate for the wettest quarter; 'driest' calculate for the driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
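Examples
--------
A usage sketch in the style of the other examples in this module, using the same placeholder datasets:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file)
>>> p = xr.open_dataset(path_to_pr_file)
>>> tg_wet_qrt = xci.tg_mean_wetdry_quarter(tas=t.tas, pr=p.pr, op='wettest', src_timestep='D')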
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
tas_qrt = _to_quarter(src_timestep, tas=tas)
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
with xarray.set_options(keep_attrs=True):
out = _from_other_arg(criteria=pr_qrt, output=tas_qrt, op=xr_op, freq=freq)
out.attrs = tas.attrs
return out
@declare_units(pr="[precipitation]")
def prcptot_wetdry_quarter(
pr: xarray.DataArray, op: str = None, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the total precipitation of this
period is calculated. If the input data frequency is daily ("D") or weekly ("W") quarters
are defined as 13 week periods, otherwise are 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform : 'wettest' calculate wettest quarter ; 'driest' calculate driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual wettest quarter total precipitation:
>>> from xclim.indices import prcptot_wetdry_quarter
>>> p = xr.open_dataset(path_to_pr_file)
>>> pr_warm_qrt = prcptot_wetdry_quarter(pr=p.pr, op='wettest', src_timestep='D')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
try:
oper = _np_ops[op]
except KeyError:
raise NotImplementedError(
f'Unknown operation "{op}" ; not one of "wettest" or "driest"'
)
out = select_resample_op(pr_qrt, oper, freq)
out.attrs["units"] = pr_qrt.units
return out
@declare_units(pr="[precipitation]", tas="[temperature]")
def prcptot_warmcold_quarter(
pr: xarray.DataArray,
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the total
precipitation of this period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
are defined as 13 week periods, otherwise are 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate for the warmest quarter ; 'coldest' calculate for the coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray : [mm]
Total precipitation values of the {op} quarter of each year
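Examples
--------
A usage sketch following the pattern of `prcptot_wetdry_quarter` above, with the same placeholder datasets:
>>> from xclim.indices import prcptot_warmcold_quarter
>>> p = xr.open_dataset(path_to_pr_file)
>>> t = xr.open_dataset(path_to_tas_file)
>>> pr_warm_qrt = prcptot_warmcold_quarter(pr=p.pr, tas=t.tas, op='warmest', src_timestep='D')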
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# determine input data frequency
tas_qrt = _to_quarter(src_timestep, tas=tas)
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
out = _from_other_arg(criteria=tas_qrt, output=pr_qrt, op=xr_op, freq=freq)
out.attrs = pr_qrt.attrs
return out
@declare_units(pr="[precipitation]")
def prcptot(
pr: xarray.DataArray, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Accumulated total precipitation.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation.
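Examples
--------
A minimal usage sketch with the same placeholder dataset as the examples above:
>>> from xclim.indices import prcptot
>>> p = xr.open_dataset(path_to_pr_file)
>>> pr_total = prcptot(pr=p.pr, src_timestep='D')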
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well.
"""
pram = rate2amount(pr)
return pram.resample(time=freq).sum(dim="time", keep_attrs=True)
# FIXME: src_timestep is not used here.
@declare_units(pr="[precipitation]")
def prcptot_wetdry_period(
pr: xarray.DataArray, *, op: str, src_timestep: str, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM precipitation of the wettest/driest day, week, or month, depending on the time step.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
op : {'wettest', 'driest'}
Operation to perform : 'wettest' calculate wettest period ; 'driest' calculate driest period.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation of the {op} period.
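Examples
--------
A usage sketch assuming the same placeholder dataset as the examples above:
>>> from xclim.indices import prcptot_wetdry_period
>>> p = xr.open_dataset(path_to_pr_file)
>>> pr_wettest = prcptot_wetdry_period(p.pr, op='wettest', src_timestep='D')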
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
pram = rate2amount(pr)
if op == "wettest":
return pram.resample(time=freq).max(dim="time", keep_attrs=True)
if op == "driest":
return pram.resample(time=freq).min(dim="time", keep_attrs=True)
raise NotImplementedError(
f'Unknown operation "{op}" ; op parameter must be one of "wettest" or "driest"'
)
def _anuclim_coeff_var(arr: xarray.DataArray) -> xarray.DataArray:
"""Calculate the annual coefficient of variation for ANUCLIM indices."""
std = arr.resample(time="YS").std(dim="time")
mu = arr.resample(time="YS").mean(dim="time")
return std / mu
def _from_other_arg(
criteria: xarray.DataArray, output: xarray.DataArray, op, freq: str
) -> xarray.DataArray:
"""Pick values from output based on operation returning an index from criteria.
Parameters
----------
criteria : DataArray
Series on which operation returning index is applied.
output : DataArray
Series to be indexed.
op : func
Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
freq : str
Temporal grouping.
Returns
-------
DataArray
Output values where criteria is met at the given frequency.
"""
ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
dim = "time"
def get_other_op(dataset):
all_nans = dataset.criteria.isnull().all(dim=dim)
index = op(dataset.criteria.where(~all_nans, 0), dim=dim)
return lazy_indexing(dataset.output, index=index, dim=dim).where(~all_nans)
return ds.resample(time=freq).map(get_other_op)
def _to_quarter(
freq: str,
pr: Optional[xarray.DataArray] = None,
tas: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
"""Convert daily, weekly or monthly time series to quarterly time series according to ANUCLIM specifications."""
if freq.upper().startswith("D"):
if tas is not None:
tas = tg_mean(tas, freq="7D")
if pr is not None:
# Accumulate on a week
# Ensure units are back to a "rate" for rate2amount below
pr = convert_units_to(precip_accumulation(pr, freq="7D"), "mm")
pr.attrs["units"] = "mm/week"
freq = "W"
if freq.upper().startswith("W"):
window = 13
elif freq.upper().startswith("M"):
window = 3
else:
raise NotImplementedError(
f'Unknown input time frequency "{freq}": must be one of "D", "W" or "M".'
)
if tas is not None:
tas = ensure_chunk_size(tas, time=np.ceil(window / 2))
if pr is not None:
pr = ensure_chunk_size(pr, time=np.ceil(window / 2))
if pr is not None:
pram = rate2amount(pr)
out = pram.rolling(time=window, center=False).sum()
out.attrs = pr.attrs
out.attrs["units"] = pram.units
if tas is not None:
out = tas.rolling(time=window, center=False).mean(skipna=False)
out.attrs = tas.attrs
out = ensure_chunk_size(out, time=-1)
return out
|
[
"xclim.core.units.units",
"numpy.ceil",
"xclim.core.units.units2pint",
"xclim.core.units.convert_units_to",
"xarray.Dataset",
"xclim.core.units.rate2amount",
"xclim.core.utils.ensure_chunk_size",
"xclim.core.units.declare_units",
"xarray.set_options"
] |
[((1421, 1482), 'xclim.core.units.declare_units', 'declare_units', ([], {'tasmin': '"""[temperature]"""', 'tasmax': '"""[temperature]"""'}), "(tasmin='[temperature]', tasmax='[temperature]')\n", (1434, 1482), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((2760, 2794), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""'}), "(tas='[temperature]')\n", (2773, 2794), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((4548, 4583), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (4561, 4583), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6563, 6597), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""'}), "(tas='[temperature]')\n", (6576, 6597), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((8529, 8585), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'pr': '"""[precipitation]"""'}), "(tas='[temperature]', pr='[precipitation]')\n", (8542, 8585), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((10436, 10471), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (10449, 10471), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((12570, 12626), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""', 'tas': '"""[temperature]"""'}), "(pr='[precipitation]', tas='[temperature]')\n", (12583, 12626), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((14495, 14530), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (14508, 14530), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((15506, 15541), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (15519, 15541), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((4379, 4405), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""K"""'], {}), "(tas, 'K')\n", (4395, 4405), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((15378, 15393), 'xclim.core.units.rate2amount', 'rate2amount', (['pr'], {}), '(pr)\n', (15389, 15393), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((16719, 16734), 'xclim.core.units.rate2amount', 'rate2amount', (['pr'], {}), '(pr)\n', (16730, 16734), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((17961, 18027), 'xarray.Dataset', 'xarray.Dataset', ([], {'data_vars': "{'criteria': criteria, 'output': output}"}), "(data_vars={'criteria': criteria, 'output': output})\n", (17975, 18027), False, 'import xarray\n'), ((19730, 19761), 
'xclim.core.utils.ensure_chunk_size', 'ensure_chunk_size', (['out'], {'time': '(-1)'}), '(out, time=-1)\n', (19747, 19761), False, 'from xclim.core.utils import ensure_chunk_size\n'), ((2642, 2677), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (2660, 2677), False, 'import xarray\n'), ((4416, 4451), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (4434, 4451), False, 'import xarray\n'), ((6343, 6357), 'xclim.core.units.units2pint', 'units2pint', (['pr'], {}), '(pr)\n', (6353, 6357), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6361, 6376), 'xclim.core.units.units', 'units', (['"""mm / s"""'], {}), "('mm / s')\n", (6366, 6376), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6391, 6421), 'xclim.core.units.convert_units_to', 'convert_units_to', (['pr', '"""mm d-1"""'], {}), "(pr, 'mm d-1')\n", (6407, 6421), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6432, 6467), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (6450, 6467), False, 'import xarray\n'), ((10263, 10298), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (10281, 10298), False, 'import xarray\n'), ((19447, 19462), 'xclim.core.units.rate2amount', 'rate2amount', (['pr'], {}), '(pr)\n', (19458, 19462), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((19303, 19322), 'numpy.ceil', 'np.ceil', (['(window / 2)'], {}), '(window / 2)\n', (19310, 19322), True, 'import numpy as np\n'), ((19387, 19406), 'numpy.ceil', 'np.ceil', (['(window / 2)'], {}), '(window / 2)\n', (19394, 19406), True, 'import numpy as np\n')]
|
import os,sys
import webbrowser
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pylab as plt
from matplotlib import ticker
plt.rcParams['font.family'] = 'monospace'
fig = plt.figure()
rect = fig.add_subplot(111, aspect='equal')
data0 = np.loadtxt('data0.dat', delimiter=',')
data1 = np.loadtxt('data1.dat', delimiter=',')
dense = np.loadtxt('dense.dat', delimiter=',')
ID = sys.argv[1]
X = np.arange(-2.0, 2.05, 0.05)
Y = np.arange(-2.0, 2.05, 0.05)
Xm, Ym = np.meshgrid(X, Y)
vmin, vmax = dense.min(), dense.max()
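# If the field takes both signs, make the colour limits symmetric about zero so the
# diverging colormap (coolwarm) is centred on zero.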
if vmin * vmax < 0:
vmin = -abs(max(-vmin, vmax))
vmax = +abs(max(-vmin, vmax))
cr = rect.imshow(dense.reshape((len(Y), len(X))), extent=(X[0], X[-1], Y[0], Y[-1]), vmin=vmin, vmax=vmax, cmap=cm.coolwarm, origin='lower')
plt.contour(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed', linewidths=[2,2])
plt.contour(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed', linewidths=[2])
cb = plt.colorbar(cr, format='%+.1e')
cb.solids.set_edgecolor('face')
cb.set_ticks(ticker.LinearLocator(6))
cb.ax.tick_params(labelsize=12)
rect.scatter(data0[:,0], data0[:,1], marker='v', facecolor='red', edgecolor='black', s=30, lw=1)
rect.scatter(data1[:,0], data1[:,1], marker='^', facecolor='blue', edgecolor='black', s=30, lw=1)
plt.xlim(X[0], X[-1])
plt.ylim(Y[0], Y[-1])
plt.xlabel("")
plt.ylabel("")
plt.grid(ls='dotted')
plt.savefig('{}.svg'.format(ID), bbox_inches='tight', pad_inches=0.1)
plt.savefig('{}.eps'.format(ID), bbox_inches='tight', pad_inches=0.1)
os.remove('dense.dat')
os.remove('data0.dat')
os.remove('data1.dat')
webbrowser.open('file://{}'.format(os.path.realpath('{}.svg'.format(sys.argv[1]))))
|
[
"matplotlib.pylab.xlim",
"matplotlib.pylab.grid",
"matplotlib.pylab.figure",
"matplotlib.use",
"matplotlib.pylab.contour",
"matplotlib.pylab.ylim",
"matplotlib.ticker.LinearLocator",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.colorbar",
"os.remove",
"numpy.meshgrid",
"numpy.loadtxt",
"numpy.arange",
"matplotlib.pylab.ylabel"
] |
[((69, 90), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (83, 90), False, 'import matplotlib\n'), ((228, 240), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (238, 240), True, 'import matplotlib.pylab as plt\n'), ((293, 331), 'numpy.loadtxt', 'np.loadtxt', (['"""data0.dat"""'], {'delimiter': '""","""'}), "('data0.dat', delimiter=',')\n", (303, 331), True, 'import numpy as np\n'), ((340, 378), 'numpy.loadtxt', 'np.loadtxt', (['"""data1.dat"""'], {'delimiter': '""","""'}), "('data1.dat', delimiter=',')\n", (350, 378), True, 'import numpy as np\n'), ((387, 425), 'numpy.loadtxt', 'np.loadtxt', (['"""dense.dat"""'], {'delimiter': '""","""'}), "('dense.dat', delimiter=',')\n", (397, 425), True, 'import numpy as np\n'), ((447, 474), 'numpy.arange', 'np.arange', (['(-2.0)', '(2.05)', '(0.05)'], {}), '(-2.0, 2.05, 0.05)\n', (456, 474), True, 'import numpy as np\n'), ((479, 506), 'numpy.arange', 'np.arange', (['(-2.0)', '(2.05)', '(0.05)'], {}), '(-2.0, 2.05, 0.05)\n', (488, 506), True, 'import numpy as np\n'), ((516, 533), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (527, 533), True, 'import numpy as np\n'), ((795, 894), 'matplotlib.pylab.contour', 'plt.contour', (['Xm', 'Ym', 'dense'], {'levels': '[-1, 1]', 'cmap': 'cm.bwr', 'linestyles': '"""dashed"""', 'linewidths': '[2, 2]'}), "(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed',\n linewidths=[2, 2])\n", (806, 894), True, 'import matplotlib.pylab as plt\n'), ((890, 985), 'matplotlib.pylab.contour', 'plt.contour', (['Xm', 'Ym', 'dense'], {'levels': '[0]', 'colors': '"""black"""', 'linestyles': '"""dashed"""', 'linewidths': '[2]'}), "(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed',\n linewidths=[2])\n", (901, 985), True, 'import matplotlib.pylab as plt\n'), ((987, 1019), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['cr'], {'format': '"""%+.1e"""'}), "(cr, format='%+.1e')\n", (999, 1019), True, 'import matplotlib.pylab as plt\n'), ((1318, 1339), 'matplotlib.pylab.xlim', 'plt.xlim', (['X[0]', 'X[-1]'], {}), '(X[0], X[-1])\n', (1326, 1339), True, 'import matplotlib.pylab as plt\n'), ((1340, 1361), 'matplotlib.pylab.ylim', 'plt.ylim', (['Y[0]', 'Y[-1]'], {}), '(Y[0], Y[-1])\n', (1348, 1361), True, 'import matplotlib.pylab as plt\n'), ((1362, 1376), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1372, 1376), True, 'import matplotlib.pylab as plt\n'), ((1377, 1391), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (1387, 1391), True, 'import matplotlib.pylab as plt\n'), ((1392, 1413), 'matplotlib.pylab.grid', 'plt.grid', ([], {'ls': '"""dotted"""'}), "(ls='dotted')\n", (1400, 1413), True, 'import matplotlib.pylab as plt\n'), ((1554, 1576), 'os.remove', 'os.remove', (['"""dense.dat"""'], {}), "('dense.dat')\n", (1563, 1576), False, 'import os, sys\n'), ((1577, 1599), 'os.remove', 'os.remove', (['"""data0.dat"""'], {}), "('data0.dat')\n", (1586, 1599), False, 'import os, sys\n'), ((1600, 1622), 'os.remove', 'os.remove', (['"""data1.dat"""'], {}), "('data1.dat')\n", (1609, 1622), False, 'import os, sys\n'), ((1065, 1088), 'matplotlib.ticker.LinearLocator', 'ticker.LinearLocator', (['(6)'], {}), '(6)\n', (1085, 1088), False, 'from matplotlib import ticker\n')]
|
import os
import itertools
import importlib
import numpy as np
import random
STRATEGY_FOLDER = "exampleStrats"
RESULTS_FILE = "results.txt"
pointsArray = [[1,5],[0,3]] # The i-j-th element of this array is how many points you receive if you do play i, and your opponent does play j.
moveLabels = ["D","C"]
# D = defect, betray, sabotage, free-ride, etc.
# C = cooperate, stay silent, comply, upload files, etc.
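# Worked reading of pointsArray (my points = pointsArray[myMove][theirMove]):
# both defect (D,D) -> 1 point each
# I defect, they cooperate (D,C) -> I get 5, they get 0
# I cooperate, they defect (C,D) -> I get 0, they get 5
# both cooperate (C,C) -> 3 points each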
# Returns a 2-by-n numpy array. The first axis is which player (0 = us, 1 = opponent)
# The second axis is which turn. (0 = first turn, 1 = next turn, etc.)
# For example, it might return
#
# [[0 0 1] a.k.a. D D C
# [1 1 1]] a.k.a. C C C
#
# if there have been 3 turns, and we have defected twice then cooperated once,
# and our opponent has cooperated all three times.
def getVisibleHistory(history, player, turn):
historySoFar = history[:,:turn].copy()
if player == 1:
historySoFar = np.flip(historySoFar,0)
return historySoFar
def runRound(pair):
moduleA = importlib.import_module(STRATEGY_FOLDER+"."+pair[0])
moduleB = importlib.import_module(STRATEGY_FOLDER+"."+pair[1])
memoryA = None
memoryB = None
LENGTH_OF_GAME = int(200-40*np.log(random.random())) # The games are a minimum of 200 turns long. The np.log here guarantees that every turn after the 200th has an equal (low) chance of being the final turn.
history = np.zeros((2,LENGTH_OF_GAME),dtype=int)
for turn in range(LENGTH_OF_GAME):
playerAmove, memoryA = moduleA.strategy(getVisibleHistory(history,0,turn),memoryA)
playerBmove, memoryB = moduleB.strategy(getVisibleHistory(history,1,turn),memoryB)
history[0,turn] = playerAmove
history[1,turn] = playerBmove
return history
def tallyRoundScores(history):
scoreA = 0
scoreB = 0
ROUND_LENGTH = history.shape[1]
for turn in range(ROUND_LENGTH):
playerAmove = history[0,turn]
playerBmove = history[1,turn]
scoreA += pointsArray[playerAmove][playerBmove]
scoreB += pointsArray[playerBmove][playerAmove]
return scoreA/ROUND_LENGTH, scoreB/ROUND_LENGTH
def outputRoundResults(f, pair, roundHistory, scoresA, scoresB):
f.write(pair[0]+" (P1) VS. "+pair[1]+" (P2)\n")
for p in range(2):
for t in range(roundHistory.shape[1]):
move = roundHistory[p,t]
f.write(moveLabels[move]+" ")
f.write("\n")
f.write("Final score for "+pair[0]+": "+str(scoresA)+"\n")
f.write("Final score for "+pair[1]+": "+str(scoresB)+"\n")
f.write("\n")
def pad(stri, leng):
result = stri
for i in range(len(stri),leng):
result = result+" "
return result
def runFullPairingTournament(inFolder, outFile):
print("Starting tournament, reading files from "+inFolder)
scoreKeeper = {}
STRATEGY_LIST = []
for file in os.listdir(inFolder):
if file.endswith(".py"):
STRATEGY_LIST.append(file[:-3])
for strategy in STRATEGY_LIST:
scoreKeeper[strategy] = 0
f = open(outFile,"w+")
for pair in itertools.combinations(STRATEGY_LIST, r=2):
roundHistory = runRound(pair)
scoresA, scoresB = tallyRoundScores(roundHistory)
outputRoundResults(f, pair, roundHistory, scoresA, scoresB)
scoreKeeper[pair[0]] += scoresA
scoreKeeper[pair[1]] += scoresB
scoresNumpy = np.zeros(len(scoreKeeper))
for i in range(len(STRATEGY_LIST)):
scoresNumpy[i] = scoreKeeper[STRATEGY_LIST[i]]
rankings = np.argsort(scoresNumpy)
f.write("\n\nTOTAL SCORES\n")
for rank in range(len(STRATEGY_LIST)):
i = rankings[-1-rank]
score = scoresNumpy[i]
scorePer = score/(len(STRATEGY_LIST)-1)
f.write("#"+str(rank+1)+": "+pad(STRATEGY_LIST[i]+":",16)+' %.3f'%score+' (%.3f'%scorePer+" average)\n")
f.flush()
f.close()
print("Done with everything! Results file written to "+RESULTS_FILE)
runFullPairingTournament(STRATEGY_FOLDER, RESULTS_FILE)
|
[
"numpy.flip",
"os.listdir",
"importlib.import_module",
"itertools.combinations",
"numpy.argsort",
"numpy.zeros",
"random.random"
] |
[((1069, 1125), 'importlib.import_module', 'importlib.import_module', (["(STRATEGY_FOLDER + '.' + pair[0])"], {}), "(STRATEGY_FOLDER + '.' + pair[0])\n", (1092, 1125), False, 'import importlib\n'), ((1137, 1193), 'importlib.import_module', 'importlib.import_module', (["(STRATEGY_FOLDER + '.' + pair[1])"], {}), "(STRATEGY_FOLDER + '.' + pair[1])\n", (1160, 1193), False, 'import importlib\n'), ((1462, 1502), 'numpy.zeros', 'np.zeros', (['(2, LENGTH_OF_GAME)'], {'dtype': 'int'}), '((2, LENGTH_OF_GAME), dtype=int)\n', (1470, 1502), True, 'import numpy as np\n'), ((2994, 3014), 'os.listdir', 'os.listdir', (['inFolder'], {}), '(inFolder)\n', (3004, 3014), False, 'import os\n'), ((3249, 3291), 'itertools.combinations', 'itertools.combinations', (['STRATEGY_LIST'], {'r': '(2)'}), '(STRATEGY_LIST, r=2)\n', (3271, 3291), False, 'import itertools\n'), ((3711, 3734), 'numpy.argsort', 'np.argsort', (['scoresNumpy'], {}), '(scoresNumpy)\n', (3721, 3734), True, 'import numpy as np\n'), ((982, 1006), 'numpy.flip', 'np.flip', (['historySoFar', '(0)'], {}), '(historySoFar, 0)\n', (989, 1006), True, 'import numpy as np\n'), ((1276, 1291), 'random.random', 'random.random', ([], {}), '()\n', (1289, 1291), False, 'import random\n')]
|
from polymath import UNSET_SHAPE, DEFAULT_SHAPES
import builtins
import operator
from collections import OrderedDict, deque
from collections.abc import Mapping, Sequence
import functools
from numbers import Integral, Rational, Real
import contextlib
import traceback
import uuid
import numpy as np
import importlib
from .graph import Graph
from .domain import Domain
from .util import _noop_callback, _flatten_iterable, node_hash, \
_is_node_type_instance, is_iterable
class Node(object):
"""
Base class for nodes.
Parameters
----------
args : tuple
Positional arguments passed to the `_evaluate` method.
name : str or None
Name of the node or `None` to use a random, unique identifier.
shape : tuple or None
Shape of the output for a node. This can be a tuple of integers or parameter node names.
graph : Node or None
Parent graph of this node. If graph is `None`, this is the top-level graph.
op_name : str
Operation name which describes the node functionality.
value : Any or None
If a node has a default value to use for execution, it can be set using `value`.
kwargs : dict
Keyword arguments passed to the `_evaluate` method.
"""
_graph_stack = deque([None])
_eval_stack = []
stack_size = 5
evaluated_nodes = 0
def __init__(self, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
self.nodes = Graph()
self.value = value
self.dependencies = []
self._args = []
self._predecessors = []
self._successors = []
self.args = args
if "name" in kwargs:
kwargs.pop("name")
self.added_attrs = []
# TODO: Change this to an underscore-prefixed private variable
self.kwargs = kwargs
self.graph = graph
self._shape = OrderedDict()
self.shape = shape or tuple([])
# Get a list of all dependencies relevant to this node
self.dependencies = [] if dependencies is None else dependencies
if self.graph:
self.dependencies.extend(self.graph.dependencies)
# Choose a name for the node and add the node to the graph
self._name = None
self.name = name or uuid.uuid4().hex
self._op_name = None
self.op_name = op_name
# Get the stack context so we can report where the node was defined
self._stack = traceback.extract_stack(limit=1)
@property
def graph(self):
"""
polymath.srdfg.graph.Graph : Parent graph of this node. If graph is `None`, this is the top-level graph.
"""
return self._graph
def preds(self):
return self._predecessors
def succs(self):
return self._successors
def add_predecessor(self, pred):
if isinstance(pred, Node):
self._predecessors.append(pred.gname)
else:
self._predecessors.append(pred)
def add_successor(self, succ):
if isinstance(succ, Node):
self._successors.append(succ.gname)
else:
self._successors.append(succ)
def set_edges(self):
for e in self.args:
self.add_predecessor(e)
if isinstance(e, Node):
e.add_successor(self)
@property
def domain(self):
return Domain(tuple([]))
@property
def args(self):
"""
tuple : Positional arguments which are used for executing this node.
"""
return tuple(self._args)
@property
def argnames(self):
return [a.name if isinstance(a, Node) else a for a in self.args]
@property
def shape(self):
"""
tuple : Shape of the output for a node. This can be a tuple of integers or parameter node names.
"""
return self._shape
@property
def var(self):
return self
@property
def name(self):
"""str : Unique name of the node"""
return self._name
@property
def op_name(self):
"""
str : Operation name which describes the node functionality.
"""
return self._op_name
@op_name.setter
def op_name(self, op_name):
if op_name:
self._op_name = op_name
elif self.__class__.__name__ == "Node":
self._op_name = self.name
else:
self._op_name = self.__class__.__name__
@name.setter
def name(self, name):
self.set_name(name)
@args.setter
def args(self, args):
new_args = []
for arg in args:
if isinstance(arg, Node):
if self.__class__.__name__ == "Node":
self.nodes[arg.name] = self.graph[arg.name]
new_args.append(arg)
self._args = tuple(new_args)
@shape.setter
def shape(self, shape):
self.set_shape(shape, init=True)
@graph.setter
def graph(self, graph):
self._graph = Node.get_active_graph(graph)
@property
def gname(self):
scope_names = [self.name]
cgraph = self.graph
while cgraph:
scope_names.append(cgraph.name)
cgraph = cgraph.graph
return "/".join(list(reversed(scope_names)))
def __enter__(self):
Node._graph_stack.append(self)
return self
def __exit__(self, *args):
assert self == Node._graph_stack.pop()
def __repr__(self):
return "<node '%s'>" % self.name
def add_attribute(self, key, value):
self.added_attrs.append(key)
self.kwargs[key] = value
def is_shape_finalized(self):
if self.shape == UNSET_SHAPE:
return False
for s in self.shape:
if not isinstance(s, Integral):
return False
return True
def set_shape(self, shape=None, init=False):
if isinstance(shape, float):
self._shape = tuple([int(shape)])
elif isinstance(shape, Integral):
self._shape = tuple([shape])
elif isinstance(shape, Node):
self._shape = tuple([shape])
elif not shape or len(shape) == 0:
# TODO: Change in order to enable "is shape finalized" to work
self._shape = UNSET_SHAPE
else:
shapes = []
for dim in shape:
if isinstance(dim, (Node, Integral)):
shapes.append(dim)
elif isinstance(dim, float):
shapes.append(int(dim))
else:
raise TypeError(f"Shape value must be placeholder or integer value for {self.name}\n"
f"\tDim: {dim}"
f"\n\t{self.kwargs} ")
self._shape = tuple(shapes)
@staticmethod
def get_active_graph(graph=None):
"""
Obtain the currently active graph instance by returning the explicitly given graph or using
the default graph.
Parameters
----------
graph : Node or None
Graph to return or `None` to use the default graph.
Raises
------
ValueError
If no `Graph` instance can be obtained.
"""
graph = graph or Node._graph_stack[-1]
return graph
def instantiate_node(self, node): # pylint:disable=W0621
"""
Instantiate nodes by retrieving the node object associated with the node name.
Parameters
----------
node : Node or str
Node instance or name of a node.
Returns
-------
instantiated_node : Node
Node instance.
Raises
------
ValueError
If `node` is not a `Node` instance or a node name.
RuntimeError
If `node` is a `Node` instance but does not belong to this graph.
"""
if isinstance(node, str):
return self.nodes[node]
if isinstance(node, Node):
if node.name not in self.nodes and (node.graph != self):
raise RuntimeError(f"node '{node}' does not belong to {self} graph, instead belongs to"
f" {node.graph}")
return node
raise ValueError(f"'{node}' is not an `Node` instance or node name")
def instantiate_graph(self, context, **kwargs):
"""
Instantiate a graph by replacing all node names with node instances.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
context : dict[Node or str, object]
Context whose keys are node instances or names.
kwargs : dict[str, object]
Additional context information keyed by variable name.
Returns
-------
normalized_context : dict[Node, object]
Normalized context whose keys are node instances.
Raises
------
ValueError
If the context specifies more than one value for any node.
ValueError
If `context` is not a mapping.
"""
if context is None:
context = {}
elif not isinstance(context, Mapping):
raise ValueError("`context` must be a mapping.")
nodes = list(context)
# Add the keyword arguments
for node in nodes: # pylint:disable=W0621
value = context.pop(node)
node = self.instantiate_node(node)
if node in context:
raise ValueError(f"duplicate unequal value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
for name, value in kwargs.items():
node = self.nodes[name]
if node in context:
raise ValueError(f"duplicate value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
return context
def run(self, fetches, context=None, *, callback=None, **kwargs):
"""
Evaluate one or more nodes given a dictionary of node names with their values.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
fetches : list[str or Node] or str or Node
One or more `Node` instances or names to evaluate.
context : dict or None
Context in which to evaluate the nodes.
callback : callable or None
Callback to be evaluated when a node is evaluated.
kwargs : dict
Additional context information keyed by variable name.
Returns
-------
values : Node or tuple[object]
Output of the nodes given the context.
Raises
------
ValueError
If `fetches` is not a `Node` instance, node name, or a sequence thereof.
"""
if isinstance(fetches, (str, Node)):
fetches = [fetches]
single = True
elif isinstance(fetches, Sequence):
single = False
else:
raise ValueError("`fetches` must be an `Node` instance, node name, or a "
"sequence thereof.")
fetches = [self.instantiate_node(node) for node in fetches]
context = self.instantiate_graph(context, **kwargs)
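# For stateful targets ("output", "state", "temp"), redirect each fetch to the most
# recently written version of the node (each path component suffixed with write_count - 1).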
for c in context:
if c in fetches and c.op_name in ["output", "state", "temp"]:
write_name = "/".join([f"{i}{c.write_count-1}" for i in c.name.split("/")]) if c.write_count > 0 else c.name
fetches[fetches.index(c)] = c.graph.nodes[write_name]
values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]
return values[0] if single else tuple(values)
def __getstate__(self):
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
def set_name(self, name):
"""
Set the name of the node and update the graph.
Parameters
----------
name : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
If a node with `name` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
name = name or uuid.uuid4().hex
# TODO: Need a way to check if the existing node is not equal to the current node as well
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:\n\t"
f"Existing: {self.graph.nodes[name].args}\n\t"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def evaluate_dependencies(self, context, callback=None):
"""
Evaluate the dependencies of this node and discard the values.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
Callback to be evaluated when a node is evaluated.
"""
for node in self.dependencies:
node.evaluate(context, callback)
def evaluate(self, context, callback=None):
"""
Evaluate the node given a context.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
Callback to be evaluated when a node is evaluated.
Returns
-------
value : object
Output of the node given the context.
"""
# Evaluate all explicit dependencies first
self.evaluate_dependencies(context, callback)
if self in context:
return context[self]
# Evaluate the parents
partial = functools.partial(self.evaluate_node, context=context, callback=callback)
args = [partial(arg) for arg in self.args]
kwargs = {key: partial(value) for key, value in self.kwargs.items() if key not in self.added_attrs}
# Evaluate the node
callback = callback or _noop_callback
with callback(self, context):
if self.__class__.__name__ == "Node":
context[self] = self.value = self._evaluate(*args, context=context, **kwargs)
else:
context[self] = self.value = self._evaluate(*args, **kwargs)
return self.value
def _evaluate(self, *args, context=None, **kwargs):
"""
Inheriting nodes should implement this function to evaluate the node.
"""
return self(*args, context, **kwargs)
@classmethod
def evaluate_node(cls, node, context, **kwargs):
"""
Evaluate a node or constant given a context.
"""
Node.evaluated_nodes += 1
try:
if isinstance(node, Node):
Node._eval_stack.append(node.name)
return node.evaluate(context, **kwargs)
partial = functools.partial(cls.evaluate_node, context=context, **kwargs)
if isinstance(node, tuple):
return tuple(partial(element) for element in node)
if isinstance(node, list):
return [partial(element) for element in node]
if isinstance(node, dict):
return {partial(key): partial(value) for key, value in node.items()}
if isinstance(node, slice):
return slice(*[partial(getattr(node, attr))
for attr in ['start', 'stop', 'step']])
return node
except Exception as ex: # pragma: no cover
messages = []
interactive = False
if isinstance(node, Node) or not is_iterable(node):
node = [node]
for n in node:
stack = []
if isinstance(n, Node):
for frame in reversed(n._stack): # pylint: disable=protected-access
# Do not capture any internal stack traces
fname = frame.filename
if 'polymath' in fname:
continue
# Stop tracing at the last interactive cell
if interactive and not fname.startswith('<'):
break # pragma: no cover
interactive = fname.startswith('<')
stack.append(frame)
stack = "".join(traceback.format_list(reversed(stack)))
message = "Failed to evaluate node `%s` defined at:\n\n%s" % (n, stack)
messages.append(message)
raise ex from EvaluationError("".join(messages))
@classmethod
def init_from_args(cls, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
if len(args) == 0:
n = cls(name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
else:
n = cls(*args,
name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
return n
def __bool__(self):
return True
def __hash__(self):
return id(self)
def func_hash(self):
"""
This returns the functional hash of a particular node. The default hash returns an object id, whereas this function
returns a hash of all attributes and subgraphs of a node.
"""
return node_hash(self)
def find_node(self, name):
g = self.graph
while g is not None and name not in g.nodes:
g = g.graph
if name in g.nodes:
return g.nodes[name]
raise RuntimeError(f"Cannot find {name} in graph nodes. Graph: {self.graph}")
def __len__(self):
# TODO: Update this to check for finalized shape
if self.shape == UNSET_SHAPE:
raise TypeError(f'`shape` must be specified explicitly for nodes {self}')
return self.shape[0]
def __iter__(self):
num = len(self)
for i in range(num):
yield self[i]
def __eq__(self, other):
return hash(self) == hash(other)
def __getattr__(self, name):
return getattr_(self, name, graph=self.graph)
def __getitem__(self, key):
if self.__class__.__name__ != "Node":
if isinstance(key, (slice, Integral)):
return getitem(self, key, graph=self.graph)
else:
if isinstance(key, (list)):
return var_index(self, key, graph=self)
elif isinstance(key, tuple):
return var_index(self, list(key), graph=self)
else:
return var_index(self, [key], graph=self)
else:
return self.nodes[key]
def __add__(self, other):
return add(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__radd__(self)
def __radd__(self, other):
return add(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__add__(self)
def __sub__(self, other):
return sub(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rsub__(self)
def __rsub__(self, other):
return sub(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__sub__(self)
def __pow__(self, other):
return pow_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __rpow__(self, other):
return pow_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __matmul__(self, other):
return matmul(self, other, graph=self.graph)
def __rmatmul__(self, other):
return matmul(other, self, graph=self.graph)
def __mul__(self, other):
return mul(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmul__(self)
def __rmul__(self, other):
return mul(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mul__(self)
def __truediv__(self, other):
return truediv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__truediv__(self)
def __rtruediv__(self, other):
return truediv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rtruediv__(self)
def __floordiv__(self, other):
return floordiv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rfloordiv__(self)
def __rfloordiv__(self, other):
return floordiv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__floordiv__(self)
def __mod__(self, other):
return mod(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmod__(self)
def __rmod__(self, other):
return mod(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mod__(self)
def __lshift__(self, other):
return lshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rlshift__(self)
def __rlshift__(self, other):
return lshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lshift__(self)
def __rshift__(self, other):
return rshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rrshift__(self)
def __rrshift__(self, other):
return rshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rshift__(self)
def __and__(self, other):
return and_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rand__(self)
def __rand__(self, other):
return and_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__and__(self)
def __or__(self, other):
return or_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ror__(self)
def __ror__(self, other):
return or_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__or__(self)
def __xor__(self, other):
return xor(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rxor__(self)
def __rxor__(self, other):
return xor(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__xor__(self)
def __lt__(self, other):
return lt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__gt__(self)
def __le__(self, other):
return le(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ge__(self)
def __ne__(self, other):
return ne(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ne__(self)
def __gt__(self, other):
return gt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lt__(self)
def __ge__(self, other):
return ge(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__le__(self)
def __invert__(self):
return inv(self, graph=self.graph)
def __neg__(self):
return neg(self, graph=self.graph)
def __abs__(self):
return abs_(self, graph=self.graph)
def __pos__(self):
return pos(self, graph=self.graph)
def __reversed__(self):
return reversed_(self, graph=self.graph)
def update_graph_key(self, old_key, new_key):
n = list(map(lambda k: (new_key, self.nodes[k]) if k == old_key else (k, self.nodes[k]), self.nodes.keys()))
self.nodes = Graph(n)
def insert_node(self, node, idx):
node_list = list(self.nodes.items())
node_list.insert(idx, (node.name, node))
self.nodes = Graph(node_list)
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
class EvaluationError(RuntimeError):
"""
Failed to evaluate a node.
"""
class var_index(Node): # pylint: disable=C0103,W0223
"""
Node representing values of a variable corresponding to input index values.
Parameters
----------
var : Node
The multi-dimensional variable used for indexing into.
idx : tuple
Tuple of either integer values or index/index_op nodes.
"""
def __init__(self, var, idx, name=None, **kwargs): # pylint: disable=W0235
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
domain = Domain(idx)
super(var_index, self).__init__(var, idx, name=name, domain=domain, **kwargs)
@property
def domain(self):
return self.kwargs["domain"]
@property
def var(self):
var, index_list = self.args
return var
def set_name(self, name):
"""
Set the name for a variable index, making sure the new name is paired with
a unique string which corresponds to the variable and index combination.
Parameters
----------
name : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
If a node with `name` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
# TODO: Need a way to check if the existing node is not equal to the current node as well
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:"
f"Existing: {self.graph.nodes[name].args}\n"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name is not None and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def __getitem__(self, key):
if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):
if isinstance(key, Integral):
key = tuple([key])
idx = np.ravel_multi_index(key, dims=self.shape, order='C')
ret = self.nodes.item_by_index(idx)
return ret
else:
if isinstance(key, (list)):
ret = var_index(self.var, tuple(key), graph=self)
elif isinstance(key, tuple):
ret = var_index(self.var, key, graph=self)
else:
ret = var_index(self.var, tuple([key]), graph=self)
return ret
def is_scalar(self, val=None):
if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):
if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):
raise ValueError(f"Invalid shape var for var index {self} with variable shape {self.var.shape}")
return True
else:
return self.var.shape == DEFAULT_SHAPES[0]
def _evaluate(self, var, indices, **kwargs):
if self.is_scalar(var):
out_shape = (1,)
indices = (0,)
single = True
else:
out_shape = self.domain.shape_from_indices(indices)
indices = self.domain.compute_pairs()
single = False
if isinstance(var, (Integral, Real, str)):
var = np.asarray([var])
elif not isinstance(var, (np.ndarray, list)):
raise TypeError(f"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}")
elif isinstance(var, list):
var = np.asarray(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):
if len(out_shape) > len(var.shape):
for i in range(len(out_shape)):
if out_shape[i] == 1:
var = np.expand_dims(var, axis=i)
else:
var = np.squeeze(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):
raise ValueError(f"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}"
f"dimensions for slice {self.args[0].name} with {out_shape}.\n"
f"Domain: {self.domain}\n"
f"Eval Stack: {Node._eval_stack}")
if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):
raise ValueError(f"var_index {self.name} has indices which are greater than the variable shape:\n"
f"\tArgs: {self.args}\n"
f"\tVar shape: {var.shape}\n"
f"\tNode shape: {self.var.shape}\n"
f"\tIndex Upper bounds: {indices[-1]}")
indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))
res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)
if out_shape == (1,) and len(indices) == 1:
res = res[0]
self.domain.set_computed(out_shape, indices)
return res
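    # Illustrative sketch of the gather performed by _evaluate above (toy values, not
    # taken from this module): with var = np.asarray([[1, 2], [3, 4]]) and computed
    # index pairs [(0, 0), (0, 1), (1, 0), (1, 1)], the result is
    # np.asarray([var[idx] for idx in pairs]).reshape((2, 2)), i.e. [[1, 2], [3, 4]].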
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
        return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<var_index name=%s, index=%s>" % (self.name, self.args)
class slice_op(Node):
"""
Node representing multi-dimensional operations performed on a node.
Parameters
----------
    target : callable
        The operation applied to the sliced operands.
    args : tuple
        Operand nodes or values the operation is applied to.
"""
def __init__(self, target, *args, **kwargs):
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
all_args = _flatten_iterable(args)
slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
domain = slice1_idx.combine_set_domains(slice2_idx)
if "op_name" in kwargs:
kwargs.pop("op_name")
target_name = f"{target.__module__}.{target.__name__}"
super(slice_op, self).__init__(*args, target=target_name, domain=domain, op_name=f"slice_{target.__name__}", **kwargs)
self.target = target
@property
def domain(self):
return self.kwargs["domain"]
def __getitem__(self, key):
if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:
return self
elif self.is_shape_finalized() and len(self.nodes) > 0:
if isinstance(key, (int, Node)):
key = tuple([key])
if len(key) != len(self.shape):
raise KeyError(f"Invalid key shape for {self.name}:\n"
f"Shape: {self.shape}\n"
f"Key: {key}")
name = f"{self.name}{key}"
if name not in self.nodes.keys():
raise KeyError(f"{name} not in {self.name} keys:\n"
f"Node keys: {list(self.nodes.keys())}")
ret = self.nodes[name]
return ret
else:
name = []
if isinstance(key, Node):
name.append(key.name)
elif hasattr(key, "__len__") and not isinstance(key, str):
for k in key:
if isinstance(k, Node):
name.append(k.name)
else:
name.append(str(k))
else:
name.append(key)
name = self.var.name + "[" + "][".join(name) + "]"
if name in self.graph.nodes:
return self.graph.nodes[name]
elif isinstance(key, (list)):
return var_index(self, key, name=name, graph=self.graph)
elif isinstance(key, tuple):
return var_index(self, list(key), name=name, graph=self.graph)
else:
return var_index(self, [key], name=name, graph=self.graph)
def set_shape(self, shape=None, init=False):
s = []
assert isinstance(shape, (tuple, list))
if all([isinstance(sv, Integral) for sv in shape]) and len(self.domain) == np.product(shape) and len(shape) > 0:
self._shape = shape if isinstance(shape, tuple) else tuple(shape)
else:
for idx, d in enumerate(self.domain.dom_set):
if shape and isinstance(shape[idx], (func_op, Integral)):
s.append(shape[idx])
elif shape and isinstance(shape[idx], float):
s.append(int(shape[idx]))
elif isinstance(d, float):
s.append(int(d))
elif isinstance(d, var_index):
s.append(d.domain)
else:
s.append(d)
self._shape = tuple(s)
def is_scalar(self, val):
return not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)
def _evaluate(self, op1, op2, context=None, **kwargs):
if self.is_scalar(op1) or self.is_scalar(op2):
value = self.target(op1, op2)
else:
arg0_dom = self.args[0].domain
arg1_dom = self.args[1].domain
op1_idx = self.domain.map_sub_domain(arg0_dom) if isinstance(self.args[0], Node) else tuple([])
op2_idx = self.domain.map_sub_domain(arg1_dom) if isinstance(self.args[1], Node) else tuple([])
op1 = np.asarray(list(map(lambda x: op1[x], op1_idx))).reshape(self.domain.computed_shape)
op2 = np.asarray(list(map(lambda x: op2[x], op2_idx))).reshape(self.domain.computed_shape)
value = self.target(op1, op2)
return value
def get_index_nodes(self, slice1_var=None, slice2_var=None):
if slice1_var is None and slice2_var is None:
slice1_var, slice2_var = self.args
if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
slice1_idx = slice1_var.domain
elif _is_node_type_instance(slice1_var, "index"):
slice1_idx = slice1_var.domain
else:
slice1_idx = Domain(tuple([]))
if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
slice2_idx = slice2_var.domain
elif _is_node_type_instance(slice2_var, "index"):
slice2_idx = slice2_var.domain
else:
slice2_idx = Domain(tuple([]))
return slice1_var, slice1_idx, slice2_var, slice2_idx
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
        return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<slice_%s '%s'>" % (self.target.__name__, self.name)
class func_op(Node): # pylint: disable=C0103,R0903
"""
Node wrapper for stateless functions.
Parameters
----------
target : callable
function to evaluate the node
args : tuple
positional arguments passed to the target
kwargs : dict
        keyword arguments passed to the target
"""
def __init__(self, target, *args, **kwargs):
kwargs["op_name"] = kwargs["op_name"] if "op_name" in kwargs \
else f"{target.__name__}"
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
elif len(args) == 2:
all_args = _flatten_iterable(args)
slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
domain = slice1_idx.combine_set_domains(slice2_idx)
else:
domain = Domain(tuple([]))
self._target = None
super(func_op, self).__init__(*args, target=f"{target.__module__}.{target.__name__}", domain=domain, **kwargs)
self.target = target
self.added_attrs += ["domain", "target"]
@property
def target(self):
return self._target
@target.setter
def target(self, fnc):
self._target = fnc
self.op_name = f"{fnc.__name__}"
self.kwargs["target"] = f"{fnc.__module__}.{fnc.__name__}"
def __getitem__(self, key):
return self
@property
def domain(self):
return self.kwargs["domain"]
def get_index_nodes(self, slice1_var=None, slice2_var=None):
if slice1_var is None and slice2_var is None:
slice1_var, slice2_var = self.args
if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
slice1_idx = slice1_var.domain
else:
slice1_idx = Domain(tuple([]))
if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
slice2_idx = slice2_var.domain
else:
slice2_idx = Domain(tuple([]))
return slice1_var, slice1_idx, slice2_var, slice2_idx
def _evaluate(self, *args, **kwargs):
for aa in list(kwargs.keys()):
if aa in self.added_attrs:
kwargs.pop(aa)
return self.target(*args, **kwargs)
def __call__(self, *args, **kwargs):
return call(self, *args, **kwargs)
def __repr__(self):
return "<func_op '%s' target=%s args=<%d items>>" % \
(self.name, self.kwargs["target"], len(self.args))
def nodeop(target=None, **kwargs):
"""
Decorator for creating nodes from functions.
"""
# This is called when the decorator is used with arguments
if target is None:
return functools.partial(nodeop, **kwargs)
# This is called when the decorator is used without arguments
@functools.wraps(target)
def _wrapper(*args, **kwargs_inner):
return func_op(target, *args, **kwargs_inner, **kwargs)
return _wrapper
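# Illustrative sketch: `nodeop` turns an ordinary function into a node factory, so calling
# the decorated function records a func_op instead of computing a value immediately.
# The function below is an assumed example, not part of this module.
#
#     @nodeop
#     def scale(x, factor=2):
#         return x * factor
#
#     node = scale(3)   # a func_op node; the product is computed at graph evaluation time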
@nodeop
def call(func, *args, **kwargs):
"""
Call `func` with positional arguments `args` and keyword arguments `kwargs`.
Parameters
----------
func : callable
Function to call when the node is executed.
args : list
Sequence of positional arguments passed to `func`.
kwargs : dict
Mapping of keyword arguments passed to `func`.
"""
return func(*args, **kwargs)
@contextlib.contextmanager
def control_dependencies(dependencies, graph=None):
"""
Ensure that all `dependencies` are executed before any nodes in this scope.
Parameters
----------
dependencies : list
        Sequence of nodes to be evaluated before evaluating any nodes defined in this
scope.
"""
# Add dependencies to the graph
graph = Node.get_active_graph(graph)
graph.dependencies.extend(dependencies)
yield
# Remove dependencies from the graph
del graph.dependencies[-len(dependencies):]
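# Illustrative sketch (node names are assumptions): force `setup` to be evaluated before
# any node created inside the scope.
#
#     with control_dependencies([setup]):
#         result = call(compute_fn)   # picks up `setup` as a dependency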
#pylint: disable=C0103
abs_ = nodeop(builtins.abs)
dict_ = nodeop(builtins.dict)
help_ = nodeop(builtins.help)
min_ = nodeop(builtins.min)
setattr_ = nodeop(builtins.setattr)
all_ = nodeop(builtins.all)
dir_ = nodeop(builtins.dir)
hex_ = nodeop(builtins.hex)
next_ = nodeop(builtins.next)
slice_ = nodeop(builtins.slice)
any_ = nodeop(builtins.any)
divmod_ = nodeop(builtins.divmod)
id_ = nodeop(builtins.id)
object_ = nodeop(builtins.object)
sorted_ = nodeop(builtins.sorted)
ascii_ = nodeop(builtins.ascii)
enumerate_ = nodeop(builtins.enumerate)
input_ = nodeop(builtins.input)
oct_ = nodeop(builtins.oct)
staticmethod_ = nodeop(builtins.staticmethod)
bin_ = nodeop(builtins.bin)
eval_ = nodeop(builtins.eval)
int_ = nodeop(builtins.int)
open_ = nodeop(builtins.open)
str_ = nodeop(builtins.str)
bool_ = nodeop(builtins.bool)
exec_ = nodeop(builtins.exec)
isinstance_ = nodeop(builtins.isinstance)
ord_ = nodeop(builtins.ord)
sum_ = nodeop(builtins.sum)
bytearray_ = nodeop(builtins.bytearray)
filter_ = nodeop(builtins.filter)
issubclass_ = nodeop(builtins.issubclass)
pow_ = nodeop(builtins.pow)
super_ = nodeop(builtins.super)
bytes_ = nodeop(builtins.bytes)
float_ = nodeop(builtins.float)
iter_ = nodeop(builtins.iter)
print_ = nodeop(builtins.print)
tuple_ = nodeop(builtins.tuple)
callable_ = nodeop(builtins.callable)
format_ = nodeop(builtins.format)
len_ = nodeop(builtins.len)
property_ = nodeop(builtins.property)
type_ = nodeop(builtins.type)
chr_ = nodeop(builtins.chr)
frozenset_ = nodeop(builtins.frozenset)
list_ = nodeop(builtins.list)
range_ = nodeop(builtins.range)
vars_ = nodeop(builtins.vars)
classmethod_ = nodeop(builtins.classmethod)
getattr_ = nodeop(builtins.getattr)
locals_ = nodeop(builtins.locals)
repr_ = nodeop(builtins.repr)
zip_ = nodeop(builtins.zip)
compile_ = nodeop(builtins.compile)
globals_ = nodeop(builtins.globals)
map_ = nodeop(builtins.map)
reversed_ = nodeop(builtins.reversed)
complex_ = nodeop(builtins.complex)
hasattr_ = nodeop(builtins.hasattr)
max_ = nodeop(builtins.max)
round_ = nodeop(builtins.round)
delattr_ = nodeop(builtins.delattr)
hash_ = nodeop(builtins.hash)
memoryview_ = nodeop(builtins.memoryview)
set_ = nodeop(builtins.set)
add = nodeop(operator.add)
and_ = nodeop(operator.and_)
attrgetter = nodeop(operator.attrgetter)
concat = nodeop(operator.concat)
contains = nodeop(operator.contains)
countOf = nodeop(operator.countOf)
delitem = nodeop(operator.delitem)
eq = nodeop(operator.eq)
floordiv = nodeop(operator.floordiv)
ge = nodeop(operator.ge)
getitem = nodeop(operator.getitem)
gt = nodeop(operator.gt)
index = nodeop(operator.index)
indexOf = nodeop(operator.indexOf)
inv = nodeop(operator.inv)
invert = nodeop(operator.invert)
ior = nodeop(operator.ior)
ipow = nodeop(operator.ipow)
irshift = nodeop(operator.irshift)
is_ = nodeop(operator.is_)
is_not = nodeop(operator.is_not)
itemgetter = nodeop(operator.itemgetter)
le = nodeop(operator.le)
length_hint = nodeop(operator.length_hint)
lshift = nodeop(operator.lshift)
lt = nodeop(operator.lt)
matmul = nodeop(operator.matmul)
methodcaller = nodeop(operator.methodcaller)
mod = nodeop(operator.mod)
mul = nodeop(operator.mul)
ne = nodeop(operator.ne)
neg = nodeop(operator.neg)
not_ = nodeop(operator.not_)
or_ = nodeop(operator.or_)
pos = nodeop(operator.pos)
rshift = nodeop(operator.rshift)
setitem = nodeop(operator.setitem)
sub = nodeop(operator.sub)
truediv = nodeop(operator.truediv)
truth = nodeop(operator.truth)
xor = nodeop(operator.xor)
import_ = nodeop(importlib.import_module)
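# Illustrative sketch: the wrappers above expose builtins and operator functions as lazy
# nodes, e.g. (toy values, assumed graph context)
#
#     total = add(2, 3)          # func_op wrapping operator.add; evaluates to 5
#     largest = max_([1, 4, 2])  # func_op wrapping builtins.max; evaluates to 4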
|
[
"numpy.product",
"numpy.prod",
"collections.OrderedDict",
"collections.deque",
"traceback.extract_stack",
"numpy.ravel_multi_index",
"numpy.asarray",
"functools.wraps",
"uuid.uuid4",
"numpy.squeeze",
"functools.partial",
"numpy.expand_dims",
"numpy.int"
] |
[((1238, 1251), 'collections.deque', 'deque', (['[None]'], {}), '([None])\n', (1243, 1251), False, 'from collections import OrderedDict, Mapping, Sequence, deque\n'), ((46542, 46565), 'functools.wraps', 'functools.wraps', (['target'], {}), '(target)\n', (46557, 46565), False, 'import functools\n'), ((1981, 1994), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1992, 1994), False, 'from collections import OrderedDict, Mapping, Sequence, deque\n'), ((2554, 2586), 'traceback.extract_stack', 'traceback.extract_stack', ([], {'limit': '(1)'}), '(limit=1)\n', (2577, 2586), False, 'import traceback\n'), ((14781, 14854), 'functools.partial', 'functools.partial', (['self.evaluate_node'], {'context': 'context', 'callback': 'callback'}), '(self.evaluate_node, context=context, callback=callback)\n', (14798, 14854), False, 'import functools\n'), ((46434, 46469), 'functools.partial', 'functools.partial', (['nodeop'], {}), '(nodeop, **kwargs)\n', (46451, 46469), False, 'import functools\n'), ((15961, 16024), 'functools.partial', 'functools.partial', (['cls.evaluate_node'], {'context': 'context'}), '(cls.evaluate_node, context=context, **kwargs)\n', (15978, 16024), False, 'import functools\n'), ((28968, 29021), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['key'], {'dims': 'self.shape', 'order': '"""C"""'}), "(key, dims=self.shape, order='C')\n", (28988, 29021), True, 'import numpy as np\n'), ((30292, 30309), 'numpy.asarray', 'np.asarray', (['[var]'], {}), '([var])\n', (30302, 30309), True, 'import numpy as np\n'), ((2379, 2391), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2389, 2391), False, 'import uuid\n'), ((13002, 13014), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13012, 13014), False, 'import uuid\n'), ((28852, 28871), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (28859, 28871), True, 'import numpy as np\n'), ((30618, 30636), 'numpy.prod', 'np.prod', (['var.shape'], {}), '(var.shape)\n', (30625, 30636), True, 'import numpy as np\n'), ((30640, 30658), 'numpy.prod', 'np.prod', (['out_shape'], {}), '(out_shape)\n', (30647, 30658), True, 'import numpy as np\n'), ((30896, 30911), 'numpy.squeeze', 'np.squeeze', (['var'], {}), '(var)\n', (30906, 30911), True, 'import numpy as np\n'), ((30961, 30979), 'numpy.prod', 'np.prod', (['var.shape'], {}), '(var.shape)\n', (30968, 30979), True, 'import numpy as np\n'), ((30983, 31001), 'numpy.prod', 'np.prod', (['out_shape'], {}), '(out_shape)\n', (30990, 31001), True, 'import numpy as np\n'), ((38171, 38188), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (38181, 38188), True, 'import numpy as np\n'), ((6037, 6050), 'numpy.int', 'np.int', (['shape'], {}), '(shape)\n', (6043, 6050), True, 'import numpy as np\n'), ((30554, 30569), 'numpy.asarray', 'np.asarray', (['var'], {}), '(var)\n', (30564, 30569), True, 'import numpy as np\n'), ((31938, 31979), 'numpy.asarray', 'np.asarray', (['[var[idx] for idx in indices]'], {}), '([var[idx] for idx in indices])\n', (31948, 31979), True, 'import numpy as np\n'), ((30828, 30855), 'numpy.expand_dims', 'np.expand_dims', (['var'], {'axis': 'i'}), '(var, axis=i)\n', (30842, 30855), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import torch
from ml.rl.evaluation.cpe import CpeEstimate
from ml.rl.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class WeightedSequentialDoublyRobustEstimator:
NUM_SUBSETS_FOR_CB_ESTIMATES = 25
CONFIDENCE_INTERVAL = 0.9
NUM_BOOTSTRAP_SAMPLES = 50
BOOTSTRAP_SAMPLE_PCT = 0.5
def __init__(self, gamma):
self.gamma = gamma
def estimate(
self,
edp: EvaluationDataPage,
num_j_steps,
whether_self_normalize_importance_weights,
) -> CpeEstimate:
# For details, visit https://arxiv.org/pdf/1604.00923.pdf Section 5, 7, 8
(
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
) = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
edp.mdp_id,
edp.action_mask.cpu().numpy(),
edp.logged_rewards.cpu().numpy().flatten(),
edp.logged_propensities.cpu().numpy().flatten(),
edp.model_propensities.cpu().numpy(),
edp.model_values.cpu().numpy(),
)
num_trajectories = actions.shape[0]
trajectory_length = actions.shape[1]
j_steps = [float("inf")]
if num_j_steps > 1:
j_steps.append(-1)
if num_j_steps > 2:
interval = trajectory_length // (num_j_steps - 1)
j_steps.extend([i * interval for i in range(1, num_j_steps - 1)])
target_propensity_for_logged_action = np.sum(
np.multiply(target_propensities, actions), axis=2
)
estimated_q_values_for_logged_action = np.sum(
np.multiply(estimated_q_values, actions), axis=2
)
estimated_state_values = np.sum(
np.multiply(target_propensities, estimated_q_values), axis=2
)
importance_weights = target_propensity_for_logged_action / logged_propensities
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([num_trajectories, 1]) * 1.0 / num_trajectories
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
discounts = np.logspace(
start=0, stop=trajectory_length - 1, num=trajectory_length, base=self.gamma
)
j_step_return_trajectories = []
for j_step in j_steps:
j_step_return_trajectories.append(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values_for_logged_action,
j_step,
)
)
j_step_return_trajectories = np.array(j_step_return_trajectories)
j_step_returns = np.sum(j_step_return_trajectories, axis=1)
if len(j_step_returns) == 1:
weighted_doubly_robust = j_step_returns[0]
weighted_doubly_robust_std_error = 0.0
else:
# break trajectories into several subsets to estimate confidence bounds
infinite_step_returns = []
num_subsets = int(
min(
num_trajectories / 2,
WeightedSequentialDoublyRobustEstimator.NUM_SUBSETS_FOR_CB_ESTIMATES,
)
)
interval = num_trajectories / num_subsets
for i in range(num_subsets):
trajectory_subset = np.arange(
int(i * interval), int((i + 1) * interval)
)
importance_weights = (
target_propensity_for_logged_action[trajectory_subset]
/ logged_propensities[trajectory_subset]
)
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([len(trajectory_subset), 1]) * 1.0 / len(trajectory_subset)
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
infinite_step_return = np.sum(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards[trajectory_subset],
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values[trajectory_subset],
estimated_q_values_for_logged_action[trajectory_subset],
float("inf"),
)
)
infinite_step_returns.append(infinite_step_return)
# Compute weighted_doubly_robust mean point estimate using all data
weighted_doubly_robust = self.compute_weighted_doubly_robust_point_estimate(
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
)
# Use bootstrapping to compute weighted_doubly_robust standard error
bootstrapped_means = []
sample_size = int(
WeightedSequentialDoublyRobustEstimator.BOOTSTRAP_SAMPLE_PCT
* num_subsets
)
for _ in range(
WeightedSequentialDoublyRobustEstimator.NUM_BOOTSTRAP_SAMPLES
):
random_idxs = np.random.choice(num_j_steps, sample_size, replace=False)
random_idxs.sort()
wdr_estimate = self.compute_weighted_doubly_robust_point_estimate(
j_steps=[j_steps[i] for i in random_idxs],
num_j_steps=sample_size,
j_step_returns=j_step_returns[random_idxs],
infinite_step_returns=infinite_step_returns,
j_step_return_trajectories=j_step_return_trajectories[random_idxs],
)
bootstrapped_means.append(wdr_estimate)
weighted_doubly_robust_std_error = np.std(bootstrapped_means)
episode_values = np.sum(np.multiply(rewards, discounts), axis=1)
denominator = np.nanmean(episode_values)
if abs(denominator) < 1e-6:
return CpeEstimate(
raw=0.0, normalized=0.0, raw_std_error=0.0, normalized_std_error=0.0
)
return CpeEstimate(
raw=weighted_doubly_robust,
normalized=weighted_doubly_robust / denominator,
raw_std_error=weighted_doubly_robust_std_error,
normalized_std_error=weighted_doubly_robust_std_error / denominator,
)
def compute_weighted_doubly_robust_point_estimate(
self,
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
):
low_bound, high_bound = WeightedSequentialDoublyRobustEstimator.confidence_bounds(
infinite_step_returns,
WeightedSequentialDoublyRobustEstimator.CONFIDENCE_INTERVAL,
)
# decompose error into bias + variance
j_step_bias = np.zeros([num_j_steps])
where_lower = np.where(j_step_returns < low_bound)[0]
j_step_bias[where_lower] = low_bound - j_step_returns[where_lower]
where_higher = np.where(j_step_returns > high_bound)[0]
j_step_bias[where_higher] = j_step_returns[where_higher] - high_bound
covariance = np.cov(j_step_return_trajectories)
error = covariance + j_step_bias.T * j_step_bias
# minimize mse error
constraint = {"type": "eq", "fun": lambda x: np.sum(x) - 1.0}
x = np.zeros([len(j_steps)])
res = sp.optimize.minimize(
mse_loss,
x,
args=error,
constraints=constraint,
bounds=[(0, 1) for _ in range(x.shape[0])],
)
x = np.array(res.x)
return float(np.dot(x, j_step_returns))
@staticmethod
def transform_to_equal_length_trajectories(
mdp_ids,
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
):
"""
Take in samples (action, rewards, propensities, etc.) and output lists
        of equal-length trajectories (episodes) according to terminals.
        As the raw trajectories are of various lengths, the shorter ones are
        padded at the end (with zeros, or with ones for the logged propensities).
"""
num_actions = len(target_propensities[0])
terminals = np.zeros(mdp_ids.shape[0])
for x in range(0, mdp_ids.shape[0]):
if x + 1 == mdp_ids.shape[0] or mdp_ids[x, 0] != mdp_ids[x + 1, 0]:
terminals[x] = 1
trajectories = []
episode_start = 0
episode_ends = np.nonzero(terminals)[0]
if len(terminals) - 1 not in episode_ends:
episode_ends = np.append(episode_ends, len(terminals) - 1)
for episode_end in episode_ends:
trajectories.append(np.arange(episode_start, episode_end + 1))
episode_start = episode_end + 1
action_trajectories = []
reward_trajectories = []
logged_propensity_trajectories = []
target_propensity_trajectories = []
Q_value_trajectories = []
for trajectory in trajectories:
action_trajectories.append(actions[trajectory])
reward_trajectories.append(rewards[trajectory])
logged_propensity_trajectories.append(logged_propensities[trajectory])
target_propensity_trajectories.append(target_propensities[trajectory])
Q_value_trajectories.append(estimated_q_values[trajectory])
def to_equal_length(x, fill_value):
x_equal_length = np.array(
list(itertools.zip_longest(*x, fillvalue=fill_value))
).swapaxes(0, 1)
return x_equal_length
action_trajectories = to_equal_length(
action_trajectories, np.zeros([num_actions])
)
reward_trajectories = to_equal_length(reward_trajectories, 0)
logged_propensity_trajectories = to_equal_length(
logged_propensity_trajectories, 1
)
target_propensity_trajectories = to_equal_length(
target_propensity_trajectories, np.zeros([num_actions])
)
Q_value_trajectories = to_equal_length(
Q_value_trajectories, np.zeros([num_actions])
)
return (
action_trajectories,
reward_trajectories,
logged_propensity_trajectories,
target_propensity_trajectories,
Q_value_trajectories,
)
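    # Illustrative sketch of the padding above (toy sizes, not real data): two episodes of
    # lengths 2 and 3 come back as arrays with a common trajectory length of 3, where the
    # shorter episode is padded with zero actions/rewards/Q-values and with logged
    # propensities of 1 so that the importance weights stay well defined.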
@staticmethod
def normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
):
if whether_self_normalize_importance_weights:
sum_importance_weights = np.sum(importance_weights, axis=0)
where_zeros = np.where(sum_importance_weights == 0.0)[0]
sum_importance_weights[where_zeros] = len(importance_weights)
importance_weights[:, where_zeros] = 1.0
importance_weights /= sum_importance_weights
return importance_weights
else:
importance_weights /= importance_weights.shape[0]
return importance_weights
@staticmethod
def calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values,
j_step,
):
trajectory_length = len(rewards[0])
num_trajectories = len(rewards)
j_step = int(min(j_step, trajectory_length - 1))
weighted_discounts = np.multiply(discounts, importance_weights)
weighted_discounts_one_earlier = np.multiply(
discounts, importance_weights_one_earlier
)
importance_sampled_cumulative_reward = np.sum(
np.multiply(weighted_discounts[:, : j_step + 1], rewards[:, : j_step + 1]),
axis=1,
)
if j_step < trajectory_length - 1:
direct_method_value = (
weighted_discounts_one_earlier[:, j_step + 1]
* estimated_state_values[:, j_step + 1]
)
else:
direct_method_value = np.zeros([num_trajectories])
control_variate = np.sum(
np.multiply(
weighted_discounts[:, : j_step + 1], estimated_q_values[:, : j_step + 1]
)
- np.multiply(
weighted_discounts_one_earlier[:, : j_step + 1],
estimated_state_values[:, : j_step + 1],
),
axis=1,
)
j_step_return = (
importance_sampled_cumulative_reward + direct_method_value - control_variate
)
return j_step_return
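    # Sketch of the quantity computed by calculate_step_return, written out from the code
    # above (g = discount factor, w_t = cumulative importance weight at step t):
    #
    #     IS  = sum_{t <= j} g^t * w_t * r_t
    #     DM  = g^(j+1) * w_j * V(s_{j+1})        (zero when j is the final step)
    #     CV  = sum_{t <= j} g^t * (w_t * Q(s_t, a_t) - w_{t-1} * V(s_t))
    #     j_step_return = IS + DM - CV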
@staticmethod
def confidence_bounds(x, confidence):
n = len(x)
m, se = np.mean(x), sp.stats.sem(x)
h = se * sp.stats.t._ppf((1 + confidence) / 2.0, n - 1)
return m - h, m + h
def mse_loss(x, error):
return np.dot(np.dot(x, error), x.T)
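# Illustrative usage sketch (argument values and the `edp` variable are assumptions):
#
#     estimator = WeightedSequentialDoublyRobustEstimator(gamma=0.99)
#     cpe = estimator.estimate(edp, num_j_steps=10,
#                              whether_self_normalize_importance_weights=True)
#     # cpe.raw is the weighted doubly robust estimate; cpe.normalized divides it by the
#     # mean logged discounted return.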
|
[
"logging.getLogger",
"scipy.stats.t._ppf",
"numpy.hstack",
"numpy.array",
"numpy.nanmean",
"scipy.stats.sem",
"numpy.cov",
"numpy.arange",
"numpy.mean",
"numpy.multiply",
"numpy.where",
"numpy.dot",
"ml.rl.evaluation.cpe.CpeEstimate",
"numpy.logspace",
"numpy.ones",
"numpy.random.choice",
"itertools.zip_longest",
"numpy.nonzero",
"numpy.std",
"numpy.sum",
"numpy.zeros",
"numpy.cumprod"
] |
[((305, 332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'import logging\n'), ((2219, 2257), 'numpy.cumprod', 'np.cumprod', (['importance_weights'], {'axis': '(1)'}), '(importance_weights, axis=1)\n', (2229, 2257), True, 'import numpy as np\n'), ((2604, 2675), 'numpy.hstack', 'np.hstack', (['[importance_weights_one_earlier, importance_weights[:, :-1]]'], {}), '([importance_weights_one_earlier, importance_weights[:, :-1]])\n', (2613, 2675), True, 'import numpy as np\n'), ((2719, 2811), 'numpy.logspace', 'np.logspace', ([], {'start': '(0)', 'stop': '(trajectory_length - 1)', 'num': 'trajectory_length', 'base': 'self.gamma'}), '(start=0, stop=trajectory_length - 1, num=trajectory_length,\n base=self.gamma)\n', (2730, 2811), True, 'import numpy as np\n'), ((3379, 3415), 'numpy.array', 'np.array', (['j_step_return_trajectories'], {}), '(j_step_return_trajectories)\n', (3387, 3415), True, 'import numpy as np\n'), ((3442, 3484), 'numpy.sum', 'np.sum', (['j_step_return_trajectories'], {'axis': '(1)'}), '(j_step_return_trajectories, axis=1)\n', (3448, 3484), True, 'import numpy as np\n'), ((7126, 7152), 'numpy.nanmean', 'np.nanmean', (['episode_values'], {}), '(episode_values)\n', (7136, 7152), True, 'import numpy as np\n'), ((7336, 7549), 'ml.rl.evaluation.cpe.CpeEstimate', 'CpeEstimate', ([], {'raw': 'weighted_doubly_robust', 'normalized': '(weighted_doubly_robust / denominator)', 'raw_std_error': 'weighted_doubly_robust_std_error', 'normalized_std_error': '(weighted_doubly_robust_std_error / denominator)'}), '(raw=weighted_doubly_robust, normalized=weighted_doubly_robust /\n denominator, raw_std_error=weighted_doubly_robust_std_error,\n normalized_std_error=weighted_doubly_robust_std_error / denominator)\n', (7347, 7549), False, 'from ml.rl.evaluation.cpe import CpeEstimate\n'), ((8085, 8108), 'numpy.zeros', 'np.zeros', (['[num_j_steps]'], {}), '([num_j_steps])\n', (8093, 8108), True, 'import numpy as np\n'), ((8410, 8444), 'numpy.cov', 'np.cov', (['j_step_return_trajectories'], {}), '(j_step_return_trajectories)\n', (8416, 8444), True, 'import numpy as np\n'), ((8851, 8866), 'numpy.array', 'np.array', (['res.x'], {}), '(res.x)\n', (8859, 8866), True, 'import numpy as np\n'), ((9494, 9520), 'numpy.zeros', 'np.zeros', (['mdp_ids.shape[0]'], {}), '(mdp_ids.shape[0])\n', (9502, 9520), True, 'import numpy as np\n'), ((12708, 12750), 'numpy.multiply', 'np.multiply', (['discounts', 'importance_weights'], {}), '(discounts, importance_weights)\n', (12719, 12750), True, 'import numpy as np\n'), ((12792, 12846), 'numpy.multiply', 'np.multiply', (['discounts', 'importance_weights_one_earlier'], {}), '(discounts, importance_weights_one_earlier)\n', (12803, 12846), True, 'import numpy as np\n'), ((14105, 14121), 'numpy.dot', 'np.dot', (['x', 'error'], {}), '(x, error)\n', (14111, 14121), True, 'import numpy as np\n'), ((1792, 1833), 'numpy.multiply', 'np.multiply', (['target_propensities', 'actions'], {}), '(target_propensities, actions)\n', (1803, 1833), True, 'import numpy as np\n'), ((1919, 1959), 'numpy.multiply', 'np.multiply', (['estimated_q_values', 'actions'], {}), '(estimated_q_values, actions)\n', (1930, 1959), True, 'import numpy as np\n'), ((2031, 2083), 'numpy.multiply', 'np.multiply', (['target_propensities', 'estimated_q_values'], {}), '(target_propensities, estimated_q_values)\n', (2042, 2083), True, 'import numpy as np\n'), ((7003, 7029), 'numpy.std', 'np.std', (['bootstrapped_means'], {}), '(bootstrapped_means)\n', (7009, 7029), True, 
'import numpy as np\n'), ((7063, 7094), 'numpy.multiply', 'np.multiply', (['rewards', 'discounts'], {}), '(rewards, discounts)\n', (7074, 7094), True, 'import numpy as np\n'), ((7208, 7293), 'ml.rl.evaluation.cpe.CpeEstimate', 'CpeEstimate', ([], {'raw': '(0.0)', 'normalized': '(0.0)', 'raw_std_error': '(0.0)', 'normalized_std_error': '(0.0)'}), '(raw=0.0, normalized=0.0, raw_std_error=0.0,\n normalized_std_error=0.0)\n', (7219, 7293), False, 'from ml.rl.evaluation.cpe import CpeEstimate\n'), ((8131, 8167), 'numpy.where', 'np.where', (['(j_step_returns < low_bound)'], {}), '(j_step_returns < low_bound)\n', (8139, 8167), True, 'import numpy as np\n'), ((8269, 8306), 'numpy.where', 'np.where', (['(j_step_returns > high_bound)'], {}), '(j_step_returns > high_bound)\n', (8277, 8306), True, 'import numpy as np\n'), ((8888, 8913), 'numpy.dot', 'np.dot', (['x', 'j_step_returns'], {}), '(x, j_step_returns)\n', (8894, 8913), True, 'import numpy as np\n'), ((9755, 9776), 'numpy.nonzero', 'np.nonzero', (['terminals'], {}), '(terminals)\n', (9765, 9776), True, 'import numpy as np\n'), ((10949, 10972), 'numpy.zeros', 'np.zeros', (['[num_actions]'], {}), '([num_actions])\n', (10957, 10972), True, 'import numpy as np\n'), ((11269, 11292), 'numpy.zeros', 'np.zeros', (['[num_actions]'], {}), '([num_actions])\n', (11277, 11292), True, 'import numpy as np\n'), ((11385, 11408), 'numpy.zeros', 'np.zeros', (['[num_actions]'], {}), '([num_actions])\n', (11393, 11408), True, 'import numpy as np\n'), ((11860, 11894), 'numpy.sum', 'np.sum', (['importance_weights'], {'axis': '(0)'}), '(importance_weights, axis=0)\n', (11866, 11894), True, 'import numpy as np\n'), ((12937, 13009), 'numpy.multiply', 'np.multiply', (['weighted_discounts[:, :j_step + 1]', 'rewards[:, :j_step + 1]'], {}), '(weighted_discounts[:, :j_step + 1], rewards[:, :j_step + 1])\n', (12948, 13009), True, 'import numpy as np\n'), ((13303, 13331), 'numpy.zeros', 'np.zeros', (['[num_trajectories]'], {}), '([num_trajectories])\n', (13311, 13331), True, 'import numpy as np\n'), ((13941, 13951), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (13948, 13951), True, 'import numpy as np\n'), ((13953, 13968), 'scipy.stats.sem', 'sp.stats.sem', (['x'], {}), '(x)\n', (13965, 13968), True, 'import scipy as sp\n'), ((13986, 14032), 'scipy.stats.t._ppf', 'sp.stats.t._ppf', (['((1 + confidence) / 2.0)', '(n - 1)'], {}), '((1 + confidence) / 2.0, n - 1)\n', (14001, 14032), True, 'import scipy as sp\n'), ((2497, 2527), 'numpy.ones', 'np.ones', (['[num_trajectories, 1]'], {}), '([num_trajectories, 1])\n', (2504, 2527), True, 'import numpy as np\n'), ((4435, 4473), 'numpy.cumprod', 'np.cumprod', (['importance_weights'], {'axis': '(1)'}), '(importance_weights, axis=1)\n', (4445, 4473), True, 'import numpy as np\n'), ((4887, 4958), 'numpy.hstack', 'np.hstack', (['[importance_weights_one_earlier, importance_weights[:, :-1]]'], {}), '([importance_weights_one_earlier, importance_weights[:, :-1]])\n', (4896, 4958), True, 'import numpy as np\n'), ((6381, 6438), 'numpy.random.choice', 'np.random.choice', (['num_j_steps', 'sample_size'], {'replace': '(False)'}), '(num_j_steps, sample_size, replace=False)\n', (6397, 6438), True, 'import numpy as np\n'), ((9976, 10017), 'numpy.arange', 'np.arange', (['episode_start', '(episode_end + 1)'], {}), '(episode_start, episode_end + 1)\n', (9985, 10017), True, 'import numpy as np\n'), ((11921, 11960), 'numpy.where', 'np.where', (['(sum_importance_weights == 0.0)'], {}), '(sum_importance_weights == 0.0)\n', (11929, 11960), True, 'import 
numpy as np\n'), ((13379, 13467), 'numpy.multiply', 'np.multiply', (['weighted_discounts[:, :j_step + 1]', 'estimated_q_values[:, :j_step + 1]'], {}), '(weighted_discounts[:, :j_step + 1], estimated_q_values[:, :\n j_step + 1])\n', (13390, 13467), True, 'import numpy as np\n'), ((13509, 13612), 'numpy.multiply', 'np.multiply', (['weighted_discounts_one_earlier[:, :j_step + 1]', 'estimated_state_values[:, :j_step + 1]'], {}), '(weighted_discounts_one_earlier[:, :j_step + 1],\n estimated_state_values[:, :j_step + 1])\n', (13520, 13612), True, 'import numpy as np\n'), ((8585, 8594), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (8591, 8594), True, 'import numpy as np\n'), ((10756, 10803), 'itertools.zip_longest', 'itertools.zip_longest', (['*x'], {'fillvalue': 'fill_value'}), '(*x, fillvalue=fill_value)\n', (10777, 10803), False, 'import itertools\n')]
|
import numpy as np
import sklearn
import pandas as pd
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline
class SelectHierarchicalClustering(SelectorMixin, BaseEstimator):
"""
A transformer that clusters the features in X according to dist_matrix, and selects a feature from each cluster with
the highest chi2 score of X[feature] versus y
"""
def __init__(self, dist_matrix=None, threshold=1):
self.dist_matrix = dist_matrix
self.threshold = threshold
def _phi_coef(self, x, y):
"""
Calculates phi coefficient between features
Parameters
----------
x - feature x column
y - feature y column
Returns
----------
phi coefficient value
"""
confusion_matrix = pd.crosstab(x, y)
chi2 = chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
corr = np.sqrt(chi2 / n)
return corr
def _calc_dist_matrix(self, X):
"""
        Calculate the distance matrix between every pair of features in X; each entry is 1 - phi coefficient
"""
X_df = pd.DataFrame.sparse.from_spmatrix(X)
X_corr_mat = X_df.corr(method=self._phi_coef)
feature_corr_dist_matrix = 1 - X_corr_mat
feature_corr_dist_matrix_condensed = ssd.squareform(feature_corr_dist_matrix)
self.dist_matrix = feature_corr_dist_matrix_condensed
def _corr_linkage(self, method='average'):
linkage = hierarchy.linkage(self.dist_matrix, method=method)
return linkage
def _hierarchical_clustering(self, linkage):
"""
Perform hierarchical clustering
Parameters
----------
        linkage - linkage dendrogram created by hierarchy.linkage(self.dist_matrix, method=method)
Returns
----------
a list of lists, each list represents a cluster and contains the indexes of features belonging
to the cluster
"""
# array of len(X) - array[i] is the cluster number to which sample i belongs
cluster_ids = hierarchy.fcluster(linkage, self.threshold, criterion='distance')
cluster_id_to_feature_idx = {}
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_idx.setdefault(cluster_id, []).append(idx)
return list(cluster_id_to_feature_idx.values())
def fit(self, X, y):
"""
Clusters the features (X columns) using self.dist_matrix and self.threshold, and selects a feature from each
cluster with the highest chi2 score versus y.
The attribute self.n_features_ represents the number of features selected (=number of clusters)
The attribute self.selected_features_ is a list of indexes that correspond to the selected features
"""
        if self.dist_matrix is None:
self._calc_dist_matrix(X)
linkage = self._corr_linkage()
clusters = self._hierarchical_clustering(linkage)
chi2_vals, __ = sklearn.feature_selection.chi2(X, y)
chi2_vals = pd.Series(chi2_vals)
# fitted attributes
self.n_features_ = X.shape[1]
self.selected_features_ = [chi2_vals[cluster].idxmax() for cluster in clusters]
self.clusters_ = clusters
print(f'threshold={self.threshold:.2f}, selected_features={len(self.selected_features_)}')
return self
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
----------
mask - boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
# Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing
# underscore) and otherwise raises a NotFittedError with the given message.
sklearn.utils.validation.check_is_fitted(self)
mask = np.zeros((self.n_features_, ), dtype=bool)
mask[self.selected_features_] = 1
return mask
def get_fs_pipeline(k, threshold, random_state=0):
"""
Creates feature selection pipeline
Parameters
----------
k - the k parameter for the SelectKBest features function
    threshold - clustering threshold for the hierarchical clustering
    random_state - random state for the RandomForestClassifier. Default value: 0
Returns
----------
pipeline - feature selection pipeline
"""
pipeline = Pipeline(steps=[('vectorize', CountVectorizer(lowercase=False, binary=True)),
('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
('cluster', SelectHierarchicalClustering(threshold=threshold)),
('rf', RandomForestClassifier(random_state=random_state))])
return pipeline
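# Illustrative usage sketch (data names and parameter values are assumptions):
#
#     pipeline = get_fs_pipeline(k=1000, threshold=0.5)
#     pipeline.fit(train_texts, train_labels)      # CountVectorizer expects raw documents
#     predictions = pipeline.predict(test_texts)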
|
[
"pandas.Series",
"sklearn.utils.validation.check_is_fitted",
"scipy.spatial.distance.squareform",
"numpy.sqrt",
"scipy.stats.chi2_contingency",
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.crosstab",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.feature_selection.SelectKBest",
"numpy.zeros",
"scipy.cluster.hierarchy.linkage",
"pandas.DataFrame.sparse.from_spmatrix",
"sklearn.feature_selection.chi2",
"scipy.cluster.hierarchy.fcluster"
] |
[((1112, 1129), 'pandas.crosstab', 'pd.crosstab', (['x', 'y'], {}), '(x, y)\n', (1123, 1129), True, 'import pandas as pd\n'), ((1239, 1256), 'numpy.sqrt', 'np.sqrt', (['(chi2 / n)'], {}), '(chi2 / n)\n', (1246, 1256), True, 'import numpy as np\n'), ((1453, 1489), 'pandas.DataFrame.sparse.from_spmatrix', 'pd.DataFrame.sparse.from_spmatrix', (['X'], {}), '(X)\n', (1486, 1489), True, 'import pandas as pd\n'), ((1641, 1681), 'scipy.spatial.distance.squareform', 'ssd.squareform', (['feature_corr_dist_matrix'], {}), '(feature_corr_dist_matrix)\n', (1655, 1681), True, 'import scipy.spatial.distance as ssd\n'), ((1812, 1862), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['self.dist_matrix'], {'method': 'method'}), '(self.dist_matrix, method=method)\n', (1829, 1862), False, 'from scipy.cluster import hierarchy\n'), ((2422, 2487), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['linkage', 'self.threshold'], {'criterion': '"""distance"""'}), "(linkage, self.threshold, criterion='distance')\n", (2440, 2487), False, 'from scipy.cluster import hierarchy\n'), ((3345, 3381), 'sklearn.feature_selection.chi2', 'sklearn.feature_selection.chi2', (['X', 'y'], {}), '(X, y)\n', (3375, 3381), False, 'import sklearn\n'), ((3402, 3422), 'pandas.Series', 'pd.Series', (['chi2_vals'], {}), '(chi2_vals)\n', (3411, 3422), True, 'import pandas as pd\n'), ((4260, 4306), 'sklearn.utils.validation.check_is_fitted', 'sklearn.utils.validation.check_is_fitted', (['self'], {}), '(self)\n', (4300, 4306), False, 'import sklearn\n'), ((4323, 4364), 'numpy.zeros', 'np.zeros', (['(self.n_features_,)'], {'dtype': 'bool'}), '((self.n_features_,), dtype=bool)\n', (4331, 4364), True, 'import numpy as np\n'), ((1145, 1179), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1161, 1179), False, 'from scipy.stats import chi2_contingency\n'), ((4896, 4941), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'lowercase': '(False)', 'binary': '(True)'}), '(lowercase=False, binary=True)\n', (4911, 4941), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4986, 5045), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'sklearn.feature_selection.chi2', 'k': 'k'}), '(score_func=sklearn.feature_selection.chi2, k=k)\n', (4997, 5045), False, 'from sklearn.feature_selection import SelectKBest, SelectorMixin\n'), ((5181, 5230), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (5203, 5230), False, 'from sklearn.ensemble import RandomForestClassifier\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope
def from_arrow_table(
table,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
""" "
Convert arrow table to a torcharrow dataframe.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(table, pa.Table)
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
chunked_array = table.column(f.name)
pydata = chunked_array.to_pylist()
res[f.name] = scope.Column(pydata, f.dtype)
return scope.DataFrame(res, device=device)
else:
res = {}
table = table.select(columns) if columns is not None else table
for n in table.column_names:
chunked_array = table.column(n)
pydata = chunked_array.to_pylist()
res[n] = scope.Column(
pydata,
dtype=_arrowtype_to_dtype(
table.schema.field(n).type, table.column(n).null_count > 0
),
)
return scope.DataFrame(res, device=device)
def from_pandas_dataframe(
df,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
"""
Convert pandas dataframe to torcharrow dataframe (drops indices).
Parameters
----------
df : Pandas dataframe
dtype : dtype, default None
Data type to force, if None will automatically infer.
columns : array-like
List of column names to extract from df.
scope : Scope or None
Scope to use, or None for default scope.
device : str or ""
Device to use, or default if blank.
Examples
--------
>>> import pandas as pd
>>> import torcharrow as ta
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> gdf = ta.from_pandas_dataframe(pdf)
>>> gdf
index a b
------- --- ---
0 0 0.1
1 1 0.2
2 2
3 3 0.3
dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
"""
scope = scope or Scope.default
device = device or scope.device
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
# this shows that Column shoud also construct Dataframes!
res[f.name] = from_pandas_series(
pd.Series(df[f.name]), f.dtype, scope=scope
)
return scope.Frame(res, dtype=dtype, device=device)
else:
res = {}
for n in df.columns:
if columns is None or n in columns:
res[n] = from_pandas_series(pd.Series(df[n]), scope=scope)
return scope.Frame(res, device=device)
def from_arrow_array(array, dtype=None, scope=None, device=""):
""" "
Convert arrow array to a torcharrow column.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(array, pa.Array)
pydata = _arrow_scalar_to_py(array)
if dtype is not None:
assert not dt.is_struct(dtype)
return scope.Column(pydata, dtype, device=device)
else:
return scope.Column(
pydata,
dtype=_arrowtype_to_dtype(array.type, array.null_count > 0),
device=device,
)
def from_pandas_series(series, dtype=None, scope=None, device=""):
""" "
Convert pandas series array to a torcharrow column (drops indices).
"""
scope = scope or Scope.default
device = device or scope.device
return from_numpy(series.to_numpy(), dtype, scope, device)
def from_numpy(array, dtype, scope=None, device=""):
"""
Convert 1dim numpy array to a torcharrow column (zero copy).
"""
scope = scope or Scope.default
device = device or scope.device
if isinstance(array, ma.core.MaskedArray) and array.ndim == 1:
return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
elif isinstance(array, np.ndarray) and array.ndim == 1:
return _from_numpy_nd(array, dtype, scope, device)
else:
raise TypeError(f"cannot convert numpy array of type {array.dtype}")
def _is_not_str(s):
return not isinstance(s, str)
def _from_numpy_ma(data, mask, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype).with_null()
else:
assert dt.is_primitive_type(dtype)
assert dtype == dt.typeof_np_dtype(data.dtype).with_null()
# TODO if not, adopt the type or?
# Something like ma.array
# np.array([np.nan, np.nan, 3.]).astype(np.int64),
# mask = np.isnan([np.nan, np.nan, 3.]))
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
assert not np.all(np.isnan(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype) or dtype == "object":
assert np.all(np.vectorize(_is_not_str)(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError(f"cannot convert masked numpy array of type {data.dtype}")
def _from_numpy_nd(data, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype)
if dtype is None:
dtype = dt.string
else:
assert dt.is_primitive(dtype)
        # TODO Check why the following assert isn't the case
# assert dtype == dt.typeof_np_dtype(data.dtype)
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
mask = np.isnan(data)
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype):
mask = np.vectorize(_is_not_str)(data)
if np.any(mask):
dtype = dtype.with_null()
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError("can not convert numpy array of type {data.dtype,}")
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
def _arrow_scalar_to_py(array):
for i in array:
yield i.as_py()
def _pandatype_to_dtype(t, nullable):
return dt.typeof_nptype(t, nullable)
def _arrowtype_to_dtype(t, nullable):
if pa.types.is_boolean(t):
return dt.Boolean(nullable)
if pa.types.is_int8(t):
return dt.Int8(nullable)
if pa.types.is_int16(t):
return dt.Int16(nullable)
if pa.types.is_int32(t):
return dt.Int32(nullable)
if pa.types.is_int64(t):
return dt.Int64(nullable)
if pa.types.is_float32(t):
return dt.Float32(nullable)
if pa.types.is_float64(t):
return dt.Float64(nullable)
if pa.types.is_list(t):
return List(t.value_type, nullable)
if pa.types.is_struct(t):
return _pandatype_to_dtype(t.to_pandas_dtype(), True)
if pa.types.is_null(t):
return dt.Void()
if pa.types.is_string(t):
return dt.String(nullable)
if pa.types.is_map(t):
return dt.Map(t.item_type, t.key_type, nullable)
raise NotImplementedError("unsupported case")
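# Illustrative usage sketch (toy data, default scope assumed):
#
#     import pyarrow as pa
#     col = from_arrow_array(pa.array([1, 2, None]))
#     # null_count > 0, so the resulting torcharrow column dtype is Int64(nullable=True)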
|
[
"torcharrow.dtypes.is_string",
"pyarrow.types.is_float32",
"torcharrow.dtypes.Map",
"torcharrow.dtypes.Int8",
"torcharrow.dtypes.is_boolean_or_numerical",
"torcharrow.dtypes.String",
"pyarrow.types.is_float64",
"pyarrow.types.is_map",
"torcharrow.dtypes.is_struct",
"pyarrow.types.is_boolean",
"pyarrow.types.is_struct",
"torcharrow.dtypes.typeof_nptype",
"pyarrow.types.is_string",
"torcharrow.dtypes.Int32",
"torcharrow.dtypes.is_primitive_type",
"torcharrow.dtypes.Int16",
"torcharrow.dtypes.typeof_np_dtype",
"numpy.ma.array",
"pyarrow.types.is_int8",
"pyarrow.types.is_int64",
"torcharrow.dtypes.Boolean",
"pyarrow.types.is_list",
"numpy.any",
"numpy.isnan",
"pyarrow.types.is_null",
"torcharrow.dtypes.Float64",
"pyarrow.types.is_int16",
"torcharrow.dtypes.Void",
"numpy.vectorize",
"typing.cast",
"pandas.Series",
"torcharrow.dtypes.Int64",
"typing.List",
"torcharrow.dtypes.Float32",
"pyarrow.types.is_int32",
"torcharrow.dtypes.is_primitive"
] |
[((5363, 5396), 'torcharrow.dtypes.is_boolean_or_numerical', 'dt.is_boolean_or_numerical', (['dtype'], {}), '(dtype)\n', (5389, 5396), True, 'import torcharrow.dtypes as dt\n'), ((6246, 6279), 'torcharrow.dtypes.is_boolean_or_numerical', 'dt.is_boolean_or_numerical', (['dtype'], {}), '(dtype)\n', (6272, 6279), True, 'import torcharrow.dtypes as dt\n'), ((7101, 7130), 'torcharrow.dtypes.typeof_nptype', 'dt.typeof_nptype', (['t', 'nullable'], {}), '(t, nullable)\n', (7117, 7130), True, 'import torcharrow.dtypes as dt\n'), ((7178, 7200), 'pyarrow.types.is_boolean', 'pa.types.is_boolean', (['t'], {}), '(t)\n', (7197, 7200), True, 'import pyarrow as pa\n'), ((7245, 7264), 'pyarrow.types.is_int8', 'pa.types.is_int8', (['t'], {}), '(t)\n', (7261, 7264), True, 'import pyarrow as pa\n'), ((7306, 7326), 'pyarrow.types.is_int16', 'pa.types.is_int16', (['t'], {}), '(t)\n', (7323, 7326), True, 'import pyarrow as pa\n'), ((7369, 7389), 'pyarrow.types.is_int32', 'pa.types.is_int32', (['t'], {}), '(t)\n', (7386, 7389), True, 'import pyarrow as pa\n'), ((7432, 7452), 'pyarrow.types.is_int64', 'pa.types.is_int64', (['t'], {}), '(t)\n', (7449, 7452), True, 'import pyarrow as pa\n'), ((7495, 7517), 'pyarrow.types.is_float32', 'pa.types.is_float32', (['t'], {}), '(t)\n', (7514, 7517), True, 'import pyarrow as pa\n'), ((7562, 7584), 'pyarrow.types.is_float64', 'pa.types.is_float64', (['t'], {}), '(t)\n', (7581, 7584), True, 'import pyarrow as pa\n'), ((7629, 7648), 'pyarrow.types.is_list', 'pa.types.is_list', (['t'], {}), '(t)\n', (7645, 7648), True, 'import pyarrow as pa\n'), ((7701, 7722), 'pyarrow.types.is_struct', 'pa.types.is_struct', (['t'], {}), '(t)\n', (7719, 7722), True, 'import pyarrow as pa\n'), ((7793, 7812), 'pyarrow.types.is_null', 'pa.types.is_null', (['t'], {}), '(t)\n', (7809, 7812), True, 'import pyarrow as pa\n'), ((7846, 7867), 'pyarrow.types.is_string', 'pa.types.is_string', (['t'], {}), '(t)\n', (7864, 7867), True, 'import pyarrow as pa\n'), ((7911, 7929), 'pyarrow.types.is_map', 'pa.types.is_map', (['t'], {}), '(t)\n', (7926, 7929), True, 'import pyarrow as pa\n'), ((824, 843), 'torcharrow.dtypes.is_struct', 'dt.is_struct', (['dtype'], {}), '(dtype)\n', (836, 843), True, 'import torcharrow.dtypes as dt\n'), ((860, 882), 'typing.cast', 'cast', (['dt.Struct', 'dtype'], {}), '(dt.Struct, dtype)\n', (864, 882), False, 'from typing import List, Optional, cast\n'), ((2776, 2795), 'torcharrow.dtypes.is_struct', 'dt.is_struct', (['dtype'], {}), '(dtype)\n', (2788, 2795), True, 'import torcharrow.dtypes as dt\n'), ((2812, 2834), 'typing.cast', 'cast', (['dt.Struct', 'dtype'], {}), '(dt.Struct, dtype)\n', (2816, 2834), False, 'from typing import List, Optional, cast\n'), ((5028, 5055), 'torcharrow.dtypes.is_primitive_type', 'dt.is_primitive_type', (['dtype'], {}), '(dtype)\n', (5048, 5055), True, 'import torcharrow.dtypes as dt\n'), ((5938, 5968), 'torcharrow.dtypes.typeof_np_dtype', 'dt.typeof_np_dtype', (['data.dtype'], {}), '(data.dtype)\n', (5956, 5968), True, 'import torcharrow.dtypes as dt\n'), ((6050, 6072), 'torcharrow.dtypes.is_primitive', 'dt.is_primitive', (['dtype'], {}), '(dtype)\n', (6065, 6072), True, 'import torcharrow.dtypes as dt\n'), ((6296, 6310), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (6304, 6310), True, 'import numpy as np\n'), ((6383, 6402), 'torcharrow.dtypes.is_string', 'dt.is_string', (['dtype'], {}), '(dtype)\n', (6395, 6402), True, 'import torcharrow.dtypes as dt\n'), ((7217, 7237), 'torcharrow.dtypes.Boolean', 'dt.Boolean', (['nullable'], {}), 
'(nullable)\n', (7227, 7237), True, 'import torcharrow.dtypes as dt\n'), ((7281, 7298), 'torcharrow.dtypes.Int8', 'dt.Int8', (['nullable'], {}), '(nullable)\n', (7288, 7298), True, 'import torcharrow.dtypes as dt\n'), ((7343, 7361), 'torcharrow.dtypes.Int16', 'dt.Int16', (['nullable'], {}), '(nullable)\n', (7351, 7361), True, 'import torcharrow.dtypes as dt\n'), ((7406, 7424), 'torcharrow.dtypes.Int32', 'dt.Int32', (['nullable'], {}), '(nullable)\n', (7414, 7424), True, 'import torcharrow.dtypes as dt\n'), ((7469, 7487), 'torcharrow.dtypes.Int64', 'dt.Int64', (['nullable'], {}), '(nullable)\n', (7477, 7487), True, 'import torcharrow.dtypes as dt\n'), ((7534, 7554), 'torcharrow.dtypes.Float32', 'dt.Float32', (['nullable'], {}), '(nullable)\n', (7544, 7554), True, 'import torcharrow.dtypes as dt\n'), ((7601, 7621), 'torcharrow.dtypes.Float64', 'dt.Float64', (['nullable'], {}), '(nullable)\n', (7611, 7621), True, 'import torcharrow.dtypes as dt\n'), ((7665, 7693), 'typing.List', 'List', (['t.value_type', 'nullable'], {}), '(t.value_type, nullable)\n', (7669, 7693), False, 'from typing import List, Optional, cast\n'), ((7829, 7838), 'torcharrow.dtypes.Void', 'dt.Void', ([], {}), '()\n', (7836, 7838), True, 'import torcharrow.dtypes as dt\n'), ((7884, 7903), 'torcharrow.dtypes.String', 'dt.String', (['nullable'], {}), '(nullable)\n', (7893, 7903), True, 'import torcharrow.dtypes as dt\n'), ((7946, 7987), 'torcharrow.dtypes.Map', 'dt.Map', (['t.item_type', 't.key_type', 'nullable'], {}), '(t.item_type, t.key_type, nullable)\n', (7952, 7987), True, 'import torcharrow.dtypes as dt\n'), ((3686, 3705), 'torcharrow.dtypes.is_struct', 'dt.is_struct', (['dtype'], {}), '(dtype)\n', (3698, 3705), True, 'import torcharrow.dtypes as dt\n'), ((5542, 5561), 'torcharrow.dtypes.is_string', 'dt.is_string', (['dtype'], {}), '(dtype)\n', (5554, 5561), True, 'import torcharrow.dtypes as dt\n'), ((6462, 6474), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (6468, 6474), True, 'import numpy as np\n'), ((3015, 3036), 'pandas.Series', 'pd.Series', (['df[f.name]'], {}), '(df[f.name])\n', (3024, 3036), True, 'import pandas as pd\n'), ((4960, 4990), 'torcharrow.dtypes.typeof_np_dtype', 'dt.typeof_np_dtype', (['data.dtype'], {}), '(data.dtype)\n', (4978, 4990), True, 'import torcharrow.dtypes as dt\n'), ((6419, 6444), 'numpy.vectorize', 'np.vectorize', (['_is_not_str'], {}), '(_is_not_str)\n', (6431, 6444), True, 'import numpy as np\n'), ((3281, 3297), 'pandas.Series', 'pd.Series', (['df[n]'], {}), '(df[n])\n', (3290, 3297), True, 'import pandas as pd\n'), ((5080, 5110), 'torcharrow.dtypes.typeof_np_dtype', 'dt.typeof_np_dtype', (['data.dtype'], {}), '(data.dtype)\n', (5098, 5110), True, 'import torcharrow.dtypes as dt\n'), ((5606, 5631), 'numpy.vectorize', 'np.vectorize', (['_is_not_str'], {}), '(_is_not_str)\n', (5618, 5631), True, 'import numpy as np\n'), ((5433, 5453), 'numpy.ma.array', 'ma.array', (['data', 'mask'], {}), '(data, mask)\n', (5441, 5453), True, 'import numpy.ma as ma\n'), ((5632, 5652), 'numpy.ma.array', 'ma.array', (['data', 'mask'], {}), '(data, mask)\n', (5640, 5652), True, 'import numpy.ma as ma\n')]
|
import argparse
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--mnist', action='store_true', default=False,
help='open mnist result')
args = parser.parse_args()
def subplot(subplot, data_first, data_second, title):
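    # Plot the mean accuracy over runs for each task, shaded with a band of
    # +/- half a standard deviation around the mean.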
plt.subplot(subplot)
if args.mnist:
x = np.arange(0,100)
else:
x = np.arange(0,500)
y_first = np.mean(data_first, axis=0)
y_second = np.mean(data_second, axis=0)
y_first_err = np.std(data_first, axis=0) / 2.
y_second_err = np.std(data_second, axis=0) / 2.
plt.fill_between(x, y_first - y_first_err, y_first + y_first_err, color='m', alpha=0.3)
plt.fill_between(x, y_second - y_second_err, y_second + y_second_err, color='c', alpha=0.3)
plt.plot(x, y_first, color='r', label='Task A')
plt.plot(x, y_second, color='g', label='Task B (transfer learning)')
plt.legend(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)
axes = plt.gca()
if args.mnist:
axes.set_xlim([0, 100])
axes.set_ylim([0, 1.2])
else:
axes.set_xlim([0, 500])
axes.set_ylim([0, 0.6])
plt.title(title, fontsize=20, y = 0.9)
plt.ylabel('Accuracy',fontsize=15)
plt.xlabel('Generations',fontsize=15)
plt.grid(True)
try:
if args.mnist:
        f = open(os.path.join('./result/result_mnist.pickle'), 'rb')  # pickle files must be opened in binary mode
result = pickle.load(f)
f.close()
pathnet_first = []
pathnet_second = []
for res in result:
pathnet_first.append(res[2])
pathnet_second.append(res[3])
subplot('111', pathnet_first, pathnet_second,'MNIST')
plt.show()
else:
        f = open(os.path.join('./result/result_cifar_svhn.pickle'), 'rb')  # pickle files must be opened in binary mode
result = pickle.load(f)
f.close()
cifar_first = []
cifar_second = []
svhn_first = []
svhn_second = []
for res in result:
if res[0] == 'pathnet_cifar_first':
cifar_first.append(res[2])
svhn_second.append(res[3])
else:
svhn_first.append(res[2])
cifar_second.append(res[3])
subplot('211', cifar_first, cifar_second,'CIFAR-10')
subplot('212', svhn_first, svhn_second,'cSVHN')
plt.show()
except IOError:
print("Result file does not exist")
|
[
"numpy.mean",
"matplotlib.pyplot.grid",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pickle.load",
"matplotlib.pyplot.style.use",
"os.path.join",
"matplotlib.pyplot.fill_between",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((91, 114), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (104, 114), True, 'import matplotlib.pyplot as plt\n'), ((125, 185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (148, 185), False, 'import argparse\n'), ((386, 406), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot'], {}), '(subplot)\n', (397, 406), True, 'import matplotlib.pyplot as plt\n'), ((508, 535), 'numpy.mean', 'np.mean', (['data_first'], {'axis': '(0)'}), '(data_first, axis=0)\n', (515, 535), True, 'import numpy as np\n'), ((551, 579), 'numpy.mean', 'np.mean', (['data_second'], {'axis': '(0)'}), '(data_second, axis=0)\n', (558, 579), True, 'import numpy as np\n'), ((692, 783), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y_first - y_first_err)', '(y_first + y_first_err)'], {'color': '"""m"""', 'alpha': '(0.3)'}), "(x, y_first - y_first_err, y_first + y_first_err, color='m',\n alpha=0.3)\n", (708, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 880), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y_second - y_second_err)', '(y_second + y_second_err)'], {'color': '"""c"""', 'alpha': '(0.3)'}), "(x, y_second - y_second_err, y_second + y_second_err, color\n ='c', alpha=0.3)\n", (800, 880), True, 'import matplotlib.pyplot as plt\n'), ((880, 927), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_first'], {'color': '"""r"""', 'label': '"""Task A"""'}), "(x, y_first, color='r', label='Task A')\n", (888, 927), True, 'import matplotlib.pyplot as plt\n'), ((932, 1000), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_second'], {'color': '"""g"""', 'label': '"""Task B (transfer learning)"""'}), "(x, y_second, color='g', label='Task B (transfer learning)')\n", (940, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1070), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.8, 0.3)', 'loc': '(2)', 'ncol': '(1)', 'fontsize': '(15)'}), '(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)\n', (1015, 1070), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1091), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1089, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1290), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)', 'y': '(0.9)'}), '(title, fontsize=20, y=0.9)\n', (1263, 1290), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1332), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(15)'}), "('Accuracy', fontsize=15)\n", (1307, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generations"""'], {'fontsize': '(15)'}), "('Generations', fontsize=15)\n", (1346, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1392), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1386, 1392), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2403), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2401, 2403), True, 'import matplotlib.pyplot as plt\n'), ((438, 455), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (447, 455), True, 'import numpy as np\n'), ((477, 494), 'numpy.arange', 'np.arange', (['(0)', '(500)'], {}), '(0, 500)\n', (486, 494), True, 'import numpy as np\n'), ((598, 624), 'numpy.std', 'np.std', (['data_first'], {'axis': '(0)'}), '(data_first, axis=0)\n', (604, 624), True, 'import numpy as np\n'), ((649, 676), 'numpy.std', 
'np.std', (['data_second'], {'axis': '(0)'}), '(data_second, axis=0)\n', (655, 676), True, 'import numpy as np\n'), ((1500, 1514), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1511, 1514), False, 'import pickle\n'), ((1770, 1780), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1778, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1892), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1889, 1892), False, 'import pickle\n'), ((1437, 1481), 'os.path.join', 'os.path.join', (['"""./result/result_mnist.pickle"""'], {}), "('./result/result_mnist.pickle')\n", (1449, 1481), False, 'import os\n'), ((1810, 1859), 'os.path.join', 'os.path.join', (['"""./result/result_cifar_svhn.pickle"""'], {}), "('./result/result_cifar_svhn.pickle')\n", (1822, 1859), False, 'import os\n')]
|
import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import scipy.stats as sts
import xgboost as xgb
from xiter import *
import pandas as pd
import argparse
from datetime import datetime
def timer(start_time=None):
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
parser=argparse.ArgumentParser()
parser.add_argument("--end",type=float,default=100000.,help='end ratio')
parser.add_argument("--save",type=str,default="test_",help='save name')
parser.add_argument("--network",type=str,default="rnn",help='network name on symbols/')
parser.add_argument("--right",type=str,default="/scratch/yjdata/gluon100_img",help='which train sample (qq,gg,zq,zg)')
parser.add_argument("--pt",type=int,default=200,help='pt range pt~pt*1.1')
parser.add_argument("--ptmin",type=float,default=0.,help='pt range pt~pt*1.1')
parser.add_argument("--ptmax",type=float,default=2.,help='pt range pt~pt*1.1')
parser.add_argument("--epochs",type=int,default=10,help='num epochs')
parser.add_argument("--batch_size",type=int,default=100000,help='batch_size')
parser.add_argument("--loss",type=str,default="categorical_crossentropy",help='network name on symbols/')
parser.add_argument("--gpu",type=int,default=0,help='gpu number')
parser.add_argument("--isz",type=int,default=0,help='0 or z or not')
parser.add_argument("--eta",type=float,default=0.,help='end ratio')
parser.add_argument("--etabin",type=float,default=1,help='end ratio')
parser.add_argument("--unscale",type=int,default=0,help='end ratio')
args=parser.parse_args()
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
batch_size=args.batch_size
params = {
'max_depth': sts.randint(1,6),
'learning_rate': sts.uniform(0.0010,0.500),
'n_estimators': sts.randint(10,101)
}
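# RandomizedSearchCV samples each of these scipy.stats distributions once per
# candidate, so the search space is drawn at random rather than enumerated.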
model=xgb.XGBClassifier(objective='binary:logistic',tree_method="gpu_hist")
if(args.isz==1):
if(args.etabin==1):
loaded=np.load("zqmixed{}pteta.npz".format(args.pt))
print("zqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("zqmixed{}pt.npz".format(args.pt))
print("zqmixed{}pt.npz".format(args.pt))
elif(args.isz==-1):
if(args.etabin==1):
loaded=np.load("qqmixed{}pteta.npz".format(args.pt))
print("qqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("qqmixed{}pt.npz".format(args.pt))
print("qqmixed{}pt.npz".format(args.pt))
elif(args.isz==0):
if(args.etabin==1):
if(args.unscale==1):
loaded=np.load("unscalemixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("mixed{}pteta.npz".format(args.pt))
print("etabin 1")
else:
if(args.unscale==1):
loaded=np.load("unscalemixed{}pt.npz".format(args.pt))
else:
loaded=np.load("mixed{}pt.npz".format(args.pt))
print("etabin 2.4")
data=loaded["bdtset"][:,:5]
label=loaded["label"]
line=int(30000)
endline=int(40000)
if(len(label)<40000):
line=int(len(label)*3./4.)
endline=len(label)
X=data[0:line]
vx=data[line:endline]
Y=label[0:line]
vy=label[line:endline]
Y=np.array(Y)[:,0]
folds = 3
param_comb = 100
skf = KFold(n_splits=folds, shuffle = True, random_state = 173)
#skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)
random_search = RandomizedSearchCV(model, param_distributions=params, n_iter=param_comb, scoring='log_loss', n_jobs=6, cv=skf.split(X,Y), verbose=3, random_state=173 )
# Here we go
start_time = timer(None) # timing starts from this point for "start_time" variable
random_search.fit(X, Y)
timer(start_time)
#print(random_search.predict(X[:10]))
#print('\n All results:')
#print(random_search.cv_results_)
#print('\n Best estimator:')
#print(random_search.best_estimator_)
print('\n Best normalized gini score for %d-fold search with %d parameter combinations:' % (folds, param_comb))
print(random_search.best_score_ * 2 - 1)
#print('\n Best hyperparameters:')
#print(random_search.best_params_)
results = pd.DataFrame(random_search.cv_results_)
results.to_csv('xgb/{}-{}.csv'.format(args.save,args.pt), index=False)
#random_search.best_estimator_.save_model("bdt-{}.dat".format(args.pt))
|
[
"scipy.stats.randint",
"argparse.ArgumentParser",
"scipy.stats.uniform",
"numpy.array",
"datetime.datetime.now",
"pandas.DataFrame",
"sklearn.model_selection.KFold",
"xgboost.XGBClassifier"
] |
[((727, 752), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (750, 752), False, 'import argparse\n'), ((2254, 2324), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'objective': '"""binary:logistic"""', 'tree_method': '"""gpu_hist"""'}), "(objective='binary:logistic', tree_method='gpu_hist')\n", (2271, 2324), True, 'import xgboost as xgb\n'), ((3518, 3571), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds', 'shuffle': '(True)', 'random_state': '(173)'}), '(n_splits=folds, shuffle=True, random_state=173)\n', (3523, 3571), False, 'from sklearn.model_selection import KFold\n'), ((4361, 4400), 'pandas.DataFrame', 'pd.DataFrame', (['random_search.cv_results_'], {}), '(random_search.cv_results_)\n', (4373, 4400), True, 'import pandas as pd\n'), ((2124, 2141), 'scipy.stats.randint', 'sts.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (2135, 2141), True, 'import scipy.stats as sts\n'), ((2167, 2190), 'scipy.stats.uniform', 'sts.uniform', (['(0.001)', '(0.5)'], {}), '(0.001, 0.5)\n', (2178, 2190), True, 'import scipy.stats as sts\n'), ((2218, 2238), 'scipy.stats.randint', 'sts.randint', (['(10)', '(101)'], {}), '(10, 101)\n', (2229, 2238), True, 'import scipy.stats as sts\n'), ((3467, 3478), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3475, 3478), True, 'import numpy as np\n'), ((430, 444), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (442, 444), False, 'from datetime import datetime\n'), ((526, 540), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (538, 540), False, 'from datetime import datetime\n')]
|
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
MAX_ACCOUNT_BALANCE = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_VOLUME = 1000e8
MAX_AMOUNT = 3e10
MAX_OPEN_POSITIONS = 5
MAX_STEPS = 20000
MAX_DAY_CHANGE = 1
INITIAL_ACCOUNT_BALANCE = 10000
DATA_HIS_PERIOD = 5
# position constant
FLAT = 0 # no position
LONG = 1 # buy position
SHORT = 2 # sell position
# action constant
HOLD = 0
BUY = 1
SELL = 2
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self, df,show_trade=True):
super(StockTradingEnv, self).__init__()
# show the trade info
self.show_trade = show_trade
self.actions=["FLAT","LONG","SHORT"]
self.fee = 0.0005 # brokage commission
self.df = df
self.closeprices = self.df['close'].values
self.reward_range = (0, MAX_ACCOUNT_BALANCE)
# Actions of the format Buy x%, Sell x%, Hold, etc.
self.action_space = spaces.Discrete(len(self.actions))
# self.action_space = spaces.Box(
# low=np.array([0, 0]), high=np.array([3, 1]), dtype=np.float16)
# Prices contains the OHCL values for the last five prices
self.observation_space = spaces.Box(
low=0, high=1, shape=(DATA_HIS_PERIOD+1,6), dtype=np.float16)
self.history = []
def _next_observation(self):
obs = np.array([
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'open'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'low'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'volume'].values / MAX_NUM_SHARES,
])
# Append additional data and scale each value to between 0-1
obs = np.append(obs,[[self.balance / MAX_ACCOUNT_BALANCE,
self.max_net_worth / MAX_ACCOUNT_BALANCE,
self.shares_held / MAX_NUM_SHARES,
self.cost_basis / MAX_SHARE_PRICE,
self.total_shares_sold / MAX_NUM_SHARES,
self.total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]],axis=0)
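        # Resulting observation has shape (DATA_HIS_PERIOD + 1, 6): five rows of
        # scaled OHLCV history plus one final row of scaled account information.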
return obs
def _take_action(self, action):
# Set the current price to a random price within the time step
# current_price = random.uniform(
# self.df.loc[self.current_step, "open"], self.df.loc[self.current_step, "close"])
# Set the current price to the last close price
self.close_price = self.df.loc[self.current_step,"close"]
amount = 0.5 #the old version has this variable, so reserve
# action comes from the agent
# 1 buy, 2 sell, 0 hold
# single position can be opened per trade
# valid action sequence would be
# LONG : buy - hold - hold - sell
# SHORT : sell - hold - hold - buy
# invalid action sequence is just considered hold
# (e.g.) "buy - buy" would be considred "buy - hold"
self.action = HOLD #hold
if action == BUY: #buy
if self.position == FLAT: # if previous position was flat
self.position = LONG #update position to long
self.action = BUY # record action as buy
self.entry_price = self.close_price
# Buy amount % of balance in shares
total_possible = int(self.balance / self.close_price)
shares_bought = int(total_possible * amount)//100 *100
self.krw_balance = shares_bought * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.shares_held = shares_bought
self.balance -= self.krw_balance-commission
#self.cost_basis = (prev_cost + additional_cost) / (self.shares_held + shares_bought)
elif self.position == SHORT: # if previous position was short
self.position = FLAT # update position to flat
self.action = BUY # record action as buy
self.exit_price = self.close_price
self.reward += ((self.entry_price - self.exit_price) / self.exit_price + 1) * (
1 - self.fee) ** 2 - 1 # calculate reward
#self.krw_balance = self.krw_balance * (1.0 + self.reward) # evaluate cumulative return in krw-won
self.balance += round(self.krw_balance * (1.0 + self.reward),2) # calcuate the total balance
self.n_short += 1 # record number of short
self.total_shares_sold += self.shares_held
self.total_sales_value += self.shares_held * self.close_price
self.entry_price = 0 # clear entry price
self.shares_held = 0 # clear the shares_
elif action == SELL:
if self.position == FLAT:
self.position = SHORT
self.action = SELL
self.entry_price = self.close_price
# Sell amount % of shares held
total_possible = int(self.balance / self.close_price)
self.shares_held = int(total_possible * amount)//100 *100
self.krw_balance = self.shares_held * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.balance -= self.krw_balance-commission
elif self.position == LONG:
self.position = FLAT
self.action = SELL
self.exit_price = self.close_price
self.reward += ((self.exit_price - self.entry_price) / self.entry_price + 1) * (1 - self.fee) ** 2 - 1
#self.krw_balance = self.krw_balance * (1.0 + self.reward)
self.balance += round(self.krw_balance*(1.0+self.reward),2)
self.n_long += 1
self.total_shares_buy += self.shares_held
self.total_buys_value += self.shares_held * self.close_price
self.shares_held = 0
self.entry_price = 0
# [coin + krw_won] total value evaluated in krw won
if (self.position == LONG):
temp_reward = ((self.close_price - self.entry_price) / self.entry_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
elif (self.position == SHORT):
temp_reward = ((self.entry_price - self.close_price) / self.close_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
else:
temp_reward = 0
new_portfolio = 0
self.net_worth = self.balance + new_portfolio
if self.net_worth > self.max_net_worth:
self.max_net_worth = self.net_worth
if self.shares_held == 0:
self.cost_basis = 0
self.portfolio = round(new_portfolio,2)
def step(self, action):
# Execute one time step within the environment
self._take_action(action)
done = False
self.current_step += 1
delay_modifier = (self.current_step / MAX_STEPS)
# profits
#reward = self.net_worth - INITIAL_ACCOUNT_BALANCE
#reward = 1 if reward > 0 else -100
if self.net_worth <= 0:
done = True
if self.current_step > len(self.df.loc[:, 'open'].values) - 1:
self.current_step = DATA_HIS_PERIOD # loop training
# when loop training, then clear the history
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward = 0
self.portfolio = 0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy = 0
self.total_buys_value = 0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long = 0
self.n_short = 0
self.history=[]
# done = True
if (self.show_trade and self.current_step % 1 == 0):
print("Tick: {0}/ Portfolio (krw-won): {1}, balance: {2}".format(self.current_step, self.portfolio,self.net_worth))
print("Long: {0}/ Short: {1}".format(self.n_long, self.n_short))
# save the history data
self.history.append([
self.action,
self.position,
self.current_step,
self.close_price,
self.krw_balance,
self.balance,
self.max_net_worth,
self.shares_held,
self.portfolio,
self.total_shares_buy,
self.total_buys_value,
self.total_shares_sold,
self.total_sales_value])
#self.history.append((self.action, self.current_step, self.closingPrice, self.portfolio, self.reward))
obs = self._next_observation()
if (self.current_step > (self.df.shape[0]) - 1):
self.done = True
self.reward = self.get_profit() # return reward at end of the game
return obs, self.net_worth, done, {'portfolio': np.array([self.portfolio]),
"history": self.history,
"n_trades": {'long': self.n_long, 'short': self.n_short}}
#return obs, reward, done, {}
def get_profit(self):
if(self.position == LONG):
            profit = ((self.close_price - self.entry_price)/self.entry_price + 1)*(1-self.fee)**2 - 1
        elif(self.position == SHORT):
            profit = ((self.entry_price - self.close_price)/self.close_price + 1)*(1-self.fee)**2 - 1
else:
profit = 0
return profit
def reset(self, new_df=None):
# Reset the state of the environment to an initial state
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward =0
self.portfolio =0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy =0
self.total_buys_value=0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long=0
self.n_short=0
self.history=[]
# pass test dataset to environment
if new_df:
self.df = new_df
# Set the current step to a random point within the data frame
# self.current_step = random.randint(
# 0, len(self.df.loc[:, 'open'].values) - 6)
# the observation include the given period history data
self.current_step = DATA_HIS_PERIOD #random.randint(DATA_HIS_PERIOD,len(self.df.loc[:,'open'].values)-1)
# for i in range(DATA_HIS_PERIOD):
# self.history.append([0.0,0.0,0.0,0.0,0.0,0.0])
return self._next_observation()
def render(self, mode='human', close=False):
# Render the environment to the screen
profit = self.net_worth - INITIAL_ACCOUNT_BALANCE
print('-'*30)
print(f'Step: {self.current_step}')
print(f'Balance: {self.balance}')
print(f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})')
print(f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
print(f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
print(f'Profit: {profit}')
return profit
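# Illustrative usage sketch (not part of the original module); `df` is assumed to
# be a pandas DataFrame with open/high/low/close/volume columns:
#   env = StockTradingEnv(df)
#   obs = env.reset()
#   obs, reward, done, info = env.step(BUY)
#   env.render()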
|
[
"numpy.append",
"numpy.array",
"gym.spaces.Box"
] |
[((1340, 1415), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(DATA_HIS_PERIOD + 1, 6)', 'dtype': 'np.float16'}), '(low=0, high=1, shape=(DATA_HIS_PERIOD + 1, 6), dtype=np.float16)\n', (1350, 1415), False, 'from gym import spaces\n'), ((1501, 2047), 'numpy.array', 'np.array', (["[self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step, 'open']\n .values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'low'].values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'volume'].values / MAX_NUM_SHARES]"], {}), "([self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'open'].values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'low'].values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'volume'].values / MAX_NUM_SHARES])\n", (1509, 2047), True, 'import numpy as np\n'), ((2166, 2455), 'numpy.append', 'np.append', (['obs', '[[self.balance / MAX_ACCOUNT_BALANCE, self.max_net_worth /\n MAX_ACCOUNT_BALANCE, self.shares_held / MAX_NUM_SHARES, self.cost_basis /\n MAX_SHARE_PRICE, self.total_shares_sold / MAX_NUM_SHARES, self.\n total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]]'], {'axis': '(0)'}), '(obs, [[self.balance / MAX_ACCOUNT_BALANCE, self.max_net_worth /\n MAX_ACCOUNT_BALANCE, self.shares_held / MAX_NUM_SHARES, self.cost_basis /\n MAX_SHARE_PRICE, self.total_shares_sold / MAX_NUM_SHARES, self.\n total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]], axis=0)\n', (2175, 2455), True, 'import numpy as np\n'), ((9930, 9956), 'numpy.array', 'np.array', (['[self.portfolio]'], {}), '([self.portfolio])\n', (9938, 9956), True, 'import numpy as np\n')]
|
from PIL import Image
import os, glob
import numpy as np
from sklearn import model_selection
classes = ["car", "bycycle", "motorcycle", "pedestrian"]
num_class = len(classes)
image_size = 50
# Load the images
X = []
Y = []
for index, classlabel in enumerate(classes):
photos_dir = "./" + classlabel
files = glob.glob(photos_dir + "/*.jpg")
for i, file in enumerate(files):
if i >=237: break
image = Image.open(file)
image = image.convert("RGB")
image = image.resize((image_size, image_size))
data = np.asarray(image) / 255
X.append(data)
Y.append(index)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y)
xy = (X_train, X_test, y_train, y_test)
np.save("./vehicle.npy", xy)
|
[
"PIL.Image.open",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.array",
"numpy.save",
"glob.glob"
] |
[((623, 634), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (631, 634), True, 'import numpy as np\n'), ((639, 650), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (647, 650), True, 'import numpy as np\n'), ((687, 725), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['X', 'Y'], {}), '(X, Y)\n', (719, 725), False, 'from sklearn import model_selection\n'), ((766, 794), 'numpy.save', 'np.save', (['"""./vehicle.npy"""', 'xy'], {}), "('./vehicle.npy', xy)\n", (773, 794), True, 'import numpy as np\n'), ((311, 343), 'glob.glob', 'glob.glob', (["(photos_dir + '/*.jpg')"], {}), "(photos_dir + '/*.jpg')\n", (320, 343), False, 'import os, glob\n'), ((423, 439), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (433, 439), False, 'from PIL import Image\n'), ((547, 564), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (557, 564), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "<NAME>"
import numpy as np
from astropy import wcs
from bokeh.layouts import row, widgetbox,gridplot
from bokeh.models import CustomJS, Slider,HoverTool,ColorBar,LinearColorMapper,LabelSet,ColumnDataSource
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.palettes import Plasma256
class Image(object):
def __init__(self,data,header):
self.data=data
self.header=header
def change_image_contrast(self, attr, old, new):
# print attr,old,new
self.fig_im.glyph.color_mapper.update(low=self.graph_min_slider.value, high=self.graph_max_slider.value)
def get_html_draw(self,w=None,h=None, catalog=None, plot=False, vmin=None, vmax=None):
#import plotly
#import plotly.graph_objs as go
#from plotly.graph_objs import Layout
# print('vmin,vmax',vmin,vmax)
msk = ~np.isnan(self.data)
if vmin is None:
vmin = self.data[msk].min()
if vmax is None:
vmax = self.data[msk].max()
min_s = self.data.min()
max_s = self.data.max()
r = self.data.shape[0] * 2
c = self.data.shape[1] * 2
fig = figure(plot_width=w, plot_height=h, x_range=(0, c * 0.5), y_range=(0, r * 0.5),
tools=['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])
w = wcs.WCS(self.header)
color_mapper = LinearColorMapper(low=min_s, high=max_s, palette=Plasma256)
        # keep a handle on the image renderer so change_image_contrast can update it
        self.fig_im = fig_im = fig.image(image=[self.data], x=[0], y=[0], dw=[c * 0.5], dh=[r * 0.5],
                                         color_mapper=color_mapper)
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
renderers=[fig_im])
fig.add_tools(hover)
#fig, (ax) = plt.subplots(1, 1, figsize=(4, 3), subplot_kw={'projection': WCS(self.header)})
#im = ax.imshow(self.data,
# origin='lower',
# zorder=1,
# interpolation='none',
# aspect='equal',
# cmap=plt.get_cmap('jet'),
# vmin=vmin,
# vmax=vmax)
if catalog is not None:
lon = catalog.ra
lat = catalog.dec
if len(lat) > 0.:
pixcrd = w.wcs_world2pix(np.column_stack((lon, lat)), 0)
msk = ~np.isnan(pixcrd[:, 0])
#ax.plot(pixcrd[:, 0][msk], pixcrd[:, 1][msk], 'o', mfc='none')
source = ColumnDataSource(data=dict(lon=pixcrd[:, 0][msk]+0.5,
lat=pixcrd[:, 1][msk]+0.5,
names=catalog.name[msk]))
#for ID, (x, y) in enumerate(pixcrd):
# if msk[ID]:
# # print ('xy',(pixcrd[:, 0][ID], pixcrd[:, 1][ID]))
# ax.annotate('%s' % catalog.name[ID], xy=(x, y), color='white')
#print(pixcrd[:][msk])
fig.scatter(x='lon', y='lat', marker='circle', size=15,
line_color="white", fill_color=None, alpha=1.0, source=source)
labels = LabelSet(x='lon', y='lat', text='names', level='glyph',
x_offset=5, y_offset=5, render_mode='canvas', source=source, text_color='white')
fig.add_layout(labels)
#print'cat', catalog[msk]
color_bar = ColorBar(color_mapper=color_mapper,
label_standoff=12, border_line_color=None, location=(0, 0))
JS_code_slider = """
var vmin = low_slider.value;
var vmax = high_slider.value;
fig_im.glyph.color_mapper.high = vmax;
fig_im.glyph.color_mapper.low = vmin;
"""
callback = CustomJS(args=dict(fig_im=fig_im), code=JS_code_slider)
self.graph_min_slider = Slider(title="Sig. Min", start=min_s, end=max_s, step=1, value=min_s, callback=callback)
self.graph_max_slider = Slider(title="Sig. Max", start=min_s, end=max_s, step=1, value=max_s * 0.8,
callback=callback)
self.graph_min_slider.on_change('value', self.change_image_contrast)
self.graph_max_slider.on_change('value', self.change_image_contrast)
callback.args["low_slider"] = self.graph_min_slider
callback.args["high_slider"] = self.graph_max_slider
#ax.set_xlabel('RA')
#ax.set_ylabel('DEC')
#ax.grid(True, color='white')
#fig.colorbar(im, ax=ax)
#plugins.connect(fig, plugins.MousePosition(fontsize=14))
#if plot == True:
# print('plot', plot)
# mpld3.show()
fig.add_layout(color_bar, 'right')
layout = row(
fig, widgetbox(self.graph_min_slider, self.graph_max_slider),
)
#curdoc().add_root(layout)
#output_file("slider.html", title="slider.py example")
#from bokeh.io import show
#show(layout)
script, div = components(layout)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class ScatterPlot(object):
def __init__(self,w,h,x_label=None,y_label=None,x_range=None,y_range=None,title=None,y_axis_type='linear',x_axis_type='linear'):
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y")])
self.fig = figure(title=title, width=w, height=h,x_range=x_range,y_range=y_range,
y_axis_type=y_axis_type,
x_axis_type=x_axis_type,
tools=[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']
)
if x_label is not None:
self.fig.xaxis.axis_label = x_label
if y_label is not None:
self.fig.yaxis.axis_label = y_label
def add_errorbar(self, x, y, xerr=None, yerr=None, color='red',
point_kwargs={}, error_kwargs={}):
self.fig.circle(x, y, color=color, **point_kwargs)
if xerr is not None:
x_err_x = []
x_err_y = []
for px, py, err in zip(x, y, xerr):
x_err_x.append((px - err, px + err))
x_err_y.append((py, py))
self.fig.multi_line(x_err_x, x_err_y, color=color, **error_kwargs)
if yerr is not None:
y_err_x = []
y_err_y = []
for px, py, err in zip(x, y, yerr):
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
self.fig.multi_line(y_err_x, y_err_y, color=color, **error_kwargs)
def add_step_line(self,x,y,legend=None):
#print('a')
self.fig.step(x,y,name=legend, mode="center")
#print('b')
def add_line(self,x,y,legend=None,color=None):
self.fig.line(x,y,legend=legend,line_color=color)
def get_html_draw(self):
layout = row(
self.fig
)
#curdoc().add_root(layout)
#show(layout)
script, div = components(layout)
#print ('script',script)
#print ('div',div)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class GridPlot(object):
def __init__(self,f1,f2,w=None,h=None):
self.f1=f1
self.f2=f2
def get_html_draw(self,w=None,h=None):
#l = layout([self.f1.fig],[self.f2.fig])
grid = gridplot([self.f1.fig,self.f2.fig],ncols=1,plot_width=w, plot_height=h)
#curdoc().add_root(grid)
#show(grid)
#output_file("test.html")
script, div = components(grid)
html_dict={}
html_dict['script']=script
html_dict['div'] = div
return html_dict
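# Illustrative note (not part of the original module): the script/div pair returned
# by get_html_draw is meant to be embedded in an HTML page that also loads the
# matching BokehJS resources (e.g. the CDN script tags for this bokeh version).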
|
[
"bokeh.models.ColorBar",
"bokeh.layouts.row",
"bokeh.plotting.figure",
"bokeh.layouts.widgetbox",
"bokeh.embed.components",
"bokeh.models.LinearColorMapper",
"numpy.column_stack",
"bokeh.layouts.gridplot",
"builtins.zip",
"numpy.isnan",
"bokeh.models.Slider",
"bokeh.models.LabelSet",
"astropy.wcs.WCS",
"bokeh.models.HoverTool"
] |
[((1396, 1547), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'w', 'plot_height': 'h', 'x_range': '(0, c * 0.5)', 'y_range': '(0, r * 0.5)', 'tools': "['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']"}), "(plot_width=w, plot_height=h, x_range=(0, c * 0.5), y_range=(0, r * \n 0.5), tools=['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])\n", (1402, 1547), False, 'from bokeh.plotting import figure\n'), ((1577, 1597), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (1584, 1597), False, 'from astropy import wcs\n'), ((1621, 1680), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'low': 'min_s', 'high': 'max_s', 'palette': 'Plasma256'}), '(low=min_s, high=max_s, palette=Plasma256)\n', (1638, 1680), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((1841, 1932), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('x', '$x'), ('y', '$y'), ('value', '@image')]", 'renderers': '[fig_im]'}), "(tooltips=[('x', '$x'), ('y', '$y'), ('value', '@image')],\n renderers=[fig_im])\n", (1850, 1932), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((3718, 3818), 'bokeh.models.ColorBar', 'ColorBar', ([], {'color_mapper': 'color_mapper', 'label_standoff': '(12)', 'border_line_color': 'None', 'location': '(0, 0)'}), '(color_mapper=color_mapper, label_standoff=12, border_line_color=\n None, location=(0, 0))\n', (3726, 3818), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((4214, 4306), 'bokeh.models.Slider', 'Slider', ([], {'title': '"""Sig. Min"""', 'start': 'min_s', 'end': 'max_s', 'step': '(1)', 'value': 'min_s', 'callback': 'callback'}), "(title='Sig. Min', start=min_s, end=max_s, step=1, value=min_s,\n callback=callback)\n", (4220, 4306), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((4335, 4433), 'bokeh.models.Slider', 'Slider', ([], {'title': '"""Sig. Max"""', 'start': 'min_s', 'end': 'max_s', 'step': '(1)', 'value': '(max_s * 0.8)', 'callback': 'callback'}), "(title='Sig. 
Max', start=min_s, end=max_s, step=1, value=max_s * 0.8,\n callback=callback)\n", (4341, 4433), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((5358, 5376), 'bokeh.embed.components', 'components', (['layout'], {}), '(layout)\n', (5368, 5376), False, 'from bokeh.embed import components\n'), ((5675, 5721), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('x', '$x'), ('y', '$y')]"}), "(tooltips=[('x', '$x'), ('y', '$y')])\n", (5684, 5721), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((5742, 5946), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'width': 'w', 'height': 'h', 'x_range': 'x_range', 'y_range': 'y_range', 'y_axis_type': 'y_axis_type', 'x_axis_type': 'x_axis_type', 'tools': "[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']"}), "(title=title, width=w, height=h, x_range=x_range, y_range=y_range,\n y_axis_type=y_axis_type, x_axis_type=x_axis_type, tools=[hover,\n 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])\n", (5748, 5946), False, 'from bokeh.plotting import figure\n'), ((7279, 7292), 'bokeh.layouts.row', 'row', (['self.fig'], {}), '(self.fig)\n', (7282, 7292), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((7397, 7415), 'bokeh.embed.components', 'components', (['layout'], {}), '(layout)\n', (7407, 7415), False, 'from bokeh.embed import components\n'), ((7814, 7888), 'bokeh.layouts.gridplot', 'gridplot', (['[self.f1.fig, self.f2.fig]'], {'ncols': '(1)', 'plot_width': 'w', 'plot_height': 'h'}), '([self.f1.fig, self.f2.fig], ncols=1, plot_width=w, plot_height=h)\n', (7822, 7888), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((7995, 8011), 'bokeh.embed.components', 'components', (['grid'], {}), '(grid)\n', (8005, 8011), False, 'from bokeh.embed import components\n'), ((1093, 1112), 'numpy.isnan', 'np.isnan', (['self.data'], {}), '(self.data)\n', (1101, 1112), True, 'import numpy as np\n'), ((5110, 5165), 'bokeh.layouts.widgetbox', 'widgetbox', (['self.graph_min_slider', 'self.graph_max_slider'], {}), '(self.graph_min_slider, self.graph_max_slider)\n', (5119, 5165), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((6486, 6501), 'builtins.zip', 'zip', (['x', 'y', 'xerr'], {}), '(x, y, xerr)\n', (6489, 6501), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((6787, 6802), 'builtins.zip', 'zip', (['x', 'y', 'yerr'], {}), '(x, y, yerr)\n', (6790, 6802), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((3443, 3583), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""lon"""', 'y': '"""lat"""', 'text': '"""names"""', 'level': '"""glyph"""', 'x_offset': '(5)', 'y_offset': '(5)', 'render_mode': '"""canvas"""', 'source': 'source', 'text_color': '"""white"""'}), "(x='lon', y='lat', text='names', level='glyph', x_offset=5,\n y_offset=5, render_mode='canvas', source=source, text_color='white')\n", (3451, 3583), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((2568, 2595), 'numpy.column_stack', 'np.column_stack', (['(lon, lat)'], {}), '((lon, lat))\n', (2583, 2595), True, 'import numpy as np\n'), ((2624, 2646), 'numpy.isnan', 'np.isnan', (['pixcrd[:, 0]'], {}), '(pixcrd[:, 0])\n', (2632, 2646), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DensityPeaks.py
# @Author: <NAME>
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
"""
<NAME>., <NAME>., & <NAME>. (2019). A self-training method based on density
peaks and an extended parameter-free local noise filter for k nearest
neighbor. Knowledge-Based Systems, 184, 104895.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Self-training semi-supervised classification based on density peaks of
data. Neurocomputing, 275, 180-191.
"""
def __init__(
self,
dc=None,
distance_metric="euclidean",
k=3,
gauss_cutoff=True,
percent=2.0,
density_threshold=None,
distance_threshold=None,
anormal=True,
filtering=False,
classifier=None,
classifier_params=None,
filter_method=None,
):
"""Semi Supervised Algorithm based on Density Peaks."""
self.dc = dc
self.distance_metric = distance_metric
self.k = k
self.gauss_cutoff = gauss_cutoff
self.percent = percent
self.density_threshold = density_threshold
self.distance_threshold = distance_threshold
self.anormal = anormal
self.filtering = filtering
if classifier is not None:
if isinstance(classifier_params, dict):
self.classifier = classifier(**classifier_params)
else:
self.classifier = classifier()
else:
self.classifier = None
if filter_method is not None and filter_method != "ENANE":
self.filter = filter_method()
elif isinstance(filter_method, str) and filter_method == "ENANE":
self.filter = filter_method
else:
self.filter = None
self.y = None
self.low = None
self.u = None
self.classifier_stdpnf = None
self.order = None
self.structure = None
self.structure_stdnpf = None
self.n_id = None
self.distances = None
self.max_dis = None
self.min_dis = None
self.rho = None
self.delta = None
self.nneigh = None
self.data = None
def __build_distance(self):
"""
Calculate distance dict.
:return: distance dict, max distance, min distance
"""
from scipy.spatial.distance import pdist, squareform
distance_matrix = pdist(self.data, metric=self.distance_metric)
distance_matrix = squareform(distance_matrix)
triangle_upper = np.triu_indices(self.data.shape[0], 1)
triangle_upper = distance_matrix[triangle_upper]
distance = {}
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
distance[(i, j)] = distance_matrix[i, j]
distance[(j, i)] = distance_matrix[i, j]
max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper)
return distance, max_dis, min_dis
def __auto_select_dc(self):
"""
        Automatically select the cutoff distance dc so that the average number
        of neighbors is 1-2 percent of all points.
        :return: dc, the local density threshold
"""
max_dis, min_dis = self.max_dis, self.min_dis
dc = (max_dis + min_dis) / 2
while True:
nneighs = (
sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
)
if 0.01 <= nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
def __select_dc(self):
"""
Select the local density threshold, default is the method used in paper,
'auto' is auto select.
:return: dc that local density threshold
"""
if self.dc == "auto":
dc = self.__auto_select_dc()
else:
position = int(self.n_id * (self.n_id + 1) /
2 * self.percent / 100)
dc = np.sort(list(self.distances.values()))[
position * 2 + self.n_id]
return dc
def __local_density(self):
"""
Compute all points' local density.
:return: local density vector that index is the point index
"""
def gauss_func(dij, dc):
"""
> The function takes in a distance value and a cutoff value, and
returns the value of the Gaussian function at that point
:param dij: distance between two nodes
:param dc: The cutoff distance
:return: the value of the gaussian function.
"""
return math.exp(-((dij / dc) ** 2))
def cutoff_func(dij, dc):
"""
If the distance between two atoms is less than the cutoff distance,
return 1, otherwise return 0
:param dij: distance between atoms i and j
:param dc: cutoff distance
:return: 1 if dij < dc, else 0
"""
return 1 if dij < dc else 0
func = gauss_func if self.gauss_cutoff else cutoff_func
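        # Local density of point i: rho_i = sum over j != i of func(d_ij, dc),
        # accumulated symmetrically over the upper-triangular pairs below.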
rho = [0] * self.n_id
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
temp = func(self.distances[(i, j)], self.dc)
rho[i] += temp
rho[j] += temp
return np.array(rho, np.float32)
def __min_neighbor_and_distance(self):
"""
        Compute, for every point, the minimum distance to any point with higher
        local density (which identifies its nearest higher-density neighbor).
:return: distance vector, nearest neighbor vector
"""
if self.rho is None:
raise ValueError("Encountered rho as None.")
sort_rho_idx = np.argsort(-self.rho)
delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id
delta[sort_rho_idx[0]] = -1.0
for i in range(self.n_id):
for j in range(0, i):
old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
if self.distances[(old_i, old_j)] < delta[old_i]:
delta[old_i] = self.distances[(old_i, old_j)]
nneigh[old_i] = old_j
delta[sort_rho_idx[0]] = max(delta)
return np.array(delta, np.float32), np.array(nneigh, np.float32)
def __structure(self):
"""
The function takes the data and the nearest neighbor indices and creates
a dataframe with the following columns:
- sample: the data point
- next: the index of the nearest neighbor
- previous: the index of the nearest neighbor of the nearest neighbor
- label: the label of the data point
The function also creates a copy of the dataframe called
structure_stdnpf
"""
self.structure = dict.fromkeys(range(self.n_id))
for index, sample in enumerate(self.data):
self.structure[index] = [
sample,
int(self.nneigh[index]),
None,
self.y[index] if index < len(self.y) else -1,
]
for index in range(self.n_id):
if self.structure[self.structure[index][1]][2] is None:
self.structure[self.structure[index][1]][2] = index
self.structure = pd.DataFrame(
self.structure, index=["sample", "next", "previous", "label"]
).transpose()
self.structure_stdnpf = self.structure.copy(deep=True)
def __step_a(self):
"""
> The function takes the labeled samples and trains the classifier on
them
:return: The samples that have been labeled.
"""
samples_labeled = self.structure.loc[self.structure["label"] != -1]
sam_lab = samples_labeled["sample"].to_list()
y_without = samples_labeled["label"].to_list()
self.classifier.fit(sam_lab, y_without)
return samples_labeled
def __discover_structure(self):
"""Discovers the under laying structure."""
self._fit_without()
def __nan_search(self):
"""
For each point, find the set of points that are within a distance of r,
and the set of points that are within a distance of r+1.
The set of points that are within a distance of r+1 is a superset of the
set of points that are within a distance of r.
The set of points that are within a distance of r+1 is also a superset
of the set of points that are within a distance of r+2.
The set of points that are within a distance of r+2 is also a superset
of the set of points that are within a distance of r+3.
And so on.
The set of points that are within a distance of r+1 is also a superset
of the set of points that are within a distance of r+2.
The set of points that are within a distance of r+2 is
:return: nan, r
"""
r = 1
nan = defaultdict(set)
nb = dict.fromkeys(range(self.n_id), 0)
knn = defaultdict(set)
rnn = defaultdict(set)
cnt = defaultdict(int)
while True:
search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
search.fit(self.data)
for index, sample in enumerate(self.data):
r_neighs = search.kneighbors(
[sample], return_distance=False)[0][1:]
knn[index].update(list(r_neighs))
for neigh in r_neighs:
nb[neigh] += 1
rnn[neigh].add(index)
cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
if r > 2 and cnt[r] == cnt[r - 1]:
r -= 1
break
r += 1
for index in range(self.n_id):
nan[index] = knn[index].intersection(rnn[index])
return nan, r
def __enane(self, fx, nan, r):
"""
> The function takes in the dataframe, the list of indices of the
unlabeled data, the list of indices of the neighbors of the unlabeled
data, and the number of neighbors to use in the KNN classifier. It
then creates a new dataframe with the labeled data and the unlabeled
data, and uses the KNN classifier to predict the labels of the
unlabeled data. It then checks if the predicted label is the same as
the label of the majority of the neighbors of the unlabeled data. If
it is, then it adds the index of the unlabeled data to the list of
indices of the data to be labeled
:param fx: the indexes of the unlabeled data
:param nan: a list of lists, where each list contains the indices of the
neighbors of a sample
:param r: the number of neighbors to consider
:return: The indexes of the samples that are going to be labeled and the
labels that are going to be assigned to them.
"""
es = []
es_pred = []
local_structure = self.structure_stdnpf.copy(deep=True)
base_estimator = KNeighborsClassifier(
n_neighbors=r, metric=self.distance_metric
)
labeled_data = local_structure.loc[local_structure["label"] != -1]
nan_unlabeled = local_structure.loc[fx]
data = pd.concat([labeled_data, nan_unlabeled], join="inner")
enane_model = SelfTrainingClassifier(base_estimator)
enane_model.fit(data["sample"].tolist(), data["label"].tolist())
enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist())
for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred):
usefulness = 0
harmfulness = 0
for neigh in nan[row_index]:
if local_structure.loc[neigh, "label"] == pred:
usefulness += 1
else:
harmfulness += 1
if usefulness >= harmfulness:
es.append(row_index)
es_pred.append(pred)
return es, es_pred
def __init_values(self, low, u, y):
"""
        Stores the labeled samples, the unlabeled samples and the labels, then
        precomputes the pairwise distances, the cutoff distance dc, the local
        densities rho, the delta distances with their nearest higher-density
        neighbors, and the initial structure dataframe
        :param low: labeled samples
        :param u: unlabeled samples
        :param y: labels of the labeled samples
"""
self.y = y
self.low = low
self.u = u
self.data = np.concatenate((low, u), axis=0)
self.n_id = self.data.shape[0]
self.distances, self.max_dis, self.min_dis = self.__build_distance()
self.dc = self.__select_dc()
self.rho = self.__local_density()
self.delta, self.nneigh = self.__min_neighbor_and_distance()
self.__structure()
def _fit_without(self):
"""
The function takes in a classifier, and then labels the next point,
and then labels the previous points, without filtering.
"""
if self.classifier is None:
self.classifier = SVC()
count = 1
self.order = dict.fromkeys(range(self.n_id), 0)
count = self._label_next_point(count)
self._label_previous_points(count)
def _label_previous_points(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the previous samples of those samples. It then labels those samples
and repeats the process until there are no more samples to label
:param count: the number of the current iteration
"""
while True:
samples_labeled = self.__step_a()
prev_rows = samples_labeled["previous"].to_numpy()
prev_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for prev_row in prev_rows:
if prev_row not in samples_labeled_index and prev_row is not None:
prev_unlabeled.append(prev_row)
self.order[prev_row] = count
if len(prev_unlabeled) == 0:
break
unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
lu = unlabeled_prev_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, prev_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
def _label_next_point(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the next samples in the structure. If the next samples are not
labeled, it labels them and updates the order of the samples
:param count: the number of the next point to be labeled
:return: The number of labeled samples.
"""
while True:
samples_labeled = self.__step_a()
next_rows = samples_labeled["next"].to_numpy()
next_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for next_row in next_rows:
if next_row not in samples_labeled_index:
next_unlabeled.append(next_row)
self.order[next_row] = count
if len(next_unlabeled) == 0:
break
unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
lu = unlabeled_next_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, next_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
return count
def _fit_stdpnf(self):
"""
Self Training based on Density Peaks and a parameter-free noise
filter.
"""
self.__discover_structure()
nan, lambda_param = self.__nan_search()
self.classifier_stdpnf = KNeighborsClassifier(
n_neighbors=self.k, metric=self.distance_metric
)
self.classifier_stdpnf.fit(self.low, self.y)
count = 1
while count <= max(self.order.values()):
unlabeled_rows = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] == -1
].index.to_list()
unlabeled_indexes = []
for row in unlabeled_rows:
if self.order[row] == count:
unlabeled_indexes.append(row)
if isinstance(self.filter, str) and self.filter == "ENANE":
filtered_indexes, filtered_labels = self.__enane(
unlabeled_indexes, nan, lambda_param
)
self.structure_stdnpf.at[filtered_indexes,
"label"] = filtered_labels
else:
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
complete = labeled_data["sample"]
complete_y = labeled_data["label"]
result = self._if_filter(complete, complete_y)
self._results_to_structure(complete, result)
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
count += 1
labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
def _results_to_structure(self, complete, result):
"""
> This function takes the results of the model and compares them to the
complete data set. If the result is not in the complete data set, it is
added to the structure data set.
:param complete: the complete dataset
:param result: the result of the clustering
"""
results_to_unlabeled = []
for r in result.to_numpy():
is_in = False
for c in complete:
if np.array_equal(r, c):
is_in = True
if not is_in:
results_to_unlabeled.append(r)
        for r in results_to_unlabeled:
            # mark the matching sample as unlabeled again
            mask = self.structure_stdnpf["sample"].apply(
                lambda s: np.array_equal(s, r)
            )
            self.structure_stdnpf.loc[mask, "label"] = -1
def _if_filter(self, complete, complete_y):
"""
If the filter is an ENN, then filter the original data, otherwise
filter the complete data
:param complete: the complete dataframe
:param complete_y: the complete y values
:return: The result is a dataframe with the filtered data.
"""
if isinstance(self.filter, ENN):
original = pd.DataFrame(self.low)
original_y = pd.DataFrame(self.y)
result, _ = self.filter.filter_original_complete(
original, original_y, complete, complete_y
)
else:
result, _ = self.filter.filter(complete, complete_y)
return result
def fit(self, samples, y):
"""Fit method."""
try:
l, u, y = split(samples, y)
except IndexError:
raise ValueError("Dimensions do not match.")
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
self.__init_values(l, u, y)
if self.filtering:
self._fit_stdpnf()
else:
self._fit_without()
def predict(self, src):
"""
Predict based on a trained classifier.
        :param src: the samples to classify
        :return: the predicted labels for `src`
"""
if self.classifier is None:
raise AssertionError("The model needs to be fitted first.")
return self.classifier.predict(src)
|
[
"sklearn.semi_supervised.SelfTrainingClassifier",
"scipy.spatial.distance.squareform",
"sklearn.preprocessing.LabelEncoder",
"numpy.triu_indices",
"pandas.DataFrame",
"scipy.spatial.distance.pdist",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.max",
"numpy.argsort",
"numpy.array",
"collections.defaultdict",
"numpy.array_equal",
"numpy.concatenate",
"numpy.min",
"sklearn.neighbors.NearestNeighbors",
"math.exp",
"pandas.concat",
"sklearn.svm.SVC"
] |
[((2864, 2909), 'scipy.spatial.distance.pdist', 'pdist', (['self.data'], {'metric': 'self.distance_metric'}), '(self.data, metric=self.distance_metric)\n', (2869, 2909), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2936, 2963), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (2946, 2963), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2990, 3028), 'numpy.triu_indices', 'np.triu_indices', (['self.data.shape[0]', '(1)'], {}), '(self.data.shape[0], 1)\n', (3005, 3028), True, 'import numpy as np\n'), ((5980, 6005), 'numpy.array', 'np.array', (['rho', 'np.float32'], {}), '(rho, np.float32)\n', (5988, 6005), True, 'import numpy as np\n'), ((6355, 6376), 'numpy.argsort', 'np.argsort', (['(-self.rho)'], {}), '(-self.rho)\n', (6365, 6376), True, 'import numpy as np\n'), ((9557, 9573), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9568, 9573), False, 'from collections import defaultdict\n'), ((9636, 9652), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9647, 9652), False, 'from collections import defaultdict\n'), ((9667, 9683), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9678, 9683), False, 'from collections import defaultdict\n'), ((9698, 9714), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9709, 9714), False, 'from collections import defaultdict\n'), ((11671, 11735), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'r', 'metric': 'self.distance_metric'}), '(n_neighbors=r, metric=self.distance_metric)\n', (11691, 11735), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((11897, 11951), 'pandas.concat', 'pd.concat', (['[labeled_data, nan_unlabeled]'], {'join': '"""inner"""'}), "([labeled_data, nan_unlabeled], join='inner')\n", (11906, 11951), True, 'import pandas as pd\n'), ((11975, 12013), 'sklearn.semi_supervised.SelfTrainingClassifier', 'SelfTrainingClassifier', (['base_estimator'], {}), '(base_estimator)\n', (11997, 12013), False, 'from sklearn.semi_supervised import SelfTrainingClassifier\n'), ((13238, 13270), 'numpy.concatenate', 'np.concatenate', (['(low, u)'], {'axis': '(0)'}), '((low, u), axis=0)\n', (13252, 13270), True, 'import numpy as np\n'), ((16707, 16776), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'self.k', 'metric': 'self.distance_metric'}), '(n_neighbors=self.k, metric=self.distance_metric)\n', (16727, 16776), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((20181, 20195), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (20193, 20195), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3332, 3354), 'numpy.max', 'np.max', (['triangle_upper'], {}), '(triangle_upper)\n', (3338, 3354), True, 'import numpy as np\n'), ((3356, 3378), 'numpy.min', 'np.min', (['triangle_upper'], {}), '(triangle_upper)\n', (3362, 3378), True, 'import numpy as np\n'), ((5271, 5297), 'math.exp', 'math.exp', (['(-(dij / dc) ** 2)'], {}), '(-(dij / dc) ** 2)\n', (5279, 5297), False, 'import math\n'), ((6858, 6885), 'numpy.array', 'np.array', (['delta', 'np.float32'], {}), '(delta, np.float32)\n', (6866, 6885), True, 'import numpy as np\n'), ((6887, 6915), 'numpy.array', 'np.array', (['nneigh', 'np.float32'], {}), '(nneigh, np.float32)\n', (6895, 6915), True, 'import numpy as np\n'), ((9757, 9813), 'sklearn.neighbors.NearestNeighbors', 
'NearestNeighbors', ([], {'n_neighbors': '(r + 1)', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=r + 1, algorithm='kd_tree')\n", (9773, 9813), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((13821, 13826), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (13824, 13826), False, 'from sklearn.svm import SVC\n'), ((19667, 19689), 'pandas.DataFrame', 'pd.DataFrame', (['self.low'], {}), '(self.low)\n', (19679, 19689), True, 'import pandas as pd\n'), ((19715, 19735), 'pandas.DataFrame', 'pd.DataFrame', (['self.y'], {}), '(self.y)\n', (19727, 19735), True, 'import pandas as pd\n'), ((7906, 7981), 'pandas.DataFrame', 'pd.DataFrame', (['self.structure'], {'index': "['sample', 'next', 'previous', 'label']"}), "(self.structure, index=['sample', 'next', 'previous', 'label'])\n", (7918, 7981), True, 'import pandas as pd\n'), ((18964, 18984), 'numpy.array_equal', 'np.array_equal', (['r', 'c'], {}), '(r, c)\n', (18978, 18984), True, 'import numpy as np\n'), ((19168, 19212), 'numpy.array', 'np.array', (["self.structure_stdnpf['sample']", 'r'], {}), "(self.structure_stdnpf['sample'], r)\n", (19176, 19212), True, 'import numpy as np\n')]
|
import os
import numpy as np
import pytest
import easyidp
from easyidp.core.objects import ReconsProject, Points
from easyidp.io import metashape
module_path = os.path.join(easyidp.__path__[0], "io/tests")
def test_init_reconsproject():
attempt1 = ReconsProject("agisoft")
assert attempt1.software == "metashape"
attempt2 = ReconsProject("Metashape")
assert attempt2.software == "metashape"
with pytest.raises(LookupError):
attempt3 = ReconsProject("not_supported_sfm")
def test_local2world2local():
attempt1 = ReconsProject("agisoft")
attempt1.transform.matrix = np.asarray([[-0.86573098, -0.01489186, 0.08977677, 7.65034123],
[0.06972335, 0.44334391, 0.74589315, 1.85910928],
[-0.05848325, 0.74899678, -0.43972184, -0.1835615],
[0., 0., 0., 1.]], dtype=np.float)
w_pos = Points([0.5, 1, 1.5])
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
w_pos_ans = Points([0.4999999999999978, 0.9999999999999993, 1.5])
world_pos = attempt1.local2world(l_pos)
np.testing.assert_array_almost_equal(w_pos_ans.values, world_pos.values, decimal=6)
local_pos = attempt1.world2local(w_pos)
np.testing.assert_array_almost_equal(l_pos.values, local_pos.values, decimal=6)
def test_metashape_project_local_points_on_raw():
test_project_folder = easyidp.test_full_path("data/metashape/goya_test.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
# test for single point
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
p_dis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=False)
p_undis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=True)
# pro_api_out = np.asarray([2218.883386793118, 1991.4709388015149])
my_undistort_out = Points([2220.854889556147, 1992.6933680261686])
my_distort_out = Points([2218.47960556, 1992.46356322])
np.testing.assert_array_almost_equal(p_dis_out.values, my_distort_out.values)
np.testing.assert_array_almost_equal(p_undis_out.values, my_undistort_out.values)
# test for multiple points
l_pos_points = Points([[7.960064093299587, 1.3019528769064523, -2.6697181763370965],
[7.960064093299587, 1.3019528769064523, -2.6697181763370965]])
p_dis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=False)
p_undis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=True)
my_undistort_outs = Points([[2220.854889556147, 1992.6933680261686],
[2220.854889556147, 1992.6933680261686]])
my_distort_outs = Points([[2218.47960556, 1992.46356322],
[2218.47960556, 1992.46356322]])
np.testing.assert_array_almost_equal(p_dis_outs.values, my_distort_outs.values)
np.testing.assert_array_almost_equal(p_undis_outs.values, my_undistort_outs.values)
def test_world2crs_and_on_raw_images():
test_project_folder = easyidp.test_full_path("data/metashape/wheat_tanashi.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
local = Points([11.870130675203006, 0.858098777517136, -12.987136541275])
geocentric = Points([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])
geodetic = Points([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=['lon', 'lat', 'alt'])
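    # chunk-local coordinates -> geocentric world coordinates -> geodetic lon/lat/alt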
idp_world = chunk.local2world(local)
np.testing.assert_array_almost_equal(idp_world.values, geocentric.values, decimal=1)
idp_crs = chunk.world2crs(idp_world)
np.testing.assert_array_almost_equal(idp_crs.values, geodetic.values)
camera_id = 56 # camera_label = 'DJI_0057'
camera_pix_ans = Points([2391.7104647010146, 1481.8987733175165])
idp_cam_pix = chunk.project_local_points_on_raw(local, camera_id, distortion_correct=True)
np.testing.assert_array_almost_equal(camera_pix_ans.values, idp_cam_pix.values)
|
[
"easyidp.test_full_path",
"numpy.testing.assert_array_almost_equal",
"easyidp.core.objects.Points",
"easyidp.core.objects.ReconsProject",
"numpy.asarray",
"os.path.join",
"easyidp.io.metashape.open_project",
"pytest.raises"
] |
[((162, 207), 'os.path.join', 'os.path.join', (['easyidp.__path__[0]', '"""io/tests"""'], {}), "(easyidp.__path__[0], 'io/tests')\n", (174, 207), False, 'import os\n'), ((256, 280), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""agisoft"""'], {}), "('agisoft')\n", (269, 280), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((341, 367), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""Metashape"""'], {}), "('Metashape')\n", (354, 367), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((551, 575), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""agisoft"""'], {}), "('agisoft')\n", (564, 575), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((608, 821), 'numpy.asarray', 'np.asarray', (['[[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335, \n 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -\n 0.43972184, -0.1835615], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.float'}), '([[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335,\n 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -\n 0.43972184, -0.1835615], [0.0, 0.0, 0.0, 1.0]], dtype=np.float)\n', (618, 821), True, 'import numpy as np\n'), ((953, 974), 'easyidp.core.objects.Points', 'Points', (['[0.5, 1, 1.5]'], {}), '([0.5, 1, 1.5])\n', (959, 974), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((987, 1055), 'easyidp.core.objects.Points', 'Points', (['[7.960064093299587, 1.3019528769064523, -2.6697181763370965]'], {}), '([7.960064093299587, 1.3019528769064523, -2.6697181763370965])\n', (993, 1055), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1072, 1125), 'easyidp.core.objects.Points', 'Points', (['[0.4999999999999978, 0.9999999999999993, 1.5]'], {}), '([0.4999999999999978, 0.9999999999999993, 1.5])\n', (1078, 1125), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1175, 1262), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['w_pos_ans.values', 'world_pos.values'], {'decimal': '(6)'}), '(w_pos_ans.values, world_pos.values,\n decimal=6)\n', (1211, 1262), True, 'import numpy as np\n'), ((1308, 1387), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['l_pos.values', 'local_pos.values'], {'decimal': '(6)'}), '(l_pos.values, local_pos.values, decimal=6)\n', (1344, 1387), True, 'import numpy as np\n'), ((1466, 1520), 'easyidp.test_full_path', 'easyidp.test_full_path', (['"""data/metashape/goya_test.psx"""'], {}), "('data/metashape/goya_test.psx')\n", (1488, 1520), False, 'import easyidp\n'), ((1534, 1577), 'easyidp.io.metashape.open_project', 'metashape.open_project', (['test_project_folder'], {}), '(test_project_folder)\n', (1556, 1577), False, 'from easyidp.io import metashape\n'), ((1642, 1710), 'easyidp.core.objects.Points', 'Points', (['[7.960064093299587, 1.3019528769064523, -2.6697181763370965]'], {}), '([7.960064093299587, 1.3019528769064523, -2.6697181763370965])\n', (1648, 1710), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1981, 2028), 'easyidp.core.objects.Points', 'Points', (['[2220.854889556147, 1992.6933680261686]'], {}), '([2220.854889556147, 1992.6933680261686])\n', (1987, 2028), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2050, 2088), 'easyidp.core.objects.Points', 'Points', (['[2218.47960556, 1992.46356322]'], {}), '([2218.47960556, 1992.46356322])\n', (2056, 2088), False, 'from easyidp.core.objects 
import ReconsProject, Points\n'), ((2094, 2171), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_dis_out.values', 'my_distort_out.values'], {}), '(p_dis_out.values, my_distort_out.values)\n', (2130, 2171), True, 'import numpy as np\n'), ((2176, 2262), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_undis_out.values', 'my_undistort_out.values'], {}), '(p_undis_out.values, my_undistort_out.\n values)\n', (2212, 2262), True, 'import numpy as np\n'), ((2309, 2446), 'easyidp.core.objects.Points', 'Points', (['[[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [\n 7.960064093299587, 1.3019528769064523, -2.6697181763370965]]'], {}), '([[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [\n 7.960064093299587, 1.3019528769064523, -2.6697181763370965]])\n', (2315, 2446), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2684, 2779), 'easyidp.core.objects.Points', 'Points', (['[[2220.854889556147, 1992.6933680261686], [2220.854889556147, \n 1992.6933680261686]]'], {}), '([[2220.854889556147, 1992.6933680261686], [2220.854889556147, \n 1992.6933680261686]])\n', (2690, 2779), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2829, 2901), 'easyidp.core.objects.Points', 'Points', (['[[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]]'], {}), '([[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]])\n', (2835, 2901), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2937, 3016), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_dis_outs.values', 'my_distort_outs.values'], {}), '(p_dis_outs.values, my_distort_outs.values)\n', (2973, 3016), True, 'import numpy as np\n'), ((3021, 3109), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_undis_outs.values', 'my_undistort_outs.values'], {}), '(p_undis_outs.values, my_undistort_outs\n .values)\n', (3057, 3109), True, 'import numpy as np\n'), ((3173, 3231), 'easyidp.test_full_path', 'easyidp.test_full_path', (['"""data/metashape/wheat_tanashi.psx"""'], {}), "('data/metashape/wheat_tanashi.psx')\n", (3195, 3231), False, 'import easyidp\n'), ((3245, 3288), 'easyidp.io.metashape.open_project', 'metashape.open_project', (['test_project_folder'], {}), '(test_project_folder)\n', (3267, 3288), False, 'from easyidp.io import metashape\n'), ((3325, 3390), 'easyidp.core.objects.Points', 'Points', (['[11.870130675203006, 0.858098777517136, -12.987136541275]'], {}), '([11.870130675203006, 0.858098777517136, -12.987136541275])\n', (3331, 3390), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3408, 3476), 'easyidp.core.objects.Points', 'Points', (['[-3943658.7087006606, 3363404.124223561, 3704651.3067566575]'], {}), '([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])\n', (3414, 3476), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3492, 3594), 'easyidp.core.objects.Points', 'Points', (['[139.54033578028609, 35.73756358928734, 96.87827569602781]'], {'columns': "['lon', 'lat', 'alt']"}), "([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=\n ['lon', 'lat', 'alt'])\n", (3498, 3594), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3636, 3724), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['idp_world.values', 'geocentric.values'], {'decimal': '(1)'}), '(idp_world.values, geocentric.values,\n 
decimal=1)\n', (3672, 3724), True, 'import numpy as np\n'), ((3767, 3836), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['idp_crs.values', 'geodetic.values'], {}), '(idp_crs.values, geodetic.values)\n', (3803, 3836), True, 'import numpy as np\n'), ((3908, 3956), 'easyidp.core.objects.Points', 'Points', (['[2391.7104647010146, 1481.8987733175165]'], {}), '([2391.7104647010146, 1481.8987733175165])\n', (3914, 3956), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((4057, 4136), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['camera_pix_ans.values', 'idp_cam_pix.values'], {}), '(camera_pix_ans.values, idp_cam_pix.values)\n', (4093, 4136), True, 'import numpy as np\n'), ((422, 448), 'pytest.raises', 'pytest.raises', (['LookupError'], {}), '(LookupError)\n', (435, 448), False, 'import pytest\n'), ((469, 503), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""not_supported_sfm"""'], {}), "('not_supported_sfm')\n", (482, 503), False, 'from easyidp.core.objects import ReconsProject, Points\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from config import CONFIG
import json
import tensorflow as tf
import numpy as np
import io
import math
import os
import time
from absl import flags
from absl import logging
from easydict import EasyDict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt  # pylint: disable=g-import-not-at-top
FLAGS = flags.FLAGS
def visualize_batch(data, global_step, batch_size, num_steps):
"""Visualizes a batch."""
frames = data['frames']
frames_list = tf.unstack(frames, num=num_steps, axis=1)
frames_summaries = tf.concat(frames_list, axis=2)
batch_list = tf.split(frames_summaries, batch_size, axis=0)
batch_summaries = tf.concat(batch_list, axis=1)
tf.summary.image('train_batch', batch_summaries, step=global_step)
def visualize_nearest_neighbours(model, data, global_step, batch_size,
num_steps, num_frames_per_step, split):
"""Visualize nearest neighbours in embedding space."""
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
cnn = model['cnn']
emb = model['emb']
if 'tcn' in CONFIG.TRAINING_ALGO:
cnn_feats = get_cnn_feats(
cnn, data, training=False, num_steps=2 * num_steps)
emb_feats = emb(cnn_feats, 2 * num_steps)
emb_feats = tf.stack(
tf.split(emb_feats, 2 * num_steps, axis=0)[::2], axis=1)
else:
cnn_feats = get_cnn_feats(cnn, data, training=False)
emb_feats = emb(cnn_feats, num_steps)
emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1)
query_feats = emb_feats[0]
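    # The first sequence in the batch acts as the query; the remaining sequences provide candidate frames.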
if CONFIG.OPTICALFLOW:
frames = data['video_frames']
else:
frames = data['frames']
image_list = tf.unstack(frames, num=batch_size, axis=0)
if 'tcn' in CONFIG.TRAINING_ALGO:
im_list = [image_list[0]
[num_frames_per_step - 1::num_frames_per_step][::2]]
else:
im_list = [image_list[0][num_frames_per_step - 1::num_frames_per_step]]
sim_matrix = np.zeros(
(batch_size-1, num_steps, num_steps), dtype=np.float32)
for i in range(1, batch_size):
candidate_feats = emb_feats[i]
if 'tcn' in CONFIG.TRAINING_ALGO:
img_list = tf.unstack(image_list[i], num=2 * num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step][::2]
else:
img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step]
nn_img_list = []
for j in range(num_steps):
curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1])
mean_squared_distance = tf.reduce_mean(
tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1)
sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance)
nn_img_list.append(img_list[tf.argmin(mean_squared_distance)])
nn_img = tf.stack(nn_img_list, axis=0)
im_list.append(nn_img)
def vstack(im):
return tf.concat(tf.unstack(im, num=num_steps), axis=1)
summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list],
axis=0), axis=0)
tf.summary.image('%s/nn' % split, summary_im, step=global_step)
# Convert sim_matrix to float32 as summary_image doesn't take float64
sim_matrix = sim_matrix.astype(np.float32)
tf.summary.image('%s/similarity_matrix' % split,
np.expand_dims(sim_matrix, axis=3), step=global_step)
def softmax(w, t=1.0):
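    # Temperature-scaled softmax (numpy): a higher t flattens the resulting distribution.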
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def random_choice_noreplace(m, n, axis=-1):
    # Generate m random permutations of range (0, n)
# NumPy version: np.random.rand(m,n).argsort(axis=axis)
return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64)
def gen_cycles(num_cycles, batch_size, cycle_len):
"""Generate cycles for alignment."""
random_cycles = random_choice_noreplace(
num_cycles, batch_size)[:, :cycle_len]
return random_cycles
def get_warmup_lr(lr, global_step, lr_params):
"""Returns learning rate during warm up phase."""
if lr_params.NUM_WARMUP_STEPS > 0:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(
lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_lr = lr_params.INITIAL_LR * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr
return lr
# Minimally adapted from Tensorflow object_detection code.
def manual_stepping(global_step, boundaries, rates):
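    # Piecewise-constant schedule: returns rates[i] for the largest boundary <= global_step.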
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
rate_index = tf.reduce_max(
tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)), [0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries))
def get_lr_fn(optimizer_config):
"""Returns function that provides current learning rate based on config.
NOTE: This returns a function as in Eager we need to call assign to update
the learning rate.
Args:
optimizer_config: EasyDict, contains params required to initialize the
learning rate and the learning rate decay function.
Returns:
lr_fn: function, this can be called to return the current learning rate
based on the provided config.
Raises:
ValueError: in case invalid params have been passed in the config.
"""
lr_params = optimizer_config.LR
# pylint: disable=g-long-lambda
if lr_params.DECAY_TYPE == 'exp_decay':
def lr_fn(lr, global_step): return tf.train.exponential_decay(
lr,
global_step,
lr_params.EXP_DECAY_STEPS,
lr_params.EXP_DECAY_RATE,
staircase=True)()
elif lr_params.DECAY_TYPE == 'manual':
lr_step_boundaries = [int(x)
for x in lr_params.MANUAL_LR_STEP_BOUNDARIES]
f = lr_params.MANUAL_LR_DECAY_RATE
learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p
for p in range(len(lr_step_boundaries) + 1)]
def lr_fn(lr, global_step): return manual_stepping(
global_step, lr_step_boundaries, learning_rate_sequence)
elif lr_params.DECAY_TYPE == 'fixed':
def lr_fn(lr, global_step): return lr_params.INITIAL_LR
elif lr_params.DECAY_TYPE == 'poly':
def lr_fn(lr, global_step): return tf.train.polynomial_decay(
lr,
global_step,
CONFIG.TRAIN.MAX_ITERS,
end_learning_rate=0.0,
power=1.0,
cycle=False)
else:
        raise ValueError('Learning rate decay type %s not supported. Only support '
                         'the following decay types: fixed, exp_decay, manual, '
                         'and poly.' % lr_params.DECAY_TYPE)
return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step),
global_step, lr_params))
def get_optimizer(optimizer_config, learning_rate):
"""Returns optimizer based on config and learning rate."""
if optimizer_config.TYPE == 'AdamOptimizer':
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif optimizer_config.TYPE == 'MomentumOptimizer':
opt = tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=0.9)
else:
        raise ValueError('Optimizer %s not supported. Only support the following '
                         'optimizers: AdamOptimizer, MomentumOptimizer.' % optimizer_config.TYPE)
return opt
def get_lr_opt_global_step():
"""Intializes learning rate, optimizer and global step."""
optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR)
global_step = optimizer.iterations
learning_rate = optimizer.learning_rate
return learning_rate, optimizer, global_step
def create_ckpt(logdir, restore=False, **ckpt_objects):
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(
ckpt_manager.latest_checkpoint) if restore else -1
return ckpt_manager, status, checkpoint
def restore_ckpt(logdir, **ckpt_objects):
"""Create and restore checkpoint (if one exists on the path)."""
# Instantiate checkpoint and restore from any pre-existing checkpoint.
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(ckpt_manager.latest_checkpoint)
return ckpt_manager, status, checkpoint
def to_dict(config):
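    # Recursively convert EasyDict/list config nodes into plain Python types so they can be JSON-serialized.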
if isinstance(config, list):
return [to_dict(c) for c in config]
elif isinstance(config, EasyDict):
return dict([(k, to_dict(v)) for k, v in config.items()])
else:
return config
def setup_train_dir(logdir, overwrite=False, force_train=True):
"""Setups directory for training."""
tf.io.gfile.makedirs(logdir)
config_path = os.path.join(logdir, 'config.json')
if not os.path.exists(config_path) or overwrite:
logging.info(
'Using the existing passed in config as no config.json file exists in '
'%s', logdir)
with tf.io.gfile.GFile(config_path, 'w') as config_file:
config = dict([(k, to_dict(v)) for k, v in CONFIG.items()])
json.dump(config, config_file, sort_keys=True, indent=4)
else:
logging.info(
'Using config from config.json that exists in %s.', logdir)
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
CONFIG.update(config_dict)
train_logs_dir = os.path.join(logdir, 'train.logs')
if os.path.exists(train_logs_dir) and not force_train:
raise ValueError('You might be overwriting a directory that already '
'has train_logs. Please provide a new logdir name in '
'config or pass --force_train while launching script.')
tf.io.gfile.makedirs(train_logs_dir)
def setup_eval_dir(logdir, config_timeout_seconds=1):
"""Setups directory for evaluation."""
tf.io.gfile.makedirs(logdir)
tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs'))
config_path = os.path.join(logdir, 'config.json')
while not tf.io.gfile.exists(config_path):
        logging.info('Waiting for config to exist. Going to sleep '
                     'for %s secs.', config_timeout_seconds)
time.sleep(config_timeout_seconds)
while True:
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
if config_dict is None:
time.sleep(config_timeout_seconds)
else:
break
CONFIG.update(config_dict)
def get_data(iterator):
"""Return a data dict which contains all the requested sequences."""
data = iterator.get_next()
return data, data['chosen_steps'], data['seq_lens']
@tf.function
def get_cnn_feats(cnn, data, training, num_steps=None):
"""Passes data through base CNN."""
if num_steps is None:
if training:
num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
else:
num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
cnn.num_steps = num_steps
cnn_feats = cnn(data['frames'])
return cnn_feats
def get_context_steps(step):
num_steps = CONFIG.DATA.NUM_STEPS
stride = CONFIG.DATA.FRAME_STRIDE
# We don't want to see the future.
steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride)
return steps
def get_indices(curr_idx, num_steps, seq_len):
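    # Gather the context-frame indices for num_steps consecutive steps, clipped to the valid range [0, seq_len].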
steps = range(curr_idx, curr_idx + num_steps)
single_steps = np.concatenate([get_context_steps(step) for step in steps])
single_steps = np.concatenate(np.array(list(map(get_context_steps,
np.arange(curr_idx, curr_idx + num_steps)))))
single_steps = np.maximum(0, single_steps)
single_steps = np.minimum(seq_len, single_steps)
return single_steps
def get_embeddings_dataset(model, iterator, frames_per_batch,
keep_data=False, optical_flow=False, keep_labels=True,
max_embs=None, callbacks=[]):
"""Get embeddings from a one epoch iterator."""
keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS
num_frames_per_step = CONFIG.DATA.NUM_STEPS
cnn = model['cnn']
emb = model['emb']
embs_list = []
labels_list = []
steps_list = []
seq_lens_list = []
names_list = []
seq_labels_list = []
if keep_data:
frames_list = []
if optical_flow:
frame_original_list = []
n = 0
def cond(n):
if max_embs is None:
return True
else:
return n < max_embs
# Make Recurrent Layers stateful, set batch size.
# We do this as we are embedding the whole sequence and that can take
# more than one batch to be passed and we don't want to automatically
# reset hidden states after each batch.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = True
gru_layer.input_spec[0].shape = [1, ]
while cond(n):
try:
print(n)
embs = []
labels = []
steps = []
seq_lens = []
names = []
seq_labels = []
if keep_data:
frames = []
if optical_flow:
frame_original = []
# Reset GRU states for each video.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.reset_states()
data, chosen_steps, seq_len = get_data(iterator)
seq_len = seq_len.numpy()[0]
num_batches = int(math.ceil(float(seq_len)/frames_per_batch))
for i in range(num_batches):
if (i + 1) * frames_per_batch > seq_len:
num_steps = seq_len - i * frames_per_batch
else:
num_steps = frames_per_batch
curr_idx = i * frames_per_batch
curr_data = {}
for k, v in data.items():
# Need to do this as some modalities might not exist.
if len(v.shape) > 1 and v.shape[1] != 0:
idxes = get_indices(curr_idx, num_steps, seq_len)
curr_data[k] = tf.gather(v, idxes, axis=1)
else:
curr_data[k] = v
cnn_feats = get_cnn_feats(cnn, curr_data,
num_steps=num_frames_per_step * num_steps,
training=False)
emb_feats = emb(cnn_feats, num_steps)
logging.debug('On sequence number %d, frames embedded %d', n,
curr_idx + num_steps)
# np.save(tf.io.gfile.GFile('/air/team/saman/test_weights_old.npy', 'w'), cnn.weights[0].numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_batch_old.npy', 'w'), curr_data["frames"])
# np.save(tf.io.gfile.GFile('/air/team/saman/test_cnn_old.npy', 'w'), cnn_feats.numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_emb_old.npy', 'w'), emb_feats.numpy())
embs.append(emb_feats.numpy())
for f in callbacks:
f(np.concatenate(embs), data, chosen_steps, seq_len)
steps.append(chosen_steps.numpy()[0])
seq_lens.append(seq_len * [seq_len])
all_labels = data['frame_labels'].numpy()[0]
name = data['name'].numpy()[0]
names.append(seq_len * [name])
seq_label = data['seq_labels'].numpy()[0]
seq_labels.append(seq_len * [seq_label])
labels.append(all_labels)
embs = np.concatenate(embs, axis=0)
labels = np.concatenate(labels, axis=0)
steps = np.concatenate(steps, axis=0)
seq_lens = np.concatenate(seq_lens, axis=0)
names = np.concatenate(names, axis=0)
seq_labels = np.concatenate(seq_labels, axis=0)
if keep_data:
frames.append(data['frames'].numpy()[0])
frames = np.concatenate(frames, axis=0)
if optical_flow:
frame_original.append(data['video_frames'].numpy()[0])
frame_original = np.concatenate(frame_original, axis=0)
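            # Drop frames whose embeddings contain NaNs, keeping labels, names, steps and frames aligned.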
if keep_labels:
labels = labels[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(labels)
seq_labels = seq_labels[~np.isnan(embs).any(axis=1)]
names = names[~np.isnan(embs).any(axis=1)]
seq_lens = seq_lens[~np.isnan(embs).any(axis=1)]
steps = steps[~np.isnan(embs).any(axis=1)]
if keep_data:
frames = frames[~np.isnan(embs).any(axis=1)]
if optical_flow:
frame_original = frame_original[~np.isnan(embs).any(axis=1)]
embs = embs[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(seq_lens)
assert len(embs) == len(steps)
assert len(names) == len(steps)
embs_list.append(embs)
if keep_labels:
labels_list.append(labels)
seq_labels_list.append(seq_labels)
steps_list.append(steps)
seq_lens_list.append(seq_lens)
names_list.append(names)
if keep_data:
frames_list.append(frames)
if optical_flow:
frame_original_list.append(frame_original)
n += 1
except tf.errors.OutOfRangeError:
logging.info('Finished embedding the dataset.')
break
dataset = {'embs': embs_list,
'seq_lens': seq_lens_list,
'steps': steps_list,
'names': names_list,
'seq_labels': seq_labels_list}
if keep_data:
dataset['frames'] = frames_list
if optical_flow:
dataset['frames_original'] = frame_original_list
if keep_labels:
dataset['labels'] = labels_list
# Reset statefulness to recurrent layers for other evaluation tasks.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = False
return dataset
def gen_plot(x, y):
"""Create a pyplot, save to buffer and return TB compatible image."""
plt.figure()
plt.plot(x, y)
plt.title('Val Accuracy')
plt.ylim(0, 1)
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
class Stopwatch(object):
"""Simple timer for measuring elapsed time."""
def __init__(self):
self.reset()
def elapsed(self):
return time.time() - self.time
def done(self, target_interval):
return self.elapsed() >= target_interval
def reset(self):
self.time = time.time()
def set_learning_phase(f):
"""Sets the correct learning phase before calling function f."""
def wrapper(*args, **kwargs):
"""Calls the function f after setting proper learning phase."""
if 'training' not in kwargs:
raise ValueError('Function called with set_learning_phase decorator which'
' does not have training argument.')
training = kwargs['training']
if training:
# Set learning_phase to True to use models in training mode.
tf.keras.backend.set_learning_phase(1)
else:
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
return f(*args, **kwargs)
return wrapper
def load_config(config_path):
config = None
if os.path.exists(config_path):
with open(config_path) as f:
config = json.load(f)
assert config is not None, "config file is not provided or is corrupted"
return config
def prepare_gpu(ind=-1):
ind = int(ind)
GPUS = tf.config.experimental.list_physical_devices('GPU')
if GPUS:
if ind > -1:
tf.config.experimental.set_visible_devices(GPUS[ind], 'GPU')
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
logging.info([len(GPUS), "Physical GPUs,", len(logical_gpus),
"Logical GPUs"])
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
logging.info(e)
os.environ["CUDA_VISIBLE_DEVICES"] = str(ind)
|
[
"tensorflow.unstack",
"tensorflow.train.Checkpoint",
"tensorflow.tile",
"tensorflow.split",
"io.BytesIO",
"absl.logging.info",
"time.sleep",
"tensorflow.config.experimental.list_logical_devices",
"numpy.array",
"tensorflow.config.experimental.set_visible_devices",
"config.CONFIG.items",
"tensorflow.cast",
"numpy.arange",
"tensorflow.summary.image",
"os.path.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.math.squared_difference",
"json.dump",
"matplotlib.pyplot.plot",
"tensorflow.keras.optimizers.SGD",
"tensorflow.concat",
"numpy.concatenate",
"config.CONFIG.update",
"tensorflow.train.exponential_decay",
"matplotlib.pyplot.ylim",
"numpy.maximum",
"tensorflow.train.CheckpointManager",
"tensorflow.io.gfile.exists",
"tensorflow.stack",
"tensorflow.random.uniform",
"tensorflow.one_hot",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"tensorflow.gather",
"numpy.isnan",
"matplotlib.pyplot.title",
"tensorflow.expand_dims",
"time.time",
"tensorflow.train.polynomial_decay",
"tensorflow.argmin",
"numpy.minimum",
"tensorflow.config.experimental.set_memory_growth",
"os.path.join",
"tensorflow.io.gfile.makedirs",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"tensorflow.constant",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.tight_layout",
"numpy.expand_dims",
"json.load",
"absl.logging.debug",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.greater_equal",
"tensorflow.config.experimental.list_physical_devices"
] |
[((406, 427), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (420, 427), False, 'import matplotlib\n'), ((591, 632), 'tensorflow.unstack', 'tf.unstack', (['frames'], {'num': 'num_steps', 'axis': '(1)'}), '(frames, num=num_steps, axis=1)\n', (601, 632), True, 'import tensorflow as tf\n'), ((656, 686), 'tensorflow.concat', 'tf.concat', (['frames_list'], {'axis': '(2)'}), '(frames_list, axis=2)\n', (665, 686), True, 'import tensorflow as tf\n'), ((704, 750), 'tensorflow.split', 'tf.split', (['frames_summaries', 'batch_size'], {'axis': '(0)'}), '(frames_summaries, batch_size, axis=0)\n', (712, 750), True, 'import tensorflow as tf\n'), ((773, 802), 'tensorflow.concat', 'tf.concat', (['batch_list'], {'axis': '(1)'}), '(batch_list, axis=1)\n', (782, 802), True, 'import tensorflow as tf\n'), ((807, 873), 'tensorflow.summary.image', 'tf.summary.image', (['"""train_batch"""', 'batch_summaries'], {'step': 'global_step'}), "('train_batch', batch_summaries, step=global_step)\n", (823, 873), True, 'import tensorflow as tf\n'), ((1150, 1188), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (1185, 1188), True, 'import tensorflow as tf\n'), ((1874, 1916), 'tensorflow.unstack', 'tf.unstack', (['frames'], {'num': 'batch_size', 'axis': '(0)'}), '(frames, num=batch_size, axis=0)\n', (1884, 1916), True, 'import tensorflow as tf\n'), ((2169, 2235), 'numpy.zeros', 'np.zeros', (['(batch_size - 1, num_steps, num_steps)'], {'dtype': 'np.float32'}), '((batch_size - 1, num_steps, num_steps), dtype=np.float32)\n', (2177, 2235), True, 'import numpy as np\n'), ((3456, 3519), 'tensorflow.summary.image', 'tf.summary.image', (["('%s/nn' % split)", 'summary_im'], {'step': 'global_step'}), "('%s/nn' % split, summary_im, step=global_step)\n", (3472, 3519), True, 'import tensorflow as tf\n'), ((8624, 8659), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**ckpt_objects)\n', (8643, 8659), True, 'import tensorflow as tf\n'), ((8679, 8788), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'logdir', 'max_to_keep': '(10)', 'keep_checkpoint_every_n_hours': '(1)'}), '(checkpoint, directory=logdir, max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n', (8705, 8788), True, 'import tensorflow as tf\n'), ((9241, 9276), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**ckpt_objects)\n', (9260, 9276), True, 'import tensorflow as tf\n'), ((9296, 9405), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'logdir', 'max_to_keep': '(10)', 'keep_checkpoint_every_n_hours': '(1)'}), '(checkpoint, directory=logdir, max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n', (9322, 9405), True, 'import tensorflow as tf\n'), ((9891, 9919), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), '(logdir)\n', (9911, 9919), True, 'import tensorflow as tf\n'), ((9938, 9973), 'os.path.join', 'os.path.join', (['logdir', '"""config.json"""'], {}), "(logdir, 'config.json')\n", (9950, 9973), False, 'import os\n'), ((10640, 10674), 'os.path.join', 'os.path.join', (['logdir', '"""train.logs"""'], {}), "(logdir, 'train.logs')\n", (10652, 10674), False, 'import os\n'), ((10977, 11013), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['train_logs_dir'], {}), '(train_logs_dir)\n', (10997, 11013), True, 'import tensorflow as tf\n'), ((11117, 11145), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), 
'(logdir)\n', (11137, 11145), True, 'import tensorflow as tf\n'), ((11224, 11259), 'os.path.join', 'os.path.join', (['logdir', '"""config.json"""'], {}), "(logdir, 'config.json')\n", (11236, 11259), False, 'import os\n'), ((11727, 11753), 'config.CONFIG.update', 'CONFIG.update', (['config_dict'], {}), '(config_dict)\n', (11740, 11753), False, 'from config import CONFIG\n'), ((12501, 12566), 'numpy.arange', 'np.arange', (['(step - (num_steps - 1) * stride)', '(step + stride)', 'stride'], {}), '(step - (num_steps - 1) * stride, step + stride, stride)\n', (12510, 12566), True, 'import numpy as np\n'), ((12952, 12979), 'numpy.maximum', 'np.maximum', (['(0)', 'single_steps'], {}), '(0, single_steps)\n', (12962, 12979), True, 'import numpy as np\n'), ((12999, 13032), 'numpy.minimum', 'np.minimum', (['seq_len', 'single_steps'], {}), '(seq_len, single_steps)\n', (13009, 13032), True, 'import numpy as np\n'), ((19706, 19718), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19716, 19718), True, 'import matplotlib.pyplot as plt\n'), ((19723, 19737), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (19731, 19737), True, 'import matplotlib.pyplot as plt\n'), ((19742, 19767), 'matplotlib.pyplot.title', 'plt.title', (['"""Val Accuracy"""'], {}), "('Val Accuracy')\n", (19751, 19767), True, 'import matplotlib.pyplot as plt\n'), ((19772, 19786), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (19780, 19786), True, 'import matplotlib.pyplot as plt\n'), ((19791, 19809), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19807, 19809), True, 'import matplotlib.pyplot as plt\n'), ((19820, 19832), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (19830, 19832), False, 'import io\n'), ((19837, 19867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (19848, 19867), True, 'import matplotlib.pyplot as plt\n'), ((20023, 20047), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (20037, 20047), True, 'import tensorflow as tf\n'), ((21221, 21248), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (21235, 21248), False, 'import os\n'), ((21475, 21526), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (21519, 21526), True, 'import tensorflow as tf\n'), ((3172, 3201), 'tensorflow.stack', 'tf.stack', (['nn_img_list'], {'axis': '(0)'}), '(nn_img_list, axis=0)\n', (3180, 3201), True, 'import tensorflow as tf\n'), ((3715, 3749), 'numpy.expand_dims', 'np.expand_dims', (['sim_matrix'], {'axis': '(3)'}), '(sim_matrix, axis=3)\n', (3729, 3749), True, 'import numpy as np\n'), ((3841, 3850), 'numpy.sum', 'np.sum', (['e'], {}), '(e)\n', (3847, 3850), True, 'import numpy as np\n'), ((4484, 4514), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (4491, 4514), True, 'import tensorflow as tf\n'), ((4542, 4597), 'tensorflow.constant', 'tf.constant', (['lr_params.NUM_WARMUP_STEPS'], {'dtype': 'tf.int32'}), '(lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)\n', (4553, 4597), True, 'import tensorflow as tf\n'), ((4641, 4678), 'tensorflow.cast', 'tf.cast', (['global_steps_int', 'tf.float32'], {}), '(global_steps_int, tf.float32)\n', (4648, 4678), True, 'import tensorflow as tf\n'), ((4708, 4745), 'tensorflow.cast', 'tf.cast', (['warmup_steps_int', 'tf.float32'], {}), '(warmup_steps_int, tf.float32)\n', (4715, 4745), True, 
'import tensorflow as tf\n'), ((4901, 4957), 'tensorflow.cast', 'tf.cast', (['(global_steps_int < warmup_steps_int)', 'tf.float32'], {}), '(global_steps_int < warmup_steps_int, tf.float32)\n', (4908, 4957), True, 'import tensorflow as tf\n'), ((7775, 7828), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (7799, 7828), True, 'import tensorflow as tf\n'), ((10035, 10139), 'absl.logging.info', 'logging.info', (['"""Using the existing passed in config as no config.json file exists in %s"""', 'logdir'], {}), "(\n 'Using the existing passed in config as no config.json file exists in %s',\n logdir)\n", (10047, 10139), False, 'from absl import logging\n'), ((10383, 10455), 'absl.logging.info', 'logging.info', (['"""Using config from config.json that exists in %s."""', 'logdir'], {}), "('Using config from config.json that exists in %s.', logdir)\n", (10395, 10455), False, 'from absl import logging\n'), ((10591, 10617), 'config.CONFIG.update', 'CONFIG.update', (['config_dict'], {}), '(config_dict)\n', (10604, 10617), False, 'from config import CONFIG\n'), ((10682, 10712), 'os.path.exists', 'os.path.exists', (['train_logs_dir'], {}), '(train_logs_dir)\n', (10696, 10712), False, 'import os\n'), ((11171, 11204), 'os.path.join', 'os.path.join', (['logdir', '"""eval_logs"""'], {}), "(logdir, 'eval_logs')\n", (11183, 11204), False, 'import os\n'), ((11274, 11305), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['config_path'], {}), '(config_path)\n', (11292, 11305), True, 'import tensorflow as tf\n'), ((11315, 11417), 'absl.logging.info', 'logging.info', (['"""Waiting for config to exist. Going to sleep %s for secs."""', 'config_timeout_seconds'], {}), "('Waiting for config to exist. 
Going to sleep %s for secs.',\n config_timeout_seconds)\n", (11327, 11417), False, 'from absl import logging\n'), ((11446, 11480), 'time.sleep', 'time.sleep', (['config_timeout_seconds'], {}), '(config_timeout_seconds)\n', (11456, 11480), False, 'import time\n'), ((20381, 20392), 'time.time', 'time.time', ([], {}), '()\n', (20390, 20392), False, 'import time\n'), ((1669, 1707), 'tensorflow.split', 'tf.split', (['emb_feats', 'num_steps'], {'axis': '(0)'}), '(emb_feats, num_steps, axis=0)\n', (1677, 1707), True, 'import tensorflow as tf\n'), ((2825, 2870), 'tensorflow.tile', 'tf.tile', (['query_feats[j:j + 1]', '[num_steps, 1]'], {}), '(query_feats[j:j + 1], [num_steps, 1])\n', (2832, 2870), True, 'import tensorflow as tf\n'), ((3279, 3308), 'tensorflow.unstack', 'tf.unstack', (['im'], {'num': 'num_steps'}), '(im, num=num_steps)\n', (3289, 3308), True, 'import tensorflow as tf\n'), ((3809, 3820), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (3817, 3820), True, 'import numpy as np\n'), ((4055, 4080), 'tensorflow.random.uniform', 'tf.random.uniform', (['(m, n)'], {}), '((m, n))\n', (4072, 4080), True, 'import tensorflow as tf\n'), ((5279, 5320), 'tensorflow.greater_equal', 'tf.greater_equal', (['global_step', 'boundaries'], {}), '(global_step, boundaries)\n', (5295, 5320), True, 'import tensorflow as tf\n'), ((5419, 5463), 'tensorflow.one_hot', 'tf.one_hot', (['rate_index'], {'depth': 'num_boundaries'}), '(rate_index, depth=num_boundaries)\n', (5429, 5463), True, 'import tensorflow as tf\n'), ((7898, 7964), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.9)'}), '(learning_rate=learning_rate, momentum=0.9)\n', (7921, 7964), True, 'import tensorflow as tf\n'), ((9985, 10012), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (9999, 10012), False, 'import os\n'), ((10172, 10207), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""w"""'], {}), "(config_path, 'w')\n", (10189, 10207), True, 'import tensorflow as tf\n'), ((10308, 10364), 'json.dump', 'json.dump', (['config', 'config_file'], {'sort_keys': '(True)', 'indent': '(4)'}), '(config, config_file, sort_keys=True, indent=4)\n', (10317, 10364), False, 'import json\n'), ((10482, 10517), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""r"""'], {}), "(config_path, 'r')\n", (10499, 10517), True, 'import tensorflow as tf\n'), ((10560, 10582), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (10569, 10582), False, 'import json\n'), ((11511, 11546), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""r"""'], {}), "(config_path, 'r')\n", (11528, 11546), True, 'import tensorflow as tf\n'), ((11589, 11611), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (11598, 11611), False, 'import json\n'), ((11656, 11690), 'time.sleep', 'time.sleep', (['config_timeout_seconds'], {}), '(config_timeout_seconds)\n', (11666, 11690), False, 'import time\n'), ((17059, 17087), 'numpy.concatenate', 'np.concatenate', (['embs'], {'axis': '(0)'}), '(embs, axis=0)\n', (17073, 17087), True, 'import numpy as np\n'), ((17109, 17139), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (17123, 17139), True, 'import numpy as np\n'), ((17161, 17190), 'numpy.concatenate', 'np.concatenate', (['steps'], {'axis': '(0)'}), '(steps, axis=0)\n', (17175, 17190), True, 'import numpy as np\n'), ((17214, 17246), 'numpy.concatenate', 'np.concatenate', 
(['seq_lens'], {'axis': '(0)'}), '(seq_lens, axis=0)\n', (17228, 17246), True, 'import numpy as np\n'), ((17267, 17296), 'numpy.concatenate', 'np.concatenate', (['names'], {'axis': '(0)'}), '(names, axis=0)\n', (17281, 17296), True, 'import numpy as np\n'), ((17322, 17356), 'numpy.concatenate', 'np.concatenate', (['seq_labels'], {'axis': '(0)'}), '(seq_labels, axis=0)\n', (17336, 17356), True, 'import numpy as np\n'), ((20228, 20239), 'time.time', 'time.time', ([], {}), '()\n', (20237, 20239), False, 'import time\n'), ((20931, 20969), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(1)'], {}), '(1)\n', (20966, 20969), True, 'import tensorflow as tf\n'), ((21071, 21109), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (21106, 21109), True, 'import tensorflow as tf\n'), ((21308, 21320), 'json.load', 'json.load', (['f'], {}), '(f)\n', (21317, 21320), False, 'import json\n'), ((21573, 21633), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['GPUS[ind]', '"""GPU"""'], {}), "(GPUS[ind], 'GPU')\n", (21615, 21633), True, 'import tensorflow as tf\n'), ((21843, 21893), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (21886, 21893), True, 'import tensorflow as tf\n'), ((1466, 1508), 'tensorflow.split', 'tf.split', (['emb_feats', '(2 * num_steps)'], {'axis': '(0)'}), '(emb_feats, 2 * num_steps, axis=0)\n', (1474, 1508), True, 'import tensorflow as tf\n'), ((2581, 2651), 'tensorflow.unstack', 'tf.unstack', (['image_list[i]'], {'num': '(num_steps * num_frames_per_step)', 'axis': '(0)'}), '(image_list[i], num=num_steps * num_frames_per_step, axis=0)\n', (2591, 2651), True, 'import tensorflow as tf\n'), ((2937, 2998), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['curr_query_feats', 'candidate_feats'], {}), '(curr_query_feats, candidate_feats)\n', (2963, 2998), True, 'import tensorflow as tf\n'), ((6209, 6325), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr', 'global_step', 'lr_params.EXP_DECAY_STEPS', 'lr_params.EXP_DECAY_RATE'], {'staircase': '(True)'}), '(lr, global_step, lr_params.EXP_DECAY_STEPS,\n lr_params.EXP_DECAY_RATE, staircase=True)\n', (6235, 6325), True, 'import tensorflow as tf\n'), ((15907, 15994), 'absl.logging.debug', 'logging.debug', (['"""On sequence number %d, frames embedded %d"""', 'n', '(curr_idx + num_steps)'], {}), "('On sequence number %d, frames embedded %d', n, curr_idx +\n num_steps)\n", (15920, 15994), False, 'from absl import logging\n'), ((17465, 17495), 'numpy.concatenate', 'np.concatenate', (['frames'], {'axis': '(0)'}), '(frames, axis=0)\n', (17479, 17495), True, 'import numpy as np\n'), ((17629, 17667), 'numpy.concatenate', 'np.concatenate', (['frame_original'], {'axis': '(0)'}), '(frame_original, axis=0)\n', (17643, 17667), True, 'import numpy as np\n'), ((18925, 18972), 'absl.logging.info', 'logging.info', (['"""Finished embedding the dataset."""'], {}), "('Finished embedding the dataset.')\n", (18937, 18972), False, 'from absl import logging\n'), ((21764, 21815), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (21804, 21815), True, 'import tensorflow as tf\n'), ((22130, 22145), 'absl.logging.info', 'logging.info', (['e'], {}), '(e)\n', (22142, 22145), False, 'from absl import logging\n'), 
((2384, 2458), 'tensorflow.unstack', 'tf.unstack', (['image_list[i]'], {'num': '(2 * num_steps * num_frames_per_step)', 'axis': '(0)'}), '(image_list[i], num=2 * num_steps * num_frames_per_step, axis=0)\n', (2394, 2458), True, 'import tensorflow as tf\n'), ((3119, 3151), 'tensorflow.argmin', 'tf.argmin', (['mean_squared_distance'], {}), '(mean_squared_distance)\n', (3128, 3151), True, 'import tensorflow as tf\n'), ((12887, 12928), 'numpy.arange', 'np.arange', (['curr_idx', '(curr_idx + num_steps)'], {}), '(curr_idx, curr_idx + num_steps)\n', (12896, 12928), True, 'import numpy as np\n'), ((16585, 16605), 'numpy.concatenate', 'np.concatenate', (['embs'], {}), '(embs)\n', (16599, 16605), True, 'import numpy as np\n'), ((7047, 7164), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['lr', 'global_step', 'CONFIG.TRAIN.MAX_ITERS'], {'end_learning_rate': '(0.0)', 'power': '(1.0)', 'cycle': '(False)'}), '(lr, global_step, CONFIG.TRAIN.MAX_ITERS,\n end_learning_rate=0.0, power=1.0, cycle=False)\n', (7072, 7164), True, 'import tensorflow as tf\n'), ((10279, 10293), 'config.CONFIG.items', 'CONFIG.items', ([], {}), '()\n', (10291, 10293), False, 'from config import CONFIG\n'), ((15539, 15566), 'tensorflow.gather', 'tf.gather', (['v', 'idxes'], {'axis': '(1)'}), '(v, idxes, axis=1)\n', (15548, 15566), True, 'import tensorflow as tf\n'), ((17843, 17857), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17851, 17857), True, 'import numpy as np\n'), ((17899, 17913), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17907, 17913), True, 'import numpy as np\n'), ((17960, 17974), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17968, 17974), True, 'import numpy as np\n'), ((18015, 18029), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18023, 18029), True, 'import numpy as np\n'), ((18262, 18276), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18270, 18276), True, 'import numpy as np\n'), ((17730, 17744), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17738, 17744), True, 'import numpy as np\n'), ((18102, 18116), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18110, 18116), True, 'import numpy as np\n'), ((18208, 18222), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18216, 18222), True, 'import numpy as np\n')]
|
import copy
import time
from collections import defaultdict
import cloudpickle
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.model_selection import BaseCrossValidator
from .pipeline_search_plots import PipelineSearchPlots
from evalml.automl.automl_algorithm import IterativeAlgorithm
from evalml.automl.callbacks import log_error_callback
from evalml.automl.engine import SequentialEngine
from evalml.automl.utils import (
check_all_pipeline_names_unique,
get_default_primary_search_objective,
make_data_splitter
)
from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
get_core_objectives,
get_non_core_objectives,
get_objective
)
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline,
TimeSeriesBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import split_data
from evalml.problem_types import ProblemTypes, handle_problem_types
from evalml.tuners import SKOptTuner
from evalml.utils import convert_to_seconds, infer_feature_types
from evalml.utils.logger import (
get_logger,
log_subtitle,
log_title,
time_elapsed,
update_pipeline
)
logger = get_logger(__file__)
class AutoMLSearch:
"""Automated Pipeline search."""
_MAX_NAME_LEN = 40
# Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
plot = PipelineSearchPlots
def __init__(self,
X_train=None,
y_train=None,
problem_type=None,
objective='auto',
max_iterations=None,
max_time=None,
patience=None,
tolerance=None,
data_splitter=None,
allowed_pipelines=None,
allowed_model_families=None,
start_iteration_callback=None,
add_result_callback=None,
error_callback=None,
additional_objectives=None,
random_seed=0,
n_jobs=-1,
tuner_class=None,
optimize_thresholds=True,
ensembling=False,
max_batches=None,
problem_configuration=None,
train_best_pipeline=True,
pipeline_parameters=None,
_ensembling_split_size=0.2,
_pipelines_per_batch=5):
"""Automated pipeline search
Arguments:
X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.
y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.
problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.
objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
When set to 'auto', chooses:
- LogLossBinary for binary classification problems,
- LogLossMulticlass for multiclass classification problems, and
- R2 for regression problems.
            max_iterations (int): Maximum number of iterations to search. If max_iterations, max_time,
                and max_batches are all unset, max_batches will default to 1.
max_time (int, str): Maximum time to search for pipelines.
This will not start a new pipeline search after the duration
has elapsed. If it is an integer, then the time will be in seconds.
For strings, time can be specified as seconds, minutes, or hours.
patience (int): Number of iterations without improvement to stop search early. Must be positive.
If None, early stopping is disabled. Defaults to None.
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
Only applicable if patience is not None. Defaults to None.
allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
allowed_model_families to be ignored.
allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
this parameter will be ignored.
data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
tuner_class: The tuner class to use. Defaults to SKOptTuner.
optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True.
start_iteration_callback (callable): Function called before each pipeline training iteration.
Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.
add_result_callback (callable): Function called after each pipeline training iteration.
Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
error_callback (callable): Function called when `search()` errors and raises an Exception.
Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
                Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
Defaults to None, which will call `log_error_callback`.
additional_objectives (list): Custom set of objectives to score on.
Will override default objectives for problem type if not empty.
random_seed (int): Seed for the random number generator. Defaults to 0.
            n_jobs (int or None): Integer describing the level of parallelism used for pipelines.
None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.
            max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time and
                max_iterations take precedence over max_batches for stopping the search.
problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
in time series problems, values should be passed in for the gap and max_delay variables.
train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
            pipeline_parameters (dict): A dict of the parameters used to initialize a pipeline with.
_ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
Must be between 0 and 1, exclusive. Defaults to 0.2
_pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
                The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
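        Example (an illustrative sketch, not a prescribed recipe; ``X`` and ``y`` stand for an
        already-loaded feature matrix and target vector):
            automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary",
                                  objective="auto", max_batches=2)
            automl.search()
            best = automl.best_pipeline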
"""
if X_train is None:
raise ValueError('Must specify training data as a 2d array using the X_train argument')
if y_train is None:
raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
try:
self.problem_type = handle_problem_types(problem_type)
except ValueError:
raise ValueError('choose one of (binary, multiclass, regression) as problem_type')
self.tuner_class = tuner_class or SKOptTuner
self.start_iteration_callback = start_iteration_callback
self.add_result_callback = add_result_callback
self.error_callback = error_callback or log_error_callback
self.data_splitter = data_splitter
self.optimize_thresholds = optimize_thresholds
self.ensembling = ensembling
if objective == 'auto':
objective = get_default_primary_search_objective(self.problem_type.value)
objective = get_objective(objective, return_instance=False)
self.objective = self._validate_objective(objective)
if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
raise ValueError("Not a valid data splitter")
if not objective.is_defined_for_problem_type(self.problem_type):
raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
if additional_objectives is None:
additional_objectives = get_core_objectives(self.problem_type)
# if our main objective is part of default set of objectives for problem_type, remove it
existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
if existing_main_objective is not None:
additional_objectives.remove(existing_main_objective)
else:
additional_objectives = [get_objective(o) for o in additional_objectives]
additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
self.additional_objectives = additional_objectives
self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}
if not isinstance(max_time, (int, float, str, type(None))):
raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..")
if isinstance(max_time, (int, float)) and max_time < 0:
raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
if max_batches is not None and max_batches < 0:
raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
if max_iterations is not None and max_iterations < 0:
raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
self.max_iterations = max_iterations
self.max_batches = max_batches
self._pipelines_per_batch = _pipelines_per_batch
if not self.max_iterations and not self.max_time and not self.max_batches:
self.max_batches = 1
logger.info("Using default limit of max_batches=1.\n")
if patience and (not isinstance(patience, int) or patience < 0):
raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))
if tolerance and (tolerance > 1.0 or tolerance < 0.0):
raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))
self.patience = patience
self.tolerance = tolerance or 0.0
self._results = {
'pipeline_results': {},
'search_order': [],
'errors': []
}
self.random_seed = random_seed
self.n_jobs = n_jobs
self.plot = None
try:
self.plot = PipelineSearchPlots(self)
except ImportError:
logger.warning("Unable to import plotly; skipping pipeline search plotting\n")
self.allowed_pipelines = allowed_pipelines
self.allowed_model_families = allowed_model_families
self._automl_algorithm = None
self._start = 0.0
self._baseline_cv_scores = {}
self.show_batch_output = False
self._validate_problem_type()
self.problem_configuration = self._validate_problem_configuration(problem_configuration)
self._train_best_pipeline = train_best_pipeline
self._best_pipeline = None
self._searched = False
self.X_train = infer_feature_types(X_train)
self.y_train = infer_feature_types(y_train)
self.ensembling_indices = None
default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
n_splits=3, shuffle=True, random_seed=self.random_seed)
self.data_splitter = self.data_splitter or default_data_splitter
self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
self.search_iteration_plot = None
self._interrupted = False
if self.allowed_pipelines is None:
logger.info("Generating pipelines to search over...")
allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators]
if self.allowed_pipelines == []:
raise ValueError("No allowed pipelines to search")
check_all_pipeline_names_unique(self.allowed_pipelines)
run_ensembling = self.ensembling
if run_ensembling and len(self.allowed_pipelines) == 1:
logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
run_ensembling = False
if run_ensembling and self.max_iterations is not None:
# Baseline + first batch + each pipeline iteration + 1
first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
if self.max_iterations < first_ensembling_iteration:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
else:
logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")
if self.max_batches and self.max_iterations is None:
self.show_batch_output = True
if run_ensembling:
ensemble_nth_batch = len(self.allowed_pipelines) + 1
num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
if num_ensemble_batches == 0:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
else:
logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
self.max_iterations = (1 + len(self.allowed_pipelines) +
self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
num_ensemble_batches)
else:
self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))
if run_ensembling:
if not (0 < _ensembling_split_size < 1):
raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
_, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist()
self._engine = SequentialEngine(self.X_train,
self.y_train,
self.ensembling_indices,
self,
should_continue_callback=self._should_continue,
pre_evaluation_callback=self._pre_evaluation_callback,
post_evaluation_callback=self._post_evaluation_callback)
self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)]))
logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
logger.debug(f"allowed_model_families set to {self.allowed_model_families}")
if len(self.problem_configuration):
pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
else:
pipeline_params = self.pipeline_parameters
self._automl_algorithm = IterativeAlgorithm(
max_iterations=self.max_iterations,
allowed_pipelines=self.allowed_pipelines,
tuner_class=self.tuner_class,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
number_features=self.X_train.shape[1],
pipelines_per_batch=self._pipelines_per_batch,
ensembling=run_ensembling,
pipeline_params=pipeline_params
)
def _pre_evaluation_callback(self, pipeline):
if self.start_iteration_callback:
self.start_iteration_callback(pipeline.__class__, pipeline.parameters, self)
desc = f"{pipeline.name}"
if len(desc) > AutoMLSearch._MAX_NAME_LEN:
desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
batch_number = 1
if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
batch_number = self._automl_algorithm.batch_number
update_pipeline(logger,
desc,
len(self._results['pipeline_results']) + 1,
self.max_iterations,
self._start,
batch_number,
self.show_batch_output)
def _validate_objective(self, objective):
non_core_objectives = get_non_core_objectives()
if isinstance(objective, type):
if objective in non_core_objectives:
raise ValueError(f"{objective.name.lower()} is not allowed in AutoML! "
"Use evalml.objectives.utils.get_core_objective_names() "
"to get all objective names allowed in automl.")
return objective()
return objective
def __str__(self):
def _print_list(obj_list):
lines = sorted(['\t{}'.format(o.name) for o in obj_list])
return '\n'.join(lines)
def _get_funct_name(function):
if callable(function):
return function.__name__
else:
return None
search_desc = (
f"{handle_problem_types(self.problem_type).name} Search\n\n"
f"Parameters: \n{'='*20}\n"
f"Objective: {get_objective(self.objective).name}\n"
f"Max Time: {self.max_time}\n"
f"Max Iterations: {self.max_iterations}\n"
f"Max Batches: {self.max_batches}\n"
f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n"
f"Patience: {self.patience}\n"
f"Tolerance: {self.tolerance}\n"
f"Data Splitting: {self.data_splitter}\n"
f"Tuner: {self.tuner_class.__name__}\n"
f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n"
f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n"
f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
f"Random Seed: {self.random_seed}\n"
f"n_jobs: {self.n_jobs}\n"
f"Optimize Thresholds: {self.optimize_thresholds}\n"
)
rankings_desc = ""
if not self.rankings.empty:
rankings_str = self.rankings.drop(['parameters'], axis='columns').to_string()
rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
return search_desc + rankings_desc
def _validate_problem_configuration(self, problem_configuration=None):
if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
required_parameters = {'gap', 'max_delay'}
if not problem_configuration or not all(p in problem_configuration for p in required_parameters):
raise ValueError("user_parameters must be a dict containing values for at least the gap and max_delay "
f"parameters. Received {problem_configuration}.")
return problem_configuration or {}
def _handle_keyboard_interrupt(self):
"""Presents a prompt to the user asking if they want to stop the search.
Returns:
bool: If True, search should terminate early
"""
leading_char = "\n"
start_of_loop = time.time()
while True:
choice = input(leading_char + "Do you really want to exit search (y/n)? ").strip().lower()
if choice == "y":
logger.info("Exiting AutoMLSearch.")
return True
elif choice == "n":
# So that the time in this loop does not count towards the time budget (if set)
time_in_loop = time.time() - start_of_loop
self._start += time_in_loop
return False
else:
leading_char = ""
def search(self, show_iteration_plot=True):
"""Find the best pipeline for the data set.
Arguments:
            show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
                Disabled by default in non-Jupyter environments.
"""
if self._searched:
logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
return
# don't show iteration plot outside of a jupyter notebook
if show_iteration_plot:
try:
get_ipython
except NameError:
show_iteration_plot = False
log_title(logger, "Beginning pipeline search")
logger.info("Optimizing for %s. " % self.objective.name)
logger.info("{} score is better.\n".format('Greater' if self.objective.greater_is_better else 'Lower'))
logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
if self.max_batches is not None:
logger.info(f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. ")
elif self.max_iterations is not None:
logger.info("Searching up to %s pipelines. " % self.max_iterations)
if self.max_time is not None:
logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.max_time)
logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.allowed_model_families]))
self.search_iteration_plot = None
if self.plot:
self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)
self._start = time.time()
try:
self._add_baseline_pipelines()
except KeyboardInterrupt:
if self._handle_keyboard_interrupt():
self._interrupted = True
current_batch_pipelines = []
current_batch_pipeline_scores = []
new_pipeline_ids = []
loop_interrupted = False
while self._should_continue():
try:
if not loop_interrupted:
current_batch_pipelines = self._automl_algorithm.next_batch()
except StopIteration:
logger.info('AutoML Algorithm out of recommendations, ending')
break
try:
new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
loop_interrupted = False
except KeyboardInterrupt:
loop_interrupted = True
if self._handle_keyboard_interrupt():
break
full_rankings = self.full_rankings
current_batch_idx = full_rankings['id'].isin(new_pipeline_ids)
current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
if len(current_batch_pipeline_scores) and current_batch_pipeline_scores.isna().all():
raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")
self.search_duration = time.time() - self._start
elapsed_time = time_elapsed(self._start)
desc = f"\nSearch finished after {elapsed_time}"
desc = desc.ljust(self._MAX_NAME_LEN)
logger.info(desc)
self._find_best_pipeline()
if self._best_pipeline is not None:
best_pipeline = self.rankings.iloc[0]
best_pipeline_name = best_pipeline["pipeline_name"]
logger.info(f"Best pipeline: {best_pipeline_name}")
logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
self._searched = True
def _find_best_pipeline(self):
"""Finds the best pipeline in the rankings
If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
if len(self.rankings) == 0:
return
best_pipeline = self.rankings.iloc[0]
if not (self._best_pipeline and self._best_pipeline == self.get_pipeline(best_pipeline['id'])):
best_pipeline = self.get_pipeline(best_pipeline['id'])
if self._train_best_pipeline:
if best_pipeline.model_family == ModelFamily.ENSEMBLE:
X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
else:
X_train = self.X_train
y_train = self.y_train
if hasattr(self.data_splitter, "transform_sample"):
train_indices = self.data_splitter.transform_sample(X_train, y_train)
X_train = X_train.iloc[train_indices]
y_train = y_train.iloc[train_indices]
best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
self.optimize_thresholds, self.objective)
self._best_pipeline = best_pipeline
def _num_pipelines(self):
"""Return the number of pipeline evaluations which have been made
Returns:
int: the number of pipeline evaluations made in the search
"""
return len(self._results['pipeline_results'])
def _should_continue(self):
"""Given the original stopping criterion and current state, should the search continue?
Returns:
bool: True if yes, False if no.
"""
if self._interrupted:
return False
# for add_to_rankings
if self._searched:
return True
# Run at least one pipeline for every search
num_pipelines = self._num_pipelines()
if num_pipelines == 0:
return True
# check max_time and max_iterations
elapsed = time.time() - self._start
if self.max_time and elapsed >= self.max_time:
return False
elif self.max_iterations and num_pipelines >= self.max_iterations:
return False
# check for early stopping
if self.patience is None or self.tolerance is None:
return True
first_id = self._results['search_order'][0]
best_score = self._results['pipeline_results'][first_id]['score']
num_without_improvement = 0
for id in self._results['search_order'][1:]:
curr_score = self._results['pipeline_results'][id]['score']
significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
if score_improved and significant_change:
best_score = curr_score
num_without_improvement = 0
else:
num_without_improvement += 1
if num_without_improvement >= self.patience:
logger.info("\n\n{} iterations without improvement. Stopping search early...".format(self.patience))
return False
return True
def _validate_problem_type(self):
for obj in self.additional_objectives:
if not obj.is_defined_for_problem_type(self.problem_type):
raise ValueError("Additional objective {} is not compatible with a {} problem.".format(obj.name, self.problem_type.value))
for pipeline in self.allowed_pipelines or []:
if pipeline.problem_type != self.problem_type:
raise ValueError("Given pipeline {} is not compatible with problem_type {}.".format(pipeline.name, self.problem_type.value))
def _add_baseline_pipelines(self):
"""Fits a baseline pipeline to the data.
This is the first pipeline fit during search.
"""
if self.problem_type == ProblemTypes.BINARY:
baseline = ModeBaselineBinaryPipeline(parameters={})
elif self.problem_type == ProblemTypes.MULTICLASS:
baseline = ModeBaselineMulticlassPipeline(parameters={})
elif self.problem_type == ProblemTypes.REGRESSION:
baseline = MeanBaselineRegressionPipeline(parameters={})
else:
pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesBaselineRegressionPipeline,
ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesBaselineMulticlassPipeline,
ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBaselineBinaryPipeline}[self.problem_type]
gap = self.problem_configuration['gap']
max_delay = self.problem_configuration['max_delay']
baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": max_delay},
"Time Series Baseline Estimator": {"gap": gap, "max_delay": max_delay}})
self._engine.evaluate_batch([baseline])
@staticmethod
def _get_mean_cv_scores_for_all_objectives(cv_data, objective_name_to_class):
scores = defaultdict(int)
n_folds = len(cv_data)
for fold_data in cv_data:
for field, value in fold_data['all_objective_scores'].items():
# The 'all_objective_scores' field contains scores for all objectives
# but also fields like "# Training" and "# Testing", so we want to exclude them since
# they are not scores
if field in objective_name_to_class:
scores[field] += value
return {objective: float(score) / n_folds for objective, score in scores.items()}
def _post_evaluation_callback(self, pipeline, evaluation_results):
training_time = evaluation_results['training_time']
cv_data = evaluation_results['cv_data']
cv_scores = evaluation_results['cv_scores']
is_baseline = pipeline.model_family == ModelFamily.BASELINE
cv_score = cv_scores.mean()
percent_better_than_baseline = {}
mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(cv_data, self.objective_name_to_class)
if is_baseline:
self._baseline_cv_scores = mean_cv_all_objectives
for obj_name in mean_cv_all_objectives:
objective_class = self.objective_name_to_class[obj_name]
# In the event add_to_rankings is called before search _baseline_cv_scores will be empty so we will return
# nan for the base score.
percent_better = objective_class.calculate_percent_difference(mean_cv_all_objectives[obj_name],
self._baseline_cv_scores.get(obj_name, np.nan))
percent_better_than_baseline[obj_name] = percent_better
high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)
pipeline_id = len(self._results['pipeline_results'])
self._results['pipeline_results'][pipeline_id] = {
"id": pipeline_id,
"pipeline_name": pipeline.name,
"pipeline_class": type(pipeline),
"pipeline_summary": pipeline.summary,
"parameters": pipeline.parameters,
"score": cv_score,
"high_variance_cv": high_variance_cv,
"training_time": training_time,
"cv_data": cv_data,
"percent_better_than_baseline_all_objectives": percent_better_than_baseline,
"percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
"validation_score": cv_scores[0]
}
if pipeline.model_family == ModelFamily.ENSEMBLE:
input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info]
self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
self._results['search_order'].append(pipeline_id)
if not is_baseline:
score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score
try:
self._automl_algorithm.add_result(score_to_minimize, pipeline, self._results['pipeline_results'][pipeline_id])
except PipelineNotFoundError:
pass
if self.search_iteration_plot:
self.search_iteration_plot.update()
if self.add_result_callback:
self.add_result_callback(self._results['pipeline_results'][pipeline_id], pipeline, self)
return pipeline_id
def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
"""Checks cross-validation scores and logs a warning if variance is higher than specified threshhold."""
pipeline_name = pipeline.name
high_variance_cv = bool(abs(cv_scores.std() / cv_scores.mean()) > threshold)
if high_variance_cv:
logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
return high_variance_cv
def get_pipeline(self, pipeline_id):
"""Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
initialized with the parameters used to train that pipeline during automl search.
Arguments:
pipeline_id (int): pipeline to retrieve
Returns:
PipelineBase: untrained pipeline instance associated with the provided ID
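        Example (illustrative sketch; assumes a completed search on this instance):
            best_id = automl.rankings.iloc[0]["id"]
            pipeline = automl.get_pipeline(best_id)  # untrained copy using the searched parameters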
"""
pipeline_results = self.results['pipeline_results'].get(pipeline_id)
if pipeline_results is None:
raise PipelineNotFoundError("Pipeline not found in automl results")
pipeline_class = pipeline_results.get('pipeline_class')
parameters = pipeline_results.get('parameters')
if pipeline_class is None or parameters is None:
raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
return pipeline_class(parameters, random_seed=self.random_seed)
def describe_pipeline(self, pipeline_id, return_dict=False):
"""Describe a pipeline
Arguments:
pipeline_id (int): pipeline to describe
return_dict (bool): If True, return dictionary of information
about pipeline. Defaults to False.
Returns:
Description of specified pipeline. Includes information such as
type of pipeline components, problem, training time, cross validation, etc.
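        Example (illustrative sketch; assumes a completed search on this instance):
            automl.describe_pipeline(automl.rankings.iloc[0]["id"])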
"""
if pipeline_id not in self._results['pipeline_results']:
raise PipelineNotFoundError("Pipeline not found")
pipeline = self.get_pipeline(pipeline_id)
pipeline_results = self._results['pipeline_results'][pipeline_id]
pipeline.describe()
if pipeline.model_family == ModelFamily.ENSEMBLE:
logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
log_subtitle(logger, "Training")
logger.info("Training for {} problems.".format(pipeline.problem_type))
if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
logger.info("Objective to optimize binary classification pipeline thresholds for: {}".format(self.objective))
logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
log_subtitle(logger, "Cross Validation", underline="-")
all_objective_scores = [fold["all_objective_scores"] for fold in pipeline_results["cv_data"]]
all_objective_scores = pd.DataFrame(all_objective_scores)
for c in all_objective_scores:
if c in ["# Training", "# Validation"]:
all_objective_scores[c] = all_objective_scores[c].astype("object")
continue
mean = all_objective_scores[c].mean(axis=0)
std = all_objective_scores[c].std(axis=0)
all_objective_scores.loc["mean", c] = mean
all_objective_scores.loc["std", c] = std
all_objective_scores.loc["coef of var", c] = std / mean if abs(mean) > 0 else np.inf
all_objective_scores = all_objective_scores.fillna("-")
with pd.option_context('display.float_format', '{:.3f}'.format, 'expand_frame_repr', False):
logger.info(all_objective_scores)
if return_dict:
return pipeline_results
def add_to_rankings(self, pipeline):
"""Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.
Arguments:
pipeline (PipelineBase): pipeline to train and evaluate.
"""
pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
for parameter in pipeline_rows['parameters']:
if pipeline.parameters == parameter:
return
self._engine.evaluate_batch([pipeline])
self._find_best_pipeline()
@property
def results(self):
"""Class that allows access to a copy of the results from `automl_search`.
Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
and `search_order`: a list describing the order the pipelines were searched.
"""
return copy.deepcopy(self._results)
@property
def rankings(self):
"""Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
return self.full_rankings.drop_duplicates(subset="pipeline_name", keep="first")
@property
def full_rankings(self):
"""Returns a pandas.DataFrame with scoring results from all pipelines searched"""
ascending = True
if self.objective.greater_is_better:
ascending = False
full_rankings_cols = ["id", "pipeline_name", "score", "validation_score",
"percent_better_than_baseline", "high_variance_cv", "parameters"]
if not self._results['pipeline_results']:
return pd.DataFrame(columns=full_rankings_cols)
rankings_df = pd.DataFrame(self._results['pipeline_results'].values())
rankings_df = rankings_df[full_rankings_cols]
rankings_df.sort_values("score", ascending=ascending, inplace=True)
rankings_df.reset_index(drop=True, inplace=True)
return rankings_df
@property
def best_pipeline(self):
"""Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
Returns:
PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
"""
if not self._best_pipeline:
raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
return self._best_pipeline
def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
"""Saves AutoML object at file path
Arguments:
file_path (str): location to save file
pickle_protocol (int): the pickle data stream format.
Returns:
None
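        Example (illustrative sketch; the file name is a placeholder):
            automl.save("automl_search.pkl")
            restored = AutoMLSearch.load("automl_search.pkl")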
"""
with open(file_path, 'wb') as f:
cloudpickle.dump(self, f, protocol=pickle_protocol)
@staticmethod
def load(file_path):
"""Loads AutoML object at file path
Arguments:
file_path (str): location to find file to load
Returns:
            AutoMLSearch object
"""
with open(file_path, 'rb') as f:
return cloudpickle.load(f)
def train_pipelines(self, pipelines):
"""Train a list of pipelines on the training data.
This can be helpful for training pipelines once the search is complete.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
Returns:
Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline.
            Note that any pipelines that error out during training will not be included in the dictionary
but the exception and stacktrace will be displayed in the log.
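        Example (illustrative sketch; assumes a completed search on this instance):
            to_train = [automl.get_pipeline(i) for i in automl.rankings["id"][:3]]
            fitted = automl.train_pipelines(to_train)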
"""
return self._engine.train_batch(pipelines)
def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
"""Score a list of pipelines on the given holdout data.
Arguments:
            pipelines (list(PipelineBase)): List of pipelines to score.
X_holdout (ww.DataTable, pd.DataFrame): Holdout features.
            y_holdout (ww.DataColumn, pd.Series): Holdout targets for scoring.
objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.
Returns:
Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores.
            Note that any pipelines that error out during scoring will not be included in the dictionary
but the exception and stacktrace will be displayed in the log.
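        Example (illustrative sketch; ``fitted`` comes from ``train_pipelines`` and the holdout
        split is assumed to already exist):
            scores = automl.score_pipelines(list(fitted.values()), X_holdout, y_holdout,
                                            objectives=[automl.objective])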
"""
return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
|
[
"evalml.pipelines.ModeBaselineMulticlassPipeline",
"pandas.option_context",
"evalml.exceptions.PipelineNotFoundError",
"copy.deepcopy",
"evalml.pipelines.utils.make_pipeline",
"evalml.objectives.get_non_core_objectives",
"evalml.utils.logger.get_logger",
"numpy.arange",
"evalml.utils.logger.log_subtitle",
"cloudpickle.load",
"evalml.utils.infer_feature_types",
"evalml.utils.convert_to_seconds",
"evalml.objectives.get_objective",
"pandas.DataFrame",
"evalml.automl.utils.check_all_pipeline_names_unique",
"evalml.automl.automl_algorithm.IterativeAlgorithm",
"evalml.exceptions.AutoMLSearchException",
"evalml.automl.utils.make_data_splitter",
"evalml.automl.utils.get_default_primary_search_objective",
"evalml.utils.logger.time_elapsed",
"evalml.preprocessing.split_data",
"evalml.pipelines.MeanBaselineRegressionPipeline",
"cloudpickle.dump",
"time.time",
"evalml.problem_types.handle_problem_types",
"evalml.utils.logger.log_title",
"evalml.pipelines.ModeBaselineBinaryPipeline",
"evalml.automl.engine.SequentialEngine",
"collections.defaultdict",
"evalml.pipelines.components.utils.get_estimators",
"evalml.objectives.get_core_objectives"
] |
[((1498, 1518), 'evalml.utils.logger.get_logger', 'get_logger', (['__file__'], {}), '(__file__)\n', (1508, 1518), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((9426, 9473), 'evalml.objectives.get_objective', 'get_objective', (['objective'], {'return_instance': '(False)'}), '(objective, return_instance=False)\n', (9439, 9473), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((13288, 13316), 'evalml.utils.infer_feature_types', 'infer_feature_types', (['X_train'], {}), '(X_train)\n', (13307, 13316), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((13340, 13368), 'evalml.utils.infer_feature_types', 'infer_feature_types', (['y_train'], {}), '(y_train)\n', (13359, 13368), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((13441, 13600), 'evalml.automl.utils.make_data_splitter', 'make_data_splitter', (['self.X_train', 'self.y_train', 'self.problem_type', 'self.problem_configuration'], {'n_splits': '(3)', 'shuffle': '(True)', 'random_seed': 'self.random_seed'}), '(self.X_train, self.y_train, self.problem_type, self.\n problem_configuration, n_splits=3, shuffle=True, random_seed=self.\n random_seed)\n', (13459, 13600), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((14514, 14569), 'evalml.automl.utils.check_all_pipeline_names_unique', 'check_all_pipeline_names_unique', (['self.allowed_pipelines'], {}), '(self.allowed_pipelines)\n', (14545, 14569), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((17239, 17488), 'evalml.automl.engine.SequentialEngine', 'SequentialEngine', (['self.X_train', 'self.y_train', 'self.ensembling_indices', 'self'], {'should_continue_callback': 'self._should_continue', 'pre_evaluation_callback': 'self._pre_evaluation_callback', 'post_evaluation_callback': 'self._post_evaluation_callback'}), '(self.X_train, self.y_train, self.ensembling_indices, self,\n should_continue_callback=self._should_continue, pre_evaluation_callback\n =self._pre_evaluation_callback, post_evaluation_callback=self.\n _post_evaluation_callback)\n', (17255, 17488), False, 'from evalml.automl.engine import SequentialEngine\n'), ((18259, 18600), 'evalml.automl.automl_algorithm.IterativeAlgorithm', 'IterativeAlgorithm', ([], {'max_iterations': 'self.max_iterations', 'allowed_pipelines': 'self.allowed_pipelines', 'tuner_class': 'self.tuner_class', 'random_seed': 'self.random_seed', 'n_jobs': 'self.n_jobs', 'number_features': 'self.X_train.shape[1]', 'pipelines_per_batch': 'self._pipelines_per_batch', 'ensembling': 'run_ensembling', 'pipeline_params': 'pipeline_params'}), '(max_iterations=self.max_iterations, allowed_pipelines=\n self.allowed_pipelines, tuner_class=self.tuner_class, random_seed=self.\n random_seed, n_jobs=self.n_jobs, number_features=self.X_train.shape[1],\n pipelines_per_batch=self._pipelines_per_batch, ensembling=\n run_ensembling, pipeline_params=pipeline_params)\n', (18277, 18600), False, 'from evalml.automl.automl_algorithm import IterativeAlgorithm\n'), ((19640, 19665), 'evalml.objectives.get_non_core_objectives', 'get_non_core_objectives', ([], {}), '()\n', (19663, 19665), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((22545, 22556), 'time.time', 'time.time', ([], {}), '()\n', 
(22554, 22556), False, 'import time\n'), ((24004, 24050), 'evalml.utils.logger.log_title', 'log_title', (['logger', '"""Beginning pipeline search"""'], {}), "(logger, 'Beginning pipeline search')\n", (24013, 24050), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((25067, 25078), 'time.time', 'time.time', ([], {}), '()\n', (25076, 25078), False, 'import time\n'), ((26570, 26595), 'evalml.utils.logger.time_elapsed', 'time_elapsed', (['self._start'], {}), '(self._start)\n', (26582, 26595), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((32472, 32488), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (32483, 32488), False, 'from collections import defaultdict\n'), ((38449, 38481), 'evalml.utils.logger.log_subtitle', 'log_subtitle', (['logger', '"""Training"""'], {}), "(logger, 'Training')\n", (38461, 38481), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((38948, 39003), 'evalml.utils.logger.log_subtitle', 'log_subtitle', (['logger', '"""Cross Validation"""'], {'underline': '"""-"""'}), "(logger, 'Cross Validation', underline='-')\n", (38960, 39003), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((39138, 39172), 'pandas.DataFrame', 'pd.DataFrame', (['all_objective_scores'], {}), '(all_objective_scores)\n', (39150, 39172), True, 'import pandas as pd\n'), ((40904, 40932), 'copy.deepcopy', 'copy.deepcopy', (['self._results'], {}), '(self._results)\n', (40917, 40932), False, 'import copy\n'), ((8755, 8789), 'evalml.problem_types.handle_problem_types', 'handle_problem_types', (['problem_type'], {}), '(problem_type)\n', (8775, 8789), False, 'from evalml.problem_types import ProblemTypes, handle_problem_types\n'), ((9344, 9405), 'evalml.automl.utils.get_default_primary_search_objective', 'get_default_primary_search_objective', (['self.problem_type.value'], {}), '(self.problem_type.value)\n', (9380, 9405), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((9997, 10035), 'evalml.objectives.get_core_objectives', 'get_core_objectives', (['self.problem_type'], {}), '(self.problem_type)\n', (10016, 10035), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((11491, 11519), 'evalml.utils.convert_to_seconds', 'convert_to_seconds', (['max_time'], {}), '(max_time)\n', (11509, 11519), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((14032, 14094), 'evalml.pipelines.components.utils.get_estimators', 'get_estimators', (['self.problem_type', 'self.allowed_model_families'], {}), '(self.problem_type, self.allowed_model_families)\n', (14046, 14094), False, 'from evalml.pipelines.components.utils import get_estimators\n'), ((17001, 17135), 'evalml.preprocessing.split_data', 'split_data', (['X_shape', 'self.y_train'], {'problem_type': 'self.problem_type', 'test_size': '_ensembling_split_size', 'random_seed': 'self.random_seed'}), '(X_shape, self.y_train, problem_type=self.problem_type, test_size\n =_ensembling_split_size, random_seed=self.random_seed)\n', (17011, 17135), False, 'from evalml.preprocessing import split_data\n'), ((26521, 26532), 'time.time', 'time.time', ([], {}), '()\n', (26530, 26532), False, 'import time\n'), ((29296, 29307), 'time.time', 'time.time', ([], 
{}), '()\n', (29305, 29307), False, 'import time\n'), ((31329, 31370), 'evalml.pipelines.ModeBaselineBinaryPipeline', 'ModeBaselineBinaryPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31355, 31370), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((37083, 37144), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline not found in automl results"""'], {}), "('Pipeline not found in automl results')\n", (37104, 37144), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((37340, 37426), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline class or parameters not found in automl results"""'], {}), "(\n 'Pipeline class or parameters not found in automl results')\n", (37361, 37426), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((38065, 38108), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline not found"""'], {}), "('Pipeline not found')\n", (38086, 38108), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((39768, 39858), 'pandas.option_context', 'pd.option_context', (['"""display.float_format"""', '"""{:.3f}""".format', '"""expand_frame_repr"""', '(False)'], {}), "('display.float_format', '{:.3f}'.format,\n 'expand_frame_repr', False)\n", (39785, 39858), True, 'import pandas as pd\n'), ((41672, 41712), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'full_rankings_cols'}), '(columns=full_rankings_cols)\n', (41684, 41712), True, 'import pandas as pd\n'), ((42510, 42599), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""automl search must be run before selecting `best_pipeline`."""'], {}), "(\n 'automl search must be run before selecting `best_pipeline`.')\n", (42531, 42599), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((42990, 43041), 'cloudpickle.dump', 'cloudpickle.dump', (['self', 'f'], {'protocol': 'pickle_protocol'}), '(self, f, protocol=pickle_protocol)\n', (43006, 43041), False, 'import cloudpickle\n'), ((43333, 43352), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (43349, 43352), False, 'import cloudpickle\n'), ((10434, 10450), 'evalml.objectives.get_objective', 'get_objective', (['o'], {}), '(o)\n', (10447, 10450), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((14243, 14367), 'evalml.pipelines.utils.make_pipeline', 'make_pipeline', (['self.X_train', 'self.y_train', 'estimator', 'self.problem_type'], {'custom_hyperparameters': 'self.pipeline_parameters'}), '(self.X_train, self.y_train, estimator, self.problem_type,\n custom_hyperparameters=self.pipeline_parameters)\n', (14256, 14367), False, 'from evalml.pipelines.utils import make_pipeline\n'), ((16925, 16957), 'numpy.arange', 'np.arange', (['self.X_train.shape[0]'], {}), '(self.X_train.shape[0])\n', (16934, 16957), True, 'import numpy as np\n'), ((26351, 26498), 'evalml.exceptions.AutoMLSearchException', 'AutoMLSearchException', (['f"""All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}."""'], {}), "(\n f'All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.'\n )\n", 
(26372, 26498), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((31453, 31498), 'evalml.pipelines.ModeBaselineMulticlassPipeline', 'ModeBaselineMulticlassPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31483, 31498), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((20439, 20478), 'evalml.problem_types.handle_problem_types', 'handle_problem_types', (['self.problem_type'], {}), '(self.problem_type)\n', (20459, 20478), False, 'from evalml.problem_types import ProblemTypes, handle_problem_types\n'), ((20563, 20592), 'evalml.objectives.get_objective', 'get_objective', (['self.objective'], {}), '(self.objective)\n', (20576, 20592), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((31581, 31626), 'evalml.pipelines.MeanBaselineRegressionPipeline', 'MeanBaselineRegressionPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31611, 31626), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((22950, 22961), 'time.time', 'time.time', ([], {}), '()\n', (22959, 22961), False, 'import time\n')]
|
"""Mobjects representing vector fields."""
__all__ = [
"VectorField",
"ArrowVectorField",
"StreamLines",
]
import itertools as it
import random
from math import ceil, floor
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type
import numpy as np
from colour import Color
from PIL import Image
from .. import config
from ..animation.composition import AnimationGroup, Succession
from ..animation.creation import Create
from ..animation.indication import ShowPassingFlash
from ..animation.update import UpdateFromAlphaFunc
from ..constants import OUT, RIGHT, UP
from ..mobject.geometry import Vector
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.bezier import interpolate, inverse_interpolate
from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color
from ..utils.deprecation import deprecated_params
from ..utils.rate_functions import ease_out_sine, linear
from ..utils.simple_functions import sigmoid
from .types.opengl_vectorized_mobject import OpenGLVMobject
DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED]
class VectorField(VGroup):
"""A vector field.
Vector fields are based on a function defining a vector at every position.
    By default, this class does not include any visible elements; it provides
    methods to move other :class:`~.Mobject` s along the vector field.
Parameters
----------
func
The function defining the rate of change at every position of the `VectorField`.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
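    Examples
    --------
    A minimal sketch (the scene name and all values are illustrative): a dot is carried
    along the field by attaching the nudge updater.
    .. manim:: VectorFieldDotFlow
        class VectorFieldDotFlow(Scene):
            def construct(self):
                func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
                field = VectorField(func)
                dot = Dot(LEFT * 2)
                dot.add_updater(field.get_nudge_updater())
                self.add(dot)
                self.wait(4)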
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
**kwargs
):
super().__init__(**kwargs)
self.func = func
if color is None:
self.single_color = False
if color_scheme is None:
def color_scheme(p):
return np.linalg.norm(p)
self.color_scheme = color_scheme # TODO maybe other default for direction?
self.rgbs = np.array(list(map(color_to_rgb, colors)))
def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]:
vec = self.func(pos)
color_value = np.clip(
self.color_scheme(vec),
min_color_scheme_value,
max_color_scheme_value,
)
alpha = inverse_interpolate(
min_color_scheme_value,
max_color_scheme_value,
color_value,
)
alpha *= len(self.rgbs) - 1
c1 = self.rgbs[int(alpha)]
c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)]
alpha %= 1
return interpolate(c1, c2, alpha)
self.pos_to_rgb = pos_to_rgb
self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos))
else:
self.single_color = True
self.color = color
self.submob_movement_updater = None
@staticmethod
def shift_func(
func: Callable[[np.ndarray], np.ndarray],
shift_vector: np.ndarray,
) -> Callable[[np.ndarray], np.ndarray]:
"""Shift a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The shift to be applied to the vector field.
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The shifted vector field function.
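        Examples
        --------
        An illustrative sketch (the shift of two units to the right is an arbitrary choice)::
            func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
            shifted_func = VectorField.shift_func(func, 2 * RIGHT)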
"""
return lambda p: func(p - shift_vector)
@staticmethod
def scale_func(
func: Callable[[np.ndarray], np.ndarray],
scalar: float,
) -> Callable[[np.ndarray], np.ndarray]:
"""Scale a vector field function.
Parameters
----------
func
The function defining a vector field.
        scalar
The scalar to be applied to the vector field.
Examples
--------
.. manim:: ScaleVectorFieldFunction
class ScaleVectorFieldFunction(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
vector_field = ArrowVectorField(func)
self.add(vector_field)
self.wait()
func = VectorField.scale_func(func, 0.5)
self.play(vector_field.animate.become(ArrowVectorField(func)))
self.wait()
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The scaled vector field function.
"""
return lambda p: func(p * scalar)
def nudge(
self,
mob: Mobject,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Nudge a :class:`~.Mobject` along the vector field.
Parameters
----------
mob
The mobject to move along the vector field
dt
            A scalar applied to the amount the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
            The number of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. If `False` the
vector field takes effect on the center of the given
:class:`~.Mobject`. If `True` the vector field takes effect on the
            individual points of the :class:`~.Mobject`,
potentially distorting it.
Returns
-------
VectorField
This vector field.
Examples
--------
.. manim:: Nudging
class Nudging(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP
vector_field = ArrowVectorField(
func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2
)
self.add(vector_field)
circle = Circle(radius=2).shift(LEFT)
self.add(circle.copy().set_color(GRAY))
dot = Dot().move_to(circle)
vector_field.nudge(circle, -2, 60, True)
vector_field.nudge(dot, -2, 60)
circle.add_updater(vector_field.get_nudge_updater(pointwise=True))
dot.add_updater(vector_field.get_nudge_updater())
self.add(circle, dot)
self.wait(6)
"""
def runge_kutta(self, p: Sequence[float], step_size: float) -> float:
"""Returns the change in position of a point along a vector field.
Parameters
----------
p
The position of each point being moved along the vector field.
step_size
A scalar that is used to determine how much a point is shifted in a single step.
Returns
-------
float
How much the point is shifted.
"""
k_1 = self.func(p)
k_2 = self.func(p + step_size * (k_1 * 0.5))
k_3 = self.func(p + step_size * (k_2 * 0.5))
k_4 = self.func(p + step_size * k_3)
return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)
step_size = dt / substeps
for _ in range(substeps):
if pointwise:
mob.apply_function(lambda p: p + runge_kutta(self, p, step_size))
else:
mob.shift(runge_kutta(self, mob.get_center(), step_size))
return self
def nudge_submobjects(
self,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Apply a nudge along the vector field to all submobjects.
Parameters
----------
dt
            A scalar applied to the amount the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
            The number of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
for mob in self.submobjects:
self.nudge(mob, dt, substeps, pointwise)
return self
def get_nudge_updater(
self,
speed: float = 1,
pointwise: bool = False,
) -> Callable[[Mobject, float], Mobject]:
"""Get an update function to move a :class:`~.Mobject` along the vector field.
When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field.
Parameters
----------
speed
At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
Callable[[Mobject, float], Mobject]
The update function.
"""
return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise)
def start_submobject_movement(
self,
speed: float = 1,
pointwise: bool = False,
) -> "VectorField":
"""Start continuously moving all submobjects along the vector field.
Calling this method multiple times will result in removing the previous updater created by this method.
Parameters
----------
speed
The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
self.stop_submobject_movement()
self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects(
dt * speed,
pointwise=pointwise,
)
self.add_updater(self.submob_movement_updater)
return self
def stop_submobject_movement(self) -> "VectorField":
"""Stops the continuous movement started using :meth:`start_submobject_movement`.
Returns
-------
VectorField
This vector field.
"""
self.remove_updater(self.submob_movement_updater)
self.submob_movement_updater = None
return self
def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image:
"""Generate an image that displays the vector field.
        The color at each position is calculated by passing the position through a
series of steps:
Calculate the vector field function at that position, map that vector to a
single value using `self.color_scheme` and finally generate a color from
that value using the color gradient.
Parameters
----------
sampling_rate
            The step size at which pixels get included in the image. Lower values give
more accurate results, but may take a long time to compute.
Returns
-------
        Image.Image
The vector field image.
"""
if self.single_color:
raise ValueError(
"There is no point in generating an image if the vector field uses a single color.",
)
ph = int(config["pixel_height"] / sampling_rate)
pw = int(config["pixel_width"] / sampling_rate)
fw = config["frame_width"]
fh = config["frame_height"]
points_array = np.zeros((ph, pw, 3))
x_array = np.linspace(-fw / 2, fw / 2, pw)
y_array = np.linspace(fh / 2, -fh / 2, ph)
x_array = x_array.reshape((1, len(x_array)))
y_array = y_array.reshape((len(y_array), 1))
x_array = x_array.repeat(ph, axis=0)
y_array.repeat(pw, axis=1) # TODO why not y_array = y_array.repeat(...)?
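        # Note: the un-assigned repeat above is a no-op (np.repeat returns a new array); the
        # assignment below works anyway because the (ph, 1) column broadcasts over the (ph, pw) slice.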
points_array[:, :, 0] = x_array
points_array[:, :, 1] = y_array
rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array)
return Image.fromarray((rgbs * 255).astype("uint8"))
def get_vectorized_rgba_gradient_function(
self,
start: float,
end: float,
colors: Iterable,
):
"""
Generates a gradient of rgbas as a numpy array
Parameters
----------
start
start value used for inverse interpolation at :func:`~.inverse_interpolate`
end
end value used for inverse interpolation at :func:`~.inverse_interpolate`
colors
list of colors to generate the gradient
Returns
-------
function to generate the gradients as numpy arrays representing rgba values
"""
rgbs = np.array([color_to_rgb(c) for c in colors])
def func(values, opacity=1):
alphas = inverse_interpolate(start, end, np.array(values))
alphas = np.clip(alphas, 0, 1)
scaled_alphas = alphas * (len(rgbs) - 1)
indices = scaled_alphas.astype(int)
next_indices = np.clip(indices + 1, 0, len(rgbs) - 1)
inter_alphas = scaled_alphas % 1
inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3))
result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas)
result = np.concatenate(
(result, np.full([len(result), 1], opacity)),
axis=1,
)
return result
return func
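# Hedged usage sketch (illustrative, not part of the original source): the callable returned by
# get_vectorized_rgba_gradient_function maps scalar samples to an (N, 4) rgba array, e.g.
#   to_rgba = vector_field.get_vectorized_rgba_gradient_function(0, 2, [BLUE, RED])
#   rgbas = to_rgba([0.0, 1.0, 2.0], opacity=0.8)  # shape (3, 4); the last column holds the opacity
# where `vector_field` stands for any VectorField instance and BLUE/RED are manim color constants.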
class ArrowVectorField(VectorField):
"""A :class:`VectorField` represented by a set of change vectors.
Vector fields are always based on a function defining the :class:`~.Vector` at every position.
    The values of this function are displayed as a grid of vectors.
    By default the color of each vector is determined by its magnitude.
Other color schemes can be used however.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
length_func
The function determining the displayed size of the vectors. The actual size
of the vector is passed, the returned value will be used as display size for the
vector. By default this is used to cap the displayed size of vectors to reduce the clutter.
opacity
The opacity of the arrows.
vector_config
Additional arguments to be passed to the :class:`~.Vector` constructor
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(ArrowVectorField(func))
.. manim:: SizingAndSpacing
class SizingAndSpacing(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
vf = ArrowVectorField(func, x_range=[-7, 7, 1])
self.add(vf)
self.wait()
length_func = lambda x: x / 3
vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func)
self.play(vf.animate.become(vf2))
self.wait()
.. manim:: Coloring
:save_last_frame:
class Coloring(Scene):
def construct(self):
func = lambda pos: pos - LEFT * 5
colors = [RED, YELLOW, BLUE, DARK_GRAY]
min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5)
max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5)
vf = ArrowVectorField(
func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors
)
self.add(vf, min_radius, max_radius)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining Vector positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False, # Automatically True if z_range is set
# Takes in actual norm, spits out displayed norm
length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm),
opacity: float = 1.0,
vector_config: Optional[dict] = None,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.length_func = length_func
self.opacity = opacity
if vector_config is None:
vector_config = {}
self.vector_config = vector_config
self.func = func
x_range = np.arange(*self.x_range)
y_range = np.arange(*self.y_range)
z_range = np.arange(*self.z_range)
for x, y, z in it.product(x_range, y_range, z_range):
self.add(self.get_vector(x * RIGHT + y * UP + z * OUT))
self.set_opacity(self.opacity)
def get_vector(self, point: np.ndarray):
"""Creates a vector in the vector field.
The created vector is based on the function of the vector field and is
rooted in the given point. Color and length fit the specifications of
this vector field.
Parameters
----------
point
The root point of the vector.
kwargs : Any
Additional arguments to be passed to the :class:`~.Vector` constructor
"""
output = np.array(self.func(point))
norm = np.linalg.norm(output)
if norm != 0:
output *= self.length_func(norm) / norm
vect = Vector(output, **self.vector_config)
vect.shift(point)
if self.single_color:
vect.set_color(self.color)
else:
vect.set_color(self.pos_to_color(point))
return vect
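# Hedged usage sketch (illustrative, not part of the original source):
#   field = ArrowVectorField(lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP)
#   arrow = field.get_vector(ORIGIN)  # a Vector rooted at ORIGIN, rescaled by length_func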
class StreamLines(VectorField):
"""StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents.
Vector fields are always based on a function defining the vector at every position.
    The values of this function are displayed by moving many agents along the vector field
and showing their trace.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
noise_factor
The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined.
n_repeats
The number of agents generated at each starting point.
dt
The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field.
virtual_time
The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation.
max_anchors_per_line
The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length.
padding
The distance agents can move out of the generation area before being terminated.
stroke_width
        The stroke width of the stream lines.
opacity
The opacity of the stream lines.
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(StreamLines(func))
.. manim:: SpawningAndFlowingArea
:save_last_frame:
class SpawningAndFlowingArea(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5
stream_lines = StreamLines(
func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1
)
spawning_area = Rectangle(width=6, height=4)
flowing_area = Rectangle(width=8, height=6)
labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)]
for lbl in labels:
lbl.add_background_rectangle(opacity=0.6, buff=0.05)
self.add(stream_lines, spawning_area, flowing_area, *labels)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining stream line starting positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False,
noise_factor: Optional[float] = None,
n_repeats=1,
# Determining how lines are drawn
dt=0.05,
virtual_time=3,
max_anchors_per_line=100,
padding=3,
# Determining stream line appearance:
stroke_width=1,
opacity=1,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.noise_factor = (
noise_factor if noise_factor is not None else self.y_range[2] / 2
)
self.n_repeats = n_repeats
self.virtual_time = virtual_time
self.max_anchors_per_line = max_anchors_per_line
self.padding = padding
self.stroke_width = stroke_width
half_noise = self.noise_factor / 2
np.random.seed(0)
start_points = np.array(
[
(x - half_noise) * RIGHT
+ (y - half_noise) * UP
+ (z - half_noise) * OUT
+ self.noise_factor * np.random.random(3)
for n in range(self.n_repeats)
for x in np.arange(*self.x_range)
for y in np.arange(*self.y_range)
for z in np.arange(*self.z_range)
],
)
def outside_box(p):
return (
p[0] < self.x_range[0] - self.padding
or p[0] > self.x_range[1] + self.padding - self.x_range[2]
or p[1] < self.y_range[0] - self.padding
or p[1] > self.y_range[1] + self.padding - self.y_range[2]
or p[2] < self.z_range[0] - self.padding
or p[2] > self.z_range[1] + self.padding - self.z_range[2]
)
max_steps = ceil(virtual_time / dt) + 1
if not self.single_color:
self.background_img = self.get_colored_background_image()
if config["renderer"] == "opengl":
self.values_to_rgbas = self.get_vectorized_rgba_gradient_function(
min_color_scheme_value,
max_color_scheme_value,
colors,
)
for point in start_points:
points = [point]
for _ in range(max_steps):
last_point = points[-1]
new_point = last_point + dt * func(last_point)
if outside_box(new_point):
break
points.append(new_point)
step = max_steps
if not step:
continue
if config["renderer"] == "opengl":
line = OpenGLVMobject()
else:
line = VMobject()
line.duration = step * dt
step = max(1, int(len(points) / self.max_anchors_per_line))
line.set_points_smoothly(points[::step])
if self.single_color:
line.set_stroke(self.color)
else:
if config["renderer"] == "opengl":
# scaled for compatibility with cairo
line.set_stroke(width=self.stroke_width / 4.0)
norms = np.array(
[np.linalg.norm(self.func(point)) for point in line.points],
)
line.set_rgba_array_direct(
self.values_to_rgbas(norms, opacity),
name="stroke_rgba",
)
else:
if np.any(self.z_range != np.array([0, 0.5, 0.5])):
line.set_stroke(
[self.pos_to_color(p) for p in line.get_anchors()],
)
else:
line.color_using_background_image(self.background_img)
line.set_stroke(width=self.stroke_width, opacity=opacity)
self.add(line)
self.stream_lines = [*self.submobjects]
def create(
self,
lag_ratio: Optional[float] = None,
run_time: Optional[Callable[[float], float]] = None,
**kwargs
) -> AnimationGroup:
"""The creation animation of the stream lines.
The stream lines appear in random order.
Parameters
----------
lag_ratio
The lag ratio of the animation.
If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation.
run_time
The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`.
If undefined, the virtual time of the stream lines is used as run time.
Returns
-------
:class:`~.AnimationGroup`
The creation animation of the stream lines.
Examples
--------
.. manim:: StreamLineCreation
class StreamLineCreation(Scene):
def construct(self):
func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos
stream_lines = StreamLines(
func,
color=YELLOW,
x_range=[-7, 7, 1],
y_range=[-4, 4, 1],
stroke_width=3,
virtual_time=1, # use shorter lines
max_anchors_per_line=5, # better performance with fewer anchors
)
self.play(stream_lines.create()) # uses virtual_time as run_time
self.wait()
"""
if run_time is None:
run_time = self.virtual_time
if lag_ratio is None:
lag_ratio = run_time / 2 / len(self.submobjects)
animations = [
Create(line, run_time=run_time, **kwargs) for line in self.stream_lines
]
random.shuffle(animations)
return AnimationGroup(*animations, lag_ratio=lag_ratio)
def start_animation(
self,
warm_up=True,
flow_speed: float = 1,
time_width: float = 0.3,
rate_func: Callable[[float], float] = linear,
line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash,
**kwargs
) -> None:
"""Animates the stream lines using an updater.
The stream lines will continuously flow
Parameters
----------
warm_up : bool, optional
If `True` the animation is initialized line by line. Otherwise it starts with all lines shown.
flow_speed
At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow.
time_width
The proportion of the stream line shown while being animated
rate_func
The rate function of each stream line flashing
line_animation_class
The animation class being used
Examples
--------
.. manim:: ContinuousMotion
class ContinuousMotion(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5)
self.wait(stream_lines.virtual_time / stream_lines.flow_speed)
"""
for line in self.stream_lines:
run_time = line.duration / flow_speed
line.anim = line_animation_class(
line,
run_time=run_time,
rate_func=rate_func,
time_width=time_width,
**kwargs,
)
line.anim.begin()
line.time = random.random() * self.virtual_time
if warm_up:
line.time *= -1
self.add(line.anim.mobject)
def updater(mob, dt):
for line in mob.stream_lines:
line.time += dt * flow_speed
if line.time >= self.virtual_time:
line.time -= self.virtual_time
line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1))
self.add_updater(updater)
self.flow_animation = updater
self.flow_speed = flow_speed
self.time_width = time_width
def end_animation(self) -> AnimationGroup:
"""End the stream line animation smoothly.
Returns an animation resulting in fully displayed stream lines without a noticeable cut.
Returns
-------
:class:`~.AnimationGroup`
The animation fading out the running stream animation.
Raises
------
ValueError
if no stream line animation is running
Examples
--------
.. manim:: EndAnimation
class EndAnimation(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(
func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE
)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5)
self.wait(1)
self.play(stream_lines.end_animation())
"""
if self.flow_animation is None:
raise ValueError("You have to start the animation before fading it out.")
def hide_and_wait(mob, alpha):
if alpha == 0:
mob.set_stroke(opacity=0)
elif alpha == 1:
mob.set_stroke(opacity=1)
def finish_updater_cycle(line, alpha):
line.time += dt * self.flow_speed
line.anim.interpolate(min(line.time / line.anim.run_time, 1))
if alpha == 1:
self.remove(line.anim.mobject)
line.anim.finish()
max_run_time = self.virtual_time / self.flow_speed
creation_rate_func = ease_out_sine
        creation_starting_speed = creation_rate_func(0.001) * 1000
        creation_run_time = (
            max_run_time / (1 + self.time_width) * creation_starting_speed
)
# creation_run_time is calculated so that the creation animation starts at the same speed
# as the regular line flash animation but eases out.
dt = 1 / config["frame_rate"]
animations = []
self.remove_updater(self.flow_animation)
self.flow_animation = None
for line in self.stream_lines:
create = Create(
line,
run_time=creation_run_time,
rate_func=creation_rate_func,
)
if line.time <= 0:
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
hide_and_wait,
run_time=-line.time / self.flow_speed,
),
create,
),
)
self.remove(line.anim.mobject)
line.anim.finish()
else:
remaining_time = max_run_time - line.time / self.flow_speed
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
finish_updater_cycle,
run_time=remaining_time,
),
create,
),
)
return AnimationGroup(*animations)
# TODO: Variant of StreamLines that is able to respond to changes in the vector field function
|
[
"numpy.clip",
"math.ceil",
"random.shuffle",
"math.floor",
"numpy.random.random",
"itertools.product",
"numpy.array",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.linspace",
"numpy.random.seed",
"numpy.linalg.norm",
"random.random",
"numpy.arange"
] |
[((13292, 13313), 'numpy.zeros', 'np.zeros', (['(ph, pw, 3)'], {}), '((ph, pw, 3))\n', (13300, 13313), True, 'import numpy as np\n'), ((13332, 13364), 'numpy.linspace', 'np.linspace', (['(-fw / 2)', '(fw / 2)', 'pw'], {}), '(-fw / 2, fw / 2, pw)\n', (13343, 13364), True, 'import numpy as np\n'), ((13383, 13415), 'numpy.linspace', 'np.linspace', (['(fh / 2)', '(-fh / 2)', 'ph'], {}), '(fh / 2, -fh / 2, ph)\n', (13394, 13415), True, 'import numpy as np\n'), ((13744, 13797), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.pos_to_rgb', '(2)', 'points_array'], {}), '(self.pos_to_rgb, 2, points_array)\n', (13763, 13797), True, 'import numpy as np\n'), ((20776, 20800), 'numpy.arange', 'np.arange', (['*self.x_range'], {}), '(*self.x_range)\n', (20785, 20800), True, 'import numpy as np\n'), ((20819, 20843), 'numpy.arange', 'np.arange', (['*self.y_range'], {}), '(*self.y_range)\n', (20828, 20843), True, 'import numpy as np\n'), ((20862, 20886), 'numpy.arange', 'np.arange', (['*self.z_range'], {}), '(*self.z_range)\n', (20871, 20886), True, 'import numpy as np\n'), ((20910, 20947), 'itertools.product', 'it.product', (['x_range', 'y_range', 'z_range'], {}), '(x_range, y_range, z_range)\n', (20920, 20947), True, 'import itertools as it\n'), ((21607, 21629), 'numpy.linalg.norm', 'np.linalg.norm', (['output'], {}), '(output)\n', (21621, 21629), True, 'import numpy as np\n'), ((27667, 27684), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (27681, 27684), True, 'import numpy as np\n'), ((32745, 32771), 'random.shuffle', 'random.shuffle', (['animations'], {}), '(animations)\n', (32759, 32771), False, 'import random\n'), ((14691, 14712), 'numpy.clip', 'np.clip', (['alphas', '(0)', '(1)'], {}), '(alphas, 0, 1)\n', (14698, 14712), True, 'import numpy as np\n'), ((28612, 28635), 'math.ceil', 'ceil', (['(virtual_time / dt)'], {}), '(virtual_time / dt)\n', (28616, 28635), False, 'from math import ceil, floor\n'), ((14652, 14668), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (14660, 14668), True, 'import numpy as np\n'), ((19628, 19661), 'math.floor', 'floor', (["(-config['frame_width'] / 2)"], {}), "(-config['frame_width'] / 2)\n", (19633, 19661), False, 'from math import ceil, floor\n'), ((19675, 19706), 'math.ceil', 'ceil', (["(config['frame_width'] / 2)"], {}), "(config['frame_width'] / 2)\n", (19679, 19706), False, 'from math import ceil, floor\n'), ((19766, 19800), 'math.floor', 'floor', (["(-config['frame_height'] / 2)"], {}), "(-config['frame_height'] / 2)\n", (19771, 19800), False, 'from math import ceil, floor\n'), ((19814, 19846), 'math.ceil', 'ceil', (["(config['frame_height'] / 2)"], {}), "(config['frame_height'] / 2)\n", (19818, 19846), False, 'from math import ceil, floor\n'), ((26366, 26399), 'math.floor', 'floor', (["(-config['frame_width'] / 2)"], {}), "(-config['frame_width'] / 2)\n", (26371, 26399), False, 'from math import ceil, floor\n'), ((26413, 26444), 'math.ceil', 'ceil', (["(config['frame_width'] / 2)"], {}), "(config['frame_width'] / 2)\n", (26417, 26444), False, 'from math import ceil, floor\n'), ((26504, 26538), 'math.floor', 'floor', (["(-config['frame_height'] / 2)"], {}), "(-config['frame_height'] / 2)\n", (26509, 26538), False, 'from math import ceil, floor\n'), ((26552, 26584), 'math.ceil', 'ceil', (["(config['frame_height'] / 2)"], {}), "(config['frame_height'] / 2)\n", (26556, 26584), False, 'from math import ceil, floor\n'), ((34762, 34777), 'random.random', 'random.random', ([], {}), '()\n', (34775, 34777), False, 'import random\n'), 
((2995, 3012), 'numpy.linalg.norm', 'np.linalg.norm', (['p'], {}), '(p)\n', (3009, 3012), True, 'import numpy as np\n'), ((27984, 28008), 'numpy.arange', 'np.arange', (['*self.x_range'], {}), '(*self.x_range)\n', (27993, 28008), True, 'import numpy as np\n'), ((28034, 28058), 'numpy.arange', 'np.arange', (['*self.y_range'], {}), '(*self.y_range)\n', (28043, 28058), True, 'import numpy as np\n'), ((28084, 28108), 'numpy.arange', 'np.arange', (['*self.z_range'], {}), '(*self.z_range)\n', (28093, 28108), True, 'import numpy as np\n'), ((35152, 35197), 'numpy.clip', 'np.clip', (['(line.time / line.anim.run_time)', '(0)', '(1)'], {}), '(line.time / line.anim.run_time, 0, 1)\n', (35159, 35197), True, 'import numpy as np\n'), ((27892, 27911), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (27908, 27911), True, 'import numpy as np\n'), ((30366, 30389), 'numpy.array', 'np.array', (['[0, 0.5, 0.5]'], {}), '([0, 0.5, 0.5])\n', (30374, 30389), True, 'import numpy as np\n')]
|
import sys
import typing
import numpy as np
def solve(
n: int,
g: np.array,
) -> typing.NoReturn:
indeg = np.zeros(
n,
dtype=np.int64,
)
for v in g[:, 1]:
indeg[v] += 1
g = g[g[:, 0].argsort()]
i = np.searchsorted(
g[:, 0],
np.arange(n + 1)
)
q = [
v for v in range(n)
if not indeg[v]
]
dist = np.zeros(
n,
dtype=np.int64,
)
for u in q:
for j in range(
i[u], i[u + 1],
):
v = g[j, 1]
indeg[v] -= 1
dist[v] = max(
dist[v],
dist[u] + 1,
)
if indeg[v]: continue
q.append(v)
print(dist.max())
def main() -> typing.NoReturn:
n, m = map(
int, input().split(),
)
g = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2) - 1
solve(n, g)
OJ = 'ONLINE_JUDGE'
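# Ahead-of-time compilation pattern: when the script is invoked with 'ONLINE_JUDGE' as the last
# command-line argument, numba.pycc compiles `solve` into a C extension named my_module and exits;
# the subsequent (real) run imports the precompiled `solve`, avoiding JIT warm-up on the judge.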
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
signature = (i8, i8[:, :])
cc.export(
fn.__name__,
signature,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
|
[
"numba.pycc.CC",
"numpy.zeros",
"my_module.solve",
"sys.stdin.read",
"numpy.arange"
] |
[((115, 142), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (123, 142), True, 'import numpy as np\n'), ((347, 374), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (355, 374), True, 'import numpy as np\n'), ((791, 802), 'my_module.solve', 'solve', (['n', 'g'], {}), '(n, g)\n', (796, 802), False, 'from my_module import solve\n'), ((912, 927), 'numba.pycc.CC', 'CC', (['"""my_module"""'], {}), "('my_module')\n", (914, 927), False, 'from numba.pycc import CC\n'), ((261, 277), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (270, 277), True, 'import numpy as np\n'), ((721, 737), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (735, 737), False, 'import sys\n')]
|
'''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_equal
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
data_fmts = ['%0.2f', '%d'],
empty_cell = ' ',
colwidths = 1,
colsep=' * ',
row_pre = '* ',
row_post = ' *',
table_dec_above='*',
table_dec_below='*',
header_dec_below='*',
header_fmt = '%s',
stub_fmt = '%s',
title_align='r',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
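# txt_fmt1 exercises SimpleTable's custom text formatting hooks: '*' table borders, ' * ' column
# separators, and per-column data formats ('%0.2f' then '%d'), as verified in test_txt_fmt1 below.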
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
if cell.data is np.nan:
return 'missing'
class TestCell(object):
def test_celldata(self):
celldata = cell0data, cell1data, row1data[0], row1data[1]
cells = [Cell(datum, datatype=i % 2)
for i, datum in enumerate(celldata)]
for cell, datum in zip(cells, celldata):
assert_equal(cell.data, datum)
class TestSimpleTable(object):
def test_txt_fmt1(self):
# Limited test of custom txt_fmt
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text()
#print('actual')
#print(actual)
#print('desired')
#print(desired)
assert_equal(actual, desired)
def test_ltx_fmt1(self):
# Limited test of custom ltx_fmt
desired = r"""
\begin{center}
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
\end{center}
"""
actual = '\n%s\n' % tbl.as_latex_tabular()
#print(actual)
#print(desired)
assert_equal(actual, desired)
def test_html_fmt1(self):
# Limited test of custom html_fmt
desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
#the previous has significant trailing whitespace that got removed
#desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n'''
actual = '\n%s\n' % tbl.as_html()
actual = '\n'.join((line.rstrip() for line in actual.split('\n')))
#print(actual)
#print(desired)
#print len(actual), len(desired)
assert_equal(actual, desired)
def test_customlabel(self):
# Limited test of custom custom labeling
tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
tbl[1][1].data = np.nan
tbl.label_cells(custom_labeller)
#print([[c.datatype for c in row] for row in tbl])
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text(missing='--')
assert_equal(actual, desired)
|
[
"numpy.testing.assert_equal",
"statsmodels.iolib.table.Cell",
"statsmodels.iolib.table.default_latex_fmt.copy",
"statsmodels.iolib.table.default_html_fmt.copy",
"statsmodels.compat.python.zip",
"statsmodels.iolib.table.SimpleTable"
] |
[((619, 643), 'statsmodels.iolib.table.default_latex_fmt.copy', 'default_latex_fmt.copy', ([], {}), '()\n', (641, 643), False, 'from statsmodels.iolib.table import default_latex_fmt\n'), ((656, 679), 'statsmodels.iolib.table.default_html_fmt.copy', 'default_html_fmt.copy', ([], {}), '()\n', (677, 679), False, 'from statsmodels.iolib.table import default_html_fmt\n'), ((1318, 1427), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['table1data', 'test1header', 'test1stubs'], {'txt_fmt': 'txt_fmt1', 'ltx_fmt': 'ltx_fmt1', 'html_fmt': 'html_fmt1'}), '(table1data, test1header, test1stubs, txt_fmt=txt_fmt1, ltx_fmt=\n ltx_fmt1, html_fmt=html_fmt1)\n', (1329, 1427), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((1756, 1776), 'statsmodels.compat.python.zip', 'zip', (['cells', 'celldata'], {}), '(cells, celldata)\n', (1759, 1776), False, 'from statsmodels.compat.python import zip\n'), ((2278, 2307), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (2290, 2307), False, 'from numpy.testing import assert_equal\n'), ((2775, 2804), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (2787, 2804), False, 'from numpy.testing import assert_equal\n'), ((3673, 3702), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (3685, 3702), False, 'from numpy.testing import assert_equal\n'), ((3799, 3865), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['table1data', 'test1header', 'test1stubs'], {'txt_fmt': 'txt_fmt1'}), '(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)\n', (3810, 3865), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((4266, 4295), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (4278, 4295), False, 'from numpy.testing import assert_equal\n'), ((1647, 1674), 'statsmodels.iolib.table.Cell', 'Cell', (['datum'], {'datatype': '(i % 2)'}), '(datum, datatype=i % 2)\n', (1651, 1674), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((1790, 1820), 'numpy.testing.assert_equal', 'assert_equal', (['cell.data', 'datum'], {}), '(cell.data, datum)\n', (1802, 1820), False, 'from numpy.testing import assert_equal\n')]
|
from matplotlib import colors
import numpy as np
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
        x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]  # map vmin/vcenter/vmax onto 0/0.5/1
return np.ma.masked_array(np.interp(value, x, y))
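# Hedged usage sketch (assumed, not part of the original source):
#   norm = MidpointNormalize(vmin=-1.0, vmax=3.0, vcenter=0.0)
#   plt.imshow(data, cmap="RdBu_r", norm=norm)  # diverging colors centered on vcenter
# where `plt` and `data` are placeholders for matplotlib.pyplot and a 2D array.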
|
[
"matplotlib.colors.Normalize.__init__",
"numpy.interp"
] |
[((417, 466), 'matplotlib.colors.Normalize.__init__', 'colors.Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (442, 466), False, 'from matplotlib import colors\n'), ((737, 759), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (746, 759), True, 'import numpy as np\n')]
|
from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
class FurthestSampling(Function):
@staticmethod
def forward(ctx, xyz, m):
"""
input: xyz: (b, n, 3) and n > m, m: int32
output: idx: (b, m)
"""
assert xyz.is_contiguous()
b, n, _ = xyz.size()
idx = torch.cuda.IntTensor(b, m)
temp = torch.cuda.FloatTensor(b, n).fill_(1e10)
pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx)
return idx
@staticmethod
def backward(xyz, a=None):
return None, None
furthestsampling = FurthestSampling.apply
class Gathering(Function):
@staticmethod
def forward(ctx, features, idx):
"""
input: features: (b, c, n), idx : (b, m) tensor
output: (b, c, m)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
m = idx.size(1)
output = torch.cuda.FloatTensor(b, c, m)
pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output)
ctx.for_backwards = (idx, c, n)
return output
@staticmethod
def backward(ctx, grad_out):
idx, c, n = ctx.for_backwards
b, m = idx.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data)
return grad_features, None
gathering = Gathering.apply
class NearestNeighbor(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
input: unknown: (b, n, 3), known: (b, m, 3)
output: dist2: (b, n, 3) l2 distance to the three nearest neighbors
idx: (b, n, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
b, n, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(b, n, 3)
idx = torch.cuda.IntTensor(b, n, 3)
pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
nearestneighbor = NearestNeighbor.apply
class Interpolation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
        Performs weighted linear interpolation on 3 features
input: features: (b, c, m) features descriptors to be interpolated from
idx: (b, n, 3) three nearest neighbors of the target features in features
weight: (b, n, 3) weights
output: (b, c, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
b, c, m = features.size()
n = idx.size(1)
ctx.interpolation_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(b, c, n)
pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, n)
output: grad_features: (b, c, m), None, None
"""
idx, weight, m = ctx.interpolation_for_backward
b, c, n = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, m).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
interpolation = Interpolation.apply
class Grouping(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.FloatTensor(b, c, m, nsample)
pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output)
ctx.for_backwards = (idx, n)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, m, nsample)
output: (b, c, n), None
"""
idx, n = ctx.for_backwards
b, c, m, nsample = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping = Grouping.apply
class GroupingInt(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.LongTensor(b, c, m, nsample)
pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output)
return output
@staticmethod
def backward(ctx, a=None):
return None, None
grouping_int = GroupingInt.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
input: radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features
new_xyz: torch.Tensor, (b, m, 3) centers of the ball query
        output: (b, m, nsample) tensor with the indices of the features that form the query balls
"""
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, n, _ = xyz.size()
m = new_xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ballquery = BallQuery.apply
class FeatureDistribute(Function):
@staticmethod
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param max_xyz: (b, n, 3)
:param xyz: (b, m, 3)
:return: distribute_idx: (b, m)
"""
assert max_xyz.is_contiguous()
assert xyz.is_contiguous()
b, n, _ = max_xyz.size()
m = xyz.size(1)
distribute_idx = torch.cuda.IntTensor(b, m).zero_()
pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx)
return distribute_idx
@staticmethod
def backward(ctx, a=None):
return None, None
featuredistribute = FeatureDistribute.apply
class FeatureGather(Function):
@staticmethod
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param max_feature: (b, c, n)
:param distribute_idx: (b, m)
:return: distribute_feature: (b, c, m)
'''
assert max_feature.is_contiguous()
assert distribute_idx.is_contiguous()
b, c, n = max_feature.size()
m = distribute_idx.size(1)
distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_()
pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature)
ctx.for_backwards = (distribute_idx, n)
return distribute_feature
@staticmethod
def backward(ctx, grad_distribute_feature: torch.Tensor):
'''
:param ctx:
:param grad_distribute_feature: (b, c, m)
:return: grad_max_feature: (b, c, n), None
'''
distribute_idx, n = ctx.for_backwards
b, c, m = grad_distribute_feature.size()
grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_()
grad_distribute_feature_data = grad_distribute_feature.data.contiguous()
pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data)
return grad_max_feature, None
featuregather = FeatureGather.apply
class LabelStatBallRange(Function):
@staticmethod
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param radius:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
labelstat_ballrange = LabelStatBallRange.apply
class LabelStatIdx(Function):
@staticmethod
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param nsample:
:param label_stat: (b, n, nclass)
:param idx: (b, m, nsample)
:return: new_label_stat: (b, m, nclass)
'''
assert label_stat.is_contiguous()
assert idx.is_contiguous()
b, n, nclass = label_stat.size()
m = idx.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None
labelstat_idx = LabelStatIdx.apply
class LabelStatAndBallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
'''
:param ctx:
:param radius:
:param nsample:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass) idx: (b, m, nsample)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat)
return new_label_stat, idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None
labelstat_and_ballquery = LabelStatAndBallQuery.apply
def pairwise_distances(x, y=None):
'''
    Input: x is an Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
import numpy as np
return torch.clamp(dist, 0.0, np.inf)
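# Hedged worked example (illustrative, not part of the original source): with x = [[0., 0.]] and
# y = [[3., 4.]], x_norm = 0, y_norm = 25 and 2 * x @ y_t = 0, so dist = [[25.]] = ||x - y||^2,
# matching the docstring definition dist[i, j] = ||x[i, :] - y[j, :]||^2.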
class KNNQueryNaive(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 0:nsample].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_naive = KNNQueryNaive.apply
class KNNQuery(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
( dist2: (b, m, nsample) )
"""
if new_xyz is None:
new_xyz = xyz
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, m, _ = new_xyz.size()
n = xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_()
pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None
knnquery = KNNQuery.apply
class KNNQueryExclude(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, Number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: new_features: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 1:nsample+1].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_exclude = KNNQueryExclude.apply
class Le_QueryAndGroup_SameSize(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_SameSize, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, n, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
        if new_xyz is None:
            new_xyz = xyz
        assert xyz.size() == new_xyz.size()
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class QueryAndGroup_Dilate(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup_Dilate, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample)
idx2 = np.array([i for i in range(2*self.nsample)])
np.random.shuffle(idx2)
idx2 = idx2[:self.nsample]
idx = idx[:, :, idx2]
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class Le_QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class Gen_QueryAndGroupXYZ(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Gen_QueryAndGroupXYZ, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
#def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
#if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
return grouped_xyz
class Le_QueryAndGroup_OnlyFeature(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_OnlyFeature, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
#xyz_trans = xyz.transpose(1, 2).contiguous()
#grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
#grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
        else:
            # grouped_xyz is never computed in this feature-only variant,
            # so `features` must always be provided.
            raise ValueError("Le_QueryAndGroup_OnlyFeature requires `features`")
return new_features
class GroupAll(nn.Module):
"""
Groups all features
"""
def __init__(self, use_xyz: bool = True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: ignored
               features: (b, c, n) descriptors of the features
        output: new_features: (b, c+3, 1, n) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
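# Illustrative usage sketch (added for clarity, not part of the original module);
# the toy shapes below are assumptions: b=2 examples, n=16 points, c=8 channels.
if __name__ == "__main__":
    import torch
    _xyz = torch.rand(2, 16, 3)        # (b, n, 3) point coordinates
    _features = torch.rand(2, 8, 16)   # (b, c, n) per-point features
    _group_all = GroupAll(use_xyz=True)
    _new_features = _group_all(_xyz, None, _features)
    print(_new_features.shape)          # torch.Size([2, 11, 1, 16]) -> (b, c+3, 1, n)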
|
[
"metrics.pointops.pointops_cuda.gathering_backward_cuda",
"torch.cuda.LongTensor",
"metrics.pointops.pointops_cuda.featuregather_forward_cuda",
"metrics.pointops.pointops_cuda.nearestneighbor_cuda",
"metrics.pointops.pointops_cuda.gathering_forward_cuda",
"torch.sqrt",
"metrics.pointops.pointops_cuda.labelstat_and_ballquery_cuda",
"metrics.pointops.pointops_cuda.grouping_int_forward_cuda",
"metrics.pointops.pointops_cuda.featuregather_backward_cuda",
"metrics.pointops.pointops_cuda.labelstat_idx_cuda",
"torch.cuda.IntTensor",
"metrics.pointops.pointops_cuda.grouping_forward_cuda",
"metrics.pointops.pointops_cuda.knnquery_cuda",
"metrics.pointops.pointops_cuda.interpolation_forward_cuda",
"torch.sort",
"metrics.pointops.pointops_cuda.interpolation_backward_cuda",
"metrics.pointops.pointops_cuda.grouping_backward_cuda",
"torch.transpose",
"torch.clamp",
"torch.cat",
"torch.cuda.FloatTensor",
"metrics.pointops.pointops_cuda.ballquery_cuda",
"metrics.pointops.pointops_cuda.furthestsampling_cuda",
"metrics.pointops.pointops_cuda.labelstat_ballrange_cuda",
"metrics.pointops.pointops_cuda.featuredistribute_cuda",
"torch.mm",
"numpy.random.shuffle"
] |
[((12487, 12517), 'torch.clamp', 'torch.clamp', (['dist', '(0.0)', 'np.inf'], {}), '(dist, 0.0, np.inf)\n', (12498, 12517), False, 'import torch\n'), ((425, 451), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm'], {}), '(b, m)\n', (445, 451), False, 'import torch\n'), ((516, 576), 'metrics.pointops.pointops_cuda.furthestsampling_cuda', 'pointops_cuda.furthestsampling_cuda', (['b', 'n', 'm', 'xyz', 'temp', 'idx'], {}), '(b, n, m, xyz, temp, idx)\n', (551, 576), False, 'from metrics.pointops import pointops_cuda\n'), ((1055, 1086), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (1077, 1086), False, 'import torch\n'), ((1095, 1166), 'metrics.pointops.pointops_cuda.gathering_forward_cuda', 'pointops_cuda.gathering_forward_cuda', (['b', 'c', 'n', 'm', 'features', 'idx', 'output'], {}), '(b, c, n, m, features, idx, output)\n', (1131, 1166), False, 'from metrics.pointops import pointops_cuda\n'), ((1468, 1561), 'metrics.pointops.pointops_cuda.gathering_backward_cuda', 'pointops_cuda.gathering_backward_cuda', (['b', 'c', 'n', 'm', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(b, c, n, m, grad_out_data, idx,\n grad_features.data)\n', (1505, 1561), False, 'from metrics.pointops import pointops_cuda\n'), ((2202, 2233), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'n', '(3)'], {}), '(b, n, 3)\n', (2224, 2233), False, 'import torch\n'), ((2248, 2277), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'n', '(3)'], {}), '(b, n, 3)\n', (2268, 2277), False, 'import torch\n'), ((2286, 2357), 'metrics.pointops.pointops_cuda.nearestneighbor_cuda', 'pointops_cuda.nearestneighbor_cuda', (['b', 'n', 'm', 'unknown', 'known', 'dist2', 'idx'], {}), '(b, n, m, unknown, known, dist2, idx)\n', (2320, 2357), False, 'from metrics.pointops import pointops_cuda\n'), ((3276, 3307), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (3298, 3307), False, 'import torch\n'), ((3316, 3403), 'metrics.pointops.pointops_cuda.interpolation_forward_cuda', 'pointops_cuda.interpolation_forward_cuda', (['b', 'c', 'm', 'n', 'features', 'idx', 'weight', 'output'], {}), '(b, c, m, n, features, idx, weight,\n output)\n', (3356, 3403), False, 'from metrics.pointops import pointops_cuda\n'), ((3864, 3969), 'metrics.pointops.pointops_cuda.interpolation_backward_cuda', 'pointops_cuda.interpolation_backward_cuda', (['b', 'c', 'n', 'm', 'grad_out_data', 'idx', 'weight', 'grad_features.data'], {}), '(b, c, n, m, grad_out_data, idx,\n weight, grad_features.data)\n', (3905, 3969), False, 'from metrics.pointops import pointops_cuda\n'), ((4499, 4539), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm', 'nsample'], {}), '(b, c, m, nsample)\n', (4521, 4539), False, 'import torch\n'), ((4548, 4627), 'metrics.pointops.pointops_cuda.grouping_forward_cuda', 'pointops_cuda.grouping_forward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'features', 'idx', 'output'], {}), '(b, c, n, m, nsample, features, idx, output)\n', (4583, 4627), False, 'from metrics.pointops import pointops_cuda\n'), ((5091, 5192), 'metrics.pointops.pointops_cuda.grouping_backward_cuda', 'pointops_cuda.grouping_backward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(b, c, n, m, nsample, grad_out_data,\n idx, grad_features.data)\n', (5127, 5192), False, 'from metrics.pointops import pointops_cuda\n'), ((5709, 5748), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['b', 'c', 'm', 'nsample'], {}), '(b, c, m, 
nsample)\n', (5730, 5748), False, 'import torch\n'), ((5757, 5844), 'metrics.pointops.pointops_cuda.grouping_int_forward_cuda', 'pointops_cuda.grouping_int_forward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'features', 'idx', 'output'], {}), '(b, c, n, m, nsample, features, idx,\n output)\n', (5796, 5844), False, 'from metrics.pointops import pointops_cuda\n'), ((6718, 6791), 'metrics.pointops.pointops_cuda.ballquery_cuda', 'pointops_cuda.ballquery_cuda', (['b', 'n', 'm', 'radius', 'nsample', 'new_xyz', 'xyz', 'idx'], {}), '(b, n, m, radius, nsample, new_xyz, xyz, idx)\n', (6746, 6791), False, 'from metrics.pointops import pointops_cuda\n'), ((7410, 7485), 'metrics.pointops.pointops_cuda.featuredistribute_cuda', 'pointops_cuda.featuredistribute_cuda', (['b', 'n', 'm', 'max_xyz', 'xyz', 'distribute_idx'], {}), '(b, n, m, max_xyz, xyz, distribute_idx)\n', (7446, 7485), False, 'from metrics.pointops import pointops_cuda\n'), ((8188, 8293), 'metrics.pointops.pointops_cuda.featuregather_forward_cuda', 'pointops_cuda.featuregather_forward_cuda', (['b', 'n', 'm', 'c', 'max_feature', 'distribute_idx', 'distribute_feature'], {}), '(b, n, m, c, max_feature,\n distribute_idx, distribute_feature)\n', (8228, 8293), False, 'from metrics.pointops import pointops_cuda\n'), ((8852, 8978), 'metrics.pointops.pointops_cuda.featuregather_backward_cuda', 'pointops_cuda.featuregather_backward_cuda', (['b', 'n', 'm', 'c', 'grad_distribute_feature_data', 'distribute_idx', 'grad_max_feature.data'], {}), '(b, n, m, c,\n grad_distribute_feature_data, distribute_idx, grad_max_feature.data)\n', (8893, 8978), False, 'from metrics.pointops import pointops_cuda\n'), ((9710, 9819), 'metrics.pointops.pointops_cuda.labelstat_ballrange_cuda', 'pointops_cuda.labelstat_ballrange_cuda', (['b', 'n', 'm', 'radius', 'nclass', 'new_xyz', 'xyz', 'label_stat', 'new_label_stat'], {}), '(b, n, m, radius, nclass, new_xyz,\n xyz, label_stat, new_label_stat)\n', (9748, 9819), False, 'from metrics.pointops import pointops_cuda\n'), ((10543, 10638), 'metrics.pointops.pointops_cuda.labelstat_idx_cuda', 'pointops_cuda.labelstat_idx_cuda', (['b', 'n', 'm', 'nsample', 'nclass', 'label_stat', 'idx', 'new_label_stat'], {}), '(b, n, m, nsample, nclass, label_stat, idx,\n new_label_stat)\n', (10575, 10638), False, 'from metrics.pointops import pointops_cuda\n'), ((11550, 11677), 'metrics.pointops.pointops_cuda.labelstat_and_ballquery_cuda', 'pointops_cuda.labelstat_and_ballquery_cuda', (['b', 'n', 'm', 'radius', 'nsample', 'nclass', 'new_xyz', 'xyz', 'label_stat', 'idx', 'new_label_stat'], {}), '(b, n, m, radius, nsample, nclass,\n new_xyz, xyz, label_stat, idx, new_label_stat)\n', (11592, 11677), False, 'from metrics.pointops import pointops_cuda\n'), ((12246, 12270), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (12261, 12270), False, 'import torch\n'), ((12340, 12364), 'torch.transpose', 'torch.transpose', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (12355, 12364), False, 'import torch\n'), ((13671, 13694), 'torch.sort', 'torch.sort', (['dist'], {'dim': '(2)'}), '(dist, dim=2)\n', (13681, 13694), False, 'import torch\n'), ((14633, 14704), 'metrics.pointops.pointops_cuda.knnquery_cuda', 'pointops_cuda.knnquery_cuda', (['b', 'n', 'm', 'nsample', 'xyz', 'new_xyz', 'idx', 'dist2'], {}), '(b, n, m, nsample, xyz, new_xyz, idx, dist2)\n', (14660, 14704), False, 'from metrics.pointops import pointops_cuda\n'), ((15997, 16020), 'torch.sort', 'torch.sort', (['dist'], {'dim': '(2)'}), '(dist, dim=2)\n', (16007, 16020), False, 
'import torch\n'), ((21732, 21755), 'numpy.random.shuffle', 'np.random.shuffle', (['idx2'], {}), '(idx2)\n', (21749, 21755), True, 'import numpy as np\n'), ((2373, 2390), 'torch.sqrt', 'torch.sqrt', (['dist2'], {}), '(dist2)\n', (2383, 2390), False, 'import torch\n'), ((12436, 12452), 'torch.mm', 'torch.mm', (['x', 'y_t'], {}), '(x, y_t)\n', (12444, 12452), False, 'import torch\n'), ((467, 495), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'n'], {}), '(b, n)\n', (489, 495), False, 'import torch\n'), ((1369, 1400), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (1391, 1400), False, 'import torch\n'), ((3765, 3796), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (3787, 3796), False, 'import torch\n'), ((4992, 5023), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (5014, 5023), False, 'import torch\n'), ((6666, 6701), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (6686, 6701), False, 'import torch\n'), ((7367, 7393), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm'], {}), '(b, m)\n', (7387, 7393), False, 'import torch\n'), ((8140, 8171), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (8162, 8171), False, 'import torch\n'), ((8723, 8754), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (8745, 8754), False, 'import torch\n'), ((9659, 9693), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (9679, 9693), False, 'import torch\n'), ((10492, 10526), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (10512, 10526), False, 'import torch\n'), ((11440, 11474), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (11460, 11474), False, 'import torch\n'), ((11497, 11532), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (11517, 11532), False, 'import torch\n'), ((14519, 14554), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (14539, 14554), False, 'import torch\n'), ((14579, 14616), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (14601, 14616), False, 'import torch\n'), ((20079, 20128), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (20088, 20128), False, 'import torch\n'), ((22265, 22314), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (22274, 22314), False, 'import torch\n'), ((29001, 29050), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (29010, 29050), False, 'import torch\n')]
|
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse
def ineq_constraint_1(v):
return np.array([vi for vi in v])
def ineq_constraint_2(v):
return np.array([-vi + 30 for vi in v])
class WeightAverage:
def __init__(self, mean, csv):
self.df = pandas.read_csv(csv)
self.course = self.df['name']
self.expected_mean = mean
self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])
self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]
self.tot_credits = sum(self.owned_credits) + sum(self.credits)
def weight_average(self, v):
term1 = 0
term2 = 0
for i in range(0, len(self.owned_grades)):
term1 = term1 + self.owned_grades[i] * self.owned_credits[i]
for i in range(0, len(v)):
term2 = term2 + v[i] * self.credits[i]
return (term1 + term2) / self.tot_credits
def eq_constraint(self, v):
return self.weight_average(v) - self.expected_mean
def solve(self):
cons = (
{'type': 'eq', 'fun': self.eq_constraint},
{'type': 'ineq', 'fun': ineq_constraint_1},
{'type': 'ineq', 'fun': ineq_constraint_2})
res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)
if not res.success:
return None
return res.x
def error_no_solution():
print("Mean not possible with current vote :(")
exit(0)
def output_result(solver, sol):
avg = solver.weight_average(sol)
df = solver.df
print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
if sol is None:
print("Not Possible with current grades :(")
exit()
for index, row in df.query('grade > 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
i = 0
for index, row in df.query('grade == 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
i += 1
return 0
def main():
name = "calcGrades"
description = """CalcGrades is an utility which purpose is to compute the minimum
grades required to get a certain weight average of the grades over the credits,
given the desired output and the grades already owned."""
parser = argparse.ArgumentParser(name, description=description)
parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')
parser.add_argument('--file',dest='file', default='courses.csv', type=str,
help='path to the csv file containing the courses (default: courses.csv)')
parser.add_argument('--floor', default=False, action='store_true',
help='apply floor operation instead of round to solution')
parser.add_argument('--ceil', default=False, action='store_true',
help='apply ceil operation instead of round to solution')
args = parser.parse_args()
mean = args.mean
courses = args.file
solver = WeightAverage(mean, courses)
sol = solver.solve()
if sol is None:
error_no_solution()
if args.ceil:
sol = [math.ceil(x) for x in sol]
elif args.floor:
sol = [math.floor(x) for x in sol]
else:
sol = [round(x) for x in sol]
output_result(solver, sol)
return 0
if __name__ == '__main__':
main()
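# Illustrative input/usage example (added; the file contents and script name are
# assumptions). The code above expects a CSV with 'name', 'credits' and 'grade'
# columns, where grade == 0 marks an exam not yet taken, e.g.:
#
#   name,credits,grade
#   Analysis I,9,27
#   Physics I,6,0
#   Programming,12,30
#
# A possible invocation, assuming the script is saved as calc_grades.py:
#   python calc_grades.py 28 --file courses.csv --ceil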
|
[
"math.ceil",
"argparse.ArgumentParser",
"math.floor",
"pandas.read_csv",
"numpy.array"
] |
[((144, 170), 'numpy.array', 'np.array', (['[vi for vi in v]'], {}), '([vi for vi in v])\n', (152, 170), True, 'import numpy as np\n'), ((210, 244), 'numpy.array', 'np.array', (['[(-vi + 30) for vi in v]'], {}), '([(-vi + 30) for vi in v])\n', (218, 244), True, 'import numpy as np\n'), ((2690, 2744), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['name'], {'description': 'description'}), '(name, description=description)\n', (2713, 2744), False, 'import argparse\n'), ((320, 340), 'pandas.read_csv', 'pandas.read_csv', (['csv'], {}), '(csv)\n', (335, 340), False, 'import pandas\n'), ((3547, 3559), 'math.ceil', 'math.ceil', (['x'], {}), '(x)\n', (3556, 3559), False, 'import math\n'), ((3610, 3623), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (3620, 3623), False, 'import math\n')]
|
# pylint: disable=no-self-use,invalid-name
import numpy as np
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBagOfWordCountsTokenEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
def test_forward_calculates_bow_properly(self):
params = Params({})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_projects_properly(self):
params = Params({"projection_dim": 50})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
|
[
"allennlp.common.Params",
"allennlp.data.Vocabulary",
"torch.from_numpy",
"numpy.array",
"allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params"
] |
[((484, 496), 'allennlp.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (494, 496), False, 'from allennlp.data import Vocabulary\n'), ((755, 765), 'allennlp.common.Params', 'Params', (['{}'], {}), '({})\n', (761, 765), False, 'from allennlp.common import Params\n'), ((785, 852), 'allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params', 'BagOfWordCountsTokenEmbedder.from_params', (['self.vocab'], {'params': 'params'}), '(self.vocab, params=params)\n', (825, 852), False, 'from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\n'), ((876, 910), 'numpy.array', 'np.array', (['[[2, 0], [3, 0], [4, 4]]'], {}), '([[2, 0], [3, 0], [4, 4]])\n', (884, 910), True, 'import numpy as np\n'), ((1038, 1108), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]]'], {}), '([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])\n', (1046, 1108), True, 'import numpy as np\n'), ((1314, 1344), 'allennlp.common.Params', 'Params', (["{'projection_dim': 50}"], {}), "({'projection_dim': 50})\n", (1320, 1344), False, 'from allennlp.common import Params\n'), ((1364, 1431), 'allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params', 'BagOfWordCountsTokenEmbedder.from_params', (['self.vocab'], {'params': 'params'}), '(self.vocab, params=params)\n', (1404, 1431), False, 'from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\n'), ((928, 958), 'torch.from_numpy', 'torch.from_numpy', (['numpy_tensor'], {}), '(numpy_tensor)\n', (944, 958), False, 'import torch\n'), ((1133, 1163), 'torch.from_numpy', 'torch.from_numpy', (['numpy_tensor'], {}), '(numpy_tensor)\n', (1149, 1163), False, 'import torch\n'), ((1539, 1569), 'torch.from_numpy', 'torch.from_numpy', (['numpy_tensor'], {}), '(numpy_tensor)\n', (1555, 1569), False, 'import torch\n')]
|
#!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils
parts = 1
data = utils.get_data() # loading dataset
data = data[data['participant']==parts]
mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat
obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data for n200lat
N_mis = mis.shape[0] # number of missing data
N_obs = obs.shape[0] # number of observed data
modelfile = '../../stans/res_nonhier.stan' # reading the Stan model
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener) # compile the Stan model
ncohers = 2 #Number of coherence conditions
nspats = 2 #Number of spatial conditions
nconds = 4 #Number of conditions
y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()
# set initial data for the Stan model
data_winner = {'N_obs':N_obs, #Number of trial-level observations
               'N_mis':N_mis, #Number of trial-level missing data
               'ncohers':ncohers, #Number of coherence conditions
               'nspats':nspats, #Number of spatial conditions
               'nconds':nconds, #Number of conditions
               'y':np.concatenate([y[obs],y[mis]]), # acc*rt in seconds for observed and missing data
               'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), # coherence index for each trial
               'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), # spatial index for each trial
               'conds':np.concatenate([conds[obs],conds[mis]]), # condition index for each trial
'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation
# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = [] # initial sampling
for c in range(0, nchains):
chaininit = {
'delta': np.random.uniform(1, 3, size=ncohers),
'alpha': np.random.uniform(.5, 1.),
'eta': np.random.uniform(.01, .2),
'res': np.random.uniform(.01, .02, size=nspats),
'n200sub': np.random.uniform(.11, .2, size=nconds),
'lambda': np.random.uniform(.01, .02),
'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)
}
initials.append(chaininit)
# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)
utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')
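# Illustrative follow-up (added, not part of the original script): the posterior
# draws can be summarized directly; fit.extract(permuted=True) is the PyStan 2
# interface for pulling samples out of a StanFit object.
posterior = fit.extract(permuted=True)
print('posterior mean of delta per coherence condition:', posterior['delta'].mean(axis=0))
print('posterior mean of alpha:', posterior['alpha'].mean())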
|
[
"numpy.where",
"utils.get_data",
"numpy.concatenate",
"numpy.random.uniform",
"sys.path.append",
"pystan.StanModel"
] |
[((122, 147), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (137, 147), False, 'import sys\n'), ((180, 196), 'utils.get_data', 'utils.get_data', ([], {}), '()\n', (194, 196), False, 'import utils\n'), ((683, 724), 'pystan.StanModel', 'pystan.StanModel', ([], {'model_code': 'model_wiener'}), '(model_code=model_wiener)\n', (699, 724), False, 'import pystan\n'), ((270, 333), 'numpy.where', 'np.where', (["((data['n200lat'] < 0.101) | (data['n200lat'] > 0.248))"], {}), "((data['n200lat'] < 0.101) | (data['n200lat'] > 0.248))\n", (278, 333), True, 'import numpy as np\n'), ((362, 425), 'numpy.where', 'np.where', (["((data['n200lat'] > 0.101) & (data['n200lat'] < 0.248))"], {}), "((data['n200lat'] > 0.101) & (data['n200lat'] < 0.248))\n", (370, 425), True, 'import numpy as np\n'), ((1442, 1474), 'numpy.concatenate', 'np.concatenate', (['[y[obs], y[mis]]'], {}), '([y[obs], y[mis]])\n', (1456, 1474), True, 'import numpy as np\n'), ((1559, 1609), 'numpy.concatenate', 'np.concatenate', (['[cond_coher[obs], cond_coher[mis]]'], {}), '([cond_coher[obs], cond_coher[mis]])\n', (1573, 1609), True, 'import numpy as np\n'), ((1675, 1723), 'numpy.concatenate', 'np.concatenate', (['[cond_spat[obs], cond_spat[mis]]'], {}), '([cond_spat[obs], cond_spat[mis]])\n', (1689, 1723), True, 'import numpy as np\n'), ((1785, 1825), 'numpy.concatenate', 'np.concatenate', (['[conds[obs], conds[mis]]'], {}), '([conds[obs], conds[mis]])\n', (1799, 1825), True, 'import numpy as np\n'), ((2124, 2161), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {'size': 'ncohers'}), '(1, 3, size=ncohers)\n', (2141, 2161), True, 'import numpy as np\n'), ((2180, 2207), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)'], {}), '(0.5, 1.0)\n', (2197, 2207), True, 'import numpy as np\n'), ((2222, 2250), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.2)'], {}), '(0.01, 0.2)\n', (2239, 2250), True, 'import numpy as np\n'), ((2265, 2307), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.02)'], {'size': 'nspats'}), '(0.01, 0.02, size=nspats)\n', (2282, 2307), True, 'import numpy as np\n'), ((2331, 2372), 'numpy.random.uniform', 'np.random.uniform', (['(0.11)', '(0.2)'], {'size': 'nconds'}), '(0.11, 0.2, size=nconds)\n', (2348, 2372), True, 'import numpy as np\n'), ((2390, 2419), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.02)'], {}), '(0.01, 0.02)\n', (2407, 2419), True, 'import numpy as np\n'), ((2442, 2482), 'numpy.random.uniform', 'np.random.uniform', (['(0.11)', '(0.2)'], {'size': 'N_mis'}), '(0.11, 0.2, size=N_mis)\n', (2459, 2482), True, 'import numpy as np\n')]
|
from dataclasses import dataclass
import numpy as np
import xarray as xr
from power_perceiver.load_prepared_batches.data_sources import PV
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import XarrayBatch
@dataclass
class ReduceNumPVSystems:
"""Reduce the number of PV systems per example to `requested_num_pv_systems`.
    Randomly select PV systems for each example. If fewer PV systems are available
    than requested, then randomly sample with duplicates allowed.
This is implemented as an xr_batch_processor so it can run after
SelectPVSystemsNearCenterOfImage.
"""
requested_num_pv_systems: int
def __post_init__(self):
self.rng = np.random.default_rng() # Seeded by seed_rngs worker_init_function
def __call__(self, xr_batch: XarrayBatch) -> XarrayBatch:
pv_batch = xr_batch[PV]
num_examples = len(pv_batch.example)
selection = np.zeros(shape=(num_examples, self.requested_num_pv_systems), dtype=np.int32)
for example_i in range(num_examples):
pv_mask_for_example = pv_batch.pv_mask.isel(example=example_i).values
all_indicies = np.nonzero(pv_mask_for_example)[0]
# Only allow a PV system to be chosen multiple times for this example if there are
# less available PV systems than requested PV systems.
replace = len(all_indicies) < self.requested_num_pv_systems
chosen_indicies = self.rng.choice(
all_indicies, size=self.requested_num_pv_systems, replace=replace
)
selection[example_i] = chosen_indicies
selection = xr.DataArray(selection, dims=("example", "pv_system"))
pv_batch = pv_batch.isel(pv_system=selection)
xr_batch[PV] = pv_batch
return xr_batch
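# Illustrative usage sketch (added, not part of the original class). The synthetic
# batch below is an assumption: only the (example, pv_system) dims and a boolean
# `pv_mask` variable are required by __call__ above.
if __name__ == "__main__":
    _n_examples, _n_pv = 4, 10
    _pv_batch = xr.Dataset(
        {
            "power": (("example", "pv_system"), np.random.rand(_n_examples, _n_pv)),
            "pv_mask": (("example", "pv_system"), np.ones((_n_examples, _n_pv), dtype=bool)),
        },
        coords={"example": np.arange(_n_examples)},
    )
    _reduce = ReduceNumPVSystems(requested_num_pv_systems=3)
    _out = _reduce({PV: _pv_batch})
    print(_out[PV].sizes)  # pv_system is reduced from 10 to 3 per example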
|
[
"numpy.nonzero",
"numpy.zeros",
"numpy.random.default_rng",
"xarray.DataArray"
] |
[((713, 736), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (734, 736), True, 'import numpy as np\n'), ((942, 1019), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_examples, self.requested_num_pv_systems)', 'dtype': 'np.int32'}), '(shape=(num_examples, self.requested_num_pv_systems), dtype=np.int32)\n', (950, 1019), True, 'import numpy as np\n'), ((1659, 1713), 'xarray.DataArray', 'xr.DataArray', (['selection'], {'dims': "('example', 'pv_system')"}), "(selection, dims=('example', 'pv_system'))\n", (1671, 1713), True, 'import xarray as xr\n'), ((1175, 1206), 'numpy.nonzero', 'np.nonzero', (['pv_mask_for_example'], {}), '(pv_mask_for_example)\n', (1185, 1206), True, 'import numpy as np\n')]
|
import numpy as np
def random_augmentation(img, mask):
#you can add any augmentations you need
return img, mask
def batch_generator(image, mask,
batch_size=1,
crop_size=0,
patch_size=256,
bbox= None,
augmentation=False):
'''
image: nparray, must have 3 dimension
mask: nparray, 2 dimensions, same size as image
batch_size: int, number of images in a batch
patch_size: int, size of the image returned, patch is square
crop_size: int, how much pixels should be cropped off the mask
bbox: None or tuple of 4 ints, (min_y, max_y, min_x, max_x), the data is selected from within the bbox
augmentation: turn on/off data augmentation. The augmentation function is random_augmentation() above
returns batch of image and mask patches, image is turned to 'channels last' as required by unet
'''
if np.ndim(mask) != 2 or np.ndim(image) != 3:
raise ValueError('image must have 3 dims and mask 2 dims')
if mask.shape != image.shape[1:]:
raise ValueError('image and mask shape is different')
im_max = float(np.max(image))
mask_max = 1.0
#select subimage
if bbox is not None:
# check bbox
        if bbox[0] < 0 or bbox[2] < 0 \
            or bbox[1] > mask.shape[0] or bbox[3] > mask.shape[1] \
            or bbox[0] + patch_size > bbox[1] or bbox[2] + patch_size > bbox[3] \
            or patch_size <= 0:
            raise ValueError("Incorrect bbox or patch size")
img_ = image[:, bbox[0] : bbox[1], bbox[2]:bbox[3]]
mask_ = mask[bbox[0] : bbox[1], bbox[2]:bbox[3]]
else:
img_ = image
mask_ = mask
while 1:
x = []
y = []
for i in range (batch_size):
random_x = np.random.randint(0, mask_.shape[1] - patch_size)
random_y = np.random.randint(0, mask_.shape[0] - patch_size)
img_patch = img_[:,
random_y : random_y + patch_size,
random_x : random_x + patch_size] / im_max
# transform the image from channels-first (rasterio format) to channels-last (default tensorflow format)
img_patch = np.moveaxis(img_patch, 0, 2)
mask_patch = mask_[random_y : random_y + patch_size,
random_x : random_x + patch_size] / mask_max
if augmentation:
img_patch, mask_patch = random_augmentation(img_patch, mask_patch)
# mask is cropped as it may be useful for some convnets that have output size less than input
if crop_size > 0:
mask_patch = mask_patch[crop_size : -crop_size,
crop_size : -crop_size]
mask_patch = np.expand_dims(mask_patch, 2)
x.append(img_patch)
y.append(mask_patch)
yield (np.array(x), np.array(y))
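# Illustrative usage sketch (added, not part of the original module); the 3-band
# 512x512 image below is an assumption purely for demonstration.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _image = _rng.random((3, 512, 512))                 # channels-first, as read by rasterio
    _mask = (_rng.random((512, 512)) > 0.5).astype(float)
    _gen = batch_generator(_image, _mask, batch_size=2, patch_size=256)
    _x, _y = next(_gen)
    print(_x.shape, _y.shape)                           # (2, 256, 256, 3) (2, 256, 256, 1)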
|
[
"numpy.ndim",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"numpy.expand_dims",
"numpy.moveaxis"
] |
[((1181, 1194), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1187, 1194), True, 'import numpy as np\n'), ((950, 963), 'numpy.ndim', 'np.ndim', (['mask'], {}), '(mask)\n', (957, 963), True, 'import numpy as np\n'), ((972, 986), 'numpy.ndim', 'np.ndim', (['image'], {}), '(image)\n', (979, 986), True, 'import numpy as np\n'), ((1843, 1892), 'numpy.random.randint', 'np.random.randint', (['(0)', '(mask_.shape[1] - patch_size)'], {}), '(0, mask_.shape[1] - patch_size)\n', (1860, 1892), True, 'import numpy as np\n'), ((1916, 1965), 'numpy.random.randint', 'np.random.randint', (['(0)', '(mask_.shape[0] - patch_size)'], {}), '(0, mask_.shape[0] - patch_size)\n', (1933, 1965), True, 'import numpy as np\n'), ((2266, 2294), 'numpy.moveaxis', 'np.moveaxis', (['img_patch', '(0)', '(2)'], {}), '(img_patch, 0, 2)\n', (2277, 2294), True, 'import numpy as np\n'), ((2882, 2911), 'numpy.expand_dims', 'np.expand_dims', (['mask_patch', '(2)'], {}), '(mask_patch, 2)\n', (2896, 2911), True, 'import numpy as np\n'), ((2992, 3003), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3000, 3003), True, 'import numpy as np\n'), ((3005, 3016), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3013, 3016), True, 'import numpy as np\n')]
|
import numpy as np
import pickle
from collections import defaultdict
from parsing import parser
from analysis import training
def main():
    parse = parser.Parser()
    train_digits = parse.parse_file('data/pendigits-train')
test_digits = parse.parse_file('data/pendigits-test')
centroids = training.get_digit_kmeans_centroids(
train_digits, 256 - 3)
training.set_digit_observations(
train_digits, centroids, 256)
training.set_digit_observations(
test_digits, centroids, 256)
train_sequences = defaultdict(list)
test_sequences = []
n_test_sequences = len(test_digits)
test_expected_labels = np.ndarray(shape=(n_test_sequences,))
for digit in train_digits:
train_sequences[digit.label].append(digit.np_array_observations)
for i, digit in enumerate(test_digits):
test_sequences.append(digit.np_array_observations)
test_expected_labels[i] = digit.label
with open('train_sequences', 'wb') as f:
pickle.dump(train_sequences, f)
with open('test_sequences', 'wb') as f:
pickle.dump(test_sequences, f)
with open('test_expected_labels', 'wb') as f:
pickle.dump(test_expected_labels, f)
if __name__ == '__main__':
main()
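# Illustrative follow-up (added): the pickled artefacts written above can later be
# reloaded, e.g. by an HMM training script, with pickle.load:
#   with open('train_sequences', 'rb') as f:
#       train_sequences = pickle.load(f)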
|
[
"pickle.dump",
"parsing.parser.Parser",
"analysis.training.get_digit_kmeans_centroids",
"numpy.ndarray",
"collections.defaultdict",
"analysis.training.set_digit_observations"
] |
[((152, 167), 'parsing.parser.Parser', 'parser.Parser', ([], {}), '()\n', (165, 167), False, 'from parsing import parser\n'), ((306, 364), 'analysis.training.get_digit_kmeans_centroids', 'training.get_digit_kmeans_centroids', (['train_digits', '(256 - 3)'], {}), '(train_digits, 256 - 3)\n', (341, 364), False, 'from analysis import training\n'), ((379, 440), 'analysis.training.set_digit_observations', 'training.set_digit_observations', (['train_digits', 'centroids', '(256)'], {}), '(train_digits, centroids, 256)\n', (410, 440), False, 'from analysis import training\n'), ((454, 514), 'analysis.training.set_digit_observations', 'training.set_digit_observations', (['test_digits', 'centroids', '(256)'], {}), '(test_digits, centroids, 256)\n', (485, 514), False, 'from analysis import training\n'), ((551, 568), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (562, 568), False, 'from collections import defaultdict\n'), ((660, 697), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(n_test_sequences,)'}), '(shape=(n_test_sequences,))\n', (670, 697), True, 'import numpy as np\n'), ((1009, 1040), 'pickle.dump', 'pickle.dump', (['train_sequences', 'f'], {}), '(train_sequences, f)\n', (1020, 1040), False, 'import pickle\n'), ((1094, 1124), 'pickle.dump', 'pickle.dump', (['test_sequences', 'f'], {}), '(test_sequences, f)\n', (1105, 1124), False, 'import pickle\n'), ((1184, 1220), 'pickle.dump', 'pickle.dump', (['test_expected_labels', 'f'], {}), '(test_expected_labels, f)\n', (1195, 1220), False, 'import pickle\n')]
|
#pylint: disable=invalid-name
#pylint: disable=too-many-instance-attributes
#pylint: disable=too-many-return-statements
#pylint: disable=too-many-statements
"""
Class structure and methods for an oscilloscope channel.
The idea is to collect all the relevant information from all the Rigol
scope waveforms into a single structure that can be handled in a uniform
and consistent manner.
Specifically this lets one just use
channel.times : numpy array of signal times
channel.volts : numpy array of signal voltages
or the stringification method to describe a channel
print(channel)
"""
from enum import Enum
import numpy as np
class UnitEnum(Enum):
"""Enumerated units for scopes without them."""
w = 0
a = 1
v = 2
u = 3
def best_scale(number):
"""Scale and units for a number with proper prefix."""
absnr = abs(number)
if absnr == 0:
return 1, ' '
if absnr < 0.99999999e-9:
return 1e12, 'p'
if absnr < 0.99999999e-6:
return 1e9, 'n'
if absnr < 0.99999999e-3:
return 1e6, 'µ'
if absnr < 0.99999999:
return 1e3, 'm'
if absnr < 0.99999999e3:
return 1, ' '
if absnr < 0.99999999e6:
return 1e-3, 'k'
    if absnr < 0.99999999e9:
return 1e-6, 'M'
return 1e-9, 'G'
def engineering_string(number, n_digits):
"""Format number with proper prefix."""
scale, prefix = best_scale(number)
fformat = "%%.%df %%s" % n_digits
s = fformat % (number * scale, prefix)
return s
def _channel_bytes(channel_number, w):
"""
Return right series of bytes for a channel for 1000Z scopes.
Waveform points are interleaved stored in memory when two or more
channels are saved. This unweaves them.
Args:
channel_number: the number of enabled channels before this one
w: original waveform object
Returns
byte array for specified channel
"""
offset = 0
if w.header.stride == 2: # byte pattern CHx CHy
# use odd bytes when this is the second enabled channel
if any([w.header.ch[i].enabled for i in range(channel_number-1)]):
offset = 1
elif w.header.stride == 4: # byte pattern CH4 CH3 CH2 CH1
offset = 4 - channel_number
data = np.frombuffer(w.data.raw, dtype=np.uint8)
raw_bytes = data[offset::w.header.stride]
return raw_bytes
class Channel():
"""Base class for a single channel."""
def __init__(self, w, channel_number, scope, selected='1234'):
"""
Initialize a Channel Object.
Args:
w: Wfm object
channel_number: 1, 2, 3, or 4
scope: string describing scope
selected: string with channels chosen by user
Returns:
Channel object
"""
self.channel_number = channel_number
self.name = "CH %d" % channel_number
self.waveform = w
self.seconds_per_point = w.header.seconds_per_point
self.firmware = 'unknown'
self.unit = UnitEnum.v
self.points = 0
self.raw = None
self.volts = None
self.times = None
self.coupling = 'unknown'
self.roll_stop = 0
self.time_offset = 0
self.time_scale = 1
self.enabled = False
self.enabled_and_selected = False
self.volt_scale = 1
self.volt_offset = 0
self.y_scale = 1
self.y_offset = 0
self.volt_per_division = 1
self.probe_value = 1
self.inverted = False
# determine if this channel is one of those chosen by user
chosen = selected.find(str(channel_number)) != -1
if channel_number <= len(w.header.ch):
channel = w.header.ch[channel_number-1]
self.enabled = channel.enabled
self.enabled_and_selected = channel.enabled and chosen
self.volt_scale = channel.volt_scale
self.volt_offset = channel.volt_offset
self.y_scale = channel.volt_scale
self.y_offset = channel.volt_offset
self.volt_per_division = channel.volt_per_division
self.probe_value = channel.probe_value
self.unit = channel.unit
self.inverted = channel.inverted
if scope == 'wfm1000c':
self.ds1000c(w, channel_number)
elif scope == 'wfm1000d':
self.ds1000d(w, channel_number)
elif scope == 'wfm1000e':
self.ds1000e(w, channel_number)
elif scope == 'wfm1000z':
self.ds1000z(w, channel_number)
elif scope == 'wfm2000':
self.ds2000(w, channel_number)
elif scope == 'wfm4000':
self.ds4000(w, channel_number)
elif scope == 'wfm6000':
self.ds6000(w, channel_number)
def __str__(self):
"""Describe this channel."""
s = " Channel %d:\n" % self.channel_number
s += " Coupling = %8s\n" % self.coupling.rjust(7, ' ')
s += " Scale = %10sV/div\n" % engineering_string(self.volt_per_division, 2)
s += " Offset = %10sV\n" % engineering_string(self.volt_offset, 2)
s += " Probe = %7gX\n" % self.probe_value
s += " Inverted = %8s\n\n" % self.inverted
s += " Time Base = %10ss/div\n" % engineering_string(self.time_scale, 3)
s += " Offset = %10ss\n" % engineering_string(self.time_offset, 3)
s += " Delta = %10ss/point\n" % engineering_string(self.seconds_per_point, 3)
s += " Points = %8d\n\n" % self.points
if self.enabled_and_selected:
s += " Count = [%9d,%9d,%9d ... %9d,%9d]\n" % (
1, 2, 3, self.points-1, self.points)
s += " Raw = [%9d,%9d,%9d ... %9d,%9d]\n" % (
self.raw[0], self.raw[1], self.raw[2], self.raw[-2], self.raw[-1])
t = [engineering_string(self.times[i], 3) +
"s" for i in [0, 1, 2, -2, -1]]
s += " Times = [%9s,%9s,%9s ... %9s,%9s]\n" % (
t[0], t[1], t[2], t[-2], t[-1])
v = [engineering_string(self.volts[i], 2) +
"V" for i in [0, 1, 2, -2, -1]]
s += " Volts = [%9s,%9s,%9s ... %9s,%9s]\n" % (
v[0], v[1], v[2], v[-2], v[-1])
return s
def calc_times_and_volts(self):
"""Calculate the times and voltages for this channel."""
if self.enabled_and_selected:
self.volts = self.y_scale * (127.0 - self.raw) - self.y_offset
h = self.points * self.seconds_per_point / 2
self.times = np.linspace(-h, h, self.points) + self.time_offset
def ds1000c(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000d(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000e(self, w, channel_number):
"""Interpret waveform data for 1000D and 1000E series scopes."""
self.roll_stop = w.header.roll_stop
if channel_number == 1:
self.time_offset = w.header.ch1_time_offset
self.time_scale = w.header.ch1_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
elif channel_number == 2:
self.time_offset = w.header.ch2_time_offset
self.time_scale = w.header.ch2_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000z(self, w, channel_number):
"""Interpret waveform for the Rigol DS1000Z series."""
self.time_scale = w.header.time_scale
self.time_offset = w.header.time_offset
self.points = w.header.points
self.stride = w.header.stride
self.firmware = w.preheader.firmware_version
self.probe = w.header.ch[channel_number-1].probe_value
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = w.header.ch[channel_number-1].y_scale
self.y_offset = w.header.ch[channel_number-1].y_offset
if self.enabled_and_selected:
self.raw = _channel_bytes(channel_number, w)
self.points = len(self.raw)
self.calc_times_and_volts()
def ds2000(self, w, channel_number):
"""Interpret waveform for the Rigol DS2000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.storage_depth
self.firmware = w.header.firmware_version
self.unit = UnitEnum(w.header.ch[channel_number-1].unit_actual)
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds4000(self, w, channel_number):
"""Interpret waveform for the Rigol DS4000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds6000(self, w, channel_number):
"""Interpret waveform for the Rigol DS6000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.unit = w.header.ch[channel_number-1].unit
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.array(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.array(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.array(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.array(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
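# Illustrative usage example (added, not part of the original module):
# best_scale() and engineering_string() above are self-contained, so they can be
# exercised without a scope waveform file.
if __name__ == "__main__":
    print(engineering_string(3.3e-3, 2))   # '3.30 m'
    print(engineering_string(2.5e6, 1))    # '2.5 M'
    print(best_scale(0))                   # (1, ' ')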
|
[
"numpy.array",
"numpy.frombuffer",
"numpy.linspace"
] |
[((2293, 2334), 'numpy.frombuffer', 'np.frombuffer', (['w.data.raw'], {'dtype': 'np.uint8'}), '(w.data.raw, dtype=np.uint8)\n', (2306, 2334), True, 'import numpy as np\n'), ((6716, 6747), 'numpy.linspace', 'np.linspace', (['(-h)', 'h', 'self.points'], {}), '(-h, h, self.points)\n', (6727, 6747), True, 'import numpy as np\n'), ((7136, 7177), 'numpy.frombuffer', 'np.frombuffer', (['w.data.ch1'], {'dtype': 'np.uint8'}), '(w.data.ch1, dtype=np.uint8)\n', (7149, 7177), True, 'import numpy as np\n'), ((7326, 7367), 'numpy.frombuffer', 'np.frombuffer', (['w.data.ch2'], {'dtype': 'np.uint8'}), '(w.data.ch2, dtype=np.uint8)\n', (7339, 7367), True, 'import numpy as np\n'), ((7774, 7815), 'numpy.frombuffer', 'np.frombuffer', (['w.data.ch1'], {'dtype': 'np.uint8'}), '(w.data.ch1, dtype=np.uint8)\n', (7787, 7815), True, 'import numpy as np\n'), ((7964, 8005), 'numpy.frombuffer', 'np.frombuffer', (['w.data.ch2'], {'dtype': 'np.uint8'}), '(w.data.ch2, dtype=np.uint8)\n', (7977, 8005), True, 'import numpy as np\n'), ((8462, 8503), 'numpy.frombuffer', 'np.frombuffer', (['w.data.ch1'], {'dtype': 'np.uint8'}), '(w.data.ch1, dtype=np.uint8)\n', (8475, 8503), True, 'import numpy as np\n'), ((10232, 10277), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_1'], {'dtype': 'np.uint8'}), '(w.header.raw_1, dtype=np.uint8)\n', (10245, 10277), True, 'import numpy as np\n'), ((10342, 10387), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_2'], {'dtype': 'np.uint8'}), '(w.header.raw_2, dtype=np.uint8)\n', (10355, 10387), True, 'import numpy as np\n'), ((10452, 10497), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_3'], {'dtype': 'np.uint8'}), '(w.header.raw_3, dtype=np.uint8)\n', (10465, 10497), True, 'import numpy as np\n'), ((10562, 10607), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_4'], {'dtype': 'np.uint8'}), '(w.header.raw_4, dtype=np.uint8)\n', (10575, 10607), True, 'import numpy as np\n'), ((11191, 11236), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_1'], {'dtype': 'np.uint8'}), '(w.header.raw_1, dtype=np.uint8)\n', (11204, 11236), True, 'import numpy as np\n'), ((11301, 11346), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_2'], {'dtype': 'np.uint8'}), '(w.header.raw_2, dtype=np.uint8)\n', (11314, 11346), True, 'import numpy as np\n'), ((11411, 11456), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_3'], {'dtype': 'np.uint8'}), '(w.header.raw_3, dtype=np.uint8)\n', (11424, 11456), True, 'import numpy as np\n'), ((11521, 11566), 'numpy.frombuffer', 'np.frombuffer', (['w.header.raw_4'], {'dtype': 'np.uint8'}), '(w.header.raw_4, dtype=np.uint8)\n', (11534, 11566), True, 'import numpy as np\n'), ((12124, 12164), 'numpy.array', 'np.array', (['w.header.raw_1'], {'dtype': 'np.uint8'}), '(w.header.raw_1, dtype=np.uint8)\n', (12132, 12164), True, 'import numpy as np\n'), ((12229, 12269), 'numpy.array', 'np.array', (['w.header.raw_2'], {'dtype': 'np.uint8'}), '(w.header.raw_2, dtype=np.uint8)\n', (12237, 12269), True, 'import numpy as np\n'), ((12334, 12374), 'numpy.array', 'np.array', (['w.header.raw_3'], {'dtype': 'np.uint8'}), '(w.header.raw_3, dtype=np.uint8)\n', (12342, 12374), True, 'import numpy as np\n'), ((12439, 12479), 'numpy.array', 'np.array', (['w.header.raw_4'], {'dtype': 'np.uint8'}), '(w.header.raw_4, dtype=np.uint8)\n', (12447, 12479), True, 'import numpy as np\n'), ((8764, 8805), 'numpy.frombuffer', 'np.frombuffer', (['w.data.ch2'], {'dtype': 'np.uint8'}), '(w.data.ch2, dtype=np.uint8)\n', (8777, 8805), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
from models_gqa.model import Model
from models_gqa.config import build_cfg_from_argparse
from util.gqa_train.data_reader import DataReader
import json
# Load config
cfg = build_cfg_from_argparse()
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TEST.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
data_reader = DataReader(
imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TEST.BATCH_SIZE,
T_encoder=cfg.T_ENCODER,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
spatial_pos_enc_dim=cfg.SPATIAL_POS_ENC_DIM,
bbox_tile_num=cfg.BBOX_TILE_NUM)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT])
image_valid_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, image_valid_batch,
num_vocab=num_vocab, num_choices=num_choices, is_training=False)
# Load snapshot
if cfg.TEST.USE_EMA:
ema = tf.train.ExponentialMovingAverage(decay=0.9) # decay doesn't matter
var_names = {
(ema.average_name(v) if v in model.params else v.op.name): v
for v in tf.global_variables()}
else:
var_names = {v.op.name: v for v in tf.global_variables()}
snapshot_file = cfg.TEST.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.ITER)
print('loading model snapshot from %s' % snapshot_file)
snapshot_saver = tf.train.Saver(var_names)
snapshot_saver.restore(sess, snapshot_file)
print('Done')
# Write results
result_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.ITER)
os.makedirs(result_dir, exist_ok=True)
# Run test
answer_correct, num_questions = 0, 0
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions = []
answer_word_list = data_reader.batch_loader.answer_dict.word_list
pred_file = os.path.join(
result_dir, 'gqa_eval_preds_%s_%s_%08d.json' % (
cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))
for n_batch, batch in enumerate(data_reader.batches()):
if 'answer_label_batch' not in batch:
batch['answer_label_batch'] = -np.ones(
len(batch['qid_list']), np.int32)
if num_questions == 0:
print('imdb has no answer labels. Using dummy labels.\n\n'
'**The final accuracy will be zero (no labels provided)**\n')
vqa_scores_value = sess.run(model.vqa_scores, feed_dict={
input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch'],
image_valid_batch: batch['image_valid_batch']})
# compute accuracy
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_value, axis=1)
answer_correct += np.sum(vqa_predictions == vqa_labels)
num_questions += len(vqa_labels)
accuracy = answer_correct / num_questions
if n_batch % 20 == 0:
print('exp: %s, iter = %d, accumulated accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions.extend([
{"questionId": qId, "prediction": answer_word_list[p]}
for qId, p in zip(batch['qid_list'], vqa_predictions)])
with open(os.path.join(
result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA), 'w') as f:
print('\nexp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
print('exp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions), file=f)
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
with open(pred_file, 'w') as f:
json.dump(output_predictions, f, indent=2)
print('prediction file written to %s' % pred_file)
|
[
"os.makedirs",
"models_gqa.config.build_cfg_from_argparse",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"os.path.join",
"numpy.argmax",
"tensorflow.global_variables",
"numpy.sum",
"util.gqa_train.data_reader.DataReader",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.GPUOptions",
"json.dump",
"models_gqa.model.Model"
] |
[((226, 251), 'models_gqa.config.build_cfg_from_argparse', 'build_cfg_from_argparse', ([], {}), '()\n', (249, 251), False, 'from models_gqa.config import build_cfg_from_argparse\n'), ((615, 1173), 'util.gqa_train.data_reader.DataReader', 'DataReader', (['imdb_file'], {'shuffle': '(False)', 'one_pass': '(True)', 'batch_size': 'cfg.TEST.BATCH_SIZE', 'T_encoder': 'cfg.T_ENCODER', 'vocab_question_file': 'cfg.VOCAB_QUESTION_FILE', 'vocab_answer_file': 'cfg.VOCAB_ANSWER_FILE', 'feature_type': 'cfg.FEAT_TYPE', 'spatial_feature_dir': 'cfg.SPATIAL_FEATURE_DIR', 'objects_feature_dir': 'cfg.OBJECTS_FEATURE_DIR', 'objects_max_num': 'cfg.W_FEAT', 'scene_graph_file': 'scene_graph_file', 'vocab_name_file': 'cfg.VOCAB_NAME_FILE', 'vocab_attr_file': 'cfg.VOCAB_ATTR_FILE', 'spatial_pos_enc_dim': 'cfg.SPATIAL_POS_ENC_DIM', 'bbox_tile_num': 'cfg.BBOX_TILE_NUM'}), '(imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TEST.\n BATCH_SIZE, T_encoder=cfg.T_ENCODER, vocab_question_file=cfg.\n VOCAB_QUESTION_FILE, vocab_answer_file=cfg.VOCAB_ANSWER_FILE,\n feature_type=cfg.FEAT_TYPE, spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,\n objects_feature_dir=cfg.OBJECTS_FEATURE_DIR, objects_max_num=cfg.W_FEAT,\n scene_graph_file=scene_graph_file, vocab_name_file=cfg.VOCAB_NAME_FILE,\n vocab_attr_file=cfg.VOCAB_ATTR_FILE, spatial_pos_enc_dim=cfg.\n SPATIAL_POS_ENC_DIM, bbox_tile_num=cfg.BBOX_TILE_NUM)\n', (625, 1173), False, 'from util.gqa_train.data_reader import DataReader\n'), ((1353, 1391), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (1367, 1391), True, 'import tensorflow as tf\n'), ((1411, 1443), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (1425, 1443), True, 'import tensorflow as tf\n'), ((1463, 1533), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT]'], {}), '(tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT])\n', (1477, 1533), True, 'import tensorflow as tf\n'), ((1559, 1617), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cfg.H_FEAT, cfg.W_FEAT]'], {}), '(tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT])\n', (1573, 1617), True, 'import tensorflow as tf\n'), ((1631, 1781), 'models_gqa.model.Model', 'Model', (['input_seq_batch', 'seq_length_batch', 'image_feat_batch', 'image_valid_batch'], {'num_vocab': 'num_vocab', 'num_choices': 'num_choices', 'is_training': '(False)'}), '(input_seq_batch, seq_length_batch, image_feat_batch,\n image_valid_batch, num_vocab=num_vocab, num_choices=num_choices,\n is_training=False)\n', (1636, 1781), False, 'from models_gqa.model import Model\n'), ((2239, 2264), 'tensorflow.train.Saver', 'tf.train.Saver', (['var_names'], {}), '(var_names)\n', (2253, 2264), True, 'import tensorflow as tf\n'), ((2405, 2443), 'os.makedirs', 'os.makedirs', (['result_dir'], {'exist_ok': '(True)'}), '(result_dir, exist_ok=True)\n', (2416, 2443), False, 'import os\n'), ((1831, 1875), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.9)'}), '(decay=0.9)\n', (1864, 1875), True, 'import tensorflow as tf\n'), ((2641, 2756), 'os.path.join', 'os.path.join', (['result_dir', "('gqa_eval_preds_%s_%s_%08d.json' % (cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.\n TEST.ITER))"], {}), "(result_dir, 'gqa_eval_preds_%s_%s_%08d.json' % (cfg.TEST.\n SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))\n", (2653, 2756), False, 'import os\n'), ((3515, 3550), 'numpy.argmax', 'np.argmax', 
(['vqa_scores_value'], {'axis': '(1)'}), '(vqa_scores_value, axis=1)\n', (3524, 3550), True, 'import numpy as np\n'), ((3573, 3610), 'numpy.sum', 'np.sum', (['(vqa_predictions == vqa_labels)'], {}), '(vqa_predictions == vqa_labels)\n', (3579, 3610), True, 'import numpy as np\n'), ((4142, 4209), 'os.path.join', 'os.path.join', (['result_dir', "('vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA)"], {}), "(result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA)\n", (4154, 4209), False, 'import os\n'), ((4686, 4728), 'json.dump', 'json.dump', (['output_predictions', 'f'], {'indent': '(2)'}), '(output_predictions, f, indent=2)\n', (4695, 4728), False, 'import json\n'), ((2004, 2025), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2023, 2025), True, 'import tensorflow as tf\n'), ((2072, 2093), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2091, 2093), True, 'import tensorflow as tf\n'), ((379, 425), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': 'cfg.GPU_MEM_GROWTH'}), '(allow_growth=cfg.GPU_MEM_GROWTH)\n', (392, 425), True, 'import tensorflow as tf\n')]
|
# In[42]:
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# In[43]:
# describe the model
def deriv(y, t, N, beta, gamma, delta):
  S, E, I, R = y
  dSdt = -beta * S * I / N # S(t): susceptible (those who can still be infected)
  dEdt = beta * S * I / N - delta * E
  dIdt = delta * E - gamma * I # I(t): infected (those with an ongoing infection)
  dRdt = gamma * I
  return dSdt, dEdt, dIdt, dRdt
# In[44]:
# describe the parameters
N = 2283 # total population, N = S(t)+E(t)+I(t)+R(t)
D = 4.0 # infections last four days
gamma = 1.0 / D # removal rate (the rate at which infected people recover)
delta = 1.0 / 5.0 # incubation rate: incubation period of five days
R_0 = 2.5 # basic reproduction number
beta = R_0 * gamma # R_0 = beta/gamma; number infected per infected person per unit time (depends on the properties of the virus and on how we behave)
S0, E0, I0, R0 = N-1, 1, 0, 0 # initial conditions: one infected, rest susceptible
#Rt = R0 * S(t)/Ntot * (1 - b). b = effect of policy and behavioural changes
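# Illustrative check of the formula above (the value b = 0.25 is an assumption, not taken from data):
# at the start of the outbreak S(t) is close to S0, so Rt = 2.5 * (2282/2283) * (1 - 0.25), which is about 1.87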
# In[45]:
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta))
S, E, I, R = ret.T
# In[46]:
def plotsir(t, S, E, I, R):
f, ax = plt.subplots(1,1,figsize=(10,4))
ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
ax.set_xlabel('Time (days)')
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
plt.savefig('Plot.png')
plt.show();
# plot the graph
# In[47]:
plotsir(t, S, E, I, R)
# In[ ]:
|
[
"matplotlib.pyplot.savefig",
"scipy.integrate.odeint",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((1045, 1068), 'numpy.linspace', 'np.linspace', (['(0)', '(99)', '(100)'], {}), '(0, 99, 100)\n', (1056, 1068), True, 'import numpy as np\n'), ((1210, 1260), 'scipy.integrate.odeint', 'odeint', (['deriv', 'y0', 't'], {'args': '(N, beta, gamma, delta)'}), '(deriv, y0, t, args=(N, beta, gamma, delta))\n', (1216, 1260), False, 'from scipy.integrate import odeint\n'), ((1332, 1367), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 4)'}), '(1, 1, figsize=(10, 4))\n', (1344, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1958), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Plot.png"""'], {}), "('Plot.png')\n", (1946, 1958), True, 'import matplotlib.pyplot as plt\n'), ((1961, 1971), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1969, 1971), True, 'import matplotlib.pyplot as plt\n')]
|
"""
This script is where the preprocessed data is used to train the SVM model to
perform the classification. I am using Stratified K-Fold Cross Validation to
prevent bias and/or any imbalance that could affect the model's accuracy.
REFERENCE: https://medium.com/@bedigunjit/simple-guide-to-text-classification-nlp-using-svm-and-naive-bayes-with-python-421db3a72d34
"""
import numpy as np
import pandas as pd
from sklearn import model_selection, svm
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
# Open preprocessed csv
df = pd.read_csv("preprocessed.csv", index_col=0)
print(df.head())
print("SPLITTING TRAIN-TEST")
x = df["Text"]
y = df["PublicationTitle"]
train_x, test_x, train_y, test_y = model_selection.train_test_split(
df["Text"], df["PublicationTitle"], test_size=0.3)
# Label encode the target variable to transform categorical data of string
# type into numerical values the model can understand
encoder = LabelEncoder()
# train_y = encoder.fit_transform(train_y)
# test_y = encoder.fit_transform(test_y)
# Word vectorization
# turning a collection of text documents into numerical feature vectors
# We are using Term Frequency - Inverse Document Frequency (TF-IDF)
tfidf_vect = TfidfVectorizer(max_features=5000)
tfidf_vect.fit(df["Text"])
# train_x_tfidf = tfidf_vect.transform(train_x)
# test_x_tfidf = tfidf_vect.transform(test_x)
x_tfidf = tfidf_vect.transform(df["Text"])
y = encoder.fit_transform(y)
# print(tfidf_vect.vocabulary_)
# Fit the training dataset to the classifier
print("TRAINING THE MODEL")
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
accuracies = []
fold = 1
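# StratifiedKFold keeps the per-class label proportions in every train/test split, so each
# fold sees a representative mix of publication titles (the imbalance concern mentioned
# in the module docstring).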
for train_idx, test_idx in skf.split(x, y):
print("Working on fold", fold)
x_train_fold, x_test_fold = x_tfidf[train_idx], x_tfidf[test_idx]
y_train_fold, y_test_fold = y[train_idx], y[test_idx]
SVM.fit(x_train_fold, y_train_fold)
acc = SVM.score(x_test_fold, y_test_fold)
print("Acc", fold, ":", acc)
accuracies.append(acc)
fold += 1
print("ACCURACIES:", accuracies)
print("Max Accuracy:", np.max(accuracies))
print("Min Accuracy:", np.min(accuracies))
print("Mean of Accuracies:", np.mean(accuracies))
print("STD of Accuracies:", np.std(accuracies))
# print("RUNNING TEST PREDICTIONS")
# predictions = SVM.predict(test_x_tfidf)
# # Calculate accuracy score
# accuracy = accuracy_score(test_y, predictions)
# print("Accuracy:", str(accuracy * 100) + "%")
|
[
"numpy.mean",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.std",
"numpy.max",
"sklearn.model_selection.StratifiedKFold",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.min",
"sklearn.svm.SVC"
] |
[((686, 730), 'pandas.read_csv', 'pd.read_csv', (['"""preprocessed.csv"""'], {'index_col': '(0)'}), "('preprocessed.csv', index_col=0)\n", (697, 730), True, 'import pandas as pd\n'), ((857, 944), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (["df['Text']", "df['PublicationTitle']"], {'test_size': '(0.3)'}), "(df['Text'], df['PublicationTitle'],\n test_size=0.3)\n", (889, 944), False, 'from sklearn import model_selection, svm\n'), ((1087, 1101), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1099, 1101), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1342, 1376), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(5000)'}), '(max_features=5000)\n', (1357, 1376), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1686, 1741), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(1.0)', 'kernel': '"""linear"""', 'degree': '(3)', 'gamma': '"""auto"""'}), "(C=1.0, kernel='linear', degree=3, gamma='auto')\n", (1693, 1741), False, 'from sklearn import model_selection, svm\n'), ((1749, 1807), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(1)'}), '(n_splits=10, shuffle=True, random_state=1)\n', (1764, 1807), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2260, 2278), 'numpy.max', 'np.max', (['accuracies'], {}), '(accuracies)\n', (2266, 2278), True, 'import numpy as np\n'), ((2303, 2321), 'numpy.min', 'np.min', (['accuracies'], {}), '(accuracies)\n', (2309, 2321), True, 'import numpy as np\n'), ((2352, 2371), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (2359, 2371), True, 'import numpy as np\n'), ((2401, 2419), 'numpy.std', 'np.std', (['accuracies'], {}), '(accuracies)\n', (2407, 2419), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np, os, sys
from get_sepsis_score import load_sepsis_model, get_sepsis_score
def load_challenge_data(file):
with open(file, 'r') as f:
header = f.readline().strip()
column_names = header.split('|')
data = np.loadtxt(f, delimiter='|')
# Ignore SepsisLabel column if present.
if column_names[-1] == 'SepsisLabel':
column_names = column_names[:-1]
data = data[:, :-1]
return data
def save_challenge_predictions(file, scores, labels):
with open(file, 'w') as f:
f.write('PredictedProbability|PredictedLabel\n')
for (s, l) in zip(scores, labels):
f.write('%g|%d\n' % (s, l))
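# Example of the file written by save_challenge_predictions (values are illustrative):
#   PredictedProbability|PredictedLabel
#   0.32|0
#   0.87|1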
if __name__ == '__main__':
# Parse arguments.
if len(sys.argv) != 3:
raise Exception('Include the input and output directories as arguments, e.g., python driver.py input output.')
input_directory = sys.argv[1]
output_directory = sys.argv[2]
# Find files.
files = []
for f in os.listdir(input_directory):
if os.path.isfile(os.path.join(input_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('psv'):
files.append(f)
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
# Load model.
model = load_sepsis_model()
print(model)
# Iterate over files.
for f in files:
# Load data.
input_file = os.path.join(input_directory, f)
data = load_challenge_data(input_file)
# print(type(data))
# Make predictions.
num_rows = len(data)
scores = np.zeros(num_rows)
labels = np.zeros(num_rows)
for t in range(num_rows):
current_data = data[:t+1]
current_score, current_label = get_sepsis_score(current_data, model)
scores[t] = current_score
labels[t] = current_label
# Save results.
output_file = os.path.join(output_directory, f)
save_challenge_predictions(output_file, scores, labels)
|
[
"get_sepsis_score.load_sepsis_model",
"os.listdir",
"os.path.join",
"numpy.zeros",
"os.path.isdir",
"os.mkdir",
"get_sepsis_score.get_sepsis_score",
"numpy.loadtxt"
] |
[((1015, 1042), 'os.listdir', 'os.listdir', (['input_directory'], {}), '(input_directory)\n', (1025, 1042), False, 'import numpy as np, os, sys\n'), ((1308, 1327), 'get_sepsis_score.load_sepsis_model', 'load_sepsis_model', ([], {}), '()\n', (1325, 1327), False, 'from get_sepsis_score import load_sepsis_model, get_sepsis_score\n'), ((273, 301), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '"""|"""'}), "(f, delimiter='|')\n", (283, 301), True, 'import numpy as np, os, sys\n'), ((1209, 1240), 'os.path.isdir', 'os.path.isdir', (['output_directory'], {}), '(output_directory)\n', (1222, 1240), False, 'import numpy as np, os, sys\n'), ((1250, 1276), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (1258, 1276), False, 'import numpy as np, os, sys\n'), ((1434, 1466), 'os.path.join', 'os.path.join', (['input_directory', 'f'], {}), '(input_directory, f)\n', (1446, 1466), False, 'import numpy as np, os, sys\n'), ((1617, 1635), 'numpy.zeros', 'np.zeros', (['num_rows'], {}), '(num_rows)\n', (1625, 1635), True, 'import numpy as np, os, sys\n'), ((1653, 1671), 'numpy.zeros', 'np.zeros', (['num_rows'], {}), '(num_rows)\n', (1661, 1671), True, 'import numpy as np, os, sys\n'), ((1948, 1981), 'os.path.join', 'os.path.join', (['output_directory', 'f'], {}), '(output_directory, f)\n', (1960, 1981), False, 'import numpy as np, os, sys\n'), ((1787, 1824), 'get_sepsis_score.get_sepsis_score', 'get_sepsis_score', (['current_data', 'model'], {}), '(current_data, model)\n', (1803, 1824), False, 'from get_sepsis_score import load_sepsis_model, get_sepsis_score\n'), ((1070, 1102), 'os.path.join', 'os.path.join', (['input_directory', 'f'], {}), '(input_directory, f)\n', (1082, 1102), False, 'import numpy as np, os, sys\n')]
|
import numpy as np
import scipy
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
def truncate_text(text, max_len):
if len(text) > max_len:
return text[:int(max_len/2)-2] + "..." + text[-int(max_len/2)+1:]
else:
return text
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show()
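# Minimal usage sketch (not part of the module). The arrays below are random placeholders;
# in practice shap_values would come from an explainer and features from your dataset.
#   import numpy as np
#   X = np.random.randn(500, 5)
#   sv = np.random.randn(500, 5)
#   monitoring_plot(0, sv, X, feature_names=["f%d" % i for i in range(5)])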
|
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.figure",
"scipy.stats.ttest_ind",
"matplotlib.pyplot.scatter",
"numpy.min",
"warnings.warn",
"numpy.argmin",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show"
] |
[((1417, 1443), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (1426, 1443), True, 'import matplotlib.pyplot as pl\n'), ((1792, 1805), 'numpy.min', 'np.min', (['pvals'], {}), '(pvals)\n', (1798, 1805), True, 'import numpy as np\n'), ((1998, 2064), 'matplotlib.pyplot.scatter', 'pl.scatter', (['xs', 'ys'], {'s': '(10)', 'c': 'features[:, ind]', 'cmap': 'colors.red_blue'}), '(xs, ys, s=10, c=features[:, ind], cmap=colors.red_blue)\n', (2008, 2064), True, 'import matplotlib.pyplot as pl\n'), ((2073, 2098), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Sample index"""'], {}), "('Sample index')\n", (2082, 2098), True, 'import matplotlib.pyplot as pl\n'), ((2375, 2388), 'matplotlib.pyplot.colorbar', 'pl.colorbar', ([], {}), '()\n', (2386, 2388), True, 'import matplotlib.pyplot as pl\n'), ((2625, 2634), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2632, 2634), True, 'import matplotlib.pyplot as pl\n'), ((134, 182), 'warnings.warn', 'warnings.warn', (['"""matplotlib could not be loaded!"""'], {}), "('matplotlib could not be loaded!')\n", (147, 182), False, 'import warnings\n'), ((1712, 1749), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['ys[:i]', 'ys[i:]'], {}), '(ys[:i], ys[i:])\n', (1733, 1749), False, 'import scipy\n'), ((1912, 1984), 'matplotlib.pyplot.axvline', 'pl.axvline', (['min_pval_ind'], {'linestyle': '"""dashed"""', 'color': '"""#666666"""', 'alpha': '(0.2)'}), "(min_pval_ind, linestyle='dashed', color='#666666', alpha=0.2)\n", (1922, 1984), True, 'import matplotlib.pyplot as pl\n'), ((1825, 1841), 'numpy.argmin', 'np.argmin', (['pvals'], {}), '(pvals)\n', (1834, 1841), True, 'import numpy as np\n'), ((2182, 2190), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (2188, 2190), True, 'import matplotlib.pyplot as pl\n'), ((2230, 2238), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (2236, 2238), True, 'import matplotlib.pyplot as pl\n'), ((2276, 2284), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (2282, 2284), True, 'import matplotlib.pyplot as pl\n'), ((2324, 2332), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (2330, 2332), True, 'import matplotlib.pyplot as pl\n'), ((2472, 2480), 'matplotlib.pyplot.gcf', 'pl.gcf', ([], {}), '()\n', (2478, 2480), True, 'import matplotlib.pyplot as pl\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 14:57:32 2020
@author: Nicolai
"""
import sys
import os
importpath = os.path.dirname(os.path.realpath(__file__)) + "/../"
sys.path.append(importpath)
from FemPdeBase import FemPdeBase
import numpy as np
# import from ngsolve
import ngsolve as ngs
from netgen.geom2d import unit_square
import time
import psutil
import gc
class FemPde1(FemPdeBase):
"""
**Implementation of PDE1 of the testbed:**
.. math::
- \Delta u(\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10}
- 200x^9(1-x)^9 + 90x^{10}(1-x)^8]
-2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10}
- 200y^9(1-y)^9 + 90y^{10}(1-y)^8]
\Omega: \mathbf{x} \in [0,1]
u(\mathbf{x})|_{\partial \Omega} = 0
**with the solution:**
.. math::
u(\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10}
Attributes
----------
    max_ndof: int
        the maximum number of degrees of freedom that can be created in the
        adaptive mesh refinement, default value is 50000
Methods
-------
solve()
solves the pde by calling ngsolve, provides: static condensation,
adaptive mesh refinement, parallelisation (where possible), sets the
internal variables for evaluating the exact solution and calculating
the distance between exact and approx solution
also sets execution time and memory consumption
Examples
--------
>>> import numpy as np
    >>> fempde1 = FemPde1(True)
    >>> pos = np.array([0.5, 0.5])
    >>> fempde1.exact(pos)
    >>> x -> numpy.ndarray with shape (2,)
        _mesh -> ngs.comp.Mesh
        _ngs_ex -> ngs.fem.CoefficientFunction
        -> try to call solve() first
    >>> fempde1.solve()
    >>> fempde1.exact(pos)
    1.0
    >>> fempde1.approx(pos)
    0.999998924259486
    >>> fempde1.normL2()
    5.853102150391562e-07
    >>> fempde1.exec_time
    3.830256175994873
    >>> fempde1.mem_consumption
    76705792
"""
def __init__(self, show_gui, max_ndof=50000):
super().__init__(show_gui)
# init protected
self._pde_string = "-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))"
self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10)
# init public
self.max_ndof = max_ndof
def solve(self):
# disable garbage collector
# --------------------------------------------------------------------#
gc.disable()
while(gc.isenabled()):
time.sleep(0.1)
# --------------------------------------------------------------------#
# measure how much memory is used until here
process = psutil.Process()
memstart = process.memory_info().vms
# starts timer
tstart = time.time()
if self.show_gui:
import netgen.gui
# create mesh with initial size 0.1
self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1))
#create finite element space
self._fes = ngs.H1(self._mesh, order=2, dirichlet=".*", autoupdate=True)
# test and trail function
u = self._fes.TrialFunction()
v = self._fes.TestFunction()
# create bilinear form and enable static condensation
self._a = ngs.BilinearForm(self._fes, condense=True)
self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx
# creat linear functional and apply RHS
self._f = ngs.LinearForm(self._fes)
self._f += ( \
-(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \
-(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx
# preconditioner: multigrid - what prerequisits must the problem have?
self._c = ngs.Preconditioner(self._a,"multigrid")
# create grid function that holds the solution and set the boundary to 0
self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution
self._g = 0.0
self._gfu.Set(self._g, definedon=self._mesh.Boundaries(".*"))
# draw grid function in gui
if self.show_gui:
ngs.Draw(self._gfu)
# create Hcurl space for flux calculation and estimate error
self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True)
self._gf_flux = ngs.GridFunction(self._space_flux, "flux", autoupdate=True)
# TaskManager starts threads that (standard thread nr is numer of cores)
with ngs.TaskManager():
# this is the adaptive loop
while self._fes.ndof < self.max_ndof:
self._solveStep()
self._estimateError()
self._mesh.Refine()
# since the adaptive loop stopped with a mesh refinement, the gfu must be
# calculated one last time
self._solveStep()
if self.show_gui:
ngs.Draw(self._gfu)
# set measured exectution time
self._exec_time = time.time() - tstart
# set measured used memory
memstop = process.memory_info().vms - memstart
self._mem_consumption = memstop
# enable garbage collector
# --------------------------------------------------------------------#
gc.enable()
gc.collect()
# --------------------------------------------------------------------#
if __name__ == "__main__":
fempde1 = FemPde1(True)
print(fempde1.pde_string)
try:
fempde1.exact(np.array([0.5,0.5]))
except:
print("Î error message above")
try:
fempde1.approx(np.array([0.5,0.5]))
except:
print("Î error message above")
fempde1.solve()
print("-------------------------------------")
print("exact(0.5, 0.5) = {}".format(fempde1.exact(np.array([0.5,0.5]))))
print("approx(0.5, 0.5) = {}".format(fempde1.approx(np.array([0.5,0.5]))))
print("L2 norm to the real solution {}".format(fempde1.normL2()))
print("solving took {} sec".format(fempde1.exec_time))
print("solving uses {} Mb".format(fempde1.mem_consumption/1000000))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.exact(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
fig.tight_layout()
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0, X1)")
plt.show()
fig.savefig("sol_pde_1.pdf", bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.approx(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
|
[
"psutil.Process",
"netgen.geom2d.unit_square.GenerateMesh",
"time.sleep",
"numpy.array",
"ngsolve.Preconditioner",
"sys.path.append",
"numpy.arange",
"gc.enable",
"gc.disable",
"gc.isenabled",
"ngsolve.GridFunction",
"numpy.meshgrid",
"ngsolve.LinearForm",
"ngsolve.HDiv",
"gc.collect",
"time.time",
"matplotlib.pyplot.show",
"ngsolve.BilinearForm",
"ngsolve.Draw",
"ngsolve.grad",
"os.path.realpath",
"matplotlib.pyplot.figure",
"ngsolve.H1",
"numpy.ravel",
"ngsolve.TaskManager"
] |
[((174, 201), 'sys.path.append', 'sys.path.append', (['importpath'], {}), '(importpath)\n', (189, 201), False, 'import sys\n'), ((6776, 6788), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6786, 6788), True, 'import matplotlib.pyplot as plt\n'), ((6848, 6872), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.01)'], {}), '(0, 1.01, 0.01)\n', (6857, 6872), True, 'import numpy as np\n'), ((6884, 6901), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (6895, 6901), True, 'import numpy as np\n'), ((7203, 7213), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7211, 7213), True, 'import matplotlib.pyplot as plt\n'), ((7288, 7300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7298, 7300), True, 'import matplotlib.pyplot as plt\n'), ((7360, 7384), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.01)'], {}), '(0, 1.01, 0.01)\n', (7369, 7384), True, 'import numpy as np\n'), ((7396, 7413), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7407, 7413), True, 'import numpy as np\n'), ((7687, 7697), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7695, 7697), True, 'import matplotlib.pyplot as plt\n'), ((137, 163), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (153, 163), False, 'import os\n'), ((2784, 2796), 'gc.disable', 'gc.disable', ([], {}), '()\n', (2794, 2796), False, 'import gc\n'), ((2811, 2825), 'gc.isenabled', 'gc.isenabled', ([], {}), '()\n', (2823, 2825), False, 'import gc\n'), ((3016, 3032), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (3030, 3032), False, 'import psutil\n'), ((3127, 3138), 'time.time', 'time.time', ([], {}), '()\n', (3136, 3138), False, 'import time\n'), ((3380, 3440), 'ngsolve.H1', 'ngs.H1', (['self._mesh'], {'order': '(2)', 'dirichlet': '""".*"""', 'autoupdate': '(True)'}), "(self._mesh, order=2, dirichlet='.*', autoupdate=True)\n", (3386, 3440), True, 'import ngsolve as ngs\n'), ((3648, 3690), 'ngsolve.BilinearForm', 'ngs.BilinearForm', (['self._fes'], {'condense': '(True)'}), '(self._fes, condense=True)\n', (3664, 3690), True, 'import ngsolve as ngs\n'), ((3812, 3837), 'ngsolve.LinearForm', 'ngs.LinearForm', (['self._fes'], {}), '(self._fes)\n', (3826, 3837), True, 'import ngsolve as ngs\n'), ((4231, 4271), 'ngsolve.Preconditioner', 'ngs.Preconditioner', (['self._a', '"""multigrid"""'], {}), "(self._a, 'multigrid')\n", (4249, 4271), True, 'import ngsolve as ngs\n'), ((4381, 4425), 'ngsolve.GridFunction', 'ngs.GridFunction', (['self._fes'], {'autoupdate': '(True)'}), '(self._fes, autoupdate=True)\n', (4397, 4425), True, 'import ngsolve as ngs\n'), ((4739, 4785), 'ngsolve.HDiv', 'ngs.HDiv', (['self._mesh'], {'order': '(2)', 'autoupdate': '(True)'}), '(self._mesh, order=2, autoupdate=True)\n', (4747, 4785), True, 'import ngsolve as ngs\n'), ((4810, 4869), 'ngsolve.GridFunction', 'ngs.GridFunction', (['self._space_flux', '"""flux"""'], {'autoupdate': '(True)'}), "(self._space_flux, 'flux', autoupdate=True)\n", (4826, 4869), True, 'import ngsolve as ngs\n'), ((5772, 5783), 'gc.enable', 'gc.enable', ([], {}), '()\n', (5781, 5783), False, 'import gc\n'), ((5792, 5804), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5802, 5804), False, 'import gc\n'), ((2840, 2855), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2850, 2855), False, 'import time\n'), ((3278, 3312), 'netgen.geom2d.unit_square.GenerateMesh', 'unit_square.GenerateMesh', ([], {'maxh': '(0.1)'}), '(maxh=0.1)\n', (3302, 3312), False, 'from netgen.geom2d import unit_square\n'), ((4614, 
4633), 'ngsolve.Draw', 'ngs.Draw', (['self._gfu'], {}), '(self._gfu)\n', (4622, 4633), True, 'import ngsolve as ngs\n'), ((4973, 4990), 'ngsolve.TaskManager', 'ngs.TaskManager', ([], {}), '()\n', (4988, 4990), True, 'import ngsolve as ngs\n'), ((5381, 5400), 'ngsolve.Draw', 'ngs.Draw', (['self._gfu'], {}), '(self._gfu)\n', (5389, 5400), True, 'import ngsolve as ngs\n'), ((5479, 5490), 'time.time', 'time.time', ([], {}), '()\n', (5488, 5490), False, 'import time\n'), ((6022, 6042), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6030, 6042), True, 'import numpy as np\n'), ((6131, 6151), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6139, 6151), True, 'import numpy as np\n'), ((3710, 3721), 'ngsolve.grad', 'ngs.grad', (['u'], {}), '(u)\n', (3718, 3721), True, 'import ngsolve as ngs\n'), ((3722, 3733), 'ngsolve.grad', 'ngs.grad', (['v'], {}), '(v)\n', (3730, 3733), True, 'import ngsolve as ngs\n'), ((6343, 6363), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6351, 6363), True, 'import numpy as np\n'), ((6422, 6442), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6430, 6442), True, 'import numpy as np\n'), ((6947, 6963), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (6955, 6963), True, 'import numpy as np\n'), ((7460, 7476), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (7468, 7476), True, 'import numpy as np\n'), ((6979, 6990), 'numpy.ravel', 'np.ravel', (['X'], {}), '(X)\n', (6987, 6990), True, 'import numpy as np\n'), ((6992, 7003), 'numpy.ravel', 'np.ravel', (['Y'], {}), '(Y)\n', (7000, 7003), True, 'import numpy as np\n'), ((7492, 7503), 'numpy.ravel', 'np.ravel', (['X'], {}), '(X)\n', (7500, 7503), True, 'import numpy as np\n'), ((7505, 7516), 'numpy.ravel', 'np.ravel', (['Y'], {}), '(Y)\n', (7513, 7516), True, 'import numpy as np\n')]
|
"""Hyper-distributions."""
from libqif.core.secrets import Secrets
from libqif.core.channel import Channel
from numpy import array, arange, zeros
from numpy import delete as npdelete
class Hyper:
def __init__(self, channel):
"""Hyper-distribution. To create an instance of this class it is
class it is necessary to have an instance of :py:class:`.Channel`
class. Once created an instance of :py:class:`.Hyper`, the constructor
generates the joint, outer and inner distributions.
Attributes
----------
channel : core.Channel
Channel object.
joint : numpy.ndarray
Matrix of joint distribution.
outer : numpy.ndarray
Outer distribution.
inners : numpy.ndarray
Matrix of inner distributions.
num_posteriors : int
            Number of posterior distributions resulting from reducing the
            hyper-distribution, i.e., removing columns that contain only
            zeros and merging columns where one is a linear combination
            of another.
Parameters
----------
channel : core.Channel
Channel object.
"""
self._check_types(channel)
self.channel = channel
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def update_prior(self, prior):
"""Update the prior distribution on set of secrets.
The number of secrets must match the current number of rows of the channel.
Parameters
----------
prior : list, numpy.ndarray
Prior distribution on the set of secrets. prior[i] is the
probability of secret named labels[i] beeing the real secret.
"""
self.channel.update_prior(prior)
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def _check_types(self, channel):
if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))):
raise TypeError('The parameter \'channel\' must be a core.channel.Channel object')
def _generate_joint_distribution(self):
joint = []
channel_t = self.channel.matrix.T
for i in arange(self.channel.num_outputs):
joint.append(self.channel.secrets.prior * channel_t[i])
return array(joint).T
def _generate_posteriors(self):
joint_t = self.joint.T.copy()
outer = []
for i in arange(self.channel.num_outputs):
outer.append(joint_t[i].sum())
if outer[i] > 0:
joint_t[i] = joint_t[i]/outer[i]
return array(outer), joint_t.T
def _reduce_hyper(self):
"""Given the hyper-distribution generated by _generate_posteriors
remove columns with zeros and merge columns that are a linear
combination of others. Thus algorithm has time complexity of O(n*m^2)
where n is the number of secrets and m is the number of outputs in
the.
"""
epsilon = 10**(-6)
# Delete inners that have 0 probability of occuring
zero_prob = self.outer < epsilon
self.outer = npdelete(self.outer, zero_prob, 0)
self.inners = npdelete(self.inners, zero_prob, 1)
delete_inner = [False] * len(self.outer)
for i in arange(self.inners.shape[1]):
for j in arange(i+1, self.inners.shape[1]):
# Check if inner i is equal to inner j
if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets:
delete_inner[j] = True # Delete inner j
self.outer[i] += self.outer[j] # Merge inner j into inner i
self.outer = npdelete(self.outer, delete_inner, 0)
self.inners = npdelete(self.inners, delete_inner, 1)
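# Minimal usage sketch (illustrative, not part of the library). It reuses the toy channel
# pattern from _check_types above: a matrix with one row per secret and one column per
# output. The concrete prior and matrix values here are only assumptions for the example.
if __name__ == '__main__':
    secrets = Secrets(['x1', 'x2'], [0.5, 0.5])
    channel = Channel(secrets, ['y1', 'y2'], array([[1, 0], [0, 1]]))
    hyper = Hyper(channel)
    print(hyper.outer)            # outer distribution over the outputs
    print(hyper.inners)           # one inner (posterior) distribution per column
    print(hyper.num_posteriors)   # number of distinct posteriors after reduction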
|
[
"numpy.delete",
"numpy.array",
"libqif.core.secrets.Secrets",
"numpy.arange"
] |
[((2486, 2518), 'numpy.arange', 'arange', (['self.channel.num_outputs'], {}), '(self.channel.num_outputs)\n', (2492, 2518), False, 'from numpy import array, arange, zeros\n'), ((2742, 2774), 'numpy.arange', 'arange', (['self.channel.num_outputs'], {}), '(self.channel.num_outputs)\n', (2748, 2774), False, 'from numpy import array, arange, zeros\n'), ((3447, 3481), 'numpy.delete', 'npdelete', (['self.outer', 'zero_prob', '(0)'], {}), '(self.outer, zero_prob, 0)\n', (3455, 3481), True, 'from numpy import delete as npdelete\n'), ((3504, 3539), 'numpy.delete', 'npdelete', (['self.inners', 'zero_prob', '(1)'], {}), '(self.inners, zero_prob, 1)\n', (3512, 3539), True, 'from numpy import delete as npdelete\n'), ((3607, 3635), 'numpy.arange', 'arange', (['self.inners.shape[1]'], {}), '(self.inners.shape[1])\n', (3613, 3635), False, 'from numpy import array, arange, zeros\n'), ((4025, 4062), 'numpy.delete', 'npdelete', (['self.outer', 'delete_inner', '(0)'], {}), '(self.outer, delete_inner, 0)\n', (4033, 4062), True, 'from numpy import delete as npdelete\n'), ((4085, 4123), 'numpy.delete', 'npdelete', (['self.inners', 'delete_inner', '(1)'], {}), '(self.inners, delete_inner, 1)\n', (4093, 4123), True, 'from numpy import delete as npdelete\n'), ((2604, 2616), 'numpy.array', 'array', (['joint'], {}), '(joint)\n', (2609, 2616), False, 'from numpy import array, arange, zeros\n'), ((2921, 2933), 'numpy.array', 'array', (['outer'], {}), '(outer)\n', (2926, 2933), False, 'from numpy import array, arange, zeros\n'), ((3658, 3693), 'numpy.arange', 'arange', (['(i + 1)', 'self.inners.shape[1]'], {}), '(i + 1, self.inners.shape[1])\n', (3664, 3693), False, 'from numpy import array, arange, zeros\n'), ((2211, 2240), 'libqif.core.secrets.Secrets', 'Secrets', (["['x1', 'x2']", '[1, 0]'], {}), "(['x1', 'x2'], [1, 0])\n", (2218, 2240), False, 'from libqif.core.secrets import Secrets\n'), ((2248, 2265), 'numpy.array', 'array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (2253, 2265), False, 'from numpy import array, arange, zeros\n')]
|
import sys
import pytz
#import xml.utils.iso8601
import time
import numpy
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt
from exchange import cb_exchange as cb_exchange
from exchange import CoinbaseExchangeAuth
from abc import ABCMeta, abstractmethod
class strategy(object):
"""`strategy` defines an abstract base strategy class. Minimum required to create a strategy is a file with a class which inherits from strategy containing a backtest_strategy function. As a bonus, strategy includes utility functions like calculate_historic_data.
"""
__metaclass__ = ABCMeta
    def __init__(self, name="default name", interval=5):
        """Constructor for an abstract strategy. You can modify it as needed.
        \n`interval`: a.k.a. timeslice, the amount of time in seconds for each 'tick'; default is 5
        \n`name`: a string name for the strategy
"""
self.name = name
self.interval = interval
self.times_recalculated = 0
@abstractmethod
def trade(self, timeslice):
"""Perform operations on a timeslice.
\n`timeslice`: a section of trade data with time length equal to the strategy's interval, formatted as follows:
\n[time, low, high, open, close, volume]
"""
return
def backtest_strategy(self, historic_data):
"""Returns performance of a strategy vs market performance.
"""
# Reverse the data since Coinbase returns it in reverse chronological
# now historic_data strarts with the oldest entry
historic_data = list(reversed(historic_data))
earliest_time = float(historic_data[0][0])
latest_time = float(historic_data[-1][0])
start_price = float(historic_data[0][4])
end_price = float(historic_data[-1][4])
market_performance = ((end_price-start_price)/start_price)*100
print("Running simulation on historic data. This may take some time....")
for timeslice in historic_data:
# Display what percent through the data we are
idx = historic_data.index(timeslice)
percent = (float(idx)/float(len(historic_data)))*100 + 1
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
self.trade(timeslice)
# Calculate performance
end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc)
end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal)
start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc)
strategy_performance = ((end_amt-start_amt)/start_amt)*100
print("\n")
print("Times recalculated: "+str(self.times_recalculated))
print("Times bought: "+str(self.exchange.times_bought))
print("Times sold: "+str(self.exchange.times_sold))
print("The Market's performance: "+str(market_performance)+" %")
print("Strategy's performance: "+str(strategy_performance)+" %")
print("Account's ending value if no trades were made: "+str(end_amt_no_trades)+" BTC")
print("Account's ending value with this strategy: "+str(end_amt)+" BTC")
strategy_performance_vs_market = strategy_performance - market_performance
if strategy_performance > market_performance:
print("Congratulations! This strategy has beat the market by: "+str(strategy_performance_vs_market)+" %")
elif strategy_performance < market_performance:
print("This strategy has preformed: "+str(strategy_performance_vs_market)+" % worse than market.")
return strategy_performance_vs_market, strategy_performance, market_performance
@staticmethod
def calculate_historic_data(data, pivot):
"""Returns average price weighted according to volume, and the number of bitcoins traded
above and below a price point, called a pivot.\n
\npivot: the price used for returning volume above and below
\ndata: a list of lists formated as follows [time, low, high, open, close]
\n[
\n\t["2014-11-07 22:19:28.578544+00", "0.32", "4.2", "0.35", "4.2", "12.3"],
\n\t\t...
\n]
"""
price_list = []
weights = []
if data is None:
pass
min_price = float(data[0][1])
max_price = float(data[0][2])
discrete_prices = {}
for timeslice in data:
timeslice = [float(i) for i in timeslice]
if max_price < timeslice[2]:
                max_price = timeslice[2]
if min_price > timeslice[1]:
min_price = timeslice[1]
closing_price = timeslice[4]
volume = timeslice[5]
if closing_price not in discrete_prices.keys():
discrete_prices[str(closing_price)] = volume
else:
                discrete_prices[str(closing_price)] += volume
idx = data.index(timeslice)
price_list.append(closing_price)
weights.append(volume)
fltprices = [float(i) for i in discrete_prices.keys()]
fltvolumes = [float(i) for i in discrete_prices.values()]
np_discrete_prices = numpy.array(fltprices)
np_volume_per_price = numpy.array(fltvolumes)
weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price)
num_above = 0
num_below = 0
num_at = 0
for key in discrete_prices.keys():
value = discrete_prices[key]
if float(key) > pivot:
num_above+=value
elif float(key) < pivot:
num_below+=value
elif float(key) == pivot:
num_at+=value
total_volume = 0.0
for volume in fltvolumes:
total_volume+=volume
fltprops = []
for volume in fltvolumes:
fltprops.append((volume/total_volume))
#print("num_below: "+str(num_below))
#print("num_above: "+str(num_above))
#print("num_at: "+str(num_at))
#print("weighted_average: "+str(weighted_avg))
#plt.title("Price distribution")
#plt.xlabel("Price (USD)")
#plt.ylabel("Volume")
#plt.bar(fltprices, fltprops)
#plt.show()
return weighted_avg, num_above, num_below
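# Minimal usage sketch (illustrative, not part of the original module): exercising the
# static helper with two made-up timeslices in the documented
# [time, low, high, open, close, volume] format and a pivot price of 350.0.
if __name__ == "__main__":
    sample_data = [
        [1415398768, 320.0, 420.0, 350.0, 420.0, 12.3],
        [1415398773, 410.0, 430.0, 420.0, 415.0, 3.1],
    ]
    avg, above, below = strategy.calculate_historic_data(sample_data, 350.0)
    print("volume-weighted average price:", avg)
    print("volume traded above / below the pivot:", above, below)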
|
[
"sys.stdout.write",
"numpy.array",
"sys.stdout.flush",
"numpy.average"
] |
[((5274, 5296), 'numpy.array', 'numpy.array', (['fltprices'], {}), '(fltprices)\n', (5285, 5296), False, 'import numpy\n'), ((5327, 5350), 'numpy.array', 'numpy.array', (['fltvolumes'], {}), '(fltvolumes)\n', (5338, 5350), False, 'import numpy\n'), ((5374, 5436), 'numpy.average', 'numpy.average', (['np_discrete_prices'], {'weights': 'np_volume_per_price'}), '(np_discrete_prices, weights=np_volume_per_price)\n', (5387, 5436), False, 'import numpy\n'), ((2197, 2233), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%d%%' % percent)"], {}), "('\\r%d%%' % percent)\n", (2213, 2233), False, 'import sys\n'), ((2246, 2264), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2262, 2264), False, 'import sys\n')]
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlatten(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.g_shape = (numpy.prod((1,) + self.shape),)
self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flatten(x)
self.assertEqual(y.shape, self.g_shape)
self.assertEqual(y.dtype, self.dtype)
testing.assert_allclose(self.x.flatten(), y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, g_data):
gradient_check.check_backward(
functions.Flatten(), x_data, g_data, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
|
[
"numpy.prod",
"chainer.Variable",
"chainer.testing.run_module",
"chainer.testing.product",
"chainer.functions.Flatten",
"chainer.functions.flatten",
"numpy.random.uniform",
"chainer.cuda.to_gpu"
] |
[((1406, 1444), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (1424, 1444), False, 'from chainer import testing\n'), ((678, 702), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (694, 702), False, 'import chainer\n'), ((715, 735), 'chainer.functions.flatten', 'functions.flatten', (['x'], {}), '(x)\n', (732, 735), False, 'from chainer import functions\n'), ((222, 323), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64]}"], {}), "({'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.\n float32, numpy.float64]})\n", (237, 323), False, 'from chainer import testing\n'), ((518, 547), 'numpy.prod', 'numpy.prod', (['((1,) + self.shape)'], {}), '((1,) + self.shape)\n', (528, 547), False, 'import numpy\n'), ((1031, 1050), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1042, 1050), False, 'from chainer import cuda\n'), ((1150, 1169), 'chainer.functions.Flatten', 'functions.Flatten', ([], {}), '()\n', (1167, 1169), False, 'from chainer import functions\n'), ((1362, 1381), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1373, 1381), False, 'from chainer import cuda\n'), ((1383, 1402), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.g'], {}), '(self.g)\n', (1394, 1402), False, 'from chainer import cuda\n'), ((435, 474), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (455, 474), False, 'import numpy\n'), ((567, 608), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.g_shape'], {}), '(-1, 1, self.g_shape)\n', (587, 608), False, 'import numpy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: <NAME> <<EMAIL>>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
    Args:
        fns (list): list of file names to process
        speakers_dict (dict): dictionary containing all embeddings across speakers
        features_extractor (Any): object for feature extraction
        embedding_extractor (Any): object for extracting embeddings
        audio_dir (string_types): path to audio directory
        wav_suffix (string_types): suffix of wav files
        in_rttm_dir (string_types): path to directory with rttm files
        rttm_suffix (string_types): suffix of rttm files
        min_length (float): minimal length for extracting embeddings
        n_jobs (int): number of parallel jobs
    Returns:
        list: updated speaker dictionaries, one per processed file (or per partition when run in parallel)
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
    Args:
        dargs (tuple): pair of (list of file names, dict of keyword arguments for process_file)
    Returns:
        list: one updated speakers dictionary per processed file
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
speakers_dict (dict): dictionary containing all embedding across speakers
features_extractor (Any):
embedding_extractor (Any):
audio_dir (string_types):
wav_suffix (string_types):
in_rttm_dir (string_types):
rttm_suffix (string_types):
min_length (float):
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
class Normalization(object):
""" Speaker normalization S-Norm. """
embeddings = None
in_emb_dir = None
def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,
out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,
plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):
""" Initialize normalization object.
Args:
norm_list (string_types): path to normalization list
audio_dir (string_types|None): path to audio directory
in_rttm_dir (string_types|None): path to directory with rttm files
in_emb_dir (str|None): path to directory with i-vectors
out_emb_dir (str|None): path to directory for storing embeddings
min_length (int): minimal length for extracting embeddings
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embedding
plda (PLDA|None): plda model object
wav_suffix (string_types): suffix of wav files
rttm_suffix (string_types): suffix of rttm files
"""
if audio_dir:
self.audio_dir = os.path.abspath(audio_dir)
self.norm_list = norm_list
if in_rttm_dir:
self.in_rttm_dir = os.path.abspath(in_rttm_dir)
else:
raise ValueError('It is required to have input rttm files for normalization.')
self.features_extractor = features_extractor
self.embedding_extractor = embedding_extractor
self.plda = plda
self.wav_suffix = wav_suffix
self.rttm_suffix = rttm_suffix
if in_emb_dir:
self.in_emb_dir = os.path.abspath(in_emb_dir)
if out_emb_dir:
self.out_emb_dir = os.path.abspath(out_emb_dir)
self.min_length = min_length
self.n_jobs = n_jobs
if self.in_emb_dir is None:
self.embeddings = self.extract_embeddings()
else:
self.embeddings = self.load_embeddings()
self.mean = np.mean(self.embeddings, axis=0)
def __iter__(self):
current = 0
while current < len(self.embeddings):
yield self.embeddings[current]
current += 1
def __getitem__(self, key):
return self.embeddings[key]
def __setitem__(self, key, value):
self.embeddings[key] = value
def __len__(self):
return len(self.embeddings)
def extract_embeddings(self):
""" Extract normalization embeddings using averaging.
Returns:
            np.array: averaged embedding per normalization speaker
"""
speakers_dict, fns = {}, []
with open(self.norm_list) as f:
for line in f:
if len(line.split()) > 1: # number of speakers is defined
line = line.split()[0]
else:
line = line.replace(os.linesep, '')
fns.append(line)
speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,
embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,
wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,
rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)
assert len(speakers_dict) == len(fns)
# all are the same
merged_speakers_dict = speakers_dict[0]
if self.out_emb_dir:
for speaker in merged_speakers_dict:
out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')
mkdir_p(os.path.dirname(out_path))
with open(out_path, 'wb') as f:
pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)
for speaker in merged_speakers_dict:
merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)
return np.array(list(merged_speakers_dict.values()))
def load_embeddings(self):
""" Load normalization embeddings from pickle files.
Returns:
np.array: embeddings per speaker
"""
embeddings, speakers = [], set()
with open(self.norm_list) as f:
for file_name in f:
if len(file_name.split()) > 1: # number of speakers is defined
file_name = file_name.split()[0]
else:
file_name = file_name.replace(os.linesep, '')
with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:
for line in fp:
speakers.add(line.split()[7])
logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))
for speaker in speakers:
embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))
if os.path.isfile(embedding_path):
logger.info('Loading normalization pickle file `{}`.'.format(speaker))
with open(embedding_path, 'rb') as f:
# append mean from speaker's embeddings
speaker_embeddings = pickle.load(f)
embeddings.append(np.mean(speaker_embeddings, axis=0))
else:
logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))
return np.array(embeddings)
def s_norm(self, test, enroll):
""" Run speaker normalization (S-Norm) on cached embeddings.
Args:
test (np.array): test embedding
enroll (np.array): enroll embedding
Returns:
float: hypothesis
"""
if self.plda:
a = self.plda.score(test, self.embeddings).T
b = self.plda.score(enroll, self.embeddings).T
c = self.plda.score(enroll, test).T
else:
a = cosine_similarity(test, self.embeddings).T
b = cosine_similarity(enroll, self.embeddings).T
c = cosine_similarity(enroll, test).T
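        # a: scores of each test embedding against the normalization cohort,
        # b: scores of each enroll embedding against the cohort,
        # c: direct enroll-vs-test scores. Each raw score c[ii][jj] is z-normalized
        # against both cohort score distributions and the two z-scores are averaged.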
scores = []
for ii in range(test.shape[0]):
test_scores = []
for jj in range(enroll.shape[0]):
test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])
enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])
s = c[ii][jj]
test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) / enroll_std) / 2))
scores.append(test_scores)
return np.array(scores)
|
[
"logging.getLogger",
"numpy.mean",
"pickle.dump",
"sklearn.metrics.pairwise.cosine_similarity",
"vbdiar.embeddings.embedding.extract_embeddings",
"numpy.std",
"os.path.join",
"pickle.load",
"os.path.isfile",
"numpy.array",
"os.path.dirname",
"multiprocessing.Pool",
"numpy.concatenate",
"vbdiar.utils.utils.Utils.partition",
"os.path.abspath"
] |
[((488, 515), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (505, 515), False, 'import logging\n'), ((1308, 1336), 'multiprocessing.Pool', 'multiprocessing.Pool', (['n_jobs'], {}), '(n_jobs)\n', (1328, 1336), False, 'import multiprocessing\n'), ((3560, 3623), 'vbdiar.embeddings.embedding.extract_embeddings', 'extract_embeddings', (['features_dict[speaker]', 'embedding_extractor'], {}), '(features_dict[speaker], embedding_extractor)\n', (3578, 3623), False, 'from vbdiar.embeddings.embedding import extract_embeddings\n'), ((6040, 6072), 'numpy.mean', 'np.mean', (['self.embeddings'], {'axis': '(0)'}), '(self.embeddings, axis=0)\n', (6047, 6072), True, 'import numpy as np\n'), ((9540, 9560), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (9548, 9560), True, 'import numpy as np\n'), ((10675, 10691), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (10683, 10691), True, 'import numpy as np\n'), ((3837, 3902), 'numpy.concatenate', 'np.concatenate', (['(speakers_dict[speaker], embeddings_long)'], {'axis': '(0)'}), '((speakers_dict[speaker], embeddings_long), axis=0)\n', (3851, 3902), True, 'import numpy as np\n'), ((5170, 5196), 'os.path.abspath', 'os.path.abspath', (['audio_dir'], {}), '(audio_dir)\n', (5185, 5196), False, 'import os\n'), ((5287, 5315), 'os.path.abspath', 'os.path.abspath', (['in_rttm_dir'], {}), '(in_rttm_dir)\n', (5302, 5315), False, 'import os\n'), ((5683, 5710), 'os.path.abspath', 'os.path.abspath', (['in_emb_dir'], {}), '(in_emb_dir)\n', (5698, 5710), False, 'import os\n'), ((5766, 5794), 'os.path.abspath', 'os.path.abspath', (['out_emb_dir'], {}), '(out_emb_dir)\n', (5781, 5794), False, 'import os\n'), ((7994, 8040), 'numpy.mean', 'np.mean', (['merged_speakers_dict[speaker]'], {'axis': '(0)'}), '(merged_speakers_dict[speaker], axis=0)\n', (8001, 8040), True, 'import numpy as np\n'), ((9037, 9067), 'os.path.isfile', 'os.path.isfile', (['embedding_path'], {}), '(embedding_path)\n', (9051, 9067), False, 'import os\n'), ((7665, 7713), 'os.path.join', 'os.path.join', (['self.out_emb_dir', 'f"""{speaker}.pkl"""'], {}), "(self.out_emb_dir, f'{speaker}.pkl')\n", (7677, 7713), False, 'import os\n'), ((10050, 10090), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['test', 'self.embeddings'], {}), '(test, self.embeddings)\n', (10067, 10090), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((10109, 10151), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['enroll', 'self.embeddings'], {}), '(enroll, self.embeddings)\n', (10126, 10151), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((10170, 10201), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['enroll', 'test'], {}), '(enroll, test)\n', (10187, 10201), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((1404, 1432), 'vbdiar.utils.utils.Utils.partition', 'Utils.partition', (['fns', 'n_jobs'], {}), '(fns, n_jobs)\n', (1419, 1432), False, 'from vbdiar.utils.utils import Utils\n'), ((2645, 2681), 'os.path.join', 'os.path.join', (['in_rttm_dir', 'file_name'], {}), '(in_rttm_dir, file_name)\n', (2657, 2681), False, 'import os\n'), ((7738, 7763), 'os.path.dirname', 'os.path.dirname', (['out_path'], {}), '(out_path)\n', (7753, 7763), False, 'import os\n'), ((7833, 7903), 'pickle.dump', 'pickle.dump', (['merged_speakers_dict[speaker]', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)\n', (7844, 7903), False, 
'import pickle\n'), ((9311, 9325), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9322, 9325), False, 'import pickle\n'), ((10377, 10393), 'numpy.mean', 'np.mean', (['a.T[ii]'], {}), '(a.T[ii])\n', (10384, 10393), True, 'import numpy as np\n'), ((10395, 10410), 'numpy.std', 'np.std', (['a.T[ii]'], {}), '(a.T[ii])\n', (10401, 10410), True, 'import numpy as np\n'), ((10453, 10469), 'numpy.mean', 'np.mean', (['b.T[jj]'], {}), '(b.T[jj])\n', (10460, 10469), True, 'import numpy as np\n'), ((10471, 10486), 'numpy.std', 'np.std', (['b.T[jj]'], {}), '(b.T[jj])\n', (10477, 10486), True, 'import numpy as np\n'), ((9364, 9399), 'numpy.mean', 'np.mean', (['speaker_embeddings'], {'axis': '(0)'}), '(speaker_embeddings, axis=0)\n', (9371, 9399), True, 'import numpy as np\n'), ((8645, 8686), 'os.path.join', 'os.path.join', (['self.in_rttm_dir', 'file_name'], {}), '(self.in_rttm_dir, file_name)\n', (8657, 8686), False, 'import os\n')]
|
import numpy as np
import os
import logging
from sklearn.model_selection import train_test_split
DATASET_ROOT_FOLDER = os.path.abspath('datasets')
class DataLoader:
train = None
validation = None
test = None
mode = None
partial_dataset = None
@staticmethod
def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000):
if train_path is not None:
DataLoader.train = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length)
if validation_path is not None:
DataLoader.validation = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length)
elif train_validation_split_point is not None and train_validation_split_point > 0:
if DataLoader.mode is None or DataLoader.partial_dataset is not None:
train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8)
splited_train = {
'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :],
'labels': DataLoader.train['labels'][0:train_validation_split_point]
}
splited_validation = {
'images': DataLoader.train['images'][train_validation_split_point:, :, :, :],
'labels': DataLoader.train['labels'][train_validation_split_point:]
}
DataLoader.train = splited_train
DataLoader.validation = splited_validation
if test_path is not None:
DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length)
logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape)))
logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape)))
logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape)))
return DataLoader
@staticmethod
def get_training_data():
"""
get training data
:return: dict of (images, labels)
:rtype: dict
"""
        images = DataLoader.train['images']
        labels = DataLoader.train['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def get_validation_data():
"""
get validation data
:return: dict of (images, labels)
:rtype: dict
"""
        images = DataLoader.validation['images']
        labels = DataLoader.validation['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def get_test_data():
"""
get test data
:return: dict of (images, labels)
:rtype: dict
"""
        images = DataLoader.test['images']
        labels = DataLoader.test['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def load_image_data_with_label_at_end(path, height, length):
data = np.loadtxt(path)
if DataLoader.mode is None:
data = data[0:1000, :]
        elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset < 1:
# randomly pick partial dataset
cut_point = int(data.shape[0] * DataLoader.partial_dataset)
indices = np.random.permutation(data.shape[0])
            training_idx = indices[:cut_point]
data = data[training_idx, :]
images = data[:, 0:-1]
labels = data[:, -1]
images = np.reshape(images, [images.shape[0], height, length, 1], order='F')
return {
'images': images,
'labels': labels
}
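# Hedged usage sketch added for illustration (not part of the original module):
# the file names below are hypothetical; the data files are expected to hold one
# flattened image per row with the class label as the last column, as read by
# load_image_data_with_label_at_end above.
def _example_load_mnist_like():
    loader = DataLoader.load(train_path='mnist_train.txt',
                             test_path='mnist_test.txt',
                             height=28, length=28)
    return loader.get_training_data(), loader.get_test_data()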
|
[
"numpy.reshape",
"os.path.join",
"os.path.abspath",
"numpy.loadtxt",
"numpy.random.permutation"
] |
[((120, 147), 'os.path.abspath', 'os.path.abspath', (['"""datasets"""'], {}), "('datasets')\n", (135, 147), False, 'import os\n'), ((3261, 3277), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (3271, 3277), True, 'import numpy as np\n'), ((3810, 3877), 'numpy.reshape', 'np.reshape', (['images', '[images.shape[0], height, length, 1]'], {'order': '"""F"""'}), "(images, [images.shape[0], height, length, 1], order='F')\n", (3820, 3877), True, 'import numpy as np\n'), ((540, 585), 'os.path.join', 'os.path.join', (['DATASET_ROOT_FOLDER', 'train_path'], {}), '(DATASET_ROOT_FOLDER, train_path)\n', (552, 585), False, 'import os\n'), ((755, 805), 'os.path.join', 'os.path.join', (['DATASET_ROOT_FOLDER', 'validation_path'], {}), '(DATASET_ROOT_FOLDER, validation_path)\n', (767, 805), False, 'import os\n'), ((1765, 1809), 'os.path.join', 'os.path.join', (['DATASET_ROOT_FOLDER', 'test_path'], {}), '(DATASET_ROOT_FOLDER, test_path)\n', (1777, 1809), False, 'import os\n'), ((3609, 3645), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (3630, 3645), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS
def distort(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut // 3
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def stretch(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut * 4 // 5
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def perspective(src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
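# Hedged usage sketch added for illustration (not part of the original file):
# chains the three warps on a single image; `img` is assumed to be an
# H x W (x C) uint8 array, e.g. read with cv2.imread, and `segment` is arbitrary.
def _example_augment(img, segment=4):
    warped = distort(img, segment)
    stretched = stretch(warped, segment)
    return perspective(stretched)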
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
|
[
"warp_mls.WarpMLS",
"numpy.random.randint",
"numpy.arange"
] |
[((934, 958), 'numpy.arange', 'np.arange', (['(1)', 'segment', '(1)'], {}), '(1, segment, 1)\n', (943, 958), True, 'import numpy as np\n'), ((1373, 1417), 'warp_mls.WarpMLS', 'WarpMLS', (['src', 'src_pts', 'dst_pts', 'img_w', 'img_h'], {}), '(src, src_pts, dst_pts, img_w, img_h)\n', (1380, 1417), False, 'from warp_mls import WarpMLS\n'), ((2014, 2038), 'numpy.arange', 'np.arange', (['(1)', 'segment', '(1)'], {}), '(1, segment, 1)\n', (2023, 2038), True, 'import numpy as np\n'), ((2309, 2353), 'warp_mls.WarpMLS', 'WarpMLS', (['src', 'src_pts', 'dst_pts', 'img_w', 'img_h'], {}), '(src, src_pts, dst_pts, img_w, img_h)\n', (2316, 2353), False, 'from warp_mls import WarpMLS\n'), ((2913, 2957), 'warp_mls.WarpMLS', 'WarpMLS', (['src', 'src_pts', 'dst_pts', 'img_w', 'img_h'], {}), '(src, src_pts, dst_pts, img_w, img_h)\n', (2920, 2957), False, 'from warp_mls import WarpMLS\n'), ((563, 588), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (580, 588), True, 'import numpy as np\n'), ((590, 615), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (607, 615), True, 'import numpy as np\n'), ((674, 699), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (691, 699), True, 'import numpy as np\n'), ((815, 840), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (832, 840), True, 'import numpy as np\n'), ((2056, 2081), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2073, 2081), True, 'import numpy as np\n'), ((2690, 2715), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2707, 2715), True, 'import numpy as np\n'), ((2746, 2771), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2763, 2771), True, 'import numpy as np\n'), ((647, 672), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (664, 672), True, 'import numpy as np\n'), ((731, 756), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (748, 756), True, 'import numpy as np\n'), ((766, 791), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (783, 791), True, 'import numpy as np\n'), ((850, 875), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (867, 875), True, 'import numpy as np\n'), ((2810, 2835), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2827, 2835), True, 'import numpy as np\n'), ((2870, 2895), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2887, 2895), True, 'import numpy as np\n'), ((1159, 1184), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1176, 1184), True, 'import numpy as np\n'), ((1093, 1118), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1110, 1118), True, 'import numpy as np\n'), ((1242, 1267), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1259, 1267), True, 'import numpy as np\n'), ((1316, 1341), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1333, 1341), True, 'import numpy as np\n')]
|
import logging
from typing import Dict, List, Optional
import numpy as np
import qiskit
from qiskit.circuit import Barrier, Delay, Reset
from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate,
PhaseGate, RXGate, RYGate, RZGate, U1Gate,
U2Gate, U3Gate, UGate)
from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate,
SGate, TdgGate, TGate,
ZGate)
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass
logger = logging.getLogger(__name__)
class RemoveSmallRotations(TransformationPass):
"""Return a circuit with small rotation gates removed."""
def __init__(self, epsilon: float = 0, modulo2pi=False):
"""Remove all small rotations from a circuit
Args:
epsilon: Threshold for rotation angle to be removed
modulo2pi: If True, then rotations multiples of 2pi are removed as well
"""
super().__init__()
self.epsilon = epsilon
self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))
self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))
self.mod2pi = modulo2pi
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the pass on `dag`.
Args:
dag: input dag.
Returns:
Output dag with small rotations removed
"""
def modulo_2pi(x):
x = float(x)
return np.mod(x + np.pi, 2 * np.pi) - np.pi
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag1)
elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag2)
return dag
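# Hedged usage sketch added for illustration (not part of the original module):
# runs the pass through a standard qiskit PassManager on an existing circuit;
# the epsilon value is arbitrary.
def _example_remove_small_rotations(circuit: QuantumCircuit) -> QuantumCircuit:
    from qiskit.transpiler import PassManager
    pass_manager = PassManager([RemoveSmallRotations(epsilon=1e-8, modulo2pi=True)])
    return pass_manager.run(circuit)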
class RemoveDiagonalGatesAfterInput(TransformationPass):
"""Remove diagonal gates (including diagonal 2Q gates) at the start of a circuit.
Transpiler pass to remove diagonal gates (like RZ, T, Z, etc) at the start of a circuit.
Including diagonal 2Q gates. Nodes after a reset are also included.
"""
def run(self, dag):
"""Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`.
Args:
dag (DAGCircuit): the DAG to be optimized.
Returns:
DAGCircuit: the optimized DAG.
"""
diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate)
diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)
nodes_to_remove = set()
for input_node in (dag.input_map.values()):
try:
successor = next(dag.quantum_successors(input_node))
except StopIteration:
continue
if successor.type == "op" and isinstance(successor.op, diagonal_1q_gates):
nodes_to_remove.add(successor)
def valid_predecessor(s):
""" Return True of node is valid predecessor for removal """
if s.type == 'in':
return True
if s.type == "op" and isinstance(s.op, Reset):
return True
return False
if successor.type == "op" and isinstance(successor.op, diagonal_2q_gates):
predecessors = dag.quantum_predecessors(successor)
if all(valid_predecessor(s) for s in predecessors):
nodes_to_remove.add(successor)
for node_to_remove in nodes_to_remove:
dag.remove_op_node(node_to_remove)
return dag
class DecomposeU(TransformationPass):
""" Decompose U gates into elementary rotations Rx, Ry, Rz
The U gates are decomposed using McKay decomposition.
"""
def __init__(self, verbose=0):
"""
Args:
"""
super().__init__()
self._subdags = []
self.verbose = verbose
self.initial_layout = None
def ugate_replacement_circuit(self, ugate):
qc = QuantumCircuit(1)
if isinstance(ugate, (U3Gate, UGate)):
theta, phi, lam = ugate.params
if theta == np.pi/2:
# a u2 gate
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
else:
# from https://arxiv.org/pdf/1707.03429.pdf
qc.rz(lam, 0)
qc.rx(np.pi / 2, 0)
qc.rz(theta + np.pi, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi, 0)
elif isinstance(ugate, U2Gate):
phi, lam = ugate.params
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
elif isinstance(ugate, (U1Gate, PhaseGate)):
lam, = ugate.params
qc.rz(lam, 0)
else:
raise Exception(f'unknown gate type {ugate}')
return qc
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input DAG.
Returns:
Output DAG where ``U`` gates have been decomposed.
"""
# Walk through the DAG and expand each node if required
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):
subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op))
dag.substitute_node_with_dag(node, subdag)
return dag
class DecomposeCX(TransformationPass):
""" Decompose CX into CZ and single qubit rotations
"""
def __init__(self, mode: str = 'ry'):
"""
Args:
"""
super().__init__()
self._subdags: List = []
self.initial_layout = None
self.gate = qiskit.circuit.library.CXGate
self.decomposition = QuantumCircuit(2)
if mode == 'ry':
self.decomposition.ry(-np.pi / 2, 1)
self.decomposition.cz(0, 1)
self.decomposition.ry(np.pi / 2, 1)
else:
self.decomposition.h(1)
self.decomposition.cz(0, 1)
self.decomposition.h(1)
self._dag = circuit_to_dag(self.decomposition)
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input dag.
Returns:
output dag where ``CX`` was expanded.
"""
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
dag.substitute_node_with_dag(node, self._dag)
return dag
class SequentialPass(TransformationPass):
"""Adds barriers between gates to make the circuit sequential."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for node in dag.op_nodes():
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
            logger.info(f'SequentialPass: adding node {node.name}')
if node.name in ['barrier', 'measure']:
continue
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class LinearTopologyParallelPass(TransformationPass):
"""Adds barriers to enforce a linear topology
    The barriers are placed between gates such that no two-qubit gates are executed
    at the same time and only single-qubit gates on non-neighboring qubits can
    be executed in parallel. It assumes a linear topology."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for ii, layer in enumerate(dag.layers()):
gates_1q = []
gates_2q = []
other_gates = []
for node in layer['graph'].op_nodes():
if len(node.qargs) == 2:
gates_2q.append(node)
elif len(node.qargs) == 1:
gates_1q.append(node)
else:
logging.info(f'layer {ii}: other type of node {node}')
other_gates.append(node)
even = []
odd = []
for node in gates_1q:
if node.qargs[0].index % 2 == 0:
even.append(node)
else:
odd.append(node)
logging.info(
f'layer {ii}: 2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}')
if len(even) > 0:
for node in even:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
if len(odd) > 0:
for node in odd:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in gates_2q:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in other_gates:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class DelayPass(TransformationPass):
"""Adds delay gates when the qubits are idle.
For every layer of the circuit it finds the gate that
lasts the longest and applies appropriate delays on the
other qubits.
"""
def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None):
"""
Args:
gate_durations: Gate durations in the units of dt
"""
super().__init__()
self.gate_durations = gate_durations
self.delay_quantum = delay_quantum
def add_delay_to_dag(self, duration, dag, qargs, cargs):
if self.delay_quantum:
number_of_delays = int(duration/self.delay_quantum)
for ii in range(number_of_delays):
dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs)
else:
dag.apply_operation_back(Delay(duration), qargs, cargs)
@staticmethod
def _determine_delay_target_qubits(dag, layer):
""" Determine qubits in specified layer which require a delay gate """
partition = layer['partition']
lst = list(dag.qubits)
for el in partition:
for q in el:
if q in lst:
lst.remove(q)
return lst
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for layer_idx, layer in enumerate(dag.layers()):
max_duration = 0
durations = {}
for node in layer['graph'].op_nodes():
if node.name in self.gate_durations:
max_duration = max(max_duration, self.gate_durations[node.name])
for q in node.qargs:
durations[q] = self.gate_durations[node.name]
else:
                    logger.info(f'layer {layer_idx}, could not find duration for node {node.name}')
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
partition = layer['partition']
if len(partition) == 0:
continue
lst = DelayPass._determine_delay_target_qubits(dag, layer)
logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}')
for el in lst:
logger.info(f'apply_operation_back: {[el]}')
self.add_delay_to_dag(max_duration, new_dag, [el], [])
for q in durations:
if max_duration - durations[q] > 0:
self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], [])
return new_dag
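# Hedged usage sketch added for illustration (not part of the original module):
# the duration values below are hypothetical, expressed in dt units, and keyed by
# gate name exactly as it appears in the DAG.
def _example_delay_pass(circuit: QuantumCircuit) -> QuantumCircuit:
    from qiskit.transpiler import PassManager
    gate_durations = {'rz': 0, 'rx': 40, 'cz': 100, 'measure': 300}
    return PassManager([DelayPass(gate_durations=gate_durations)]).run(circuit)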
|
[
"logging.getLogger",
"qiskit.circuit.quantumcircuit.QuantumCircuit",
"qiskit.circuit.Delay",
"numpy.abs",
"logging.info",
"qiskit.dagcircuit.DAGCircuit",
"qiskit.converters.circuit_to_dag.circuit_to_dag",
"numpy.mod"
] |
[((785, 812), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (802, 812), False, 'import logging\n'), ((5004, 5021), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (5018, 5021), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((6869, 6886), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (6883, 6886), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((7196, 7230), 'qiskit.converters.circuit_to_dag.circuit_to_dag', 'circuit_to_dag', (['self.decomposition'], {}), '(self.decomposition)\n', (7210, 7230), False, 'from qiskit.converters.circuit_to_dag import circuit_to_dag\n'), ((7790, 7802), 'qiskit.dagcircuit.DAGCircuit', 'DAGCircuit', ([], {}), '()\n', (7800, 7802), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((8704, 8716), 'qiskit.dagcircuit.DAGCircuit', 'DAGCircuit', ([], {}), '()\n', (8714, 8716), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((12222, 12234), 'qiskit.dagcircuit.DAGCircuit', 'DAGCircuit', ([], {}), '()\n', (12232, 12234), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((1333, 1350), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (1347, 1350), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((1412, 1429), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (1426, 1429), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((1742, 1770), 'numpy.mod', 'np.mod', (['(x + np.pi)', '(2 * np.pi)'], {}), '(x + np.pi, 2 * np.pi)\n', (1748, 1770), True, 'import numpy as np\n'), ((11792, 11807), 'qiskit.circuit.Delay', 'Delay', (['duration'], {}), '(duration)\n', (11797, 11807), False, 'from qiskit.circuit import Barrier, Delay, Reset\n'), ((11700, 11725), 'qiskit.circuit.Delay', 'Delay', (['self.delay_quantum'], {}), '(self.delay_quantum)\n', (11705, 11725), False, 'from qiskit.circuit import Barrier, Delay, Reset\n'), ((2203, 2214), 'numpy.abs', 'np.abs', (['phi'], {}), '(phi)\n', (2209, 2214), True, 'import numpy as np\n'), ((9261, 9315), 'logging.info', 'logging.info', (['f"""layer {ii}: other type of node {node}"""'], {}), "(f'layer {ii}: other type of node {node}')\n", (9273, 9315), False, 'import logging\n'), ((2691, 2702), 'numpy.abs', 'np.abs', (['phi'], {}), '(phi)\n', (2697, 2702), True, 'import numpy as np\n')]
|
def help():
return '''
Isotropic-Anisotropic Filtering Norm Nesterov Algorithm
Solves the filtering norm minimization + quadratic term problem
Nesterov algorithm, with continuation:
        argmin_x || iaFN(x) ||_1/2 subject to ||b - Ax||_2 <= delta
If no filter is provided, solves the L1.
Continuation is performed by sequentially applying Nesterov's algorithm
with a decreasing sequence of values of mu0 >= mu >= muf
The observation matrix A must be a projector (non projector not implemented yet)
Inputs:
IAFNNESTA(b, #Observed data, a m x 1 array
                A=identity,At=identity, # measurement matrix and adjoint (either matrices or function handles)
muf=0.0001, #final mu value, smaller leads to higher accuracy
delta, #l2 error bound. This enforces how close the variable
#must fit the observations b, i.e. || y - Ax ||_2 <= delta
#If delta = 0, enforces y = Ax
#delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma=std(noise).
L1w=1,L2w=0, #weights of L1 (anisotropic) and L2(isotropic) norms
verbose=0, #whether to print internal steps
maxit=1000, #maximum iterations at the inner loop
x0=[], #initial solution, if not provided, will be At(b)
U=identity,Ut=identity, #Analysis/Synthesis operators
stopTest=1, #stopTest == 1 : stop when the relative change in the objective
function is less than TolVar
stopTest == 2 : stop with the l_infinity norm of difference in
the xk variable is less than TolVar
TolVar = 1e-5, #tolerance for the stopping criteria
AAtinv=[], #not implemented
normU=1, #if U is provided, this should be norm(U)
H=[],Ht=[]): #filter operations in sparse matrix form
#also accepts the string 'tv' as input,
#in that case, calculates the tv norm
Outputs:
return xk, #estimated x reconstructed signal
niter, #number of iterations
residuals #first column is the residual at every step,
#second column is the value of f_mu at every step
'''
import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat
def identity(x):
return x
def IAFNNESTA(b,sig_size=0,A=identity,At=identity,muf=0.0001,delta=0,L1w=1,L2w=0,verbose=0,MaxIntIter=5,maxit=1000,x0=[],U=identity,Ut=identity,stopTest=1,TolVar = 1e-5,AAtinv=[],normU=1,H=[]):
if delta<0:
raise Exception('Delta must not be negative')
    if not callable(A):  # if A is given as a matrix, wrap it and its adjoint as functions
        A_mat = A
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)
b=b.reshape((-1,1))
Atb=At(b)
if sig_size==0:
sig_size=Atb.shape
if callable(AAtinv):
AtAAtb = At( AAtinv(b) )
else:
        if len(AAtinv) > 0:
            AAtinv_mat = AAtinv
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
AtAAtb = At( AAtinv(b) )
else: #default
AtAAtb = Atb
AAtinv=identity
if len(x0)==0:
x0 = AtAAtb
if len(H)==0:
Hf=identity
Hft=identity
else:
if not sparse.issparse(H):
if isinstance(H, str):
if H=='tv':
hs=[]
hs.append(np.array([[1,-1]]))
hs.append(np.array([[1],[-1]]))
H,_,_,_=fil2mat.fil2mat(hs,sig_size)
else:
print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
else:
#list of filters:
H,_,_,_=fil2mat.fil2mat(H,sig_size)
#print(H.shape)
#print(H)
#print(type(H))
Ht=H.transpose()
Hf=lambda x: H@x
Hft=lambda x: Ht@x
HU=lambda x: Hf(U(x))
UtHt=lambda x: Ut(Hft(x))
typemin=''
if L1w>0:
typemin+="iso"
if L2w>0:
typemin+="aniso"
typemin+='tropic '
if callable(H):
typemin+='filtering norm '
mu0=0
if L1w>0:
mu0+=L1w*0.9*np.max(np.linalg.norm(HU(x0),1))
if L2w>0:
mu0+=L2w*0.9*np.max(np.linalg.norm(HU(x0),2))
niter = 0
Gamma = np.power(muf/mu0,1/MaxIntIter)
mu = mu0
Gammat= np.power(TolVar/0.1,1/MaxIntIter)
TolVar = 0.1
for i in range(MaxIntIter):
mu = mu*Gamma
TolVar=TolVar*Gammat;
if verbose>0:
#if k%verbose==0:
print("\tBeginning %s Minimization; mu = %g\n" %(typemin,mu))
xk,niter_int,res = IAFNNesterov.IAFNNesterov(b,A=A,At=At,mu=mu,delta=delta,L1w=L1w,L2w=L2w,verbose=verbose,maxit=maxit,x0=x0,U=U,Ut=Ut,stopTest=stopTest,TolVar = TolVar,AAtinv=AAtinv,normU=normU,H=Hf,Ht=Hft)
xplug = xk
niter = niter_int + niter
if i==0:
residuals=res
else:
residuals = np.vstack((residuals, res))
return xk.reshape(sig_size)
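# Hedged usage sketch added for illustration (not part of the original module):
# denoises a 2D signal with the identity measurement operator and the built-in
# 'tv' filter option; `sigma` is the (assumed known) noise standard deviation
# and the l2 bound follows the rule quoted in help().
def _example_denoise(b_noisy, sigma):
    m = b_noisy.size
    delta = np.sqrt(m + 2 * np.sqrt(2 * m)) * sigma
    return IAFNNESTA(b_noisy, sig_size=b_noisy.shape, delta=delta, H='tv')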
if __name__ == "__main__":
print(help())
|
[
"IAFNNesterov.IAFNNesterov",
"fil2mat.fil2mat",
"numpy.power",
"scipy.sparse.issparse",
"numpy.array",
"numpy.matmul",
"numpy.vstack",
"numpy.transpose"
] |
[((4800, 4835), 'numpy.power', 'np.power', (['(muf / mu0)', '(1 / MaxIntIter)'], {}), '(muf / mu0, 1 / MaxIntIter)\n', (4808, 4835), True, 'import numpy as np\n'), ((4856, 4894), 'numpy.power', 'np.power', (['(TolVar / 0.1)', '(1 / MaxIntIter)'], {}), '(TolVar / 0.1, 1 / MaxIntIter)\n', (4864, 4894), True, 'import numpy as np\n'), ((5171, 5383), 'IAFNNesterov.IAFNNesterov', 'IAFNNesterov.IAFNNesterov', (['b'], {'A': 'A', 'At': 'At', 'mu': 'mu', 'delta': 'delta', 'L1w': 'L1w', 'L2w': 'L2w', 'verbose': 'verbose', 'maxit': 'maxit', 'x0': 'x0', 'U': 'U', 'Ut': 'Ut', 'stopTest': 'stopTest', 'TolVar': 'TolVar', 'AAtinv': 'AAtinv', 'normU': 'normU', 'H': 'Hf', 'Ht': 'Hft'}), '(b, A=A, At=At, mu=mu, delta=delta, L1w=L1w, L2w=\n L2w, verbose=verbose, maxit=maxit, x0=x0, U=U, Ut=Ut, stopTest=stopTest,\n TolVar=TolVar, AAtinv=AAtinv, normU=normU, H=Hf, Ht=Hft)\n', (5196, 5383), False, 'import IAFNNesterov\n'), ((3145, 3160), 'numpy.matmul', 'np.matmul', (['A', 'x'], {}), '(A, x)\n', (3154, 3160), True, 'import numpy as np\n'), ((3706, 3724), 'scipy.sparse.issparse', 'sparse.issparse', (['H'], {}), '(H)\n', (3721, 3724), False, 'from scipy import sparse\n'), ((5503, 5530), 'numpy.vstack', 'np.vstack', (['(residuals, res)'], {}), '((residuals, res))\n', (5512, 5530), True, 'import numpy as np\n'), ((3190, 3205), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (3202, 3205), True, 'import numpy as np\n'), ((3431, 3451), 'numpy.matmul', 'np.matmul', (['AAtinv', 'x'], {}), '(AAtinv, x)\n', (3440, 3451), True, 'import numpy as np\n'), ((4199, 4227), 'fil2mat.fil2mat', 'fil2mat.fil2mat', (['H', 'sig_size'], {}), '(H, sig_size)\n', (4214, 4227), False, 'import fil2mat\n'), ((3945, 3974), 'fil2mat.fil2mat', 'fil2mat.fil2mat', (['hs', 'sig_size'], {}), '(hs, sig_size)\n', (3960, 3974), False, 'import fil2mat\n'), ((3845, 3864), 'numpy.array', 'np.array', (['[[1, -1]]'], {}), '([[1, -1]])\n', (3853, 3864), True, 'import numpy as np\n'), ((3895, 3916), 'numpy.array', 'np.array', (['[[1], [-1]]'], {}), '([[1], [-1]])\n', (3903, 3916), True, 'import numpy as np\n')]
|
import BboxToolkit as bt
import pickle
import copy
import numpy as np
path1="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl"
path2="/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl"#
with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl
data2 = pickle.load(f)
with open(path1,'rb') as f:
obbdets = pickle.load(f)
polydets=copy.deepcopy(obbdets)
for i in range(len(obbdets)):
for j in range(len(obbdets[0][1])):
data=obbdets[i][1][j]
        if data.size != 0:
polys=[]
for k in range(len(data)):
poly = bt.obb2poly(data[k][0:5])
poly=np.append(poly,data[k][5])
polys.append(poly)
else:
polys=[]
polydets[i][1][j]=polys
savepath="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/"
for i in range(len(polydets)):
txtfile=savepath+polydets[i][0]+".txt"
f = open(txtfile, "w")
for j in range(len(polydets[0][1])):
if polydets[i][1][j]!=[]:
for k in range(len(polydets[i][1][j])):
f.write(str(polydets[i][1][j][k][0])+" "+
str(polydets[i][1][j][k][1])+" "+
str(polydets[i][1][j][k][2])+" "+
str(polydets[i][1][j][k][3])+" "+
str(polydets[i][1][j][k][4])+" "+
str(polydets[i][1][j][k][5])+" "+
str(polydets[i][1][j][k][6])+" "+
str(polydets[i][1][j][k][7])+" "+
str(data2["cls"][j])+" "+
str(polydets[i][1][j][k][8])+"\n")
f.close()
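# Added descriptive note: each line written above has the form
#   x1 y1 x2 y2 x3 y3 x4 y4 class_name score
# i.e. the eight polygon corner coordinates from bt.obb2poly, the category name
# taken from the annotation file (data2["cls"]), and the detection confidence last.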
|
[
"numpy.append",
"BboxToolkit.obb2poly",
"pickle.load",
"copy.deepcopy"
] |
[((344, 358), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (355, 358), False, 'import pickle\n'), ((402, 416), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (413, 416), False, 'import pickle\n'), ((430, 452), 'copy.deepcopy', 'copy.deepcopy', (['obbdets'], {}), '(obbdets)\n', (443, 452), False, 'import copy\n'), ((662, 687), 'BboxToolkit.obb2poly', 'bt.obb2poly', (['data[k][0:5]'], {}), '(data[k][0:5])\n', (673, 687), True, 'import BboxToolkit as bt\n'), ((709, 736), 'numpy.append', 'np.append', (['poly', 'data[k][5]'], {}), '(poly, data[k][5])\n', (718, 736), True, 'import numpy as np\n')]
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_grid_addremove():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.remove_grid()
f.add_grid()
f.close()
def test_grid_showhide():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.hide()
f.grid.show()
f.close()
def test_grid_spacing():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_xspacing(1.)
f.grid.set_xspacing('tick')
with pytest.raises(ValueError):
f.grid.set_xspacing('auto')
f.grid.set_yspacing(2.)
f.grid.set_yspacing('tick')
with pytest.raises(ValueError):
f.grid.set_yspacing('auto')
f.close()
def test_grid_color():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_color('black')
f.grid.set_color('#003344')
f.grid.set_color((1.0, 0.4, 0.3))
f.close()
def test_grid_alpha():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_alpha(0.0)
f.grid.set_alpha(0.3)
f.grid.set_alpha(1.0)
f.close()
def test_grid_linestyle():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linestyle('solid')
f.grid.set_linestyle('dashed')
f.grid.set_linestyle('dotted')
f.close()
def test_grid_linewidth():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linewidth(0)
f.grid.set_linewidth(2)
f.grid.set_linewidth(5)
f.close()
|
[
"matplotlib.use",
"numpy.zeros",
"astropy.tests.helper.pytest.raises"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((167, 185), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (175, 185), True, 'import numpy as np\n'), ((318, 336), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (326, 336), True, 'import numpy as np\n'), ((467, 485), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (475, 485), True, 'import numpy as np\n'), ((842, 860), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (850, 860), True, 'import numpy as np\n'), ((1053, 1071), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1061, 1071), True, 'import numpy as np\n'), ((1246, 1264), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1254, 1264), True, 'import numpy as np\n'), ((1465, 1483), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1473, 1483), True, 'import numpy as np\n'), ((597, 622), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (610, 622), False, 'from astropy.tests.helper import pytest\n'), ((729, 754), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (742, 754), False, 'from astropy.tests.helper import pytest\n')]
|
# microsig
"""
Author: <NAME>
More detail about the MicroSIG can be found at:
Website:
https://gitlab.com/defocustracking/microsig-python
Publication:
Rossi M, Synthetic image generator for defocusing and astigmatic PIV/PTV, Meas. Sci. Technol., 31, 017003 (2020)
DOI:10.1088/1361-6501/ab42bb.
"""
import numpy as np
import imageio
import tkinter as tk
import os
from os import listdir
from os.path import isfile, basename, join, isdir
import sys
import glob
# import time as tm
from tkinter import filedialog
# ----- code adapted by <NAME> ------
# 2.0 define class
class CurlypivMicrosigCollection(object):
def __init__(self, testSetup, synCol, use_gui=False,
use_internal_setting=False, setting_file=None,
use_internal_data=False, data_files=None,
to_internal_sequence=False, destination_folder=None,
output_dtype='np.uint16'):
if not isinstance(testSetup, object):
raise ValueError("{} must be a CurlypivTestSetup class object".format(testSetup))
if not isinstance(synCol, object):
raise ValueError("{} must be a CurlypivSyntheticCollection class object".format(synCol))
valid_output_dtype = ['np.uint16', 'np.uint8']
if output_dtype not in valid_output_dtype:
raise ValueError("{} must be one of {}".format(output_dtype, valid_output_dtype))
self.testSetup = testSetup
self.synCol = synCol
self.use_gui = use_gui
self.output_dtype = output_dtype
if self.use_gui:
run()
else:
if use_internal_setting:
self.setting_file = self.synCol.microsigSetup
else:
if not isinstance(setting_file, str):
raise ValueError("{} must be a filepath to microsig settings text file".format(setting_file))
self.setting_file = os.path.abspath(setting_file)
if use_internal_data:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(data_files, str):
raise ValueError("{} must be a filepath to particle location text files".format(data_files))
all_files = glob.glob(data_files + '/*.txt')
save_files = []
for ff in [f for f in all_files if f.endswith('.txt')]:
save_files.append(ff)
save_files.sort()
self.data_files = save_files
if to_internal_sequence:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(destination_folder, str):
raise ValueError("{} must be a filepath to write output images".format(destination_folder))
self.destination_folder = os.path.abspath(destination_folder)
self.generate()
def generate(self):
# %%
mic = {}
f = open(self.setting_file)
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
ii = 0;
ii_tot = len(self.data_files)
for data in self.data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
I = take_image(mic, P)
if self.output_dtype == 'np.uint16':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint16(I))
elif self.output_dtype == 'np.uint8':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint8(I))
print('done!')
# %%
def sorter(f):
sorting = int(f[:-4])
return sorting
def run():
# %%
root = tk.Tk()
root.attributes('-topmost', True)
root.withdraw()
setting_file = filedialog.askopenfilenames(
title="Select settings file", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
data_files = filedialog.askopenfilenames(
title="Select data file(s)", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
destination_folder = filedialog.askdirectory(
title="Select destination file", parent=root)
if not setting_file:
sys.exit('input file not valid')
# %%
mic = {}
f = open(setting_file[0])
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
ii = 0;
ii_tot = len(data_files)
for data in data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
        I = take_image(mic, P)
        # save the generated image, mirroring CurlypivMicrosigCollection.generate()
        imageio.imwrite(os.path.join(destination_folder, (tail[:-3] + 'tif')),
                        np.uint16(I))
    print('done!')
# %%
def take_image(mic, P):
    # NOTE: x and xp here represent light fields and should not be confused
    # with particle image coordinates, which are represented by P
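    # Added descriptive note on the particle matrix P (one particle per row):
    #   P[:, 0:2] -> x, y image position in pixels, P[:, 2] -> defocus depth z
    #   P[:, 3]   -> particle diameter
    #   P[:, 4:7] -> (optional) eccentricity and orientation angles alpha, beta for ellipsoids
    #   last column (when P has 5 or 8 columns) -> relative intensity factor k_id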
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']));
dp_s = np.unique(P[:, 3])
if P.shape[1] == 5 or P.shape[1] == 8:
k_id = P[:, -1]
else:
k_id = np.ones(P.shape[0])
if P.shape[1] <= 5 and dp_s.size == 1:
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(dp_s * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(dp_s, n_points, mic['n_rays'])
for ii in range(0, P.shape[0]):
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] <= 5 and dp_s.size != 1:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(P[ii, 3], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] >= 7:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
ecc = P[ii, 4]
if ecc > 1:
# area elipsoid/area sphere
fact = 1 / 2 * (1 + ecc / np.sqrt(1 - 1 / ecc ** 2)
* np.arcsin(np.sqrt(1 - 1 / ecc ** 2)))
n_points = int(np.round(fact * n_points))
elif ecc < 1:
# area elipsoid/area sphere
fact = 1 / 2 * (1 + ecc ** 2 / np.sqrt(1 - ecc ** 2)
* np.arctan(np.sqrt(1 - ecc ** 2)))
n_points = int(np.round(fact * n_points))
xp = create_ellipsoid(P[ii, 3:7], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3]);
I = I + Id * k_id[ii]
I = I * mic['gain']
if mic['background_mean'] != 0:
I = I + mic['background_mean']
if mic['background_noise'] != 0:
Irand = np.random.normal(0, mic['background_noise'],
(mic['pixel_dim_y'], mic['pixel_dim_x']))
I = I + np.round(Irand)
# I = np.round(I+random('norm',0,mic.background_noise,...
# mic.pixel_dim_y,mic.pixel_dim_x));
return I
# %%
def image_spherical(mic, xp, P1):
# take image of a particle with a spherical lens
    # NOTE: x and xp here represent light fields and should not be confused
    # with particle image coordinates, which are represented by P1
lens_radius = (np.tan(np.arcsin(mic['numerical_aperture']))
* (1 + 1 / mic['magnification']) * mic['focal_length'])
# distance lens-ccd
dCCD = -mic['focal_length'] * (mic['magnification'] + 1);
# distance particle-lens
dPART = P1[2] + mic['focal_length'] * (1 / mic['magnification'] + 1);
# linear transformation from the object plane to the lens plane
T2 = np.array([[1, 0, dPART, 0],
[0, 1, 0, dPART],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field right before the lens
x = np.linalg.inv(T2) @ xp
# remove rays outside of the lens aperture
ind = x[0, :] ** 2 + x[1, :] ** 2 <= lens_radius ** 2
x = x[:, ind]
# transformation of the light field with spherical lens
a = x[0, :];
b = x[1, :]
c = x[2, :];
d = x[3, :]
# radius of curvature of the lens
rk = mic['focal_length'] * (mic['ri_lens'] / mic['ri_medium'] - 1) * 2
dum = a * 0
# refraction medium-lens
    # ray-vector before the lens
Vr = np.vstack((1 + dum, c, d))
Vr = (Vr / np.tile(np.sqrt(sum(Vr ** 2)), (3, 1)))
# normal-vector to the lens surface
Vl = np.vstack((rk + dum, a, b))
Vl = (Vl / np.tile(np.sqrt(sum(Vl ** 2)), (3, 1)))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_medium'] / mic['ri_lens'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector inside the lens
Vr11 = (Vl * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# refraction lens-medium
# normal-vector to the lens surface
Vl2 = np.vstack((Vl[0, :], -Vl[1:, :]))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl2, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl2, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl2, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_lens'] / mic['ri_medium'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector outside the lens
Vr11 = (Vl2 * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# light field after the spherical lens
x[2, :] = Vr[1, :]
x[3, :] = Vr[2, :]
if mic['cyl_focal_length'] == 0:
# linear transformation from the lens plane to the ccd plane
T1 = np.array([[1, 0, -dCCD, 0],
[0, 1, 0, -dCCD],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field at the ccd plane
xs = np.linalg.inv(T1) @ x
else:
# # linear transformation from the lens plane to the cyl_lens plane
T1c = np.array([[1, 0, -dCCD * 1 / 3, 0],
[0, 1, 0, -dCCD * 1 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# # light field at the cylindrical lens plane
xc = np.linalg.inv(T1c) @ x
# # light field after the cylindrical lens plane
Tc = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[-1 / mic['cyl_focal_length'], 0, 1, 0],
[0, 0, 0, 1]])
xc_a = np.linalg.inv(Tc) @ xc
# # light field at the ccd plane
T1 = np.array([[1, 0, -dCCD * 2 / 3, 0],
[0, 1, 0, -dCCD * 2 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]]);
# # light field at the ccd plane
xs = np.linalg.inv(T1) @ xc_a
# transform the position in pixel units
X = np.round(xs[0, :] / mic['pixel_size'] + P1[0])
Y = np.round(xs[1, :] / mic['pixel_size'] + P1[1])
# remove rays outside the CCD
ind = np.all([X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'],
X.imag == 0, Y.imag == 0], axis=0)
# count number of rays in each pixel
countXY = np.sort(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])
indi, ia = np.unique(countXY, return_index=True)
nCounts = np.hstack((ia[1:], countXY.size + 1)) - ia
# prepare image
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']))
Ifr = I.flatten('F')
Ifr[indi.astype(int) - 1] = nCounts
I = Ifr.reshape(mic['pixel_dim_y'], mic['pixel_dim_x'], order='F')
return I
# %%
def create_particle(D, Ns, Nr):
R = D / 2
V = spiral_sphere(Ns)
V[0:2, V[0, :] > 0] = -V[0:2, V[0, :] > 0]
x = R * V[0, :]
y = R * V[1, :]
z = R * V[2, :]
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
# %%
def create_ellipsoid(Deab, Ns, Nr):
D = Deab[0];
ecc = Deab[1]
alpha = Deab[2];
beta = Deab[3]
R = D / 2
V = spiral_sphere(Ns)
V = R * V
V[2, :] = V[2, :] * ecc
R_beta = np.array([[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)]])
R_alpha = np.array([[np.cos(alpha), -np.sin(alpha), 0],
[np.sin(alpha), np.cos(alpha), 0],
[0, 0, 1]])
Vf = R_alpha @ (R_beta @ V)
ii1 = (Vf[1, :] == np.min(Vf[1, :])).nonzero()[0][0]
ii2 = (Vf[1, :] == np.max(Vf[1, :])).nonzero()[0][0]
ii3 = (Vf[2, :] == np.min(Vf[2, :])).nonzero()[0][0]
ii4 = (Vf[2, :] == np.max(Vf[2, :])).nonzero()[0][0]
Vdum = Vf[:, [ii1, ii2, ii3, ii4]]
A = np.c_[Vdum[1, :], Vdum[2, :], np.ones(Vdum.shape[1])]
C, _, _, _ = np.linalg.lstsq(A, Vdum[0, :], rcond=None)
V1dum = C[0] * Vf[1, :] + C[1] * Vf[2, :] + C[2]
ind = (Vf[0, :] - V1dum) < 0
x = Vf[0, ind]
y = Vf[1, ind]
z = Vf[2, ind]
Ns = z.size
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
# %%
def spiral_sphere(N):
gr = (1 + np.sqrt(5)) / 2 # golden ratio
ga = 2 * np.pi * (1 - 1 / gr) # golden angle
ind_p = np.arange(0, N) # particle (i.e., point sample) index
lat = np.arccos(1 - 2 * ind_p / (
N - 1)) # latitude is defined so that particle index is proportional to surface area between 0 and lat
lon = ind_p * ga # position particles at even intervals along longitude
# Convert from spherical to Cartesian co-ordinates
x = np.sin(lat) * np.cos(lon)
y = np.sin(lat) * np.sin(lon)
z = np.cos(lat)
V = np.vstack((x, y, z))
return V
# %%
if __name__ == '__main__':
run()
|
[
"numpy.uint8",
"tkinter.filedialog.askdirectory",
"numpy.arccos",
"numpy.sqrt",
"numpy.hstack",
"numpy.array",
"sys.exit",
"numpy.sin",
"numpy.genfromtxt",
"numpy.arange",
"numpy.cross",
"numpy.sort",
"os.path.split",
"numpy.max",
"tkinter.filedialog.askopenfilenames",
"numpy.vstack",
"numpy.linalg.lstsq",
"numpy.min",
"numpy.round",
"glob.glob",
"numpy.random.normal",
"numpy.tile",
"numpy.arctan",
"numpy.uint16",
"numpy.ones",
"numpy.cos",
"numpy.unique",
"numpy.arcsin",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"tkinter.Tk",
"numpy.linalg.inv",
"numpy.random.uniform",
"os.path.abspath",
"numpy.all"
] |
[((4177, 4184), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4182, 4184), True, 'import tkinter as tk\n'), ((4263, 4395), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', ([], {'title': '"""Select settings file"""', 'parent': 'root', 'filetypes': "(('txt files', '*.txt'), ('all files', '*.*'))"}), "(title='Select settings file', parent=root,\n filetypes=(('txt files', '*.txt'), ('all files', '*.*')))\n", (4290, 4395), False, 'from tkinter import filedialog\n'), ((4494, 4625), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', ([], {'title': '"""Select data file(s)"""', 'parent': 'root', 'filetypes': "(('txt files', '*.txt'), ('all files', '*.*'))"}), "(title='Select data file(s)', parent=root,\n filetypes=(('txt files', '*.txt'), ('all files', '*.*')))\n", (4521, 4625), False, 'from tkinter import filedialog\n'), ((4732, 4801), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'title': '"""Select destination file"""', 'parent': 'root'}), "(title='Select destination file', parent=root)\n", (4755, 4801), False, 'from tkinter import filedialog\n'), ((5683, 5733), 'numpy.zeros', 'np.zeros', (["(mic['pixel_dim_y'], mic['pixel_dim_x'])"], {}), "((mic['pixel_dim_y'], mic['pixel_dim_x']))\n", (5691, 5733), True, 'import numpy as np\n'), ((5747, 5765), 'numpy.unique', 'np.unique', (['P[:, 3]'], {}), '(P[:, 3])\n', (5756, 5765), True, 'import numpy as np\n'), ((8732, 8806), 'numpy.array', 'np.array', (['[[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (8740, 8806), True, 'import numpy as np\n'), ((9382, 9408), 'numpy.vstack', 'np.vstack', (['(1 + dum, c, d)'], {}), '((1 + dum, c, d))\n', (9391, 9408), True, 'import numpy as np\n'), ((9513, 9540), 'numpy.vstack', 'np.vstack', (['(rk + dum, a, b)'], {}), '((rk + dum, a, b))\n', (9522, 9540), True, 'import numpy as np\n'), ((9648, 9682), 'numpy.cross', 'np.cross', (['Vr', 'Vl'], {'axisa': '(0)', 'axisb': '(0)'}), '(Vr, Vl, axisa=0, axisb=0)\n', (9656, 9682), True, 'import numpy as np\n'), ((9850, 9873), 'numpy.sum', 'np.sum', (['(Vr * Vl)'], {'axis': '(0)'}), '(Vr * Vl, axis=0)\n', (9856, 9873), True, 'import numpy as np\n'), ((9899, 9924), 'numpy.sum', 'np.sum', (['(Vr * Vrot)'], {'axis': '(0)'}), '(Vr * Vrot, axis=0)\n', (9905, 9924), True, 'import numpy as np\n'), ((10306, 10339), 'numpy.vstack', 'np.vstack', (['(Vl[0, :], -Vl[1:, :])'], {}), '((Vl[0, :], -Vl[1:, :]))\n', (10315, 10339), True, 'import numpy as np\n'), ((10392, 10427), 'numpy.cross', 'np.cross', (['Vr', 'Vl2'], {'axisa': '(0)', 'axisb': '(0)'}), '(Vr, Vl2, axisa=0, axisb=0)\n', (10400, 10427), True, 'import numpy as np\n'), ((10596, 10620), 'numpy.sum', 'np.sum', (['(Vr * Vl2)'], {'axis': '(0)'}), '(Vr * Vl2, axis=0)\n', (10602, 10620), True, 'import numpy as np\n'), ((10646, 10671), 'numpy.sum', 'np.sum', (['(Vr * Vrot)'], {'axis': '(0)'}), '(Vr * Vrot, axis=0)\n', (10652, 10671), True, 'import numpy as np\n'), ((12418, 12464), 'numpy.round', 'np.round', (["(xs[0, :] / mic['pixel_size'] + P1[0])"], {}), "(xs[0, :] / mic['pixel_size'] + P1[0])\n", (12426, 12464), True, 'import numpy as np\n'), ((12473, 12519), 'numpy.round', 'np.round', (["(xs[1, :] / mic['pixel_size'] + P1[1])"], {}), "(xs[1, :] / mic['pixel_size'] + P1[1])\n", (12481, 12519), True, 'import numpy as np\n'), ((12565, 12676), 'numpy.all', 'np.all', (["[X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'], X.imag == \n 0, Y.imag == 0]"], {'axis': '(0)'}), "([X 
> 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'], X.\n imag == 0, Y.imag == 0], axis=0)\n", (12571, 12676), True, 'import numpy as np\n'), ((12746, 12797), 'numpy.sort', 'np.sort', (["(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])"], {}), "(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])\n", (12753, 12797), True, 'import numpy as np\n'), ((12813, 12850), 'numpy.unique', 'np.unique', (['countXY'], {'return_index': '(True)'}), '(countXY, return_index=True)\n', (12822, 12850), True, 'import numpy as np\n'), ((12937, 12987), 'numpy.zeros', 'np.zeros', (["(mic['pixel_dim_y'], mic['pixel_dim_x'])"], {}), "((mic['pixel_dim_y'], mic['pixel_dim_x']))\n", (12945, 12987), True, 'import numpy as np\n'), ((13386, 13405), 'numpy.tile', 'np.tile', (['x', '(Nr, 1)'], {}), '(x, (Nr, 1))\n', (13393, 13405), True, 'import numpy as np\n'), ((13414, 13433), 'numpy.tile', 'np.tile', (['y', '(Nr, 1)'], {}), '(y, (Nr, 1))\n', (13421, 13433), True, 'import numpy as np\n'), ((13473, 13513), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', 'z.size'], {}), '(-np.pi, np.pi, z.size)\n', (13490, 13513), True, 'import numpy as np\n'), ((13523, 13534), 'numpy.cos', 'np.cos', (['phs'], {}), '(phs)\n', (13529, 13534), True, 'import numpy as np\n'), ((13544, 13555), 'numpy.sin', 'np.sin', (['phs'], {}), '(phs)\n', (13550, 13555), True, 'import numpy as np\n'), ((14902, 14944), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'Vdum[0, :]'], {'rcond': 'None'}), '(A, Vdum[0, :], rcond=None)\n', (14917, 14944), True, 'import numpy as np\n'), ((15165, 15184), 'numpy.tile', 'np.tile', (['x', '(Nr, 1)'], {}), '(x, (Nr, 1))\n', (15172, 15184), True, 'import numpy as np\n'), ((15193, 15212), 'numpy.tile', 'np.tile', (['y', '(Nr, 1)'], {}), '(y, (Nr, 1))\n', (15200, 15212), True, 'import numpy as np\n'), ((15252, 15292), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', 'z.size'], {}), '(-np.pi, np.pi, z.size)\n', (15269, 15292), True, 'import numpy as np\n'), ((15302, 15313), 'numpy.cos', 'np.cos', (['phs'], {}), '(phs)\n', (15308, 15313), True, 'import numpy as np\n'), ((15323, 15334), 'numpy.sin', 'np.sin', (['phs'], {}), '(phs)\n', (15329, 15334), True, 'import numpy as np\n'), ((15932, 15947), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (15941, 15947), True, 'import numpy as np\n'), ((15997, 16031), 'numpy.arccos', 'np.arccos', (['(1 - 2 * ind_p / (N - 1))'], {}), '(1 - 2 * ind_p / (N - 1))\n', (16006, 16031), True, 'import numpy as np\n'), ((16354, 16365), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (16360, 16365), True, 'import numpy as np\n'), ((16374, 16394), 'numpy.vstack', 'np.vstack', (['(x, y, z)'], {}), '((x, y, z))\n', (16383, 16394), True, 'import numpy as np\n'), ((4443, 4475), 'sys.exit', 'sys.exit', (['"""input file not valid"""'], {}), "('input file not valid')\n", (4451, 4475), False, 'import sys\n'), ((4673, 4705), 'sys.exit', 'sys.exit', (['"""input file not valid"""'], {}), "('input file not valid')\n", (4681, 4705), False, 'import sys\n'), ((4845, 4877), 'sys.exit', 'sys.exit', (['"""input file not valid"""'], {}), "('input file not valid')\n", (4853, 4877), False, 'import sys\n'), ((5327, 5346), 'numpy.genfromtxt', 'np.genfromtxt', (['data'], {}), '(data)\n', (5340, 5346), True, 'import numpy as np\n'), ((5429, 5448), 'os.path.split', 'os.path.split', (['data'], {}), '(data)\n', (5442, 5448), False, 'import os\n'), ((5858, 5877), 'numpy.ones', 'np.ones', (['P.shape[0]'], {}), '(P.shape[0])\n', (5865, 5877), True, 'import numpy as np\n'), ((7801, 
7892), 'numpy.random.normal', 'np.random.normal', (['(0)', "mic['background_noise']", "(mic['pixel_dim_y'], mic['pixel_dim_x'])"], {}), "(0, mic['background_noise'], (mic['pixel_dim_y'], mic[\n 'pixel_dim_x']))\n", (7817, 7892), True, 'import numpy as np\n'), ((8913, 8930), 'numpy.linalg.inv', 'np.linalg.inv', (['T2'], {}), '(T2)\n', (8926, 8930), True, 'import numpy as np\n'), ((10199, 10226), 'numpy.tile', 'np.tile', (['Vr11[0, :]', '(3, 1)'], {}), '(Vr11[0, :], (3, 1))\n', (10206, 10226), True, 'import numpy as np\n'), ((10949, 10976), 'numpy.tile', 'np.tile', (['Vr11[0, :]', '(3, 1)'], {}), '(Vr11[0, :], (3, 1))\n', (10956, 10976), True, 'import numpy as np\n'), ((11190, 11264), 'numpy.array', 'np.array', (['[[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (11198, 11264), True, 'import numpy as np\n'), ((11519, 11613), 'numpy.array', 'np.array', (['[[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0], [0, 0, 0, 1]\n ]'], {}), '([[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0],\n [0, 0, 0, 1]])\n', (11527, 11613), True, 'import numpy as np\n'), ((11856, 11954), 'numpy.array', 'np.array', (["[[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, 0], [0, 0,\n 0, 1]]"], {}), "([[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, \n 0], [0, 0, 0, 1]])\n", (11864, 11954), True, 'import numpy as np\n'), ((12118, 12212), 'numpy.array', 'np.array', (['[[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0], [0, 0, 0, 1]\n ]'], {}), '([[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0],\n [0, 0, 0, 1]])\n', (12126, 12212), True, 'import numpy as np\n'), ((12865, 12902), 'numpy.hstack', 'np.hstack', (['(ia[1:], countXY.size + 1)'], {}), '((ia[1:], countXY.size + 1))\n', (12874, 12902), True, 'import numpy as np\n'), ((13597, 13657), 'numpy.array', 'np.array', (['[[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]'], {}), '([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]])\n', (13605, 13657), True, 'import numpy as np\n'), ((15376, 15436), 'numpy.array', 'np.array', (['[[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]'], {}), '([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]])\n', (15384, 15436), True, 'import numpy as np\n'), ((16286, 16297), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (16292, 16297), True, 'import numpy as np\n'), ((16300, 16311), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (16306, 16311), True, 'import numpy as np\n'), ((16320, 16331), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (16326, 16331), True, 'import numpy as np\n'), ((16334, 16345), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (16340, 16345), True, 'import numpy as np\n'), ((3513, 3532), 'numpy.genfromtxt', 'np.genfromtxt', (['data'], {}), '(data)\n', (3526, 3532), True, 'import numpy as np\n'), ((3627, 3646), 'os.path.split', 'os.path.split', (['data'], {}), '(data)\n', (3640, 3646), False, 'import os\n'), ((5393, 5406), 'numpy.array', 'np.array', (['[P]'], {}), '([P])\n', (5401, 5406), True, 'import numpy as np\n'), ((5946, 6052), 'numpy.round', 'np.round', (["(mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] / mic[\n 'pixel_size']) ** 2)"], {}), "(mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] /\n mic['pixel_size']) ** 2)\n", (5954, 6052), True, 'import numpy as np\n'), ((7937, 7952), 'numpy.round', 'np.round', (['Irand'], {}), '(Irand)\n', (7945, 7952), True, 'import numpy as np\n'), 
((9694, 9730), 'numpy.cross', 'np.cross', (['Vrot', 'Vl'], {'axisa': '(1)', 'axisb': '(0)'}), '(Vrot, Vl, axisa=1, axisb=0)\n', (9702, 9730), True, 'import numpy as np\n'), ((10439, 10476), 'numpy.cross', 'np.cross', (['Vrot', 'Vl2'], {'axisa': '(1)', 'axisb': '(0)'}), '(Vrot, Vl2, axisa=1, axisb=0)\n', (10447, 10476), True, 'import numpy as np\n'), ((11390, 11407), 'numpy.linalg.inv', 'np.linalg.inv', (['T1'], {}), '(T1)\n', (11403, 11407), True, 'import numpy as np\n'), ((11756, 11774), 'numpy.linalg.inv', 'np.linalg.inv', (['T1c'], {}), '(T1c)\n', (11769, 11774), True, 'import numpy as np\n'), ((12034, 12051), 'numpy.linalg.inv', 'np.linalg.inv', (['Tc'], {}), '(Tc)\n', (12047, 12051), True, 'import numpy as np\n'), ((12340, 12357), 'numpy.linalg.inv', 'np.linalg.inv', (['T1'], {}), '(T1)\n', (12353, 12357), True, 'import numpy as np\n'), ((14861, 14883), 'numpy.ones', 'np.ones', (['Vdum.shape[1]'], {}), '(Vdum.shape[1])\n', (14868, 14883), True, 'import numpy as np\n'), ((15837, 15847), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (15844, 15847), True, 'import numpy as np\n'), ((1924, 1953), 'os.path.abspath', 'os.path.abspath', (['setting_file'], {}), '(setting_file)\n', (1939, 1953), False, 'import os\n'), ((2286, 2318), 'glob.glob', 'glob.glob', (["(data_files + '/*.txt')"], {}), "(data_files + '/*.txt')\n", (2295, 2318), False, 'import glob\n'), ((2899, 2934), 'os.path.abspath', 'os.path.abspath', (['destination_folder'], {}), '(destination_folder)\n', (2914, 2934), False, 'import os\n'), ((3587, 3600), 'numpy.array', 'np.array', (['[P]'], {}), '([P])\n', (3595, 3600), True, 'import numpy as np\n'), ((8352, 8388), 'numpy.arcsin', 'np.arcsin', (["mic['numerical_aperture']"], {}), "(mic['numerical_aperture'])\n", (8361, 8388), True, 'import numpy as np\n'), ((10026, 10044), 'numpy.arctan', 'np.arctan', (['(vy / vx)'], {}), '(vy / vx)\n', (10035, 10044), True, 'import numpy as np\n'), ((10109, 10121), 'numpy.cos', 'np.cos', (['th11'], {}), '(th11)\n', (10115, 10121), True, 'import numpy as np\n'), ((10160, 10172), 'numpy.sin', 'np.sin', (['th11'], {}), '(th11)\n', (10166, 10172), True, 'import numpy as np\n'), ((10774, 10792), 'numpy.arctan', 'np.arctan', (['(vy / vx)'], {}), '(vy / vx)\n', (10783, 10792), True, 'import numpy as np\n'), ((10859, 10871), 'numpy.cos', 'np.cos', (['th11'], {}), '(th11)\n', (10865, 10871), True, 'import numpy as np\n'), ((10910, 10922), 'numpy.sin', 'np.sin', (['th11'], {}), '(th11)\n', (10916, 10922), True, 'import numpy as np\n'), ((14242, 14254), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (14248, 14254), True, 'import numpy as np\n'), ((14259, 14271), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (14265, 14271), True, 'import numpy as np\n'), ((14350, 14362), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (14356, 14362), True, 'import numpy as np\n'), ((14391, 14404), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (14397, 14404), True, 'import numpy as np\n'), ((14451, 14464), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (14457, 14464), True, 'import numpy as np\n'), ((14466, 14479), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (14472, 14479), True, 'import numpy as np\n'), ((3764, 3820), 'os.path.join', 'os.path.join', (['self.destination_folder', "(tail[:-3] + 'tif')"], {}), "(self.destination_folder, tail[:-3] + 'tif')\n", (3776, 3820), False, 'import os\n'), ((3852, 3864), 'numpy.uint16', 'np.uint16', (['I'], {}), '(I)\n', (3861, 3864), True, 'import numpy as np\n'), ((6385, 6496), 'numpy.round', 
'np.round', (["(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] /\n mic['pixel_size']) ** 2)"], {}), "(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic[\n 'magnification'] / mic['pixel_size']) ** 2)\n", (6393, 6496), True, 'import numpy as np\n'), ((14333, 14345), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (14339, 14345), True, 'import numpy as np\n'), ((14407, 14420), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (14413, 14420), True, 'import numpy as np\n'), ((3948, 4004), 'os.path.join', 'os.path.join', (['self.destination_folder', "(tail[:-3] + 'tif')"], {}), "(self.destination_folder, tail[:-3] + 'tif')\n", (3960, 4004), False, 'import os\n'), ((4036, 4047), 'numpy.uint8', 'np.uint8', (['I'], {}), '(I)\n', (4044, 4047), True, 'import numpy as np\n'), ((6782, 6893), 'numpy.round', 'np.round', (["(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] /\n mic['pixel_size']) ** 2)"], {}), "(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic[\n 'magnification'] / mic['pixel_size']) ** 2)\n", (6790, 6893), True, 'import numpy as np\n'), ((14578, 14594), 'numpy.min', 'np.min', (['Vf[1, :]'], {}), '(Vf[1, :])\n', (14584, 14594), True, 'import numpy as np\n'), ((14635, 14651), 'numpy.max', 'np.max', (['Vf[1, :]'], {}), '(Vf[1, :])\n', (14641, 14651), True, 'import numpy as np\n'), ((14692, 14708), 'numpy.min', 'np.min', (['Vf[2, :]'], {}), '(Vf[2, :])\n', (14698, 14708), True, 'import numpy as np\n'), ((14749, 14765), 'numpy.max', 'np.max', (['Vf[2, :]'], {}), '(Vf[2, :])\n', (14755, 14765), True, 'import numpy as np\n'), ((7193, 7218), 'numpy.round', 'np.round', (['(fact * n_points)'], {}), '(fact * n_points)\n', (7201, 7218), True, 'import numpy as np\n'), ((7458, 7483), 'numpy.round', 'np.round', (['(fact * n_points)'], {}), '(fact * n_points)\n', (7466, 7483), True, 'import numpy as np\n'), ((7064, 7089), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / ecc ** 2)'], {}), '(1 - 1 / ecc ** 2)\n', (7071, 7089), True, 'import numpy as np\n'), ((7134, 7159), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / ecc ** 2)'], {}), '(1 - 1 / ecc ** 2)\n', (7141, 7159), True, 'import numpy as np\n'), ((7337, 7358), 'numpy.sqrt', 'np.sqrt', (['(1 - ecc ** 2)'], {}), '(1 - ecc ** 2)\n', (7344, 7358), True, 'import numpy as np\n'), ((7403, 7424), 'numpy.sqrt', 'np.sqrt', (['(1 - ecc ** 2)'], {}), '(1 - ecc ** 2)\n', (7410, 7424), True, 'import numpy as np\n')]
|
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [-5.4895292543686764, 14.790571669586654, 82.5546687431056, 29.15531114139253, -4.1316769886951761, -13.002076438907238],
'yp': [-35.948168839230306, -42.273376159885785, -28.845467523197698, 52.03426124197, 36.832712600868973, 40.792291220556734],
'min_JK': 16.8314150305,
'max_JK': 95}
cm_data = [[ 5.03832136e-02, 2.98028976e-02, 5.27974883e-01],
[ 6.35363639e-02, 2.84259729e-02, 5.33123681e-01],
[ 7.53531234e-02, 2.72063728e-02, 5.38007001e-01],
[ 8.62217979e-02, 2.61253206e-02, 5.42657691e-01],
[ 9.63786097e-02, 2.51650976e-02, 5.47103487e-01],
[ 1.05979704e-01, 2.43092436e-02, 5.51367851e-01],
[ 1.15123641e-01, 2.35562500e-02, 5.55467728e-01],
[ 1.23902903e-01, 2.28781011e-02, 5.59423480e-01],
[ 1.32380720e-01, 2.22583774e-02, 5.63250116e-01],
[ 1.40603076e-01, 2.16866674e-02, 5.66959485e-01],
[ 1.48606527e-01, 2.11535876e-02, 5.70561711e-01],
[ 1.56420649e-01, 2.06507174e-02, 5.74065446e-01],
[ 1.64069722e-01, 2.01705326e-02, 5.77478074e-01],
[ 1.71573925e-01, 1.97063415e-02, 5.80805890e-01],
[ 1.78950212e-01, 1.92522243e-02, 5.84054243e-01],
[ 1.86212958e-01, 1.88029767e-02, 5.87227661e-01],
[ 1.93374449e-01, 1.83540593e-02, 5.90329954e-01],
[ 2.00445260e-01, 1.79015512e-02, 5.93364304e-01],
[ 2.07434551e-01, 1.74421086e-02, 5.96333341e-01],
[ 2.14350298e-01, 1.69729276e-02, 5.99239207e-01],
[ 2.21196750e-01, 1.64970484e-02, 6.02083323e-01],
[ 2.27982971e-01, 1.60071509e-02, 6.04867403e-01],
[ 2.34714537e-01, 1.55015065e-02, 6.07592438e-01],
[ 2.41396253e-01, 1.49791041e-02, 6.10259089e-01],
[ 2.48032377e-01, 1.44393586e-02, 6.12867743e-01],
[ 2.54626690e-01, 1.38820918e-02, 6.15418537e-01],
[ 2.61182562e-01, 1.33075156e-02, 6.17911385e-01],
[ 2.67702993e-01, 1.27162163e-02, 6.20345997e-01],
[ 2.74190665e-01, 1.21091423e-02, 6.22721903e-01],
[ 2.80647969e-01, 1.14875915e-02, 6.25038468e-01],
[ 2.87076059e-01, 1.08554862e-02, 6.27294975e-01],
[ 2.93477695e-01, 1.02128849e-02, 6.29490490e-01],
[ 2.99855122e-01, 9.56079551e-03, 6.31623923e-01],
[ 3.06209825e-01, 8.90185346e-03, 6.33694102e-01],
[ 3.12543124e-01, 8.23900704e-03, 6.35699759e-01],
[ 3.18856183e-01, 7.57551051e-03, 6.37639537e-01],
[ 3.25150025e-01, 6.91491734e-03, 6.39512001e-01],
[ 3.31425547e-01, 6.26107379e-03, 6.41315649e-01],
[ 3.37683446e-01, 5.61830889e-03, 6.43048936e-01],
[ 3.43924591e-01, 4.99053080e-03, 6.44710195e-01],
[ 3.50149699e-01, 4.38202557e-03, 6.46297711e-01],
[ 3.56359209e-01, 3.79781761e-03, 6.47809772e-01],
[ 3.62553473e-01, 3.24319591e-03, 6.49244641e-01],
[ 3.68732762e-01, 2.72370721e-03, 6.50600561e-01],
[ 3.74897270e-01, 2.24514897e-03, 6.51875762e-01],
[ 3.81047116e-01, 1.81356205e-03, 6.53068467e-01],
[ 3.87182639e-01, 1.43446923e-03, 6.54176761e-01],
[ 3.93304010e-01, 1.11388259e-03, 6.55198755e-01],
[ 3.99410821e-01, 8.59420809e-04, 6.56132835e-01],
[ 4.05502914e-01, 6.78091517e-04, 6.56977276e-01],
[ 4.11580082e-01, 5.77101735e-04, 6.57730380e-01],
[ 4.17642063e-01, 5.63847476e-04, 6.58390492e-01],
[ 4.23688549e-01, 6.45902780e-04, 6.58956004e-01],
[ 4.29719186e-01, 8.31008207e-04, 6.59425363e-01],
[ 4.35733575e-01, 1.12705875e-03, 6.59797077e-01],
[ 4.41732123e-01, 1.53984779e-03, 6.60069009e-01],
[ 4.47713600e-01, 2.07954744e-03, 6.60240367e-01],
[ 4.53677394e-01, 2.75470302e-03, 6.60309966e-01],
[ 4.59622938e-01, 3.57374415e-03, 6.60276655e-01],
[ 4.65549631e-01, 4.54518084e-03, 6.60139383e-01],
[ 4.71456847e-01, 5.67758762e-03, 6.59897210e-01],
[ 4.77343929e-01, 6.97958743e-03, 6.59549311e-01],
[ 4.83210198e-01, 8.45983494e-03, 6.59094989e-01],
[ 4.89054951e-01, 1.01269996e-02, 6.58533677e-01],
[ 4.94877466e-01, 1.19897486e-02, 6.57864946e-01],
[ 5.00677687e-01, 1.40550640e-02, 6.57087561e-01],
[ 5.06454143e-01, 1.63333443e-02, 6.56202294e-01],
[ 5.12206035e-01, 1.88332232e-02, 6.55209222e-01],
[ 5.17932580e-01, 2.15631918e-02, 6.54108545e-01],
[ 5.23632990e-01, 2.45316468e-02, 6.52900629e-01],
[ 5.29306474e-01, 2.77468735e-02, 6.51586010e-01],
[ 5.34952244e-01, 3.12170300e-02, 6.50165396e-01],
[ 5.40569510e-01, 3.49501310e-02, 6.48639668e-01],
[ 5.46157494e-01, 3.89540334e-02, 6.47009884e-01],
[ 5.51715423e-01, 4.31364795e-02, 6.45277275e-01],
[ 5.57242538e-01, 4.73307585e-02, 6.43443250e-01],
[ 5.62738096e-01, 5.15448092e-02, 6.41509389e-01],
[ 5.68201372e-01, 5.57776706e-02, 6.39477440e-01],
[ 5.73631859e-01, 6.00281369e-02, 6.37348841e-01],
[ 5.79028682e-01, 6.42955547e-02, 6.35126108e-01],
[ 5.84391137e-01, 6.85790261e-02, 6.32811608e-01],
[ 5.89718606e-01, 7.28775875e-02, 6.30407727e-01],
[ 5.95010505e-01, 7.71902878e-02, 6.27916992e-01],
[ 6.00266283e-01, 8.15161895e-02, 6.25342058e-01],
[ 6.05485428e-01, 8.58543713e-02, 6.22685703e-01],
[ 6.10667469e-01, 9.02039303e-02, 6.19950811e-01],
[ 6.15811974e-01, 9.45639838e-02, 6.17140367e-01],
[ 6.20918555e-01, 9.89336721e-02, 6.14257440e-01],
[ 6.25986869e-01, 1.03312160e-01, 6.11305174e-01],
[ 6.31016615e-01, 1.07698641e-01, 6.08286774e-01],
[ 6.36007543e-01, 1.12092335e-01, 6.05205491e-01],
[ 6.40959444e-01, 1.16492495e-01, 6.02064611e-01],
[ 6.45872158e-01, 1.20898405e-01, 5.98867442e-01],
[ 6.50745571e-01, 1.25309384e-01, 5.95617300e-01],
[ 6.55579615e-01, 1.29724785e-01, 5.92317494e-01],
[ 6.60374266e-01, 1.34143997e-01, 5.88971318e-01],
[ 6.65129493e-01, 1.38566428e-01, 5.85582301e-01],
[ 6.69845385e-01, 1.42991540e-01, 5.82153572e-01],
[ 6.74522060e-01, 1.47418835e-01, 5.78688247e-01],
[ 6.79159664e-01, 1.51847851e-01, 5.75189431e-01],
[ 6.83758384e-01, 1.56278163e-01, 5.71660158e-01],
[ 6.88318440e-01, 1.60709387e-01, 5.68103380e-01],
[ 6.92840088e-01, 1.65141174e-01, 5.64521958e-01],
[ 6.97323615e-01, 1.69573215e-01, 5.60918659e-01],
[ 7.01769334e-01, 1.74005236e-01, 5.57296144e-01],
[ 7.06177590e-01, 1.78437000e-01, 5.53656970e-01],
[ 7.10548747e-01, 1.82868306e-01, 5.50003579e-01],
[ 7.14883195e-01, 1.87298986e-01, 5.46338299e-01],
[ 7.19181339e-01, 1.91728906e-01, 5.42663338e-01],
[ 7.23443604e-01, 1.96157962e-01, 5.38980786e-01],
[ 7.27670428e-01, 2.00586086e-01, 5.35292612e-01],
[ 7.31862231e-01, 2.05013174e-01, 5.31600995e-01],
[ 7.36019424e-01, 2.09439071e-01, 5.27908434e-01],
[ 7.40142557e-01, 2.13863965e-01, 5.24215533e-01],
[ 7.44232102e-01, 2.18287899e-01, 5.20523766e-01],
[ 7.48288533e-01, 2.22710942e-01, 5.16834495e-01],
[ 7.52312321e-01, 2.27133187e-01, 5.13148963e-01],
[ 7.56303937e-01, 2.31554749e-01, 5.09468305e-01],
[ 7.60263849e-01, 2.35975765e-01, 5.05793543e-01],
[ 7.64192516e-01, 2.40396394e-01, 5.02125599e-01],
[ 7.68090391e-01, 2.44816813e-01, 4.98465290e-01],
[ 7.71957916e-01, 2.49237220e-01, 4.94813338e-01],
[ 7.75795522e-01, 2.53657797e-01, 4.91170517e-01],
[ 7.79603614e-01, 2.58078397e-01, 4.87539124e-01],
[ 7.83382636e-01, 2.62499662e-01, 4.83917732e-01],
[ 7.87132978e-01, 2.66921859e-01, 4.80306702e-01],
[ 7.90855015e-01, 2.71345267e-01, 4.76706319e-01],
[ 7.94549101e-01, 2.75770179e-01, 4.73116798e-01],
[ 7.98215577e-01, 2.80196901e-01, 4.69538286e-01],
[ 8.01854758e-01, 2.84625750e-01, 4.65970871e-01],
[ 8.05466945e-01, 2.89057057e-01, 4.62414580e-01],
[ 8.09052419e-01, 2.93491117e-01, 4.58869577e-01],
[ 8.12611506e-01, 2.97927865e-01, 4.55337565e-01],
[ 8.16144382e-01, 3.02368130e-01, 4.51816385e-01],
[ 8.19651255e-01, 3.06812282e-01, 4.48305861e-01],
[ 8.23132309e-01, 3.11260703e-01, 4.44805781e-01],
[ 8.26587706e-01, 3.15713782e-01, 4.41315901e-01],
[ 8.30017584e-01, 3.20171913e-01, 4.37835947e-01],
[ 8.33422053e-01, 3.24635499e-01, 4.34365616e-01],
[ 8.36801237e-01, 3.29104836e-01, 4.30905052e-01],
[ 8.40155276e-01, 3.33580106e-01, 4.27454836e-01],
[ 8.43484103e-01, 3.38062109e-01, 4.24013059e-01],
[ 8.46787726e-01, 3.42551272e-01, 4.20579333e-01],
[ 8.50066132e-01, 3.47048028e-01, 4.17153264e-01],
[ 8.53319279e-01, 3.51552815e-01, 4.13734445e-01],
[ 8.56547103e-01, 3.56066072e-01, 4.10322469e-01],
[ 8.59749520e-01, 3.60588229e-01, 4.06916975e-01],
[ 8.62926559e-01, 3.65119408e-01, 4.03518809e-01],
[ 8.66077920e-01, 3.69660446e-01, 4.00126027e-01],
[ 8.69203436e-01, 3.74211795e-01, 3.96738211e-01],
[ 8.72302917e-01, 3.78773910e-01, 3.93354947e-01],
[ 8.75376149e-01, 3.83347243e-01, 3.89975832e-01],
[ 8.78422895e-01, 3.87932249e-01, 3.86600468e-01],
[ 8.81442916e-01, 3.92529339e-01, 3.83228622e-01],
[ 8.84435982e-01, 3.97138877e-01, 3.79860246e-01],
[ 8.87401682e-01, 4.01761511e-01, 3.76494232e-01],
[ 8.90339687e-01, 4.06397694e-01, 3.73130228e-01],
[ 8.93249647e-01, 4.11047871e-01, 3.69767893e-01],
[ 8.96131191e-01, 4.15712489e-01, 3.66406907e-01],
[ 8.98983931e-01, 4.20391986e-01, 3.63046965e-01],
[ 9.01807455e-01, 4.25086807e-01, 3.59687758e-01],
[ 9.04601295e-01, 4.29797442e-01, 3.56328796e-01],
[ 9.07364995e-01, 4.34524335e-01, 3.52969777e-01],
[ 9.10098088e-01, 4.39267908e-01, 3.49610469e-01],
[ 9.12800095e-01, 4.44028574e-01, 3.46250656e-01],
[ 9.15470518e-01, 4.48806744e-01, 3.42890148e-01],
[ 9.18108848e-01, 4.53602818e-01, 3.39528771e-01],
[ 9.20714383e-01, 4.58417420e-01, 3.36165582e-01],
[ 9.23286660e-01, 4.63250828e-01, 3.32800827e-01],
[ 9.25825146e-01, 4.68103387e-01, 3.29434512e-01],
[ 9.28329275e-01, 4.72975465e-01, 3.26066550e-01],
[ 9.30798469e-01, 4.77867420e-01, 3.22696876e-01],
[ 9.33232140e-01, 4.82779603e-01, 3.19325444e-01],
[ 9.35629684e-01, 4.87712357e-01, 3.15952211e-01],
[ 9.37990034e-01, 4.92666544e-01, 3.12575440e-01],
[ 9.40312939e-01, 4.97642038e-01, 3.09196628e-01],
[ 9.42597771e-01, 5.02639147e-01, 3.05815824e-01],
[ 9.44843893e-01, 5.07658169e-01, 3.02433101e-01],
[ 9.47050662e-01, 5.12699390e-01, 2.99048555e-01],
[ 9.49217427e-01, 5.17763087e-01, 2.95662308e-01],
[ 9.51343530e-01, 5.22849522e-01, 2.92274506e-01],
[ 9.53427725e-01, 5.27959550e-01, 2.88883445e-01],
[ 9.55469640e-01, 5.33093083e-01, 2.85490391e-01],
[ 9.57468770e-01, 5.38250172e-01, 2.82096149e-01],
[ 9.59424430e-01, 5.43431038e-01, 2.78700990e-01],
[ 9.61335930e-01, 5.48635890e-01, 2.75305214e-01],
[ 9.63202573e-01, 5.53864931e-01, 2.71909159e-01],
[ 9.65023656e-01, 5.59118349e-01, 2.68513200e-01],
[ 9.66798470e-01, 5.64396327e-01, 2.65117752e-01],
[ 9.68525639e-01, 5.69699633e-01, 2.61721488e-01],
[ 9.70204593e-01, 5.75028270e-01, 2.58325424e-01],
[ 9.71835007e-01, 5.80382015e-01, 2.54931256e-01],
[ 9.73416145e-01, 5.85761012e-01, 2.51539615e-01],
[ 9.74947262e-01, 5.91165394e-01, 2.48151200e-01],
[ 9.76427606e-01, 5.96595287e-01, 2.44766775e-01],
[ 9.77856416e-01, 6.02050811e-01, 2.41387186e-01],
[ 9.79232922e-01, 6.07532077e-01, 2.38013359e-01],
[ 9.80556344e-01, 6.13039190e-01, 2.34646316e-01],
[ 9.81825890e-01, 6.18572250e-01, 2.31287178e-01],
[ 9.83040742e-01, 6.24131362e-01, 2.27937141e-01],
[ 9.84198924e-01, 6.29717516e-01, 2.24595006e-01],
[ 9.85300760e-01, 6.35329876e-01, 2.21264889e-01],
[ 9.86345421e-01, 6.40968508e-01, 2.17948456e-01],
[ 9.87332067e-01, 6.46633475e-01, 2.14647532e-01],
[ 9.88259846e-01, 6.52324832e-01, 2.11364122e-01],
[ 9.89127893e-01, 6.58042630e-01, 2.08100426e-01],
[ 9.89935328e-01, 6.63786914e-01, 2.04858855e-01],
[ 9.90681261e-01, 6.69557720e-01, 2.01642049e-01],
[ 9.91364787e-01, 6.75355082e-01, 1.98452900e-01],
[ 9.91984990e-01, 6.81179025e-01, 1.95294567e-01],
[ 9.92540939e-01, 6.87029567e-01, 1.92170500e-01],
[ 9.93031693e-01, 6.92906719e-01, 1.89084459e-01],
[ 9.93456302e-01, 6.98810484e-01, 1.86040537e-01],
[ 9.93813802e-01, 7.04740854e-01, 1.83043180e-01],
[ 9.94103226e-01, 7.10697814e-01, 1.80097207e-01],
[ 9.94323596e-01, 7.16681336e-01, 1.77207826e-01],
[ 9.94473934e-01, 7.22691379e-01, 1.74380656e-01],
[ 9.94553260e-01, 7.28727890e-01, 1.71621733e-01],
[ 9.94560594e-01, 7.34790799e-01, 1.68937522e-01],
[ 9.94494964e-01, 7.40880020e-01, 1.66334918e-01],
[ 9.94355411e-01, 7.46995448e-01, 1.63821243e-01],
[ 9.94140989e-01, 7.53136955e-01, 1.61404226e-01],
[ 9.93850778e-01, 7.59304390e-01, 1.59091984e-01],
[ 9.93482190e-01, 7.65498551e-01, 1.56890625e-01],
[ 9.93033251e-01, 7.71719833e-01, 1.54807583e-01],
[ 9.92505214e-01, 7.77966775e-01, 1.52854862e-01],
[ 9.91897270e-01, 7.84239120e-01, 1.51041581e-01],
[ 9.91208680e-01, 7.90536569e-01, 1.49376885e-01],
[ 9.90438793e-01, 7.96858775e-01, 1.47869810e-01],
[ 9.89587065e-01, 8.03205337e-01, 1.46529128e-01],
[ 9.88647741e-01, 8.09578605e-01, 1.45357284e-01],
[ 9.87620557e-01, 8.15977942e-01, 1.44362644e-01],
[ 9.86509366e-01, 8.22400620e-01, 1.43556679e-01],
[ 9.85314198e-01, 8.28845980e-01, 1.42945116e-01],
[ 9.84031139e-01, 8.35315360e-01, 1.42528388e-01],
[ 9.82652820e-01, 8.41811730e-01, 1.42302653e-01],
[ 9.81190389e-01, 8.48328902e-01, 1.42278607e-01],
[ 9.79643637e-01, 8.54866468e-01, 1.42453425e-01],
[ 9.77994918e-01, 8.61432314e-01, 1.42808191e-01],
[ 9.76264977e-01, 8.68015998e-01, 1.43350944e-01],
[ 9.74443038e-01, 8.74622194e-01, 1.44061156e-01],
[ 9.72530009e-01, 8.81250063e-01, 1.44922913e-01],
[ 9.70532932e-01, 8.87896125e-01, 1.45918663e-01],
[ 9.68443477e-01, 8.94563989e-01, 1.47014438e-01],
[ 9.66271225e-01, 9.01249365e-01, 1.48179639e-01],
[ 9.64021057e-01, 9.07950379e-01, 1.49370428e-01],
[ 9.61681481e-01, 9.14672479e-01, 1.50520343e-01],
[ 9.59275646e-01, 9.21406537e-01, 1.51566019e-01],
[ 9.56808068e-01, 9.28152065e-01, 1.52409489e-01],
[ 9.54286813e-01, 9.34907730e-01, 1.52921158e-01],
[ 9.51726083e-01, 9.41670605e-01, 1.52925363e-01],
[ 9.49150533e-01, 9.48434900e-01, 1.52177604e-01],
[ 9.46602270e-01, 9.55189860e-01, 1.50327944e-01],
[ 9.44151742e-01, 9.61916487e-01, 1.46860789e-01],
[ 9.41896120e-01, 9.68589814e-01, 1.40955606e-01],
[ 9.40015097e-01, 9.75158357e-01, 1.31325517e-01]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
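    # Optional usage sketch (an addition, not part of the generated file):
    # register the colormap so it can be looked up by name in later plotting
    # calls. Assumes matplotlib >= 3.5, where the ``matplotlib.colormaps``
    # registry exists; the registered name "test_cm" is an arbitrary choice.
    import matplotlib as mpl
    if hasattr(mpl, "colormaps"):
        mpl.colormaps.register(test_cm, name="test_cm")
        print("registered colormap 'test_cm'; use cmap='test_cm' in plotting calls")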
|
[
"viscm.viscm",
"numpy.linspace",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show"
] |
[((16621, 16673), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['__file__', 'cm_data'], {}), '(__file__, cm_data)\n', (16654, 16673), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((17022, 17032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17030, 17032), True, 'import matplotlib.pyplot as plt\n'), ((16812, 16826), 'viscm.viscm', 'viscm', (['test_cm'], {}), '(test_cm)\n', (16817, 16826), False, 'from viscm import viscm\n'), ((16935, 16959), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(256)'], {}), '(0, 100, 256)\n', (16946, 16959), True, 'import numpy as np\n')]
|
"""
Author: <NAME>
"""
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
def affinity_graph(X):
'''
    Return a dense (ni, ni) numpy array of pairwise squared Euclidean
    distances. A vectorized equivalent is sketched below.
'''
ni, nd = X.shape
A = np.zeros((ni, ni))
for i in range(ni):
for j in range(i+1, ni):
            dist = ((X[i] - X[j])**2).sum()  # squared Euclidean (L2) distance
A[i][j] = dist
A[j][i] = dist # by symmetry
return A
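# Illustrative alternative (an addition, not part of the original module): the
# same matrix of pairwise squared Euclidean distances can be computed without
# the Python double loop by broadcasting ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2*xi.xj,
# which scales much better with ni. Function name is an arbitrary choice.
def _affinity_graph_vectorized(X):
    sq = (X ** 2).sum(axis=1)
    A = sq[:, None] + sq[None, :] - 2.0 * (X @ X.T)
    A = np.maximum(A, 0.0)        # clip tiny negative values from round-off
    np.fill_diagonal(A, 0.0)
    return A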
def knn_graph(X, knn=4):
'''
    Return a dense (ni, ni) numpy array in which each point is connected to
    its knn nearest neighbors, weighted by Euclidean distance.
'''
ni, nd = X.shape
nbrs = NearestNeighbors(n_neighbors=(knn+1), algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
A = np.zeros((ni, ni))
    for dist, ind in zip(distances, indices):
        i0 = ind[0]  # the nearest neighbor of a point is the point itself
        for i in range(1, knn + 1):
            j = ind[i]  # index of the i-th nearest neighbor
            d = dist[i]
            A[i0, j] = d
            A[j, i0] = d  # by symmetry
return A
def sparse_affinity_graph(X):
'''
    TODO: return a scipy.sparse matrix. For now this returns the same dense
    (ni, ni) array as affinity_graph; a sparse version is sketched below.
'''
ni, nd = X.shape
A = np.zeros((ni, ni))
for i in range(ni):
for j in range(i+1, ni):
            dist = ((X[i] - X[j])**2).sum()  # squared Euclidean (L2) distance
A[i][j] = dist
A[j][i] = dist # by symmetry
return A
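# Illustrative sketch of the TODO above (an addition, not part of the original
# module): build the affinity information as a scipy.sparse matrix, keeping only
# pairs closer than ``cutoff`` so the result is genuinely sparse. Assumes scipy
# is available; the function name and cutoff value are arbitrary choices.
def _sparse_affinity_graph_sketch(X, cutoff=1.0):
    from scipy import sparse
    ni, nd = X.shape
    rows, cols, vals = [], [], []
    for i in range(ni):
        for j in range(i + 1, ni):
            dist = ((X[i] - X[j]) ** 2).sum()   # squared Euclidean distance
            if dist <= cutoff:                  # keep only nearby pairs
                rows += [i, j]
                cols += [j, i]
                vals += [dist, dist]            # symmetric entries
    return sparse.csr_matrix((vals, (rows, cols)), shape=(ni, ni))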
def laplacian_graph(X, mode='affinity', knn=3, eta=0.01, sigma=2.5):
'''
The unnormalized graph Laplacian, L = D − W.
'''
if mode == 'affinity':
W = affinity_graph(X)
W[abs(W) > eta] = 0
elif mode == 'nearestneighbor':
W = knn_graph(X, knn=knn)
    elif mode == 'gaussian':
        W = affinity_graph(X)  # squared distances d_ij**2
        bandwidth = 2.0*(sigma**2)
        W = np.exp(-W / bandwidth)  # Gaussian kernel exp(-d_ij**2 / (2*sigma**2))
    else:
        raise ValueError("unknown mode: %s" % mode)
D = np.diag(W.sum(axis=1))
L = D - W
return L
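# Minimal usage sketch (an addition, not part of the original module): build the
# unnormalized Laplacian for a small random point cloud and check its defining
# properties -- L is symmetric and every row of L = D - W sums to zero.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(20, 2))
    L_demo = laplacian_graph(X_demo, mode='nearestneighbor', knn=3)
    assert np.allclose(L_demo, L_demo.T)
    assert np.allclose(L_demo.sum(axis=1), 0.0)
    print("Laplacian checks passed:", L_demo.shape)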
|
[
"numpy.exp",
"numpy.zeros",
"sklearn.neighbors.NearestNeighbors"
] |
[((208, 226), 'numpy.zeros', 'np.zeros', (['(ni, ni)'], {}), '((ni, ni))\n', (216, 226), True, 'import numpy as np\n'), ((606, 624), 'numpy.zeros', 'np.zeros', (['(ni, ni)'], {}), '((ni, ni))\n', (614, 624), True, 'import numpy as np\n'), ((896, 914), 'numpy.zeros', 'np.zeros', (['(ni, ni)'], {}), '((ni, ni))\n', (904, 914), True, 'import numpy as np\n'), ((492, 552), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(knn + 1)', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=knn + 1, algorithm='ball_tree')\n", (508, 552), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1422, 1431), 'numpy.exp', 'np.exp', (['W'], {}), '(W)\n', (1428, 1431), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from pytest import approx
from pymt.component.grid import GridMixIn
class Port:
def __init__(self, name, uses=None, provides=None):
self._name = name
self._uses = uses or []
self._provides = provides or []
def get_component_name(self):
return self._name
def get_input_item_count(self):
return len(self._uses)
def get_input_item_list(self):
return self._uses
def get_output_item_count(self):
return len(self._provides)
def get_output_item_list(self):
return self._provides
def test_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.input_items == ["invar"]
assert c.output_items == ["outvar"]
def test_no_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test")
super().__init__()
c = Component()
assert c.input_items == []
assert c.output_items == []
def test_raster_1d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (3,)
def get_grid_spacing(self, grid_id):
return (2.0,)
def get_grid_origin(self, grid_id):
return (3.0,)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x("invar") == approx(np.array([3.0, 5.0, 7.0]))
def test_raster_2d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_spacing(self, grid_id):
return (2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-2d", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.name == "test-2d"
assert c.get_grid_type(0) == "RASTER"
assert c.get_x(0) == approx(np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]))
assert c.get_y(0) == approx(np.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]))
assert np.all(c.get_connectivity(0) == np.array([0, 1, 4, 3, 1, 2, 5, 4]))
assert np.all(c.get_offset(0) == np.array([4, 8]))
def test_raster_3d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 2, 3)
def get_grid_spacing(self, grid_id):
return (1.0, 2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-3d", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x(0) == approx(
np.array(
[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]
)
)
assert c.get_y(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]
)
)
assert c.get_z(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]
)
)
def test_rectilinear():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return (0.0, 3.0, 4)
def get_grid_y(self, grid_id):
return (2.0, 7.0)
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
assert c.get_x(0) == approx(np.array([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]))
assert c.get_y(0) == approx(np.array([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]))
def test_structured():
class StructuredPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])
class Component(GridMixIn):
def __init__(self):
self._port = StructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "STRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0]))
def test_unstructured():
class UnstructuredPort(Port):
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 0.0, 1.0, 1.0, 0.0])
def get_grid_connectivity(self, grid_id):
return np.array([0, 1, 3, 2, 4, 3, 1])
def get_grid_offset(self, grid_id):
return np.array([4, 7])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 0.0, 1.0, 1.0, 0.0]))
def test_get_grid_shape_is_none():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
return None
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_get_grid_shape_raises():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
raise NotImplementedError("get_grid_shape")
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_structured_1d():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
raise NotImplementedError("get_grid_y")
def get_grid_z(self, grid_id):
raise NotImplementedError("get_grid_z")
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
with pytest.raises(IndexError):
c.get_z(0)
|
[
"numpy.array",
"pytest.raises"
] |
[((7100, 7125), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (7113, 7125), False, 'import pytest\n'), ((1613, 1638), 'numpy.array', 'np.array', (['[3.0, 5.0, 7.0]'], {}), '([3.0, 5.0, 7.0])\n', (1621, 1638), True, 'import numpy as np\n'), ((2214, 2258), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]'], {}), '([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])\n', (2222, 2258), True, 'import numpy as np\n'), ((2292, 2336), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]'], {}), '([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])\n', (2300, 2336), True, 'import numpy as np\n'), ((2381, 2415), 'numpy.array', 'np.array', (['[0, 1, 4, 3, 1, 2, 5, 4]'], {}), '([0, 1, 4, 3, 1, 2, 5, 4])\n', (2389, 2415), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.array', 'np.array', (['[4, 8]'], {}), '([4, 8])\n', (2462, 2470), True, 'import numpy as np\n'), ((2974, 3061), 'numpy.array', 'np.array', (['[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]'], {}), '([[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, \n 2.0]]])\n', (2982, 3061), True, 'import numpy as np\n'), ((3126, 3213), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]'], {}), '([[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, \n 2.0]]])\n', (3134, 3213), True, 'import numpy as np\n'), ((3278, 3365), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]'], {}), '([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, \n 1.0]]])\n', (3286, 3365), True, 'import numpy as np\n'), ((3917, 3961), 'numpy.array', 'np.array', (['[[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]'], {}), '([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]])\n', (3925, 3961), True, 'import numpy as np\n'), ((3995, 4039), 'numpy.array', 'np.array', (['[[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]'], {}), '([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]])\n', (4003, 4039), True, 'import numpy as np\n'), ((4226, 4266), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])\n', (4234, 4266), True, 'import numpy as np\n'), ((4326, 4366), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 1.0, 2.0, 3.0]'], {}), '([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])\n', (4334, 4366), True, 'import numpy as np\n'), ((4622, 4662), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])\n', (4630, 4662), True, 'import numpy as np\n'), ((4696, 4736), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 1.0, 2.0, 3.0]'], {}), '([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])\n', (4704, 4736), True, 'import numpy as np\n'), ((4857, 4892), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 2.0])\n', (4865, 4892), True, 'import numpy as np\n'), ((4952, 4987), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 0.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 0.0])\n', (4960, 4987), True, 'import numpy as np\n'), ((5058, 5089), 'numpy.array', 'np.array', (['[0, 1, 3, 2, 4, 3, 1]'], {}), '([0, 1, 3, 2, 4, 3, 1])\n', (5066, 5089), True, 'import numpy as np\n'), ((5154, 5170), 'numpy.array', 'np.array', (['[4, 7]'], {}), '([4, 7])\n', (5162, 5170), True, 'import numpy as np\n'), ((5430, 5465), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 2.0])\n', (5438, 5465), True, 'import numpy as np\n'), ((5499, 5534), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 0.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 0.0])\n', (5507, 5534), True, 'import 
numpy as np\n'), ((5733, 5758), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (5741, 5758), True, 'import numpy as np\n'), ((6214, 6239), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (6222, 6239), True, 'import numpy as np\n'), ((6656, 6681), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (6664, 6681), True, 'import numpy as np\n')]
|