# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import onnx
import numpy as np
from onnx.helper import make_graph, make_model, make_tensor_value_info
import pytest
from openvino.frontend import FrontEndManager
from tests.runtime import get_runtime
def create_onnx_model():
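    # Builds a small graph computing out = (x + y) * [[0.5, 1], [1.5, 2]] on 2x2 float inputs.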
add = onnx.helper.make_node("Add", inputs=["x", "y"], outputs=["z"])
const_tensor = onnx.helper.make_tensor("const_tensor",
onnx.TensorProto.FLOAT,
(2, 2),
[0.5, 1, 1.5, 2.0])
const_node = onnx.helper.make_node("Constant", [], outputs=["const_node"],
value=const_tensor, name="const_node")
mul = onnx.helper.make_node("Mul", inputs=["z", "const_node"], outputs=["out"])
input_tensors = [
make_tensor_value_info("x", onnx.TensorProto.FLOAT, (2, 2)),
make_tensor_value_info("y", onnx.TensorProto.FLOAT, (2, 2)),
]
output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))]
graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors)
return make_model(graph, producer_name="ngraph ONNX Importer")
def create_onnx_model_with_subgraphs():
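    # Builds a model with an If node: the output is A + B when `cond` is true, A - B otherwise.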
A = onnx.helper.make_tensor_value_info("A", onnx.TensorProto.FLOAT, [3])
B = onnx.helper.make_tensor_value_info("B", onnx.TensorProto.FLOAT, [3])
add_out = onnx.helper.make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, [3])
sub_out = onnx.helper.make_tensor_value_info("sub_out", onnx.TensorProto.FLOAT, [3])
add = onnx.helper.make_node("Add", inputs=["A", "B"], outputs=["add_out"])
sub = onnx.helper.make_node("Sub", inputs=["A", "B"], outputs=["sub_out"])
then_body = make_graph([add], "then_body", [], [add_out])
else_body = make_graph([sub], "else_body", [], [sub_out])
if_node = onnx.helper.make_node(
"If",
inputs=["cond"],
outputs=["res"],
then_branch=then_body,
else_branch=else_body
)
cond = onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, [])
res = onnx.helper.make_tensor_value_info("res", onnx.TensorProto.FLOAT, [3])
graph = make_graph([if_node], "graph", [cond, A, B], [res])
return make_model(graph, producer_name="ngraph ONNX Importer")
def create_onnx_model_with_custom_attributes():
add = onnx.helper.make_node("Add", inputs=["x", "y"], outputs=["z"],
attribute_i32=np.int32(10),
attribute_i64=np.int64(10),
attribute_str="string",
                                attribute_f32=float(10),
                                attribute_f64=np.float64(10),
                                attribute_bool=bool(True),
attribute_type=onnx.TensorProto.INT32,
attribute_list_i32=np.array([1, 2, 3], dtype=np.int32),
attribute_list_i64=np.array([1, 2, 3], dtype=np.int64),
                                attribute_list_str=np.array(["a", "b", "c"], dtype=str),
                                attribute_list_f32=np.array([1, 2, 3], dtype=float),
attribute_list_f64=np.array([1, 2, 3], dtype=np.float64),
attribute_list_bool=[True, False, True],
attribute_list_type=np.array([onnx.TensorProto.INT32,
onnx.TensorProto.FLOAT]),
)
const_tensor = onnx.helper.make_tensor("const_tensor",
onnx.TensorProto.FLOAT,
(2, 2),
[0.5, 1, 1.5, 2.0])
const_node = onnx.helper.make_node("Constant", [], outputs=["const_node"],
value=const_tensor, name="const_node")
mul = onnx.helper.make_node("Mul", inputs=["z", "const_node"], outputs=["out"])
input_tensors = [
make_tensor_value_info("x", onnx.TensorProto.FLOAT, (2, 2)),
make_tensor_value_info("y", onnx.TensorProto.FLOAT, (2, 2)),
]
output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))]
graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors)
return make_model(graph, producer_name="ngraph ONNX Importer")
def run_function(function, *inputs, expected):
runtime = get_runtime()
computation = runtime.computation(function)
actual = computation(*inputs)
assert len(actual) == len(expected)
for i in range(len(actual)):
np.testing.assert_allclose(expected[i], actual[i], rtol=1e-3, atol=1e-6)
# The FrontEndManager must be created before the tests and destroyed only after
# all of them have finished: destroying it unloads all frontend plugins, and no
# frontend objects may outlive it.
fem = FrontEndManager()
onnx_model_filename = "model.onnx"
onnx_model_with_custom_attributes_filename = "model_custom_attributes.onnx"
onnx_model_with_subgraphs_filename = "model_subgraphs.onnx"
ONNX_FRONTEND_NAME = "onnx"
def setup_module():
onnx.save_model(create_onnx_model(), onnx_model_filename)
onnx.save_model(create_onnx_model_with_custom_attributes(),
onnx_model_with_custom_attributes_filename)
onnx.save_model(create_onnx_model_with_subgraphs(), onnx_model_with_subgraphs_filename)
def teardown_module():
os.remove(onnx_model_filename)
os.remove(onnx_model_with_custom_attributes_filename)
os.remove(onnx_model_with_subgraphs_filename)
def skip_if_onnx_frontend_is_disabled():
front_ends = fem.get_available_front_ends()
if ONNX_FRONTEND_NAME not in front_ends:
pytest.skip()
def test_convert():
skip_if_onnx_frontend_is_disabled()
fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)
assert fe
model = fe.load(onnx_model_filename)
assert model
function = fe.convert(model)
assert function
a = np.array([[1, 2], [3, 4]], dtype=np.float32)
b = np.array([[2, 3], [4, 5]], dtype=np.float32)
expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)
run_function(function, a, b, expected=[expected])
@pytest.mark.parametrize("model_filename, inputs, expected", [
[onnx_model_filename,
[np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[2, 3], [4, 5]], dtype=np.float32)],
np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)],
[onnx_model_with_subgraphs_filename,
[np.array(False, dtype=bool),
np.array([1, 2, 3], dtype=np.float32),
np.array([2, 3, 5], dtype=np.float32)],
np.array([-1, -1, -2], dtype=np.float32)],
])
def test_decode_and_convert(model_filename, inputs, expected):
skip_if_onnx_frontend_is_disabled()
fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)
assert fe
model = fe.load(model_filename)
assert model
decoded_function = fe.decode(model)
assert decoded_function
for op in decoded_function.get_ordered_ops():
assert op.get_type_name() in ["Parameter", "Constant", "ONNXFrameworkNode",
"ONNXSubgraphFrameworkNode", "Result"]
fe.convert(decoded_function)
assert decoded_function
for op in decoded_function.get_ordered_ops():
assert op.get_type_name() not in ["ONNXFrameworkNode", "ONNXSubgraphFrameworkNode"]
run_function(decoded_function, *inputs, expected=[expected])
def test_load_by_model():
skip_if_onnx_frontend_is_disabled()
fe = fem.load_by_model(onnx_model_filename)
assert fe
assert fe.get_name() == "onnx"
model = fe.load(onnx_model_filename)
assert model
decoded_function = fe.decode(model)
assert decoded_function
assert not fem.load_by_model("test.xx")
assert not fem.load_by_model("onnx.yy")
def test_onnx_conversion_extension_check_attributes():
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend.onnx) import here
from openvino.frontend.onnx import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset8 as ops
# use the model with attributes
fe = fem.load_by_model(onnx_model_with_custom_attributes_filename)
assert fe
assert fe.get_name() == "onnx"
invoked = False
def custom_converter(node: NodeContext):
nonlocal invoked
invoked = True
def check_attribute(context, name, expected_type, expected_value):
assert context.has_attribute(name)
attribute = context.get_attribute(name)
assert type(attribute) == expected_type
assert attribute == expected_value
check_attribute(node, "attribute_i32", int, 10)
check_attribute(node, "attribute_i64", int, 10)
check_attribute(node, "attribute_str", str, "string")
check_attribute(node, "attribute_f32", float, 10.)
check_attribute(node, "attribute_f64", float, 10.)
check_attribute(node, "attribute_bool", int, 1)
check_attribute(node, "attribute_type", int, 6)
check_attribute(node, "attribute_list_i32", list, [1, 2, 3])
check_attribute(node, "attribute_list_i64", list, [1, 2, 3])
check_attribute(node, "attribute_list_str", list, ["a", "b", "c"])
check_attribute(node, "attribute_list_f32", list, [1., 2., 3.])
check_attribute(node, "attribute_list_f64", list, [1., 2., 3.])
check_attribute(node, "attribute_list_bool", list, [1, 0, 1])
check_attribute(node, "attribute_list_type", list, [6, 1])
a = node.get_input(0)
b = node.get_input(1)
add = ops.add(a, b)
return [add.output(0)]
fe.add_extension(ConversionExtension("Add", custom_converter))
input_model = fe.load(onnx_model_with_custom_attributes_filename)
assert input_model
model = fe.convert(input_model)
assert model
assert invoked
def test_onnx_conversion_extension_attribute_with_default_value():
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend.onnx) import here
from openvino.frontend.onnx import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset8 as ops
# use the model without attributes
fe = fem.load_by_model(onnx_model_filename)
assert fe
assert fe.get_name() == "onnx"
invoked = False
def custom_converter(node: NodeContext):
nonlocal invoked
invoked = True
def check_attribute(context, name, default_value):
assert not context.has_attribute(name)
attribute = context.get_attribute(name, default_value)
assert type(attribute) == type(default_value)
if isinstance(attribute, np.ndarray):
assert np.all(attribute == default_value)
else:
assert attribute == default_value
check_attribute(node, "attribute_i32", np.int32(5))
check_attribute(node, "attribute_i64", np.int64(5))
check_attribute(node, "attribute_str", "abc")
check_attribute(node, "attribute_f32", np.float32(5))
check_attribute(node, "attribute_f64", np.float64(5))
check_attribute(node, "attribute_bool", np.bool(False))
check_attribute(node, "attribute_type", onnx.TensorProto.FLOAT)
check_attribute(node, "attribute_list_i32", np.array([4, 5, 6], dtype=np.int32))
check_attribute(node, "attribute_list_i64", np.array([4, 5, 6], dtype=np.int64))
check_attribute(node, "attribute_list_str", np.array(["d", "e", "f"], dtype=np.str))
check_attribute(node, "attribute_list_f32", np.array([4, 5, 6], dtype=np.float))
check_attribute(node, "attribute_list_f64", np.array([4, 5, 6], dtype=np.float64))
check_attribute(node, "attribute_list_bool", np.array([True, False, True], dtype=np.bool))
check_attribute(node, "attribute_list_type", np.array([onnx.TensorProto.INT32,
onnx.TensorProto.FLOAT]))
a = node.get_input(0)
b = node.get_input(1)
add = ops.add(a, b)
return [add.output(0)]
fe.add_extension(ConversionExtension("Add", custom_converter))
input_model = fe.load(onnx_model_filename)
assert input_model
model = fe.convert(input_model)
assert model
assert invoked
def test_onnx_conversion_extension_cast_attributes():
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend.onnx) import here
from openvino.frontend.onnx import ConversionExtension
from openvino.frontend import NodeContext
from openvino.runtime import Type
import openvino.runtime.opset8 as ops
# use the model without attributes
fe = fem.load_by_model(onnx_model_with_custom_attributes_filename)
assert fe
assert fe.get_name() == "onnx"
invoked = False
def custom_converter(node: NodeContext):
nonlocal invoked
invoked = True
def check_attribute(context, name, expected_value, dtype):
attribute = context.get_attribute(name, dtype=dtype)
if isinstance(attribute, list):
assert type(attribute[0]) == dtype
else:
assert type(attribute) == dtype
assert attribute == expected_value
check_attribute(node, "attribute_i32", 10, float)
check_attribute(node, "attribute_i64", 10, float)
check_attribute(node, "attribute_str", "string", np.str)
check_attribute(node, "attribute_f32", 10, int)
check_attribute(node, "attribute_f64", 10, int)
check_attribute(node, "attribute_bool", True, bool)
check_attribute(node, "attribute_type", Type.i32, Type)
check_attribute(node, "attribute_list_i32", [1., 2., 3.], float)
check_attribute(node, "attribute_list_i64", [1., 2., 3.], float)
check_attribute(node, "attribute_list_str", ["a", "b", "c"], np.str)
check_attribute(node, "attribute_list_f32", [1, 2, 3], int)
check_attribute(node, "attribute_list_f64", [1, 2, 3], int)
check_attribute(node, "attribute_list_bool", [True, False, True], bool)
check_attribute(node, "attribute_list_type", [Type.i32, Type.f32], Type)
a = node.get_input(0)
b = node.get_input(1)
add = ops.add(a, b)
return [add.output(0)]
fe.add_extension(ConversionExtension("Add", custom_converter))
input_model = fe.load(onnx_model_with_custom_attributes_filename)
assert input_model
model = fe.convert(input_model)
assert model
assert invoked
def test_onnx_conversion_extension_common():
skip_if_onnx_frontend_is_disabled()
# use common (openvino.frontend) import here
from openvino.frontend import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset8 as ops
fe = fem.load_by_model(onnx_model_filename)
assert fe
assert fe.get_name() == "onnx"
invoked = False
def custom_converter(node: NodeContext):
nonlocal invoked
invoked = True
a = node.get_input(0)
b = node.get_input(1)
add = ops.add(a, b)
return [add.output(0)]
fe.add_extension(ConversionExtension("Add", custom_converter))
input_model = fe.load(onnx_model_filename)
assert input_model
model = fe.convert(input_model)
assert model
assert invoked
def test_onnx_conversion_extension():
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend.onnx) import here
from openvino.frontend.onnx import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset8 as ops
fe = fem.load_by_model(onnx_model_filename)
assert fe
assert fe.get_name() == "onnx"
invoked = False
def custom_converter(node: NodeContext):
nonlocal invoked
invoked = True
a = node.get_input(0)
b = node.get_input(1)
add = ops.add(a, b)
return [add.output(0)]
fe.add_extension(ConversionExtension("Add", custom_converter))
input_model = fe.load(onnx_model_filename)
assert input_model
model = fe.convert(input_model)
assert model
assert invoked
def test_op_extension_via_onnx_extension():
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend.onnx) import here
from openvino.frontend.onnx import OpExtension
from openvino.runtime import Core
ie = Core()
ie.add_extension(OpExtension("FW_OV_OP"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_1"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_2", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"}))
ie.add_extension(OpExtension("OV_OP", "FW_OP_3", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"},
{"ov_attribute_str": "string",
"ov_attribute_int": 4,
"ov_attribute_bool": True,
"ov_attribute_float": 4.,
"ov_attribute_vec_string": ["str1", "str2", "str3"],
"ov_attribute_vec_int": [1, 2, 3, 4, 5, 6, 7],
"ov_attribute_vec_bool": [True, False, True],
"ov_attribute_vec_float": [1., 2., 3., 4., 5., 6., 7.]}))
model = ie.read_model(onnx_model_filename)
assert model
def test_op_extension_via_frontend_extension():
skip_if_onnx_frontend_is_disabled()
    # use common (openvino.frontend) import here
from openvino.frontend import OpExtension
from openvino.runtime import Core
ie = Core()
ie.add_extension(OpExtension("FW_OV_OP"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_1"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_2", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"}))
ie.add_extension(OpExtension("OV_OP", "FW_OP_3", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"},
{"ov_attribute_str": "string",
"ov_attribute_int": 4,
"ov_attribute_bool": True,
"ov_attribute_float": 4.,
"ov_attribute_vec_string": ["str1", "str2", "str3"],
"ov_attribute_vec_int": [1, 2, 3, 4, 5, 6, 7],
"ov_attribute_vec_bool": [True, False, True],
"ov_attribute_vec_float": [1., 2., 3., 4., 5., 6., 7.]}))
model = ie.read_model(onnx_model_filename)
assert model
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
#
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import *
import numpy as np
from transform.base import HomographyTransformer, ShapeT
class Rotation3DTransformer(HomographyTransformer):
"""Transforms (rotation and/or translation) entire images or just points in
3D space. The instance is meant to be immutable because of performance
demands.
"""
def __init__(
self, theta: float = 0, phi: float = 0, gamma: float = 0,
dx: float = 0, dy: float = 0, dz: float = 0
):
"""Constructors the transformer.
:param theta: x-axis rotation angle (in radians)
:param phi: y-axis rotation angle (in radians)
:param gamma: z-axis rotation angle (in radians)
:param dx: translation in x-axis
:param dy: translation in y-axis
:param dz: translation in z-axis
"""
self._theta: float = theta
self._phi: float = phi
self._gamma: float = gamma
self._dx: float = dx
self._dy: float = dy
self._dz: float = dz
self._rotation_mat = self.rotation_matrix(theta, phi, gamma)
@property
def homography(self) -> np.ndarray:
# docstring inherited
return self._rotation_mat
@property
def theta(self) -> float:
"""Returns the x-axis rotation angle (in radians).
"""
return self._theta
@property
def phi(self) -> float:
"""Returns the y-axis rotation angle (in radians).
"""
return self._phi
@property
def gamma(self) -> float:
"""Returns the z-axis rotation angle (in radians).
"""
return self._gamma
@property
def dx(self) -> float:
"""Returns the translation in the x-axis.
"""
return self._dx
@property
def dy(self) -> float:
"""Returns the translation in the y-axis.
"""
return self._dy
@property
def dz(self) -> float:
"""Returns the translation in the z-axis.
"""
return self._dz
    def _build_homography(
        self, img_shape: Optional[ShapeT] = None
    ) -> np.ndarray:
        if img_shape is None:
            raise ValueError("an image shape is required to build the homography")
        return self._build_homography_for_shape(img_shape)
@staticmethod
def projection_2d3d_matrix(img_shape: ShapeT) -> np.ndarray:
return np.float32((
(1, 0, -(img_shape[1] / 2.0)),
(0, 1, -(img_shape[0] / 2.0)),
(0, 0, 1),
(0, 0, 1)
))
@staticmethod
def projection_3d2d_matrix(
img_shape: ShapeT, focal: float
) -> np.ndarray:
return np.float32((
(focal, 0, img_shape[1] / 2.0, 0),
(0, focal, img_shape[0] / 2.0, 0),
(0, 0, 1, 0)
))
@staticmethod
def rotation_matrix(
theta: float = 0, phi: float = 0, gamma: float = 0
) -> np.ndarray:
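        # Compose the per-axis rotations in homogeneous coordinates; the np.dot
        # chain below evaluates to R = Rx(theta) @ Ry(phi) @ Rz(gamma).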
x_rotation_mat = Rotation3DTransformer.x_axis_rotation_matrix(theta)
y_rotation_mat = Rotation3DTransformer.y_axis_rotation_matrix(phi)
z_rotation_mat = Rotation3DTransformer.z_axis_rotation_matrix(gamma)
return np.dot(np.dot(x_rotation_mat, y_rotation_mat), z_rotation_mat)
@staticmethod
def x_axis_rotation_matrix(theta: float = 0) -> np.ndarray:
sin_theta, cos_theta = np.sin(theta), np.cos(theta)
return np.float32((
(1, 0, 0, 0),
(0, cos_theta, -sin_theta, 0),
(0, sin_theta, cos_theta, 0),
(0, 0, 0, 1)
))
@staticmethod
def y_axis_rotation_matrix(phi: float = 0) -> np.ndarray:
sin_phi, cos_phi = np.sin(phi), np.cos(phi)
return np.float32((
(cos_phi, 0, -sin_phi, 0),
(0, 1, 0, 0),
(sin_phi, 0, cos_phi, 0),
(0, 0, 0, 1)
))
@staticmethod
def z_axis_rotation_matrix(gamma: float = 0) -> np.ndarray:
sin_gamma, cos_gamma = np.sin(gamma), np.cos(gamma)
return np.float32((
(cos_gamma, -sin_gamma, 0, 0),
(sin_gamma, cos_gamma, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)
))
@staticmethod
def translation_matrix(
dx: float = 0, dy: float = 0, dz: float = 0
) -> np.ndarray:
return np.float32((
(1, 0, 0, dx),
(0, 1, 0, dy),
(0, 0, 1, dz),
(0, 0, 0, 1)
))
def _build_homography_for_shape(self, output_shape: ShapeT) -> np.ndarray:
focal = self._calc_focal_length(output_shape)
projection_2d3d_mat = self.projection_2d3d_matrix(output_shape)
projection_3d2d_mat = self.projection_3d2d_matrix(output_shape, focal)
translation_mat = self.translation_matrix(self.dx, self.dy, focal)
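        # Chain: lift 2D pixels to 3D homogeneous coordinates, rotate, push the
        # plane out along z by the focal length, then project back to 2D. The
        # product of a 3x4, two 4x4, and a 4x3 matrix is the final 3x3 homography.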
return np.dot(
projection_3d2d_mat,
np.dot(
translation_mat,
np.dot(self._rotation_mat, projection_2d3d_mat)
)
)
def _calc_focal_length(self, output_shape: ShapeT) -> float:
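        # Heuristic: use the norm of the output shape as the viewing distance and
        # scale it by 1 / (2 * sin(gamma)); for gamma == 0 the focal length is the
        # distance itself.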
sin_gamma = np.sin(self._gamma)
dist = np.linalg.norm(output_shape)
focal = dist / (2 * sin_gamma if sin_gamma != 0 else 1)
return focal
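# Usage sketch (an illustration, not part of the original API surface; it only
# relies on the methods defined above):
#
#     import math
#     t = Rotation3DTransformer(theta=math.radians(15), dx=10)
#     H = t._build_homography((480, 640))  # 3x3 homography for a 480x640 image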
import argparse
import lzma
import pickle
import os
import urllib.request
import sys
import zipfile
import numpy as np
import sklearn.metrics
class Dataset:
def __init__(
self,
name="isnt_it_ironic.train.zip",
url="https://ufal.mff.cuni.cz/~straka/courses/npfl129/1920/datasets/",
):
if not os.path.exists(name):
print("Downloading dataset {}...".format(name), file=sys.stderr)
urllib.request.urlretrieve(url + name, filename=name)
# Load the dataset and split it into `data` and `target`.
self.data = []
self.target = []
with zipfile.ZipFile(name, "r") as dataset_file:
with dataset_file.open(name.replace(".zip", ".txt"), "r") as train_file:
for line in train_file:
label, text = line.decode("utf-8").rstrip("\n").split("\t")
self.data.append(text)
self.target.append(int(label))
self.target = np.array(self.target, np.int32)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path", default="isnt_it_ironic.model", type=str, help="Model path"
)
parser.add_argument("--seed", default=42, type=int, help="Random seed")
if __name__ == "__main__":
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
# Load the dataset, downloading it if required
train = Dataset()
# TODO: Train the model.
# TODO: The trained model needs to be saved. All sklearn models can
# be serialized and deserialized using the standard `pickle` module.
# Additionally, we also compress the model.
#
# To save a model, open a target file for binary access, and use
# `pickle.dump` to save the model to the opened file:
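    # A minimal sketch (assuming `model` is the sklearn estimator trained above):
    #
    #     with lzma.open(args.model_path, "wb") as model_file:
    #         pickle.dump(model, model_file)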
def recodex_predict(data):
# The `data` is a Python list containing tweets as `str`ings.
args = parser.parse_args([])
    with lzma.open(args.model_path, "rb") as model_file:
model = pickle.load(model_file)
predictions = model.predict(data)
return predictions.astype(np.int8)
# TODO: Return the predictions as a Python list or Numpy array of
# binary labels of the tweets.
import argparse
import logging
import os
import pprint
import random
import numpy as np
import torch
import torch.optim as optim
from common.dataset import DatasetFactory
from common.evaluation import EvaluatorFactory
from common.train import TrainerFactory
from utils.serialization import load_checkpoint
from .model import VDPWIModel
def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_size, device):
saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, batch_size, device)
scores, metric_names = saved_model_evaluator.get_scores()
logger.info('Evaluation metrics for {}'.format(split_name))
logger.info('\t'.join([' '] + metric_names))
logger.info('\t'.join([split_name] + list(map(str, scores))))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch implementation of VDPWI')
parser.add_argument('model_outfile', help='file to save final model')
parser.add_argument('--dataset', help='dataset to use, one of [sick, msrvid, trecqa, wikiqa]', default='sick')
parser.add_argument('--word-vectors-dir', help='word vectors directory', default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'GloVe'))
parser.add_argument('--word-vectors-file', help='word vectors filename', default='glove.840B.300d.txt')
parser.add_argument('--word-vectors-dim', type=int, default=300,
help='number of dimensions of word vectors (default: 300)')
parser.add_argument('--skip-training', help='will load pre-trained model', action='store_true')
parser.add_argument('--device', type=int, default=0, help='GPU device, -1 for CPU (default: 0)')
parser.add_argument('--sparse-features', action='store_true', default=False, help='use sparse features (default: false)')
    parser.add_argument('--batch-size', type=int, default=16, help='input batch size for training (default: 16)')
    parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train (default: 10)')
    parser.add_argument('--optimizer', type=str, default='rmsprop', help='optimizer to use: adam, sgd, or rmsprop (default: rmsprop)')
    parser.add_argument('--lr', type=float, default=5E-4, help='learning rate (default: 0.0005)')
parser.add_argument('--lr-reduce-factor', type=float, default=0.3, help='learning rate reduce factor after plateau (default: 0.3)')
parser.add_argument('--patience', type=float, default=2, help='learning rate patience after seeing plateau (default: 2)')
parser.add_argument('--momentum', type=float, default=0.1, help='momentum (default: 0.1)')
parser.add_argument('--epsilon', type=float, default=1e-8, help='Adam epsilon (default: 1e-8)')
parser.add_argument('--log-interval', type=int, default=10, help='how many batches to wait before logging training status (default: 10)')
parser.add_argument('--regularization', type=float, default=1E-5, help='Regularization for the optimizer (default: 0.00001)')
parser.add_argument('--hidden-units', type=int, default=250, help='number of hidden units in the RNN')
parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument('--tensorboard', action='store_true', default=False, help='use TensorBoard to visualize training (default: false)')
parser.add_argument('--run-label', type=str, help='label to describe run')
# VDPWI args
parser.add_argument('--classifier', type=str, default='vdpwi', choices=['vdpwi', 'resnet'])
parser.add_argument('--clip-norm', type=float, default=50)
parser.add_argument('--decay', type=float, default=0.95)
parser.add_argument('--res-fmaps', type=int, default=32)
parser.add_argument('--res-layers', type=int, default=16)
parser.add_argument('--rnn-hidden-dim', type=int, default=250)
args = parser.parse_args()
device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and args.device >= 0 else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
# logging setup
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.info(pprint.pformat(vars(args)))
dataset_cls, embedding, train_loader, test_loader, dev_loader \
= DatasetFactory.get_dataset(args.dataset, args.word_vectors_dir, args.word_vectors_file, args.batch_size, args.device)
model_config = {
'classifier': args.classifier,
'rnn_hidden_dim': args.rnn_hidden_dim,
'n_labels': dataset_cls.NUM_CLASSES,
'device': device,
'res_layers': args.res_layers,
'res_fmaps': args.res_fmaps
}
model = VDPWIModel(args.word_vectors_dim, model_config)
model.to(device)
embedding = embedding.to(device)
optimizer = None
if args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.regularization, eps=args.epsilon)
elif args.optimizer == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.regularization)
elif args.optimizer == "rmsprop":
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum, alpha=args.decay,
weight_decay=args.regularization)
else:
raise ValueError('optimizer not recognized: it should be one of adam, sgd, or rmsprop')
train_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, train_loader, args.batch_size, args.device)
test_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, test_loader, args.batch_size, args.device)
dev_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, dev_loader, args.batch_size, args.device)
trainer_config = {
'optimizer': optimizer,
'batch_size': args.batch_size,
'log_interval': args.log_interval,
'model_outfile': args.model_outfile,
'lr_reduce_factor': args.lr_reduce_factor,
'patience': args.patience,
'tensorboard': args.tensorboard,
'run_label': args.run_label,
'logger': logger,
'clip_norm': args.clip_norm
}
trainer = TrainerFactory.get_trainer(args.dataset, model, embedding, train_loader, trainer_config, train_evaluator, test_evaluator, dev_evaluator)
if not args.skip_training:
total_params = 0
for param in model.parameters():
size = [s for s in param.size()]
total_params += np.prod(size)
logger.info('Total number of parameters: %s', total_params)
trainer.train(args.epochs)
_, _, state_dict, _, _ = load_checkpoint(args.model_outfile)
for k, tensor in state_dict.items():
state_dict[k] = tensor.to(device)
model.load_state_dict(state_dict)
if dev_loader:
evaluate_dataset('dev', dataset_cls, model, embedding, dev_loader, args.batch_size, args.device)
evaluate_dataset('test', dataset_cls, model, embedding, test_loader, args.batch_size, args.device)
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import dftModel as DF
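# Demonstrates the convolution theorem: convolving two signals in time is
# equivalent to multiplying their spectra, so the bottom two panels,
# DFT(x1 * x2) and X1 x X2, should match.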
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
(fs, x2) = UF.wavread('../../../sounds/impulse-response.wav')
x1 = x[40000:44096]
N = 4096
plt.figure(1, figsize=(9.5, 7))
plt.subplot(3,2,1)
plt.title('x1 (ocean.wav)')
plt.plot(x1, 'b')
plt.axis([0,N,min(x1),max(x1)])
plt.subplot(3,2,2)
plt.title('x2 (impulse-response.wav)')
plt.plot(x2, 'b')
plt.axis([0,N,min(x2),max(x2)])
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX1 = mX1 - max(mX1)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(mX1, 'r')
plt.axis([0,N/2,-70,0])
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
mX2 = mX2 - max(mX2)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(mX2, 'r')
plt.axis([0,N/2,-70,0])
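# time-domain convolution; its spectrum is compared below against the
# bin-by-bin product of the two DFTs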
y = np.convolve(x1, x2)
mY, pY = DF.dftAnal(y[0:N], np.ones(N), N)
mY = mY - max(mY)
plt.subplot(3,2,5)
plt.title('DFT(x1 * x2)')
plt.plot(mY, 'r')
plt.axis([0,N/2,-70,0])
plt.subplot(3,2,6)
plt.title('X1 x X2')
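# product of the two spectra in dB; this relies on x2 holding exactly N
# samples so that fft(x1) and fft(x2) have matching lengths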
mY1 = 20*np.log10(np.abs(fft(x1) * fft(x2)))
mY1 = mY1 - max(mY1)
plt.plot(mY1[0:N//2], 'r')
plt.axis([0,N//2,-84,0])
plt.tight_layout()
plt.savefig('convolution-1.png')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"os.path.realpath",
"matplotlib.pyplot.axis",
"numpy.ones",
"scipy.fftpack.fft",
"matplotlib.pyplot.figure",
"numpy.convolve",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"utilFunctions.wavread"
] |
[((295, 334), 'utilFunctions.wavread', 'UF.wavread', (['"""../../../sounds/ocean.wav"""'], {}), "('../../../sounds/ocean.wav')\n", (305, 334), True, 'import utilFunctions as UF\n'), ((346, 396), 'utilFunctions.wavread', 'UF.wavread', (['"""../../../sounds/impulse-response.wav"""'], {}), "('../../../sounds/impulse-response.wav')\n", (356, 396), True, 'import utilFunctions as UF\n'), ((427, 458), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(9.5, 7)'}), '(1, figsize=(9.5, 7))\n', (437, 458), True, 'import matplotlib.pyplot as plt\n'), ((459, 479), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (470, 479), True, 'import matplotlib.pyplot as plt\n'), ((478, 505), 'matplotlib.pyplot.title', 'plt.title', (['"""x1 (ocean.wav)"""'], {}), "('x1 (ocean.wav)')\n", (487, 505), True, 'import matplotlib.pyplot as plt\n'), ((506, 523), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', '"""b"""'], {}), "(x1, 'b')\n", (514, 523), True, 'import matplotlib.pyplot as plt\n'), ((557, 577), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (568, 577), True, 'import matplotlib.pyplot as plt\n'), ((576, 614), 'matplotlib.pyplot.title', 'plt.title', (['"""x2 (impulse-response.wav)"""'], {}), "('x2 (impulse-response.wav)')\n", (585, 614), True, 'import matplotlib.pyplot as plt\n'), ((615, 632), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', '"""b"""'], {}), "(x2, 'b')\n", (623, 632), True, 'import matplotlib.pyplot as plt\n'), ((728, 748), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (739, 748), True, 'import matplotlib.pyplot as plt\n'), ((747, 762), 'matplotlib.pyplot.title', 'plt.title', (['"""X1"""'], {}), "('X1')\n", (756, 762), True, 'import matplotlib.pyplot as plt\n'), ((763, 781), 'matplotlib.pyplot.plot', 'plt.plot', (['mX1', '"""r"""'], {}), "(mX1, 'r')\n", (771, 781), True, 'import matplotlib.pyplot as plt\n'), ((782, 810), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, N / 2, -70, 0]'], {}), '([0, N / 2, -70, 0])\n', (790, 810), True, 'import matplotlib.pyplot as plt\n'), ((869, 889), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (880, 889), True, 'import matplotlib.pyplot as plt\n'), ((888, 903), 'matplotlib.pyplot.title', 'plt.title', (['"""X2"""'], {}), "('X2')\n", (897, 903), True, 'import matplotlib.pyplot as plt\n'), ((904, 922), 'matplotlib.pyplot.plot', 'plt.plot', (['mX2', '"""r"""'], {}), "(mX2, 'r')\n", (912, 922), True, 'import matplotlib.pyplot as plt\n'), ((923, 951), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, N / 2, -70, 0]'], {}), '([0, N / 2, -70, 0])\n', (931, 951), True, 'import matplotlib.pyplot as plt\n'), ((952, 971), 'numpy.convolve', 'np.convolve', (['x1', 'x2'], {}), '(x1, x2)\n', (963, 971), True, 'import numpy as np\n'), ((1033, 1053), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (1044, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1077), 'matplotlib.pyplot.title', 'plt.title', (['"""DFT(x1 * x2)"""'], {}), "('DFT(x1 * x2)')\n", (1061, 1077), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1095), 'matplotlib.pyplot.plot', 'plt.plot', (['mY', '"""r"""'], {}), "(mY, 'r')\n", (1086, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1124), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, N / 2, -70, 0]'], {}), '([0, N / 2, -70, 0])\n', (1104, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1141), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (1132, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1160), 'matplotlib.pyplot.title', 'plt.title', (['"""X1 x X2"""'], {}), "('X1 x X2')\n", (1149, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1255), 'matplotlib.pyplot.plot', 'plt.plot', (['mY1[0:N // 2]', '"""r"""'], {}), "(mY1[0:N // 2], 'r')\n", (1235, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1283), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, N // 2, -84, 0]'], {}), '([0, N // 2, -84, 0])\n', (1262, 1283), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1298), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1296, 1298), True, 'import matplotlib.pyplot as plt\n'), ((1299, 1331), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""convolution-1.png"""'], {}), "('convolution-1.png')\n", (1310, 1331), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1340, 1342), True, 'import matplotlib.pyplot as plt\n'), ((692, 702), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (699, 702), True, 'import numpy as np\n'), ((833, 843), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (840, 843), True, 'import numpy as np\n'), ((1000, 1010), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (1007, 1010), True, 'import numpy as np\n'), ((176, 202), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (192, 202), False, 'import time, os, sys\n'), ((1186, 1193), 'scipy.fftpack.fft', 'fft', (['x1'], {}), '(x1)\n', (1189, 1193), False, 'from scipy.fftpack import fft, ifft, fftshift\n'), ((1196, 1203), 'scipy.fftpack.fft', 'fft', (['x2'], {}), '(x2)\n', (1199, 1203), False, 'from scipy.fftpack import fft, ifft, fftshift\n')]
|
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pyopencl as cl
import pyopencl.clrandom as clr
import pystella as ps
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from pystella.multigrid import (FullApproximationScheme, MultiGridSolver,
NewtonIterator)
@pytest.mark.filterwarnings(
"ignore::pyopencl.characterize.CLCharacterizationWarning")
@pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory")
@pytest.mark.filterwarnings("ignore::loopy.diagnostic.ParameterFinderWarning")
@pytest.mark.parametrize("h", [1])
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("Solver", [NewtonIterator])
@pytest.mark.parametrize("MG", [FullApproximationScheme, MultiGridSolver])
def test_multigrid(ctx_factory, grid_shape, proc_shape, h, dtype, Solver, MG,
timing=False):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
rank_shape = tuple(Ni // pi for Ni, pi in zip(grid_shape, proc_shape))
mpi = ps.DomainDecomposition(proc_shape, h, rank_shape)
L = 10
dx = L / grid_shape[0]
statistics = ps.FieldStatistics(mpi, h, rank_shape=rank_shape,
grid_size=np.product(grid_shape))
def get_laplacian(f):
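        # sum second-order centered-difference stencils over the three
        # spatial directions; the 1/dx**2 scaling is applied below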
from pystella.derivs import _lap_coefs, centered_diff
lap_coefs = _lap_coefs[h]
from pymbolic import var
return sum([centered_diff(f, lap_coefs, direction=mu, order=2)
for mu in range(1, 4)]) / var("dx")**2
test_problems = {}
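    # two test problems: a Poisson equation, lap(f) = rho, and a
    # Helmholtz-type equation, lap(f2) - f2 = rho2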
from pystella import Field
f = Field("f", offset="h")
rho = Field("rho", offset="h")
test_problems[f] = (get_laplacian(f), rho)
f = Field("f2", offset="h")
rho = Field("rho2", offset="h")
test_problems[f] = (get_laplacian(f) - f, rho)
solver = Solver(mpi, queue, test_problems, halo_shape=h, dtype=dtype,
fixed_parameters=dict(omega=1/2))
mg = MG(solver=solver, halo_shape=h, dtype=dtype)
def zero_mean_array():
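        # draw a random field, scatter it across MPI ranks, and subtract
        # the global mean (the Poisson problem fixes f only up to a constant)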
f0 = clr.rand(queue, grid_shape, dtype)
f = clr.rand(queue, tuple(ni + 2*h for ni in rank_shape), dtype)
mpi.scatter_array(queue, f0, f, root=0)
avg = statistics(f)["mean"].item()
f = f - avg
mpi.share_halos(queue, f)
return f
f = zero_mean_array()
rho = zero_mean_array()
f2 = zero_mean_array()
rho2 = zero_mean_array()
poisson_errs = []
helmholtz_errs = []
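    # run repeated V-cycles and record the final per-cycle residuals for
    # both test problems; MultiGridSolver gets more cycles but is checked
    # against a looser tolerance than the full approximation scheme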
num_v_cycles = 15 if MG == MultiGridSolver else 10
for _ in range(num_v_cycles):
errs = mg(mpi, queue, dx0=dx, f=f, rho=rho, f2=f2, rho2=rho2)
poisson_errs.append(errs[-1][-1]["f"])
helmholtz_errs.append(errs[-1][-1]["f2"])
for name, cycle_errs in zip(["poisson", "helmholtz"],
[poisson_errs, helmholtz_errs]):
tol = 1e-6 if MG == MultiGridSolver else 5e-14
assert cycle_errs[-1][1] < tol and cycle_errs[-2][1] < 10*tol, \
f"multigrid solution to {name} eqn is inaccurate for " \
f"{grid_shape=}, {h=}, {proc_shape=}\n{cycle_errs=}"
if __name__ == "__main__":
from common import parser
parser.set_defaults(grid_shape=(128,)*3)
args = parser.parse_args()
test_multigrid(
ps.choose_device_and_make_context,
grid_shape=args.grid_shape, proc_shape=args.proc_shape,
h=args.h, dtype=args.dtype, timing=args.timing,
Solver=NewtonIterator, MG=FullApproximationScheme
)
|
[
"common.parser.set_defaults",
"pystella.derivs.centered_diff",
"pystella.DomainDecomposition",
"pymbolic.var",
"pyopencl.CommandQueue",
"common.parser.parse_args",
"pystella.Field",
"numpy.product",
"pytest.mark.parametrize",
"pytest.mark.filterwarnings",
"pyopencl.clrandom.rand"
] |
[((1466, 1556), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::pyopencl.characterize.CLCharacterizationWarning"""'], {}), "(\n 'ignore::pyopencl.characterize.CLCharacterizationWarning')\n", (1492, 1556), False, 'import pytest\n'), ((1560, 1628), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::loopy.diagnostic.LoopyAdvisory"""'], {}), "('ignore::loopy.diagnostic.LoopyAdvisory')\n", (1586, 1628), False, 'import pytest\n'), ((1631, 1708), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::loopy.diagnostic.ParameterFinderWarning"""'], {}), "('ignore::loopy.diagnostic.ParameterFinderWarning')\n", (1657, 1708), False, 'import pytest\n'), ((1711, 1744), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""h"""', '[1]'], {}), "('h', [1])\n", (1734, 1744), False, 'import pytest\n'), ((1747, 1793), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float64]'], {}), "('dtype', [np.float64])\n", (1770, 1793), False, 'import pytest\n'), ((1796, 1847), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Solver"""', '[NewtonIterator]'], {}), "('Solver', [NewtonIterator])\n", (1819, 1847), False, 'import pytest\n'), ((1850, 1923), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""MG"""', '[FullApproximationScheme, MultiGridSolver]'], {}), "('MG', [FullApproximationScheme, MultiGridSolver])\n", (1873, 1923), False, 'import pytest\n'), ((2078, 2098), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (2093, 2098), True, 'import pyopencl as cl\n'), ((2186, 2235), 'pystella.DomainDecomposition', 'ps.DomainDecomposition', (['proc_shape', 'h', 'rank_shape'], {}), '(proc_shape, h, rank_shape)\n', (2208, 2235), True, 'import pystella as ps\n'), ((2781, 2803), 'pystella.Field', 'Field', (['"""f"""'], {'offset': '"""h"""'}), "('f', offset='h')\n", (2786, 2803), False, 'from pystella import Field\n'), ((2815, 2839), 'pystella.Field', 'Field', (['"""rho"""'], {'offset': '"""h"""'}), "('rho', offset='h')\n", (2820, 2839), False, 'from pystella import Field\n'), ((2899, 2922), 'pystella.Field', 'Field', (['"""f2"""'], {'offset': '"""h"""'}), "('f2', offset='h')\n", (2904, 2922), False, 'from pystella import Field\n'), ((2934, 2959), 'pystella.Field', 'Field', (['"""rho2"""'], {'offset': '"""h"""'}), "('rho2', offset='h')\n", (2939, 2959), False, 'from pystella import Field\n'), ((4409, 4451), 'common.parser.set_defaults', 'parser.set_defaults', ([], {'grid_shape': '((128,) * 3)'}), '(grid_shape=(128,) * 3)\n', (4428, 4451), False, 'from common import parser\n'), ((4462, 4481), 'common.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (4479, 4481), False, 'from common import parser\n'), ((3243, 3277), 'pyopencl.clrandom.rand', 'clr.rand', (['queue', 'grid_shape', 'dtype'], {}), '(queue, grid_shape, dtype)\n', (3251, 3277), True, 'import pyopencl.clrandom as clr\n'), ((2395, 2417), 'numpy.product', 'np.product', (['grid_shape'], {}), '(grid_shape)\n', (2405, 2417), True, 'import numpy as np\n'), ((2699, 2708), 'pymbolic.var', 'var', (['"""dx"""'], {}), "('dx')\n", (2702, 2708), False, 'from pymbolic import var\n'), ((2601, 2651), 'pystella.derivs.centered_diff', 'centered_diff', (['f', 'lap_coefs'], {'direction': 'mu', 'order': '(2)'}), '(f, lap_coefs, direction=mu, order=2)\n', (2614, 2651), False, 'from pystella.derivs import _lap_coefs, centered_diff\n')]
|
#!/usr/bin/env python
# coding: Latin-1
# Load library functions we want
import time
import sys
import gpiozero
import numpy as np
import cv2 as cv
from PIL import Image
from camera import Camera
from argparse import ArgumentParser
import logging
import os
from rectangle import Rectangle
from time import sleep
from picamera.array import PiRGBArray
from picamera.array import PiYUVArray
running = True
showMask = True
button = 0
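# Mouse callback: taps are hit-tested against the on-screen button
# rectangles and translated into a button code handled in the main loop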
def click(event, x, y, flags, param):
global running, button
haltRect = Rectangle(290,0,320,20)
exitRect = Rectangle(0,0,20,20)
switchRect = Rectangle(306,220,350,240)
hMinusRect = Rectangle(0,220,40,240)
hPlusRect = Rectangle(50,220,90,240)
sMinusRect = Rectangle(100,220,150,240)
sPlusRect = Rectangle(160,220,200,240)
vMinusRect = Rectangle(210,220,250,240)
vPlusRect = Rectangle(260,220,300,240)
if event == cv.EVENT_LBUTTONDOWN:
print("x {0} - y {1}", x, y )
if haltRect.Contains( x, y ):
button = 1
if exitRect.Contains( x, y ):
button = 2
running = False
if switchRect.Contains( x, y ):
button = 3
if hMinusRect.Contains( x, y ):
button = 4
if hPlusRect.Contains( x, y ):
button = 5
if sMinusRect.Contains( x, y ):
button = 6
if sPlusRect.Contains( x, y ):
button = 7
if vMinusRect.Contains( x, y ):
button = 8
if vPlusRect.Contains( x, y ):
button = 9
def showMenuImage(menu_img):
cv.imshow('image',menu_img)
cv.waitKey(1)
def main():
global running, button
w = 320
h = 240
camera = Camera( w, h)
camera.Rotate( 270 )
cv.namedWindow('image', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('image',cv.WND_PROP_FULLSCREEN,cv.WINDOW_FULLSCREEN)
cv.setMouseCallback("image", click)
rawCapture = PiRGBArray( camera, size=(w, h))
time.sleep(0.1)
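    # initial HSV threshold window; OpenCV hue spans 0-179, so 110-130
    # selects blue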
lower = np.array([110,50,50])
upper = np.array([130,255,255])
    showMask = True
for frame in camera.CaptureContinous(rawCapture):
if running == False:
break
if button == 1:
cv.destroyAllWindows()
os.system("sudo halt")
if button == 2:
break
if button == 3:
showMask = not showMask
button = 0
# Narrow the H range
if button == 4:
lower[0] = min(lower[0] + 1,170)
upper[0] = max(upper[0] - 1,0)
button = 0
# Expand the H range
if button == 5:
lower[0] = max(lower[0] - 1,0)
upper[0] = min(upper[0] + 1,170)
button = 0
# Narrow the S range
if button == 6:
lower[1] = min(lower[1] + 1,255)
upper[1] = max(upper[1] - 1,0)
button = 0
# Expand the S range
if button == 7:
lower[1] = max(lower[1] - 1,0)
upper[1] = min(upper[1] + 1,255)
button = 0
# Narrow the V range
if button == 8:
lower[2] = min(lower[2] + 1,255)
upper[2] = max(upper[2] - 1,0)
button = 0
# Expand the V range
if button == 9:
lower[2] = max(lower[2] - 1,0)
upper[2] = min(upper[2] + 1,255)
button = 0
raw = frame.array
imgHSV = cv.cvtColor(raw, cv.COLOR_RGB2HSV) # Convert the captured frame from RGB to HSV ( with blue in the red position )
imgMask = cv.inRange(imgHSV, lower, upper)
#kernel = np.ones((3,3), np.uint8)
#imgMask = cv.erode(imgMask, kernel, iterations=2)
#imgMask = cv.dilate(imgMask, kernel, iterations=4)
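        # morphological open (erode then dilate) removes speckle noise;
        # the following close (dilate then erode) fills small holes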
imgMask = cv.erode(imgMask, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5)))
imgMask = cv.dilate(imgMask, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5)))
imgMask = cv.dilate(imgMask, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5)))
imgMask = cv.erode(imgMask, cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5)))
if showMask:
image = imgMask
else:
image = raw
cv.rectangle(image,(290,0),(320,20),(255,255,255),-1);
cv.putText(image, "X", (298, 18), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(0,220),(40,240),(255,255,255),-1);
cv.putText(image, "-H", (8, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(50,220),(90,240),(255,255,255),-1);
cv.putText(image, "+H", (58, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(100,220),(150,240),(255,255,255),-1);
cv.putText(image, "-S", (108, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(160,220),(200,240),(255,255,255),-1);
cv.putText(image, "+S", (168, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(210,220),(250,240),(255,255,255),-1);
cv.putText(image, "-V", (218, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(260,220),(300,240),(255,255,255),-1);
cv.putText(image, "+V", (268, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
cv.rectangle(image,(310,220),(350,240),(255,255,255),-1);
cv.putText(image, "+", (311, 236), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0));
text = "{0} {1}".format(lower, upper)
cv.putText(image, text,(10, 20), cv.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
showMenuImage(image)
rawCapture.truncate(0)
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
[
"cv2.putText",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.getStructuringElement",
"camera.Camera",
"cv2.setWindowProperty",
"os.system",
"time.sleep",
"cv2.setMouseCallback",
"numpy.array",
"picamera.array.PiRGBArray",
"cv2.rectangle",
"rectangle.Rectangle",
"cv2.imshow",
"cv2.inRange",
"cv2.namedWindow"
] |
[((513, 539), 'rectangle.Rectangle', 'Rectangle', (['(290)', '(0)', '(320)', '(20)'], {}), '(290, 0, 320, 20)\n', (522, 539), False, 'from rectangle import Rectangle\n'), ((549, 572), 'rectangle.Rectangle', 'Rectangle', (['(0)', '(0)', '(20)', '(20)'], {}), '(0, 0, 20, 20)\n', (558, 572), False, 'from rectangle import Rectangle\n'), ((584, 613), 'rectangle.Rectangle', 'Rectangle', (['(306)', '(220)', '(350)', '(240)'], {}), '(306, 220, 350, 240)\n', (593, 613), False, 'from rectangle import Rectangle\n'), ((625, 651), 'rectangle.Rectangle', 'Rectangle', (['(0)', '(220)', '(40)', '(240)'], {}), '(0, 220, 40, 240)\n', (634, 651), False, 'from rectangle import Rectangle\n'), ((664, 691), 'rectangle.Rectangle', 'Rectangle', (['(50)', '(220)', '(90)', '(240)'], {}), '(50, 220, 90, 240)\n', (673, 691), False, 'from rectangle import Rectangle\n'), ((703, 732), 'rectangle.Rectangle', 'Rectangle', (['(100)', '(220)', '(150)', '(240)'], {}), '(100, 220, 150, 240)\n', (712, 732), False, 'from rectangle import Rectangle\n'), ((743, 772), 'rectangle.Rectangle', 'Rectangle', (['(160)', '(220)', '(200)', '(240)'], {}), '(160, 220, 200, 240)\n', (752, 772), False, 'from rectangle import Rectangle\n'), ((784, 813), 'rectangle.Rectangle', 'Rectangle', (['(210)', '(220)', '(250)', '(240)'], {}), '(210, 220, 250, 240)\n', (793, 813), False, 'from rectangle import Rectangle\n'), ((824, 853), 'rectangle.Rectangle', 'Rectangle', (['(260)', '(220)', '(300)', '(240)'], {}), '(260, 220, 300, 240)\n', (833, 853), False, 'from rectangle import Rectangle\n'), ((1421, 1449), 'cv2.imshow', 'cv.imshow', (['"""image"""', 'menu_img'], {}), "('image', menu_img)\n", (1430, 1449), True, 'import cv2 as cv\n'), ((1450, 1463), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1460, 1463), True, 'import cv2 as cv\n'), ((1532, 1544), 'camera.Camera', 'Camera', (['w', 'h'], {}), '(w, h)\n', (1538, 1544), False, 'from camera import Camera\n'), ((1572, 1619), 'cv2.namedWindow', 'cv.namedWindow', (['"""image"""', 'cv.WND_PROP_FULLSCREEN'], {}), "('image', cv.WND_PROP_FULLSCREEN)\n", (1586, 1619), True, 'import cv2 as cv\n'), ((1621, 1696), 'cv2.setWindowProperty', 'cv.setWindowProperty', (['"""image"""', 'cv.WND_PROP_FULLSCREEN', 'cv.WINDOW_FULLSCREEN'], {}), "('image', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)\n", (1641, 1696), True, 'import cv2 as cv\n'), ((1697, 1732), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['"""image"""', 'click'], {}), "('image', click)\n", (1716, 1732), True, 'import cv2 as cv\n'), ((1750, 1781), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': '(w, h)'}), '(camera, size=(w, h))\n', (1760, 1781), False, 'from picamera.array import PiRGBArray\n'), ((1785, 1800), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1795, 1800), False, 'import time\n'), ((1813, 1836), 'numpy.array', 'np.array', (['[110, 50, 50]'], {}), '([110, 50, 50])\n', (1821, 1836), True, 'import numpy as np\n'), ((1844, 1869), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (1852, 1869), True, 'import numpy as np\n'), ((4930, 4952), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (4950, 4952), True, 'import cv2 as cv\n'), ((2960, 2994), 'cv2.cvtColor', 'cv.cvtColor', (['raw', 'cv.COLOR_RGB2HSV'], {}), '(raw, cv.COLOR_RGB2HSV)\n', (2971, 2994), True, 'import cv2 as cv\n'), ((3092, 3124), 'cv2.inRange', 'cv.inRange', (['imgHSV', 'lower', 'upper'], {}), '(imgHSV, lower, upper)\n', (3102, 3124), True, 'import cv2 as cv\n'), ((3676, 3737), 'cv2.rectangle', 
'cv.rectangle', (['image', '(290, 0)', '(320, 20)', '(255, 255, 255)', '(-1)'], {}), '(image, (290, 0), (320, 20), (255, 255, 255), -1)\n', (3688, 3737), True, 'import cv2 as cv\n'), ((3733, 3803), 'cv2.putText', 'cv.putText', (['image', '"""X"""', '(298, 18)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, 'X', (298, 18), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (3743, 3803), True, 'import cv2 as cv\n'), ((3808, 3869), 'cv2.rectangle', 'cv.rectangle', (['image', '(0, 220)', '(40, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (0, 220), (40, 240), (255, 255, 255), -1)\n', (3820, 3869), True, 'import cv2 as cv\n'), ((3865, 3935), 'cv2.putText', 'cv.putText', (['image', '"""-H"""', '(8, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, '-H', (8, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (3875, 3935), True, 'import cv2 as cv\n'), ((3937, 3999), 'cv2.rectangle', 'cv.rectangle', (['image', '(50, 220)', '(90, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (50, 220), (90, 240), (255, 255, 255), -1)\n', (3949, 3999), True, 'import cv2 as cv\n'), ((3995, 4066), 'cv2.putText', 'cv.putText', (['image', '"""+H"""', '(58, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, '+H', (58, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (4005, 4066), True, 'import cv2 as cv\n'), ((4073, 4137), 'cv2.rectangle', 'cv.rectangle', (['image', '(100, 220)', '(150, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (100, 220), (150, 240), (255, 255, 255), -1)\n', (4085, 4137), True, 'import cv2 as cv\n'), ((4133, 4205), 'cv2.putText', 'cv.putText', (['image', '"""-S"""', '(108, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, '-S', (108, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (4143, 4205), True, 'import cv2 as cv\n'), ((4207, 4271), 'cv2.rectangle', 'cv.rectangle', (['image', '(160, 220)', '(200, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (160, 220), (200, 240), (255, 255, 255), -1)\n', (4219, 4271), True, 'import cv2 as cv\n'), ((4267, 4339), 'cv2.putText', 'cv.putText', (['image', '"""+S"""', '(168, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, '+S', (168, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (4277, 4339), True, 'import cv2 as cv\n'), ((4344, 4408), 'cv2.rectangle', 'cv.rectangle', (['image', '(210, 220)', '(250, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (210, 220), (250, 240), (255, 255, 255), -1)\n', (4356, 4408), True, 'import cv2 as cv\n'), ((4404, 4476), 'cv2.putText', 'cv.putText', (['image', '"""-V"""', '(218, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, '-V', (218, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (4414, 4476), True, 'import cv2 as cv\n'), ((4478, 4542), 'cv2.rectangle', 'cv.rectangle', (['image', '(260, 220)', '(300, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (260, 220), (300, 240), (255, 255, 255), -1)\n', (4490, 4542), True, 'import cv2 as cv\n'), ((4538, 4610), 'cv2.putText', 'cv.putText', (['image', '"""+V"""', '(268, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, '+V', (268, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (4548, 4610), True, 'import cv2 as cv\n'), ((4615, 4679), 'cv2.rectangle', 'cv.rectangle', (['image', '(310, 220)', '(350, 240)', '(255, 255, 255)', '(-1)'], {}), '(image, (310, 220), (350, 240), (255, 255, 255), -1)\n', (4627, 4679), True, 'import cv2 as cv\n'), ((4675, 4746), 'cv2.putText', 'cv.putText', (['image', '"""+"""', '(311, 236)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)'], {}), "(image, 
'+', (311, 236), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0))\n", (4685, 4746), True, 'import cv2 as cv\n'), ((4791, 4870), 'cv2.putText', 'cv.putText', (['image', 'text', '(10, 20)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(255, 255, 255)', '(1)'], {}), '(image, text, (10, 20), cv.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)\n', (4801, 4870), True, 'import cv2 as cv\n'), ((2006, 2028), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2026, 2028), True, 'import cv2 as cv\n'), ((2032, 2054), 'os.system', 'os.system', (['"""sudo halt"""'], {}), "('sudo halt')\n", (2041, 2054), False, 'import os\n'), ((3308, 3358), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv.MORPH_ELLIPSE, (5, 5))\n', (3332, 3358), True, 'import cv2 as cv\n'), ((3391, 3441), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv.MORPH_ELLIPSE, (5, 5))\n', (3415, 3441), True, 'import cv2 as cv\n'), ((3477, 3527), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv.MORPH_ELLIPSE, (5, 5))\n', (3501, 3527), True, 'import cv2 as cv\n'), ((3559, 3609), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv.MORPH_ELLIPSE, (5, 5))\n', (3583, 3609), True, 'import cv2 as cv\n')]
|
import pickle
import numpy as np
import statsmodels.api as sm
# find the games which are 2v2, 3v3, 4v4
def scan_game_table(table_path):
team_game_set = set()
target_file = open(table_path, "r")
line = target_file.readline().strip().split(",")
while len(line) > 1:
game_id = int(line[0])
game_type = str(line[1])
if game_type in ["Doubles", "Triples", "Quadruples"]:
team_game_set.add(game_id)
line = target_file.readline().strip().split(",")
return team_game_set
# store player's skill state for a given range of games
def scan_play_table(table_path, n): # n: max games considered
team_player_dict = {} # key: game+team; value: player id
player_team_dict = {} # key: player id; value: game+team
player_game_dict = {} # key: player id; value: game id
player_skill_dict = {} # player: player id; value: player's skill level at n
target_file = open(table_path, "r")
line = target_file.readline().strip().split(",")
while len(line) > 1:
# some lines have null data fields. let's skip them
valid_flag = 1
for i in range(0, 4):
valid_flag = valid_flag * len(line[i])
if valid_flag == 0:
line = target_file.readline().strip().split(",")
continue
player_id = int(line[0])
game_id = int(line[1])
team_id = int(line[2])
skill = float(line[3])
game_num = int(line[4])
game_team = str(game_id) + "-" + str(team_id)
# init dicts
team_player_dict.setdefault(game_team, set())
player_team_dict.setdefault(player_id, set())
player_game_dict.setdefault(player_id, set())
team_player_dict[game_team].add(player_id)
# store player's skill when n games are reached
if len(player_team_dict[player_id]) == n - 1:
player_skill_dict[player_id] = skill
# no use; skip
if len(player_team_dict[player_id]) >= n:
line = target_file.readline().strip().split(",")
continue
player_team_dict[player_id].add(game_team)
player_game_dict[player_id].add(game_id)
line = target_file.readline().strip().split(",")
return team_player_dict, player_team_dict, player_game_dict, player_skill_dict
# find the most frequent teammate
def get_max_teammate(self_id, player_team_set, team_player_dict):
teammate_dict = {}
for game_team in player_team_set:
for member in team_player_dict[game_team]:
if member != self_id:
teammate_dict.setdefault(member, 0)
teammate_dict[member] = teammate_dict[member] + 1
return max(teammate_dict.values())
# multivariate polyfit
def reg_m(y, x):
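    # stack the regressors column by column plus an intercept column, then
    # fit ordinary least squares; add_constant leaves X unchanged once a
    # constant column is already present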
x = x[::-1] # reverse list to make the right output order
ones = np.ones(len(x[0]))
X = sm.add_constant(np.column_stack((x[0], ones)))
for ele in x[1:]:
X = sm.add_constant(np.column_stack((ele, X)))
results = sm.OLS(y, X).fit()
return results
N = 200 # 50, 100, 200, 300, ...
team_player_dict, player_team_dict, player_game_dict, player_skill_dict = scan_play_table("play.csv", N)
team_game_set = scan_game_table("game.csv")
player_info_dict = {}
# for regression
skill_list = []
TOB_list = []
loyalty_list = []
faithfulness_list = []
for player_id in player_team_dict:
player_team_game_set = player_game_dict[player_id] & team_game_set # only consider team games
if (len(player_team_dict[player_id]) == N) and (len(player_team_game_set) >= 4): # min 4 games: see the PLOS ONE paper
# calculate 4 values for regression
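        # TOB: share of the first N games that were team games; loyalty:
        # games with the most frequent teammate relative to team games;
        # faithfulness: that same count relative to all N games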
skill_list.append(player_skill_dict[player_id])
TOB_list.append(len(player_team_game_set) / N)
max_teammate = get_max_teammate(player_id, player_team_dict[player_id], team_player_dict)
loyalty_list.append(max_teammate / len(player_team_game_set))
faithfulness_list.append(max_teammate / N)
'''
player_info_dict[player_id] = {"TOB": len(player_team_game_set) / N}
max_teammate = get_max_teammate(player_id, player_team_dict[player_id], team_player_dict)
player_info_dict[player_id]["loyalty"] = max_teammate / len(player_team_game_set)
print(player_id, player_info_dict[player_id])
'''
print(reg_m(skill_list, [TOB_list, loyalty_list, faithfulness_list]).summary()) # regression (default: linear)
|
[
"statsmodels.api.OLS",
"numpy.column_stack"
] |
[((2886, 2915), 'numpy.column_stack', 'np.column_stack', (['(x[0], ones)'], {}), '((x[0], ones))\n', (2901, 2915), True, 'import numpy as np\n'), ((2967, 2992), 'numpy.column_stack', 'np.column_stack', (['(ele, X)'], {}), '((ele, X))\n', (2982, 2992), True, 'import numpy as np\n'), ((3008, 3020), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (3014, 3020), True, 'import statsmodels.api as sm\n')]
|
# Copyright 2018-2019 CNRS-UM LIRMM
#
# \author <NAME>
#
#
#
# pyQpController is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# pyQpController is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pyQpController. If not, see
# <http://www.gnu.org/licenses/>.
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.font_manager import FontProperties
if __name__ =="__main__":
fileName = sys.argv[1]
loaded = np.load(fileName)
impactFileName = sys.argv[2]
impact_loaded = np.load(impactFileName)
# loaded = np.load("../log/data/data_Nov_03_2018_15-40-26.npz")
# impact_loaded = np.load("../log/data/impact-data_Nov_03_2018_15-40-26.npz")
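    # the first archive holds the full simulation log; the second holds the
    # impact-related quantities (velocity/torque jumps, contact forces)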
time = loaded['time']
error_x = loaded['error'][:, 0]
error_y = loaded['error'][:, 1]
error_z = loaded['error'][:, 2]
fontP = FontProperties()
fontP.set_size('small')
#fig1, (ax11, ax12, ax13 )= plt.subplots(nrows=3, ncols=1)
# ax1 = plt.subplot(311)
# plt.plot(time, error_x, label='error x')
# ax11.set_ylabel('error x')
#
# ax12 = plt.subplot(312)
# plt.plot(time, error_y, label='error y')
# ax12.set_ylabel('error y')
#
# ax13 = plt.subplot(313)
# plt.plot(time, error_z, label='error z')
# plt.ylabel('error z')
# plt.grid(True)
# plt.xlabel('Time [s]')
fig12 = plt.figure()
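    # hard-coded impact windows; uncomment the axvspan calls below to shade
    # them in the plot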
impact_time_1 = [1.80, 1.85]
impact_time_2 = [3.42, 3.45]
impact_time_3 = [3.94, 4.02]
ax = fig12.gca()
ax.plot(time, error_x, label='Error x')
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax.plot(time, error_y, label='Error y')
ax.plot(time, error_z, label='Error z')
ax.legend(frameon=False, loc='upper left', prop=fontP)
ax.autoscale(enable=True, axis='x', tight=True)
plt.xlabel('Time [s]')
plt.ylabel('Task Error')
plt.title('End-effector velocity Task')
plt.grid(True)
fig12.savefig("task_error.pdf", bbox_inches='tight')
dq_0 = loaded['dq'][:, 0]
dq_1 = loaded['dq'][:, 1]
dq_2 = loaded['dq'][:, 2]
dq_3 = loaded['dq'][:, 3]
dq_4 = loaded['dq'][:, 4]
dq_5 = loaded['dq'][:, 5]
impact_time = impact_loaded['time']
predict_delta_dq_0 = impact_loaded['predict_delta_dq'][:,0]
predict_delta_dq_1 = impact_loaded['predict_delta_dq'][:,1]
predict_delta_dq_2 = impact_loaded['predict_delta_dq'][:,2]
predict_delta_dq_3 = impact_loaded['predict_delta_dq'][:,3]
predict_delta_dq_4 = impact_loaded['predict_delta_dq'][:,4]
predict_delta_dq_5 = impact_loaded['predict_delta_dq'][:,5]
actual_delta_dq_0 = impact_loaded['actual_delta_dq'][:, 0]
actual_delta_dq_1 = impact_loaded['actual_delta_dq'][:, 1]
actual_delta_dq_2 = impact_loaded['actual_delta_dq'][:, 2]
actual_delta_dq_3 = impact_loaded['actual_delta_dq'][:, 3]
actual_delta_dq_4 = impact_loaded['actual_delta_dq'][:, 4]
actual_delta_dq_5 = impact_loaded['actual_delta_dq'][:, 5]
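    # the figures below compare, joint by joint, the predicted
    # impact-induced jumps against the values measured in simulation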
# predict_average_ddq_0 = impact_loaded['predict_average_acc'][:,0]
# predict_average_ddq_1 = impact_loaded['predict_average_acc'][:, 1]
# predict_average_ddq_2 = impact_loaded['predict_average_acc'][:, 2]
# predict_average_ddq_3 = impact_loaded['predict_average_acc'][:, 3]
# predict_average_ddq_4 = impact_loaded['predict_average_acc'][:, 4]
# predict_average_ddq_5 = impact_loaded['predict_average_acc'][:, 5]
acc_0 = loaded['acc'][:, 0]
acc_1 = loaded['acc'][:, 1]
acc_2 = loaded['acc'][:, 2]
acc_3 = loaded['acc'][:, 3]
acc_4 = loaded['acc'][:, 4]
acc_5 = loaded['acc'][:, 5]
sol_acc_0 = loaded['sol_acc'][:, 0]
sol_acc_1 = loaded['sol_acc'][:, 1]
sol_acc_2 = loaded['sol_acc'][:, 2]
sol_acc_3 = loaded['sol_acc'][:, 3]
sol_acc_4 = loaded['sol_acc'][:, 4]
sol_acc_5 = loaded['sol_acc'][:, 5]
predict_delta_torque_0 = impact_loaded['predict_delta_tau'][:,0]
predict_delta_torque_1 = impact_loaded['predict_delta_tau'][:,1]
predict_delta_torque_2 = impact_loaded['predict_delta_tau'][:,2]
predict_delta_torque_3 = impact_loaded['predict_delta_tau'][:,3]
predict_delta_torque_4 = impact_loaded['predict_delta_tau'][:,4]
predict_delta_torque_5 = impact_loaded['predict_delta_tau'][:,5]
actual_delta_torque_0 = impact_loaded['actual_delta_tau'][:, 0]
actual_delta_torque_1 = impact_loaded['actual_delta_tau'][:, 1]
actual_delta_torque_2 = impact_loaded['actual_delta_tau'][:, 2]
actual_delta_torque_3 = impact_loaded['actual_delta_tau'][:, 3]
actual_delta_torque_4 = impact_loaded['actual_delta_tau'][:, 4]
actual_delta_torque_5 = impact_loaded['actual_delta_tau'][:, 5]
predict_F_0 = impact_loaded['predict_F'][:, 0]
predict_F_1 = impact_loaded['predict_F'][:, 1]
predict_F_2 = impact_loaded['predict_F'][:, 2]
actual_F_0 = impact_loaded['actual_F'][:, 0]
actual_F_1 = impact_loaded['actual_F'][:, 1]
actual_F_2 = impact_loaded['actual_F'][:, 2]
fig2, (ax21, ax22, ax23, ax24, ax25, ax26) = plt.subplots(nrows=6, ncols=1)
ax21 = plt.subplot(611)
plt.plot(time, dq_0, label='dq')
ax21.set_ylabel('dq 0')
ax21.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.plot(impact_time, predict_delta_dq_0, label='Predicted delta dq')
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.setp(ax21.get_xticklabels(), visible=False)
plt.plot(impact_time, actual_delta_dq_0, label='Actual delta dq')
ax21.locator_params(nbins=5, axis='y')
ax21.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
#ax21.legend(fancybox=True, framealpha=0.5)
ax21.legend(frameon=False, loc='upper left', prop=fontP)
plt.title("Joint velocities and joint velocities jump at the impact time")
ax22 = plt.subplot(612)
plt.plot(time, dq_1)
plt.plot(impact_time, predict_delta_dq_1)
plt.plot(impact_time, actual_delta_dq_1)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax22.set_ylabel('dq 1')
ax22.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax22.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax22.get_xticklabels(), visible=False)
plt.grid(True)
#ax22.legend(fancybox=True, framealpha=0.5)
#ax23.legend(frameon=False)
#ax23.legend(frameon=False, fancybox=True, framealpha=0.2, loc='best', prop=fontP)
ax22.locator_params(nbins=5, axis='y')
ax23 = plt.subplot(613)
plt.plot(time, dq_2)
plt.plot(impact_time, predict_delta_dq_2)
plt.plot(impact_time, actual_delta_dq_2)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax23.set_ylabel('dq 2')
ax23.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax23.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax23.get_xticklabels(), visible=False)
plt.grid(True)
ax23.locator_params(nbins=5, axis='y')
ax24 = plt.subplot(614)
plt.plot(time, dq_3)
plt.plot(impact_time, predict_delta_dq_3)
plt.plot(impact_time, actual_delta_dq_3)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax24.set_ylabel('dq 3')
plt.setp(ax24.get_xticklabels(), visible=False)
ax24.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax24.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
ax24.legend()
ax24.locator_params(nbins=5, axis='y')
ax25 = plt.subplot(615)
plt.plot(time, dq_4)
plt.plot(impact_time, predict_delta_dq_4)
plt.plot(impact_time, actual_delta_dq_4)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax25.set_ylabel('dq 4')
plt.setp(ax25.get_xticklabels(), visible=False)
ax25.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax25.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
ax25.legend()
ax25.locator_params(nbins=5, axis='y')
ax26 = plt.subplot(616)
plt.plot(time, dq_5)
plt.plot(impact_time, predict_delta_dq_5)
plt.plot(impact_time, actual_delta_dq_5)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax26.set_ylabel('dq 5')
plt.xlabel('Time [s]')
plt.grid(True)
ax26.legend()
ax26.locator_params(nbins=5, axis='y')
ax26.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax26.autoscale(enable=True, axis='x', tight=True)
fig2.savefig("Delta_dq.pdf", bbox_inches='tight')
fig3, (ax31, ax32, ax33, ax34, ax35, ax36) = plt.subplots(nrows=6, ncols=1)
ax31 = plt.subplot(611)
ax31.set_ylabel('Joint 0')
plt.plot(impact_time, predict_delta_torque_0, label='Predicted torque jump')
plt.plot(impact_time, actual_delta_torque_0, label='Actual torque jump')
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.setp(ax31.get_xticklabels(), visible=False)
plt.grid(True)
#ax31.legend(prop=fontP)
ax31.legend(frameon=False, loc='upper left', prop=fontP)
plt.title("Joint torque jumps at the impact time")
ax31.locator_params(nbins=5, axis='y')
ax31.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax31.autoscale(enable=True, axis='x', tight=True)
ax32 = plt.subplot(612)
plt.plot(impact_time, predict_delta_torque_1)
plt.plot(impact_time, actual_delta_torque_1)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax32.set_ylabel('Joint 1')
plt.setp(ax32.get_xticklabels(), visible=False)
ax32.locator_params(nbins=5, axis='y')
ax32.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.grid(True)
ax32.legend()
ax32.autoscale(enable=True, axis='x', tight=True)
ax33 = plt.subplot(613)
plt.plot(impact_time, predict_delta_torque_2)
plt.plot(impact_time, actual_delta_torque_2)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax33.set_ylabel('Joint 2')
plt.setp(ax33.get_xticklabels(), visible=False)
ax33.locator_params(nbins=5, axis='y')
ax33.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.grid(True)
ax33.legend()
ax33.autoscale(enable=True, axis='x', tight=True)
ax34 = plt.subplot(614)
plt.plot(impact_time, predict_delta_torque_3)
plt.plot(impact_time, actual_delta_torque_3)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax34.set_ylabel('Joint 3')
plt.setp(ax34.get_xticklabels(), visible=False)
ax34.locator_params(nbins=5, axis='y')
ax34.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.grid(True)
ax34.legend()
ax34.autoscale(enable=True, axis='x', tight=True)
ax35 = plt.subplot(615)
plt.plot(impact_time, predict_delta_torque_4)
plt.plot(impact_time, actual_delta_torque_4)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax35.set_ylabel('Joint 4')
plt.setp(ax35.get_xticklabels(), visible=False)
ax35.locator_params(nbins=5, axis='y')
ax35.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.grid(True)
ax35.legend()
ax35.autoscale(enable=True, axis='x', tight=True)
ax36 = plt.subplot(616)
plt.plot(impact_time, predict_delta_torque_5)
plt.plot(impact_time, actual_delta_torque_5)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
ax36.set_ylabel('Joint 5')
plt.xlabel('Impact Time [s]')
plt.grid(True)
ax36.locator_params(nbins=5, axis='y')
ax36.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax36.legend()
ax36.autoscale(enable=True, axis='x', tight=True)
fig3.savefig("Delta_torque.pdf", bbox_inches='tight')
fig4, (ax41, ax42, ax43 )= plt.subplots(nrows=3, ncols=1)
ax41 = plt.subplot(311)
plt.plot(impact_time, predict_F_0, label='Predicted contact force jump')
plt.plot(impact_time, actual_F_0, label='Measured contact force jump')
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.setp(ax41.get_xticklabels(), visible=False)
ax41.locator_params(nbins=5, axis='y')
ax41.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax41.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
#ax41.legend(prop=fontP)
ax41.legend(frameon=False, loc='upper left', prop=fontP)
ax41.set_ylabel('Force x')
plt.title("Precited contact force jump Versus measured contact force ")
ax42 = plt.subplot(312)
plt.plot(impact_time, predict_F_1)
plt.plot(impact_time, actual_F_1)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.setp(ax42.get_xticklabels(), visible=False)
ax42.locator_params(nbins=5, axis='y')
ax42.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax42.autoscale(enable=True, axis='x', tight=True)
plt.grid(True)
ax42.set_ylabel('Force y')
ax43 = plt.subplot(313)
plt.plot(impact_time, predict_F_2)
plt.plot(impact_time, actual_F_2)
# plt.axvspan(impact_time_1[0], impact_time_1[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_2[0], impact_time_2[1], color='red', alpha=0.1)
# plt.axvspan(impact_time_3[0], impact_time_3[1], color='red', alpha=0.1)
plt.ylabel('Force z')
plt.grid(True)
ax43.locator_params(nbins=5, axis='y')
ax43.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax43.autoscale(enable=True, axis='x', tight=True)
fig4.savefig("impact_force.pdf", bbox_inches='tight')
plt.grid(True)
plt.xlabel('Impact Time [s]')
sol_len = len(sol_acc_0)
fig5, (ax51, ax52, ax53, ax54, ax55, ax56) = plt.subplots(nrows=6, ncols=1)
impact_length = len(impact_time)
# ax51 = plt.subplot(611)
# # plt.plot(impact_time, acc_0[-impact_length:], label='Actual joint acceleration')
# plt.plot(time, acc_0, label='Actual joint acceleration')
# # plt.plot(impact_time, sol_acc_0[-impact_length:], label='QP predict joint acceleration')
# plt.plot(time[:sol_len], sol_acc_0, label='QP predict joint acceleration')
# plt.plot(impact_time, predict_average_ddq_0, label='Average joint acceleration at impact')
# #ax51.legend(fancybox=True, framealpha=0.5)
# ax51.legend(frameon=False, loc='upper left', prop=fontP)
# ax51.set_ylabel('joint 0')
# plt.grid(True)
# ax51.locator_params(nbins=5, axis='y')
# ax51.autoscale(enable=True, axis='x', tight=True)
# ax51.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# plt.setp(ax51.get_xticklabels(), visible=False)
# plt.title("Joint accelerations [Radion/s^2]")
# ax52 = plt.subplot(612)
# plt.plot(time, acc_1)
# plt.plot(time[:sol_len], sol_acc_1)
# plt.plot(impact_time, predict_average_ddq_1)
# ax52.legend()
# ax52.set_ylabel('joint 1')
# plt.grid(True)
# ax52.locator_params(nbins=5, axis='y')
# ax52.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax52.autoscale(enable=True, axis='x', tight=True)
# plt.setp(ax52.get_xticklabels(), visible=False)
# ax53 = plt.subplot(613)
# plt.plot(time, acc_2)
# plt.plot(time[:sol_len], sol_acc_2)
# plt.plot(impact_time, predict_average_ddq_2)
# ax53.legend()
# ax53.set_ylabel('joint 2')
# plt.grid(True)
# ax53.locator_params(nbins=5, axis='y')
# ax53.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax53.autoscale(enable=True, axis='x', tight=True)
# plt.setp(ax53.get_xticklabels(), visible=False)
# ax54 = plt.subplot(614)
# plt.plot(time, acc_3)
# plt.plot(time[:sol_len], sol_acc_3)
# plt.plot(impact_time, predict_average_ddq_3)
# ax54.legend()
# ax54.set_ylabel('joint 3')
# plt.grid(True)
# ax54.locator_params(nbins=5, axis='y')
# ax54.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax54.autoscale(enable=True, axis='x', tight=True)
# plt.setp(ax54.get_xticklabels(), visible=False)
# ax55 = plt.subplot(615)
# plt.plot(time, acc_4)
# plt.plot(time[:sol_len], sol_acc_4)
# plt.plot(impact_time, predict_average_ddq_4)
# ax55.legend()
# ax55.set_ylabel('joint 4')
# plt.grid(True)
# ax55.locator_params(nbins=5, axis='y')
# ax55.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax55.autoscale(enable=True, axis='x', tight=True)
# plt.setp(ax55.get_xticklabels(), visible=False)
# ax56 = plt.subplot(616)
# plt.plot(time, acc_5)
# plt.plot(time[:sol_len], sol_acc_5)
# plt.plot(impact_time, predict_average_ddq_5)
# ax56.legend()
# ax56.set_ylabel('joint 5')
# plt.grid(True)
# ax56.locator_params(nbins=5, axis='y')
# ax56.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax56.autoscale(enable=True, axis='x', tight=True)
# plt.xlabel('Time [s]')
# plt.grid(True)
# fig5.savefig("joint_accelerations_comparison.pdf", bbox_inches='tight')
fig6, (ax61, ax62, ax63, ax64, ax65, ax66) = plt.subplots(nrows=6, ncols=1)
ax61 = plt.subplot(611)
plt.plot(time, acc_0, label='Actual joint acceleration')
plt.plot(time[:sol_len], sol_acc_0, label='QP predicted joint acceleration')
ax61.legend(frameon=False, loc='upper left', prop=fontP)
ax61.set_ylabel('joint 0')
plt.grid(True)
ax61.locator_params(nbins=5, axis='y')
ax61.autoscale(enable=True, axis='x', tight=True)
ax61.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.setp(ax61.get_xticklabels(), visible=False)
plt.title("Joint accelerations [Radion/s^2]")
ax62 = plt.subplot(612)
plt.plot(time, acc_1)
plt.plot(time[:sol_len], sol_acc_1)
ax62.set_ylabel('joint 1')
plt.grid(True)
ax62.locator_params(nbins=5, axis='y')
ax62.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax62.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax62.get_xticklabels(), visible=False)
ax63 = plt.subplot(613)
plt.plot(time, acc_2)
plt.plot(time[:sol_len], sol_acc_2)
ax63.set_ylabel('joint 2')
plt.grid(True)
ax63.locator_params(nbins=5, axis='y')
ax63.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax63.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax63.get_xticklabels(), visible=False)
ax64 = plt.subplot(614)
plt.plot(time, acc_3)
plt.plot(time[:sol_len], sol_acc_3)
ax64.set_ylabel('joint 3')
plt.grid(True)
ax64.locator_params(nbins=5, axis='y')
ax64.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax64.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax64.get_xticklabels(), visible=False)
ax65 = plt.subplot(615)
plt.plot(time, acc_4)
plt.plot(time[:sol_len], sol_acc_4)
ax65.set_ylabel('joint 4')
plt.grid(True)
ax65.locator_params(nbins=5, axis='y')
ax65.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax65.autoscale(enable=True, axis='x', tight=True)
plt.setp(ax65.get_xticklabels(), visible=False)
ax66 = plt.subplot(616)
plt.plot(time, acc_5)
plt.plot(time[:sol_len], sol_acc_5)
    ax66.set_ylabel('joint 5')
plt.grid(True)
ax66.locator_params(nbins=5, axis='y')
ax66.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax66.autoscale(enable=True, axis='x', tight=True)
#
#
# ax22.plot(time, acc_0, label='acc 0')
# ax22.plot(time, predict_average_ddq_0, label='acc 0')
# ax22.plot(time, acc_1, label='acc 1')
# ax22.plot(time, acc_2, label='acc 2')
# ax22.plot(time, acc_3, label='acc 3')
# ax22.plot(time, acc_4, label='acc 4')
# ax22.plot(time, acc_5, label='acc 5')
    # plt.ylabel('Joint accelerations [Radian/s^2]')
plt.xlabel('Time [s]')
plt.grid(True)
fig6.savefig("joint_accelerations_predicted_vs_simulated.pdf", bbox_inches='tight')
# fig6.savefig("joint_accelerations_predicted_vs_simulated.pdf")
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((985, 1002), 'numpy.load', 'np.load', (['fileName'], {}), '(fileName)\n', (992, 1002), True, 'import numpy as np\n'), ((1057, 1080), 'numpy.load', 'np.load', (['impactFileName'], {}), '(impactFileName)\n', (1064, 1080), True, 'import numpy as np\n'), ((1382, 1398), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (1396, 1398), False, 'from matplotlib.font_manager import FontProperties\n'), ((1892, 1904), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1902, 1904), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (2520, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2561), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Task Error"""'], {}), "('Task Error')\n", (2547, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2605), 'matplotlib.pyplot.title', 'plt.title', (['"""End-effector velocity Task"""'], {}), "('End-effector velocity Task')\n", (2575, 2605), True, 'import matplotlib.pyplot as plt\n'), ((2610, 2624), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2618, 2624), True, 'import matplotlib.pyplot as plt\n'), ((5720, 5750), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(6)', 'ncols': '(1)'}), '(nrows=6, ncols=1)\n', (5732, 5750), True, 'import matplotlib.pyplot as plt\n'), ((5763, 5779), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(611)'], {}), '(611)\n', (5774, 5779), True, 'import matplotlib.pyplot as plt\n'), ((5784, 5816), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dq_0'], {'label': '"""dq"""'}), "(time, dq_0, label='dq')\n", (5792, 5816), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5981), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_dq_0'], {'label': '"""Predicted delta dq"""'}), "(impact_time, predict_delta_dq_0, label='Predicted delta dq')\n", (5920, 5981), True, 'import matplotlib.pyplot as plt\n'), ((6272, 6337), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_dq_0'], {'label': '"""Actual delta dq"""'}), "(impact_time, actual_delta_dq_0, label='Actual delta dq')\n", (6280, 6337), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6453), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6447, 6453), True, 'import matplotlib.pyplot as plt\n'), ((6567, 6641), 'matplotlib.pyplot.title', 'plt.title', (['"""Joint velocities and joint velocities jump at the impact time"""'], {}), "('Joint velocities and joint velocities jump at the impact time')\n", (6576, 6641), True, 'import matplotlib.pyplot as plt\n'), ((6654, 6670), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(612)'], {}), '(612)\n', (6665, 6670), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6695), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dq_1'], {}), '(time, dq_1)\n', (6683, 6695), True, 'import matplotlib.pyplot as plt\n'), ((6700, 6741), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_dq_1'], {}), '(impact_time, predict_delta_dq_1)\n', (6708, 6741), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6786), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_dq_1'], {}), '(impact_time, actual_delta_dq_1)\n', (6754, 6786), True, 'import matplotlib.pyplot as plt\n'), ((7222, 7236), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7230, 7236), True, 'import matplotlib.pyplot as plt\n'), ((7459, 7475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(613)'], {}), '(613)\n', 
(7470, 7475), True, 'import matplotlib.pyplot as plt\n'), ((7480, 7500), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dq_2'], {}), '(time, dq_2)\n', (7488, 7500), True, 'import matplotlib.pyplot as plt\n'), ((7505, 7546), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_dq_2'], {}), '(impact_time, predict_delta_dq_2)\n', (7513, 7546), True, 'import matplotlib.pyplot as plt\n'), ((7551, 7591), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_dq_2'], {}), '(impact_time, actual_delta_dq_2)\n', (7559, 7591), True, 'import matplotlib.pyplot as plt\n'), ((8027, 8041), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8035, 8041), True, 'import matplotlib.pyplot as plt\n'), ((8097, 8113), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(614)'], {}), '(614)\n', (8108, 8113), True, 'import matplotlib.pyplot as plt\n'), ((8118, 8138), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dq_3'], {}), '(time, dq_3)\n', (8126, 8138), True, 'import matplotlib.pyplot as plt\n'), ((8143, 8184), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_dq_3'], {}), '(impact_time, predict_delta_dq_3)\n', (8151, 8184), True, 'import matplotlib.pyplot as plt\n'), ((8189, 8229), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_dq_3'], {}), '(impact_time, actual_delta_dq_3)\n', (8197, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8666, 8680), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8674, 8680), True, 'import matplotlib.pyplot as plt\n'), ((8754, 8770), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(615)'], {}), '(615)\n', (8765, 8770), True, 'import matplotlib.pyplot as plt\n'), ((8775, 8795), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dq_4'], {}), '(time, dq_4)\n', (8783, 8795), True, 'import matplotlib.pyplot as plt\n'), ((8800, 8841), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_dq_4'], {}), '(impact_time, predict_delta_dq_4)\n', (8808, 8841), True, 'import matplotlib.pyplot as plt\n'), ((8846, 8886), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_dq_4'], {}), '(impact_time, actual_delta_dq_4)\n', (8854, 8886), True, 'import matplotlib.pyplot as plt\n'), ((9322, 9336), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (9330, 9336), True, 'import matplotlib.pyplot as plt\n'), ((9410, 9426), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(616)'], {}), '(616)\n', (9421, 9426), True, 'import matplotlib.pyplot as plt\n'), ((9431, 9451), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'dq_5'], {}), '(time, dq_5)\n', (9439, 9451), True, 'import matplotlib.pyplot as plt\n'), ((9456, 9497), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_dq_5'], {}), '(impact_time, predict_delta_dq_5)\n', (9464, 9497), True, 'import matplotlib.pyplot as plt\n'), ((9502, 9542), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_dq_5'], {}), '(impact_time, actual_delta_dq_5)\n', (9510, 9542), True, 'import matplotlib.pyplot as plt\n'), ((9809, 9831), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (9819, 9831), True, 'import matplotlib.pyplot as plt\n'), ((9836, 9850), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (9844, 9850), True, 'import matplotlib.pyplot as plt\n'), ((10133, 10163), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(6)', 'ncols': '(1)'}), '(nrows=6, ncols=1)\n', (10145, 10163), True, 'import matplotlib.pyplot 
as plt\n'), ((10176, 10192), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(611)'], {}), '(611)\n', (10187, 10192), True, 'import matplotlib.pyplot as plt\n'), ((10228, 10304), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_torque_0'], {'label': '"""Predicted torque jump"""'}), "(impact_time, predict_delta_torque_0, label='Predicted torque jump')\n", (10236, 10304), True, 'import matplotlib.pyplot as plt\n'), ((10309, 10381), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_torque_0'], {'label': '"""Actual torque jump"""'}), "(impact_time, actual_delta_torque_0, label='Actual torque jump')\n", (10317, 10381), True, 'import matplotlib.pyplot as plt\n'), ((10672, 10686), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10680, 10686), True, 'import matplotlib.pyplot as plt\n'), ((10782, 10832), 'matplotlib.pyplot.title', 'plt.title', (['"""Joint torque jumps at the impact time"""'], {}), "('Joint torque jumps at the impact time')\n", (10791, 10832), True, 'import matplotlib.pyplot as plt\n'), ((11005, 11021), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(612)'], {}), '(612)\n', (11016, 11021), True, 'import matplotlib.pyplot as plt\n'), ((11026, 11071), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_torque_1'], {}), '(impact_time, predict_delta_torque_1)\n', (11034, 11071), True, 'import matplotlib.pyplot as plt\n'), ((11076, 11120), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_torque_1'], {}), '(impact_time, actual_delta_torque_1)\n', (11084, 11120), True, 'import matplotlib.pyplot as plt\n'), ((11548, 11562), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11556, 11562), True, 'import matplotlib.pyplot as plt\n'), ((11647, 11663), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(613)'], {}), '(613)\n', (11658, 11663), True, 'import matplotlib.pyplot as plt\n'), ((11668, 11713), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_torque_2'], {}), '(impact_time, predict_delta_torque_2)\n', (11676, 11713), True, 'import matplotlib.pyplot as plt\n'), ((11718, 11762), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_torque_2'], {}), '(impact_time, actual_delta_torque_2)\n', (11726, 11762), True, 'import matplotlib.pyplot as plt\n'), ((12190, 12204), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12198, 12204), True, 'import matplotlib.pyplot as plt\n'), ((12289, 12305), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(614)'], {}), '(614)\n', (12300, 12305), True, 'import matplotlib.pyplot as plt\n'), ((12310, 12355), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_torque_3'], {}), '(impact_time, predict_delta_torque_3)\n', (12318, 12355), True, 'import matplotlib.pyplot as plt\n'), ((12360, 12404), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_torque_3'], {}), '(impact_time, actual_delta_torque_3)\n', (12368, 12404), True, 'import matplotlib.pyplot as plt\n'), ((12832, 12846), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12840, 12846), True, 'import matplotlib.pyplot as plt\n'), ((12931, 12947), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(615)'], {}), '(615)\n', (12942, 12947), True, 'import matplotlib.pyplot as plt\n'), ((12952, 12997), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_torque_4'], {}), '(impact_time, predict_delta_torque_4)\n', (12960, 12997), True, 'import matplotlib.pyplot as plt\n'), ((13002, 
13046), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_torque_4'], {}), '(impact_time, actual_delta_torque_4)\n', (13010, 13046), True, 'import matplotlib.pyplot as plt\n'), ((13474, 13488), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (13482, 13488), True, 'import matplotlib.pyplot as plt\n'), ((13573, 13589), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(616)'], {}), '(616)\n', (13584, 13589), True, 'import matplotlib.pyplot as plt\n'), ((13594, 13639), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_delta_torque_5'], {}), '(impact_time, predict_delta_torque_5)\n', (13602, 13639), True, 'import matplotlib.pyplot as plt\n'), ((13644, 13688), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_delta_torque_5'], {}), '(impact_time, actual_delta_torque_5)\n', (13652, 13688), True, 'import matplotlib.pyplot as plt\n'), ((13958, 13987), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Impact Time [s]"""'], {}), "('Impact Time [s]')\n", (13968, 13987), True, 'import matplotlib.pyplot as plt\n'), ((13992, 14006), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (14000, 14006), True, 'import matplotlib.pyplot as plt\n'), ((14275, 14305), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(1)'}), '(nrows=3, ncols=1)\n', (14287, 14305), True, 'import matplotlib.pyplot as plt\n'), ((14317, 14333), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (14328, 14333), True, 'import matplotlib.pyplot as plt\n'), ((14338, 14410), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_F_0'], {'label': '"""Predicted contact force jump"""'}), "(impact_time, predict_F_0, label='Predicted contact force jump')\n", (14346, 14410), True, 'import matplotlib.pyplot as plt\n'), ((14415, 14485), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_F_0'], {'label': '"""Measured contact force jump"""'}), "(impact_time, actual_F_0, label='Measured contact force jump')\n", (14423, 14485), True, 'import matplotlib.pyplot as plt\n'), ((15092, 15106), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (15100, 15106), True, 'import matplotlib.pyplot as plt\n'), ((15233, 15304), 'matplotlib.pyplot.title', 'plt.title', (['"""Precited contact force jump Versus measured contact force """'], {}), "('Precited contact force jump Versus measured contact force ')\n", (15242, 15304), True, 'import matplotlib.pyplot as plt\n'), ((15317, 15333), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (15328, 15333), True, 'import matplotlib.pyplot as plt\n'), ((15338, 15372), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_F_1'], {}), '(impact_time, predict_F_1)\n', (15346, 15372), True, 'import matplotlib.pyplot as plt\n'), ((15377, 15410), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'actual_F_1'], {}), '(impact_time, actual_F_1)\n', (15385, 15410), True, 'import matplotlib.pyplot as plt\n'), ((15861, 15875), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (15869, 15875), True, 'import matplotlib.pyplot as plt\n'), ((15919, 15935), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (15930, 15935), True, 'import matplotlib.pyplot as plt\n'), ((15940, 15974), 'matplotlib.pyplot.plot', 'plt.plot', (['impact_time', 'predict_F_2'], {}), '(impact_time, predict_F_2)\n', (15948, 15974), True, 'import matplotlib.pyplot as plt\n'), ((15979, 16012), 'matplotlib.pyplot.plot', 'plt.plot', 
(['impact_time', 'actual_F_2'], {}), '(impact_time, actual_F_2)\n', (15987, 16012), True, 'import matplotlib.pyplot as plt\n'), ((16251, 16272), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Force z"""'], {}), "('Force z')\n", (16261, 16272), True, 'import matplotlib.pyplot as plt\n'), ((16277, 16291), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (16285, 16291), True, 'import matplotlib.pyplot as plt\n'), ((16516, 16530), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (16524, 16530), True, 'import matplotlib.pyplot as plt\n'), ((16535, 16564), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Impact Time [s]"""'], {}), "('Impact Time [s]')\n", (16545, 16564), True, 'import matplotlib.pyplot as plt\n'), ((16645, 16675), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(6)', 'ncols': '(1)'}), '(nrows=6, ncols=1)\n', (16657, 16675), True, 'import matplotlib.pyplot as plt\n'), ((19975, 20005), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(6)', 'ncols': '(1)'}), '(nrows=6, ncols=1)\n', (19987, 20005), True, 'import matplotlib.pyplot as plt\n'), ((20017, 20033), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(611)'], {}), '(611)\n', (20028, 20033), True, 'import matplotlib.pyplot as plt\n'), ((20038, 20094), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'acc_0'], {'label': '"""Actual joint acceleration"""'}), "(time, acc_0, label='Actual joint acceleration')\n", (20046, 20094), True, 'import matplotlib.pyplot as plt\n'), ((20099, 20175), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:sol_len]', 'sol_acc_0'], {'label': '"""QP predicted joint acceleration"""'}), "(time[:sol_len], sol_acc_0, label='QP predicted joint acceleration')\n", (20107, 20175), True, 'import matplotlib.pyplot as plt\n'), ((20272, 20286), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (20280, 20286), True, 'import matplotlib.pyplot as plt\n'), ((20504, 20549), 'matplotlib.pyplot.title', 'plt.title', (['"""Joint accelerations [Radion/s^2]"""'], {}), "('Joint accelerations [Radion/s^2]')\n", (20513, 20549), True, 'import matplotlib.pyplot as plt\n'), ((20562, 20578), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(612)'], {}), '(612)\n', (20573, 20578), True, 'import matplotlib.pyplot as plt\n'), ((20583, 20604), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'acc_1'], {}), '(time, acc_1)\n', (20591, 20604), True, 'import matplotlib.pyplot as plt\n'), ((20609, 20644), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:sol_len]', 'sol_acc_1'], {}), '(time[:sol_len], sol_acc_1)\n', (20617, 20644), True, 'import matplotlib.pyplot as plt\n'), ((20680, 20694), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (20688, 20694), True, 'import matplotlib.pyplot as plt\n'), ((20919, 20935), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(613)'], {}), '(613)\n', (20930, 20935), True, 'import matplotlib.pyplot as plt\n'), ((20940, 20961), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'acc_2'], {}), '(time, acc_2)\n', (20948, 20961), True, 'import matplotlib.pyplot as plt\n'), ((20966, 21001), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:sol_len]', 'sol_acc_2'], {}), '(time[:sol_len], sol_acc_2)\n', (20974, 21001), True, 'import matplotlib.pyplot as plt\n'), ((21037, 21051), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (21045, 21051), True, 'import matplotlib.pyplot as plt\n'), ((21276, 21292), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(614)'], {}), '(614)\n', (21287, 21292), True, 'import 
matplotlib.pyplot as plt\n'), ((21297, 21318), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'acc_3'], {}), '(time, acc_3)\n', (21305, 21318), True, 'import matplotlib.pyplot as plt\n'), ((21323, 21358), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:sol_len]', 'sol_acc_3'], {}), '(time[:sol_len], sol_acc_3)\n', (21331, 21358), True, 'import matplotlib.pyplot as plt\n'), ((21394, 21408), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (21402, 21408), True, 'import matplotlib.pyplot as plt\n'), ((21633, 21649), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(615)'], {}), '(615)\n', (21644, 21649), True, 'import matplotlib.pyplot as plt\n'), ((21654, 21675), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'acc_4'], {}), '(time, acc_4)\n', (21662, 21675), True, 'import matplotlib.pyplot as plt\n'), ((21680, 21715), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:sol_len]', 'sol_acc_4'], {}), '(time[:sol_len], sol_acc_4)\n', (21688, 21715), True, 'import matplotlib.pyplot as plt\n'), ((21751, 21765), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (21759, 21765), True, 'import matplotlib.pyplot as plt\n'), ((21990, 22006), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(616)'], {}), '(616)\n', (22001, 22006), True, 'import matplotlib.pyplot as plt\n'), ((22011, 22032), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'acc_5'], {}), '(time, acc_5)\n', (22019, 22032), True, 'import matplotlib.pyplot as plt\n'), ((22037, 22072), 'matplotlib.pyplot.plot', 'plt.plot', (['time[:sol_len]', 'sol_acc_5'], {}), '(time[:sol_len], sol_acc_5)\n', (22045, 22072), True, 'import matplotlib.pyplot as plt\n'), ((22108, 22122), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (22116, 22122), True, 'import matplotlib.pyplot as plt\n'), ((22685, 22707), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (22695, 22707), True, 'import matplotlib.pyplot as plt\n'), ((22712, 22726), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (22720, 22726), True, 'import matplotlib.pyplot as plt\n'), ((22895, 22905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22903, 22905), True, 'import matplotlib.pyplot as plt\n'), ((5880, 5906), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (5898, 5906), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((7084, 7110), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (7102, 7110), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((7889, 7915), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (7907, 7915), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((8579, 8605), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (8597, 8605), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((9236, 9262), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (9254, 9262), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((9947, 9973), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (9965, 9973), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((10911, 10937), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (10929, 10937), False, 'from matplotlib.ticker import 
FormatStrFormatter\n'), ((11516, 11542), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (11534, 11542), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((12158, 12184), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (12176, 12184), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((12800, 12826), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (12818, 12826), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((13442, 13468), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (13460, 13468), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((14085, 14111), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (14103, 14111), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((15006, 15032), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (15024, 15032), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((15775, 15801), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (15793, 15801), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((16370, 16396), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (16388, 16396), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((20419, 20445), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (20437, 20445), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((20773, 20799), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (20791, 20799), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((21130, 21156), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (21148, 21156), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((21487, 21513), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (21505, 21513), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((21844, 21870), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (21862, 21870), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((22201, 22227), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (22219, 22227), False, 'from matplotlib.ticker import FormatStrFormatter\n')]
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.tektronix.tds2024b
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the drivers to control an oscilloscope.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import struct
import numpy as np
from lantz.feat import Feat
from lantz.action import Action
from lantz import MessageBasedDriver
class TDS2024(MessageBasedDriver):
"""Tektronix TDS2024 200 MHz 4 Channel Digital Real-Time Oscilloscope
"""
MANUFACTURER_ID = '0x699'
@Action()
def autoconf(self):
"""Autoconfig oscilloscope.
"""
self.send(':AUTOS EXEC')
def initialize(self):
"""initiate.
"""
self.send(':ACQ:STATE ON')
return "Init"
@Feat()
def idn(self):
"""IDN.
"""
return self.query('ID?')
@Feat()
def trigger(self):
"""Trigger state.
"""
return self.query(':TRIG:STATE?')
@trigger.setter
def trigger(self, mode):
"""Set trigger state.
"""
        self.send('TRIG:MAIN:MODE {}'.format(mode))
@Action()
def triggerlevel(self):
"""Set trigger level to 50% of the minimum adn maximum
values of the signal.
"""
self.send('TRIG:MAIn SATLevel')
@Action()
def forcetrigger(self):
"""Force trigger event.
"""
self.send('TRIG FORCe')
@Action()
def datasource(self, chn):
"""Selects channel.
"""
self.send(':DATA:SOURCE CH{}'.format(chn))
@Action()
def acqparams(self):
""" X/Y Increment Origin and Offset.
"""
commands = 'XZE?;XIN?;YZE?;YMU?;YOFF?'
#params = self.query(":WFMPRE:XZE?;XIN?;YZE?;YMU?;YOFF?;")
params = self.query(':WFMPRE:{}'.format(commands))
params = {k: float(v) for k, v in zip(commands.split(';'), params.split(';'))}
return params
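    # The WFMPRE mnemonics queried above are the standard Tektronix waveform
    # preamble fields and map to the scaling constants used in curv():
    # XZE (x zero, s), XIN (x increment, s/point), YZE (y zero),
    # YMU (y multiplier) and YOFF (y offset, in digitizer levels).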
@Action()
def dataencoding(self):
"""Set data encoding.
"""
self.send(':DAT:ENC RPB;WID 2;')
return "Set data encoding"
@Action()
def curv(self):
"""Get data.
Returns:
            xdata, ydata as lists
"""
self.dataencoding()
self.send('CURV?')
answer = self.recv()
        # The reply is an IEEE 488.2 definite-length block:
        # '#' + <numdigs> + <bytecount> + <payload of 2-byte samples>.
        numdigs = int(answer[1])
        bytecount = int(answer[2:2+numdigs])
        data = answer[2+numdigs:]
        length = bytecount // 2
        data = struct.unpack("{}H".format(length), data[0:2*length])
params = self.acqparams()
data = np.array(list(map(float, data)))
yoff = params['YOFF?']
ymu = params['YMU?']
yze = params['YZE?']
xin = params['XIN?']
xze = params['XZE?']
        ydata = (data - yoff) * ymu + yze
        xdata = np.arange(len(data)) * xin + xze
        return list(xdata), list(ydata)
def _measure(self, type, source):
self.send('MEASUrement:IMMed:TYPe {}'.format(type))
self.send('MEASUrement:IMMed:SOUrce1 CH{}'.format(source))
self.send('MEASUrement:IMMed:VALue?')
return self.recv()
@Action()
def measure_frequency(self, channel):
"""Get immediate measurement result.
"""
return self._measure('FREQuency', channel)
@Action()
def measure_min(self, channel):
"""Get immediate measurement result.
"""
return self._measure('MINImum', channel)
@Action()
    def measure_max(self, channel):
"""Get immediate measurement result.
"""
return self._measure('MAXImum', channel)
@Action()
    def measure_mean(self, channel):
"""Get immediate measurement result.
"""
return self._measure('MEAN', channel)
if __name__ == '__main__':
import argparse
import csv
parser = argparse.ArgumentParser(description='Measure using TDS2024 and dump to screen')
parser.add_argument('-p', '--port', default='USB0::0x0699::0x036A::C048617',
help='USB port')
parser.add_argument('-v', '--view', action='store_true', default=False,
help='View ')
    parser.add_argument('channels', metavar='channels', type=int, nargs='*',
help='Channels to use')
    parser.add_argument('--output', type=argparse.FileType('w'), default='-')
args = parser.parse_args()
osc = TDS2024(args.port)
osc.initialize()
print(osc.idn)
print(osc.trigger)
osc.forcetrigger()
osc.triggerlevel()
osc.trigger = "AUTO"
print(osc.trigger)
#osc.autoconf()
params = osc.acqparams()
if args.view:
import matplotlib.pyplot as plt
import numpy as np
with args.output as fp:
writer = csv.writer(fp)
        writer.writerow(('Channel', 'Freq', 'Max', 'Min', 'Mean'))
for channel in args.channels or range(1, 4):
osc.datasource(channel)
            writer.writerow([channel,
                             osc.measure_frequency(channel),
                             osc.measure_max(channel),
                             osc.measure_min(channel),
                             osc.measure_mean(channel)])
if args.view:
x, y = osc.curv()
x = np.array(x)
x = x - x.min()
y = np.array(y)
plt.plot(x, y)
if args.view:
plt.show()
|
[
"matplotlib.pyplot.show",
"csv.writer",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"lantz.feat.Feat",
"numpy.array",
"lantz.action.Action",
"argparse.FileType"
] |
[((570, 578), 'lantz.action.Action', 'Action', ([], {}), '()\n', (576, 578), False, 'from lantz.action import Action\n'), ((807, 813), 'lantz.feat.Feat', 'Feat', ([], {}), '()\n', (811, 813), False, 'from lantz.feat import Feat\n'), ((900, 906), 'lantz.feat.Feat', 'Feat', ([], {}), '()\n', (904, 906), False, 'from lantz.feat import Feat\n'), ((1161, 1169), 'lantz.action.Action', 'Action', ([], {}), '()\n', (1167, 1169), False, 'from lantz.action import Action\n'), ((1349, 1357), 'lantz.action.Action', 'Action', ([], {}), '()\n', (1355, 1357), False, 'from lantz.action import Action\n'), ((1468, 1476), 'lantz.action.Action', 'Action', ([], {}), '()\n', (1474, 1476), False, 'from lantz.action import Action\n'), ((1605, 1613), 'lantz.action.Action', 'Action', ([], {}), '()\n', (1611, 1613), False, 'from lantz.action import Action\n'), ((1984, 1992), 'lantz.action.Action', 'Action', ([], {}), '()\n', (1990, 1992), False, 'from lantz.action import Action\n'), ((2145, 2153), 'lantz.action.Action', 'Action', ([], {}), '()\n', (2151, 2153), False, 'from lantz.action import Action\n'), ((3162, 3170), 'lantz.action.Action', 'Action', ([], {}), '()\n', (3168, 3170), False, 'from lantz.action import Action\n'), ((3327, 3335), 'lantz.action.Action', 'Action', ([], {}), '()\n', (3333, 3335), False, 'from lantz.action import Action\n'), ((3484, 3492), 'lantz.action.Action', 'Action', ([], {}), '()\n', (3490, 3492), False, 'from lantz.action import Action\n'), ((3637, 3645), 'lantz.action.Action', 'Action', ([], {}), '()\n', (3643, 3645), False, 'from lantz.action import Action\n'), ((3860, 3939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Measure using TDS2024 and dump to screen"""'}), "(description='Measure using TDS2024 and dump to screen')\n", (3883, 3939), False, 'import argparse\n'), ((4783, 4797), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (4793, 4797), False, 'import csv\n'), ((5407, 5417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5415, 5417), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4368), 'argparse.FileType', 'argparse.FileType', (['"""wb"""', '(0)'], {}), "('wb', 0)\n", (4359, 4368), False, 'import argparse\n'), ((5273, 5284), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5281, 5284), True, 'import numpy as np\n'), ((5337, 5348), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5345, 5348), True, 'import numpy as np\n'), ((5365, 5379), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (5373, 5379), True, 'import matplotlib.pyplot as plt\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Accuracy(nn.Module):
def __init__(self, topk=(1,)):
super(Accuracy, self).__init__()
self.topk = topk
def forward(self, output, target):
"""Computes the precision@k for the specified values of k"""
maxk = max(self.topk)
batch_size = target.size(0)
pred = output.topk(maxk, 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in self.topk:
            # reshape (not view): pred.t() makes `correct` non-contiguous
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
return res
class ConfusionMeter( object ):
"""Maintains a confusion matrix for a given calssification problem.
https://github.com/pytorch/tnt/tree/master/torchnet/meter
    The ConfusionMeter constructs a confusion matrix for a multi-class
    classification problem. It does not support multi-label, multi-class problems:
    for such problems, please use MultiLabelConfusionMeter.
Args:
k (int): number of classes in the classification problem
        normalized (boolean): Determines whether the confusion matrix
            is normalized
"""
def __init__(self, k, normalized=False):
super(ConfusionMeter, self).__init__()
self.conf = np.ndarray((k, k), dtype=np.int32)
self.normalized = normalized
self.k = k
self.reset()
def reset(self):
self.conf.fill(0)
def add(self, predicted, target):
"""Computes the confusion matrix of K x K size where K is no of classes
Args:
predicted (tensor): Can be an N x K tensor of predicted scores obtained from
the model for N examples and K classes or an N-tensor of
integer values between 0 and K-1.
            target (tensor): Can be an N-tensor of integer values between
                0 and K-1 or an N x K tensor, where targets are
                assumed to be provided as one-hot vectors
"""
predicted = predicted.cpu().numpy()
target = target.cpu().numpy()
assert predicted.shape[0] == target.shape[0], \
'number of targets and predicted outputs do not match'
if np.ndim(predicted) != 1:
assert predicted.shape[1] == self.k, \
'number of predictions does not match size of confusion matrix'
predicted = np.argmax(predicted, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
                'predicted values are not between 0 and k-1'
onehot_target = np.ndim(target) != 1
if onehot_target:
assert target.shape[1] == self.k, \
'Onehot target does not match size of confusion matrix'
assert (target >= 0).all() and (target <= 1).all(), \
'in one-hot encoding, target values should be 0 or 1'
assert (target.sum(1) == 1).all(), \
'multi-label setting is not supported'
target = np.argmax(target, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
'predicted values are not between 0 and k-1'
# hack for bincounting 2 arrays together
x = predicted + self.k * target
bincount_2d = np.bincount(x.astype(np.int32),
minlength=self.k ** 2)
assert bincount_2d.size == self.k ** 2
conf = bincount_2d.reshape((self.k, self.k))
self.conf += conf
def value(self):
"""
Returns:
            Confusion matrix of K rows and K columns, where rows correspond
            to ground-truth targets and columns correspond to predicted
            targets.
"""
if self.normalized:
conf = self.conf.astype(np.float32)
return conf / conf.sum(1).clip(min=1e-12)[:, None]
else:
return self.conf
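# Minimal usage sketch (illustrative): exercises both metrics above on
# random data; the shapes follow the docstrings, everything else is assumed.
if __name__ == '__main__':
    logits = torch.randn(8, 5)
    labels = torch.randint(0, 5, (8,))
    top1, = Accuracy(topk=(1,))(logits, labels)
    print('top-1 accuracy: %.1f%%' % top1.item())
    meter = ConfusionMeter(k=5)
    meter.add(logits, labels)  # N x K scores are argmax'd internally
    print(meter.value())       # rows: ground truth, columns: predictions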
|
[
"numpy.ndarray",
"numpy.ndim",
"numpy.argmax"
] |
[((1420, 1454), 'numpy.ndarray', 'np.ndarray', (['(k, k)'], {'dtype': 'np.int32'}), '((k, k), dtype=np.int32)\n', (1430, 1454), True, 'import numpy as np\n'), ((2379, 2397), 'numpy.ndim', 'np.ndim', (['predicted'], {}), '(predicted)\n', (2386, 2397), True, 'import numpy as np\n'), ((2559, 2582), 'numpy.argmax', 'np.argmax', (['predicted', '(1)'], {}), '(predicted, 1)\n', (2568, 2582), True, 'import numpy as np\n'), ((2757, 2772), 'numpy.ndim', 'np.ndim', (['target'], {}), '(target)\n', (2764, 2772), True, 'import numpy as np\n'), ((3185, 3205), 'numpy.argmax', 'np.argmax', (['target', '(1)'], {}), '(target, 1)\n', (3194, 3205), True, 'import numpy as np\n')]
|
import numpy as np
# Constants
BIAS = 1
ITERATIONS = 1000
class MulticlassPerceptron:
    '''A simple multi-class perceptron classifier.'''
    '''
    Initializer function that takes the possible classes for the
    data set.
    '''
def __init__(self, possible_classes):
self.learning_rate = 0.1
self.X_train = None
self.y_train = None
self.classes = set(possible_classes)
self.number_of_features = 0
self.number_of_classes = len(self.classes)
self.weights = {}
'''
    Function that initializes the weights dictionary with zeros, with
    the same length as the feature vector + 1 for the BIAS.
Each class will have its own weights in the dictionary.
'''
def initialize_weights(self):
for data_class in self.classes:
self.weights[str(data_class)] = np.array([0.0] * (self.number_of_features + 1))
'''
Compute the predicted outcome for each single instance in the data set.
The outcome is computed as follows:
- For every class in the total number of classes,
compute the product of that class weight vector, with the
data instance.
- Return the class that causes the biggest activation, meaning
the biggest product among all the different classes.
'''
def find_closest_class(self, training_instance_data):
        # start below any attainable score so a class is always selected,
        # even when every activation is negative
        max_prediction = -np.inf
        max_prediction_class = None
for perceptron_class in self.classes:
prediction = np.dot(training_instance_data, self.weights[str(perceptron_class)])
if prediction >= max_prediction:
max_prediction = prediction
max_prediction_class = perceptron_class
return max_prediction_class
'''
Main Algorithm for the Multi-class Perceptron is as follows:
1. Initialize the weights dictionary with p weight vectors,
where p is the number of distinct classes or outcomes in the
data set.
2. Train the weight vector on the data set by a predefined
number of iterations.
3. In each iteration, compute the predicted outcome for each single
instance in the data set. The outcome is computed as follows:
- For every weight vector in the weights dictionary,
compute the product of that weight vector, with the
data instance.
- Return the class that causes the biggest activation, meaning
the biggest product among all the different classes.
4. If the prediction class is the same as the expected class, then
do nothing.
5. If the prediction class is different from the expected class, then:
- Add the feature vector to the weights of the expected class.
- Subtract the feature vector from the weights of the predicted (wrong) class.
'''
def train_perceptron(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
self.number_of_features = X_train.shape[1]
number_of_training_instances = X_train.shape[0]
self.initialize_weights()
for i in range(ITERATIONS):
for j in range(number_of_training_instances):
training_instance_data = X_train[j].A[0].T.data
training_instance_data = np.append(training_instance_data, BIAS)
y_pred = self.find_closest_class(training_instance_data)
if y_pred != y_train[j]:
self.weights[str(int(y_train[j]))] += training_instance_data
self.weights[str(int(y_pred))] -= training_instance_data
return self.weights
'''
Function that takes a list of test instances, and returns an
array of predictions for those instances.
'''
def predict(self, test_instances):
number_of_test_instances = test_instances.shape[0]
predictions = np.array([])
for j in range(number_of_test_instances):
training_instance_data = test_instances[j].A[0].T.data
training_instance_data = np.append(training_instance_data, BIAS)
y_pred = self.find_closest_class(training_instance_data)
predictions = np.append(predictions, y_pred)
return predictions
'''
Function that returns the score (accuracy) of the Multi-class Perceptron.
It takes a testing feature array and a testing outcome array.
The score is calculated by returning the number of correct
classifications out of the total number of testing instances.
'''
def score(self, X_test, y_test):
number_of_test_instances = X_test.shape[0]
y_predictions = self.predict(X_test)
correct_prediction_counter = 0
for i in range(number_of_test_instances):
if y_predictions[i] == y_test[i]:
correct_prediction_counter += 1
accuracy = correct_prediction_counter / number_of_test_instances
return accuracy
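# Minimal usage sketch (illustrative): learns the OR function. np.matrix is
# used so that the X_train[j].A[0] indexing in train_perceptron works as
# written; the data and class labels are assumptions for the demo only.
if __name__ == '__main__':
    X = np.matrix([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    y = np.array([0, 1, 1, 1])
    classifier = MulticlassPerceptron(possible_classes=[0, 1])
    classifier.train_perceptron(X, y)
    print('training accuracy:', classifier.score(X, y))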
|
[
"numpy.append",
"numpy.array"
] |
[((3810, 3822), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3818, 3822), True, 'import numpy as np\n'), ((810, 857), 'numpy.array', 'np.array', (['([0.0] * (self.number_of_features + 1))'], {}), '([0.0] * (self.number_of_features + 1))\n', (818, 857), True, 'import numpy as np\n'), ((3977, 4016), 'numpy.append', 'np.append', (['training_instance_data', 'BIAS'], {}), '(training_instance_data, BIAS)\n', (3986, 4016), True, 'import numpy as np\n'), ((4112, 4142), 'numpy.append', 'np.append', (['predictions', 'y_pred'], {}), '(predictions, y_pred)\n', (4121, 4142), True, 'import numpy as np\n'), ((3221, 3260), 'numpy.append', 'np.append', (['training_instance_data', 'BIAS'], {}), '(training_instance_data, BIAS)\n', (3230, 3260), True, 'import numpy as np\n')]
|
import numpy as np
from numpy import sin, cos, tan, pi, sqrt
from numpy.core.defchararray import index
import yaml
import os
from collections import OrderedDict
# import imp
# import welleng.error
from ..utils import NEV_to_HLA
# since this is running on different OS flavors
PATH = os.path.dirname(__file__)
TOOL_INDEX = os.path.join(
'', *[PATH, 'tool_index.yaml']
)
ACCURACY = 1e-6
class ToolError:
def __init__(
self,
error,
model
):
"""
Class using the ISCWSA listed tool errors to determine well bore
uncertainty.
Parameters
----------
        error: an initiated welleng.error.ErrorModel object
model: string
Returns
-------
errors: welleng.error.ErrorModel object
A populated ErrorModel object for the selected error model.
"""
error.__init__
self.e = error
self.errors = {}
filename = os.path.join(
'', *[PATH, 'tool_codes', f"{model}.yaml"]
)
with open(filename, 'r') as file:
self.em = yaml.safe_load(file)
# for gyro tools the continuous survey errors need to be done last
self.em['codes'] = OrderedDict(self.em['codes'])
gyro_continuous = ['GXY-GD', 'GXY-GRW']
gyro_stationary = ['GXY-B1S', 'GXY-B2S', 'GXY-G4', 'GXY-RN']
        self.gyro_continuous = []
        for tool in gyro_continuous:
            if tool in self.em['codes']:
                self.em['codes'].move_to_end(tool)
                self.gyro_continuous.append(tool)
self.gyro_stationary = [
tool for tool in gyro_stationary
if tool in self.em['codes']
]
# self.em = iscwsa_error_models[model]
# iscwsa_error_models = yaml.safe_load(file)
# self.em = iscwsa_error_models[model]
if 'Default Tortusity (rad/m)' in self.em['header']:
self.tortuosity = self.em['header']['Default Tortusity (rad/m)']
elif 'XCL Tortuosity' in self.em['header']:
# assuming that this is always 1 deg / 100 ft but this might not
# be the case
# TODO use pint to handle this string inputs
self.tortuosity = (np.radians(1.) / 100) * 3.281
else:
self.tortuosity = None
# if model == "iscwsa_mwd_rev5":
# if model == "ISCWSA MWD Rev5":
# assert self.tortuosity is not None, (
# "No default tortuosity defined in model header"
# )
if "Inclination Range Max" in self.em['header'].keys():
value = np.radians(float(
self.em['header']['Inclination Range Max'].split(" ")[0]
))
assert np.amax(self.e.survey.inc_rad) < value, (
"Model not suitable for this well path inclination"
)
self._initiate_func_dict()
for err in self.em['codes']:
# func = self._get_the_func_out(err)
func = self.em['codes'][err]['function']
mag = self.em['codes'][err]['magnitude']
propagation = self.em['codes'][err]['propagation']
self.errors[err] = (
self.call_func(
code=err,
func=func,
error=self.e,
mag=mag,
propagation=propagation,
tortuosity=self.tortuosity,
header=self.em['header'],
errors=self
)
)
self.cov_NEVs = np.zeros((3, 3, len(self.e.survey_rad)))
for _, value in self.errors.items():
self.cov_NEVs += value.cov_NEV
self.cov_HLAs = NEV_to_HLA(self.e.survey_rad, self.cov_NEVs)
def _get_the_func_out(self, err):
if err in self.exceptional_funcs:
func = self.exceptional_funcs[err]
else:
func = self.em['codes'][err]['function']
return func
def call_func(self, code, func, error, mag, propagation, **kwargs):
"""
        Function that dispatches an error function label to its
        implementation via the function dictionary.
"""
assert func in self.func_dict, f"no function for function {func}"
return self.func_dict[func](code, error, mag, propagation, **kwargs)
def _initiate_func_dict(self):
"""
This dictionary will need to be updated if/when additional error
functions are added to the model.
"""
self.func_dict = {
'ABXY_TI1': ABXY_TI1,
'ABXY_TI2': ABXY_TI2,
'ABZ': ABZ,
'AMIL': AMIL,
'ASXY_TI1': ASXY_TI1,
'ASXY_TI2': ASXY_TI2,
'ASXY_TI3': ASXY_TI3,
'ASZ': ASZ,
'DBH': DBH,
'AZ': AZ,
'DREF': DREF,
'DSF': DSF,
'DST': DST,
'MBXY_TI1': MBXY_TI1,
'MBXY_TI2': MBXY_TI2,
'MBZ': MBZ,
'MSXY_TI1': MSXY_TI1,
'MSXY_TI2': MSXY_TI2,
'MSXY_TI3': MSXY_TI3,
'MSZ': MSZ,
'SAG': SAG,
'XYM1': XYM1,
'XYM2': XYM2,
'XYM3': XYM3,
'XYM4': XYM4,
'SAGE': SAGE,
'XCL': XCL, # requires an exception
'XYM3L': XYM3L, # looks like there's a mistake in the ISCWSA model
'XYM4L': XYM4L,
'XCLA': XCLA,
'XCLH': XCLH,
'XYM3E': XYM3E, # Needs QAQC
'XYM4E': XYM4E, # Need QAQC
'ASIXY_TI1': ASIXY_TI1, # Needs QAQC
'ASIXY_TI2': ASIXY_TI2, # Needs QAQC
'ASIXY_TI3': ASIXY_TI3, # Needs QAQC
'ABIXY_TI1': ABIXY_TI1, # Needs QAQC
'ABIXY_TI2': ABIXY_TI2, # Needs QAQC
'ABIZ': ABIZ, # Needs QAQC
'ASIZ': ASIZ, # Needs QAQC
'MBIXY_TI1': MBIXY_TI1, # Needs QAQC
'MBIXY_TI2': MBIXY_TI2, # Needs QAQC
'MDI': MDI, # Needs QAQC
'AXYZ_MIS': AXYZ_MIS, # Needs QAQC
'AXYZ_SF': AXYZ_SF, # Needs QAQC
'AXYZ_ZB': AXYZ_ZB, # Needs QAQC
'GXY_B1': GXY_B1, # Needs QAQC
'GXY_B2': GXY_B2, # Needs QAQC
'GXY_G1': GXY_G1, # Needs QAQC
'GXY_G4': GXY_G4, # Needs QAQC
'GXY_RN': GXY_RN, # Needs QAQC
'GXY_GD': GXY_GD, # Needs QAQC
'GXY_GRW': GXY_GRW, # Needs QAQC
'MFI': MFI, # Needs QAQC
'MSIXY_TI1': MSIXY_TI1, # Needs QAQC
'MSIXY_TI2': MSIXY_TI1, # Needs QAQC
'MSIXY_TI3': MSIXY_TI1, # Needs QAQC
'AMID': AMID, # Needs QAQC
'CNA': CNA, # Needs QAQC
'CNI': CNI, # Needs QAQC
}
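# Usage sketch (illustrative, not a definitive API): given an initiated
# welleng.error.ErrorModel instance ``err``,
#     tool_errors = ToolError(err, model="ISCWSA MWD Rev5")
# evaluates every weighting function listed under the model's ``codes`` and
# accumulates the per-source covariances into ``cov_NEVs`` and ``cov_HLAs``.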
def _funky_denominator(error):
with np.errstate(divide='ignore', invalid='ignore'):
result = np.nan_to_num((
1 - sin(error.survey.inc_rad) ** 2
* sin(error.survey.azi_mag_rad) ** 2
),
# nan=1e-6,
# posinf=1.0,
# neginf=-1.0
)
# ACCURACY = 1e-6
# with np.errstate(divide='ignore', invalid='ignore'):
# coeff = np.nan_to_num(
# result / np.abs(result) * ACCURACY,
# nan=ACCURACY
# )
# result = np.where(np.abs(result) > ACCURACY, result, coeff)
return result
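# NB: the quantity 1 - sin(inc)^2 * sin(azi_mag)^2 computed above is the
# usual denominator of magnetic azimuth weighting functions; it tends to
# zero as the wellbore approaches horizontal pointing magnetic east/west,
# which is why the division sites below wrap it in errstate/nan_to_num.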
# error functions #
def DREF(code, error, mag=0.35, propagation='random', NEV=True, **kwargs):
dpde = np.full((len(error.survey_rad), 3), [1., 0., 0.])
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def DSF(
code, error, mag=0.00056, propagation='systematic', NEV=True, **kwargs
):
dpde = np.full((len(error.survey_rad), 3), [1., 0., 0.])
dpde = dpde * np.array(error.survey_rad)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def DST(
code, error, mag=0.00000025, propagation='systematic', NEV=True, **kwargs
):
dpde = np.full((len(error.survey_rad), 3), [1., 0., 0.])
dpde[:, 0] = error.survey.tvd
dpde = dpde * np.array(error.survey_rad)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
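# NB: each depth function above returns the sensitivity of the
# (depth, inc, azi) measurement to one error source per station, scaled by
# the source magnitude: DREF is a constant depth reference offset, DSF grows
# with measured depth (scale factor) and DST with md * tvd (stretch).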
def ABIZ(
code, error, mag=0.0040, propagation='systematic', NEV=True, **kwargs
):
denom = _funky_denominator(error) / error.survey.header.G
denom = np.where(denom > ACCURACY, denom, ACCURACY)
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = -sin(error.survey.inc_rad) / error.survey.header.G
dpde[:, 2] = (
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad) * cos(error.survey.azi_mag_rad)
)
) / denom
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ABIXY_TI1(
code, error, mag=0.0040, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = -cos(error.survey.inc_rad) / error.survey.header.G
dpde[:, 2] = (
cos(error.survey.inc_rad) ** 2
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad) * cos(error.survey.azi_mag_rad)
)
) / (
error.survey.header.G * (
_funky_denominator(error)
)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ABXY_TI1(
code, error, mag=0.0040, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = -cos(error.survey.inc_rad) / error.survey.header.G
dpde[:, 2] = (
cos(error.survey.inc_rad)
* tan(error.survey.header.dip)
* sin(error.survey.azi_mag_rad)
) / error.survey.header.G
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ABIXY_TI2(
code, error, mag=0.004, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.nan_to_num(
(
-(
tan(error.survey.header.dip)
* cos(error.survey.azi_mag_rad)
- tan(
pi/2 - error.survey.inc_rad
)
) / (
error.survey.header.G
* (
_funky_denominator(error)
)
)
),
posinf=0.0,
neginf=0.0
)
e_DIA = dpde * mag
sing = np.where(
error.survey_rad[:, 1] < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
n = np.array(
0.5 * error.drdp_sing['double_delta_md']
* -sin(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
e = np.array(
0.5 * error.drdp_sing['double_delta_md']
* cos(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
v = np.zeros_like(n)
e_NEV_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV_sing[1, 1] = (
(
error.survey.md[2]
+ error.survey.md[1]
- 2 * error.survey.md[0]
) / 2
* mag * cos(error.survey.azi_true_rad[1])
/ error.survey.header.G
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
n = np.array(
0.5 * error.drdp_sing['delta_md']
* -sin(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
e = np.array(
0.5 * error.drdp_sing['delta_md']
* cos(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
v = np.zeros_like(n)
e_NEV_star_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV_star_sing[1, 1] = (
(error.survey.md[1] - error.survey.md[0])
* mag
* (
cos(error.survey.azi_true_rad[1])
/ error.survey.header.G
)
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
def ABXY_TI2(
code, error, mag=0.004, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.nan_to_num(
(
(
tan(-(error.survey_rad[:, 1]) + (pi/2))
- tan(error.survey.header.dip)
* cos(error.survey.azi_mag_rad)
) / error.survey.header.G
),
posinf=0.0,
neginf=0.0
)
e_DIA = dpde * mag
sing = np.where(
error.survey_rad[:, 1] < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
n = np.array(
0.5 * error.drdp_sing['double_delta_md']
* -sin(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
e = np.array(
0.5 * error.drdp_sing['double_delta_md']
* cos(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
v = np.zeros_like(n)
e_NEV_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
if error.error_model.lower().split(' ')[-1] != 'rev4':
e_NEV_sing[1, 1] = (
(
error.survey.md[2]
+ error.survey.md[1]
- 2 * error.survey.md[0]
) / 2
* mag * cos(error.survey.azi_true_rad[1])
/ error.survey.header.G
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
n = np.array(
0.5 * error.drdp_sing['delta_md']
* -sin(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
e = np.array(
0.5 * error.drdp_sing['delta_md']
* cos(error.drdp_sing['azi2']) * mag
) / error.survey.header.G
v = np.zeros_like(n)
e_NEV_star_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
if error.error_model.lower().split(' ')[-1] != 'rev4':
e_NEV_star_sing[1, 1] = (
(error.survey.md[1] - error.survey.md[0])
* mag
* (
cos(error.survey.azi_true_rad[1])
/ error.survey.header.G
)
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
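# NB: ABIXY_TI2/ABXY_TI2 special-case near-vertical stations (inclination
# below ``vertical_inc_limit``), where the azimuth weighting function is
# singular, by substituting finite-difference lateral terms built from the
# station spacing stored in ``drdp_sing``.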
def AMID(code, error, mag=0.04363323129985824, propagation='systematic',
NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
sin(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def ABZ(code, error, mag=0.004, propagation='systematic', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = -sin(np.array(error.survey_rad)[:, 1]) / error.survey.header.G
dpde[:, 2] = (
sin(np.array(error.survey_rad)[:, 1])
* tan(error.survey.header.dip) * sin(error.survey.azi_mag_rad)
) / error.survey.header.G
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASXY_TI1(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = (
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad)
) / sqrt(2)
dpde[:, 2] = (
sin(error.survey.inc_rad)
* -tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
) / sqrt(2)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASIXY_TI1(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = (
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad)
/ sqrt(2)
)
dpde[:, 2] = -(
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad) ** 2
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
)
) / (
sqrt(2) * _funky_denominator(error)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASXY_TI2(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = sin(
np.array(error.survey_rad)[:, 1]
) * cos(np.array(error.survey_rad)[:, 1]) / 2
dpde[:, 2] = (
sin(np.array(error.survey_rad)[:, 1])
* -tan(error.survey.header.dip) * cos(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad)
) / 2
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASIXY_TI2(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = (
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad)
/ 2
)
dpde[:, 2] = -(
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad) ** 2
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad) * cos(error.survey.azi_mag_rad)
)
) / (
2 * _funky_denominator(error)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASXY_TI3(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
sin(np.array(error.survey_rad)[:, 1])
* tan(error.survey.header.dip) * cos(error.survey.azi_mag_rad)
- cos(np.array(error.survey_rad)[:, 1])) / 2
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASIXY_TI3(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
tan(error.survey.header.dip)
* sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
- cos(error.survey.inc_rad)
) / (
2 * _funky_denominator(error)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASZ(code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = (
-sin(np.array(error.survey_rad)[:, 1])
* cos(np.array(error.survey_rad)[:, 1])
)
dpde[:, 2] = (
sin(np.array(error.survey_rad)[:, 1])
* tan(error.survey.header.dip)
* cos(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def ASIZ(
code, error, mag=0.0005, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = (
-sin(error.survey.inc_rad)
* cos(error.survey.inc_rad)
)
dpde[:, 2] = (
sin(error.survey.inc_rad)
* cos(error.survey.inc_rad) ** 2
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
)
) / (
_funky_denominator(error)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def AXYZ_MIS(
code, error, mag=0.0001658062789394613, propagation='systematic', NEV=True,
**kwargs
):
"""
SPE 90408 Table 1
"""
dpde = np.full((len(error.survey_rad), 3), [0., 1., 0.])
dpde = dpde * np.array(error.survey_rad)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def AXYZ_SF(
code, error, mag=0.000111, propagation='systematic', NEV=True,
**kwargs
):
"""
SPE 90408 Table 1
"""
dpde = np.full((len(error.survey_rad), 3), [0., 1., 0.])
dpde[:, 1] = (
1.3 * sin(error.survey.inc_rad) * cos(error.survey.inc_rad)
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def AXYZ_ZB(
code, error, mag=0.0017, propagation='systematic', NEV=True,
**kwargs
):
"""
SPE 90408 Table 1
"""
dpde = np.full((len(error.survey_rad), 3), [0., 1., 0.])
dpde[:, 1] = (
sin(error.survey.inc_rad) / error.survey.header.G
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def _get_ref_init_error(dpde, error, **kwargs):
"""
    Function that identifies where the continuous gyro section begins and
    then carries the static initialization errors through the continuous
    mode sections.
"""
temp = [0.0]
for coeff, inc in zip(dpde[1:, 2], error.survey.inc_rad[1:]):
if inc > kwargs['header']['XY Static Gyro']['End Inc']:
temp.append(temp[-1])
else:
temp.append(coeff)
dpde[:, 2] = temp
return dpde
def CNA(
code, error, mag=0.35, propagation='systematic', NEV=True,
**kwargs
):
dpde = np.full((len(error.survey_rad), 3), [0., 0., 0.])
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.nan_to_num(
1 / sin(error.survey.inc_rad),
posinf=1,
neginf=-1
)
e_DIA = dpde * mag
sing = np.where(
error.survey.inc_rad < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
n = (
np.array(0.5 * error.drdp_sing['double_delta_md'])
* -sin(getattr(
error.survey, f"azi_{error.survey.header.azi_reference}_rad"
)[1: -1])
* mag
)
e = (
np.array(0.5 * error.drdp_sing['double_delta_md'])
* cos(getattr(
error.survey, f"azi_{error.survey.header.azi_reference}_rad"
)[1: -1])
* mag
)
v = np.zeros_like(n)
e_NEV_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
n = (
np.array(0.5 * error.drdp_sing['delta_md'])
* -sin(getattr(
error.survey, f"azi_{error.survey.header.azi_reference}_rad"
)[1: -1])
* mag
)
e = (
np.array(0.5 * error.drdp_sing['delta_md'])
* cos(getattr(
error.survey, f"azi_{error.survey.header.azi_reference}_rad"
)[1: -1])
* mag
)
e_NEV_star_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
# result = error._generate_error(code, e_DIA, propagation, NEV)
# return result
def CNI(
code, error, mag=0.35, propagation='systematic', NEV=True,
**kwargs
):
dpde = np.full((len(error.survey_rad), 3), [0., 1., 0.])
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def GXY_B1(
code, error, mag=0.002617993877991494, propagation='random',
NEV=True, **kwargs
):
"""
SPE 90408 Table 4
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
dpde[:, 2] = np.where(
error.survey.inc_rad <= kwargs['header']['XY Static Gyro']['End Inc'],
sin(error.survey.azi_true_rad)
/ (
error.survey.header.earth_rate
* cos(np.radians(error.survey.header.latitude))
* cos(error.survey.inc_rad)
),
np.zeros_like(error.survey.md)
)
dpde = _get_ref_init_error(dpde, error, **kwargs)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def GXY_B2(
code, error, mag=0.002617993877991494, propagation='random',
NEV=True, **kwargs
):
"""
SPE 90408 Table 4
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
dpde[:, 2] = np.where(
error.survey.inc_rad <= kwargs['header']['XY Static Gyro']['End Inc'],
cos(error.survey.azi_true_rad)
/ (
error.survey.header.earth_rate
* cos(np.radians(error.survey.header.latitude))
),
np.zeros_like(error.survey.md)
)
dpde = _get_ref_init_error(dpde, error, **kwargs)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def GXY_G1(
code, error, mag=0.006981317007977318, propagation='systematic',
NEV=True, **kwargs
):
"""
SPE 90408 Table 4
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
dpde[:, 2] = np.where(
error.survey.inc_rad <= kwargs['header']['XY Static Gyro']['End Inc'],
cos(error.survey.azi_true_rad) * sin(error.survey.inc_rad)
/ (
error.survey.header.earth_rate
* cos(np.radians(error.survey.header.latitude))
),
np.zeros_like(error.survey.md)
)
dpde = _get_ref_init_error(dpde, error, **kwargs)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def GXY_G4(
code, error, mag=0.010471975511965976, propagation='systematic',
NEV=True, **kwargs
):
"""
SPE 90408 Table 4
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
dpde[:, 2] = np.where(
error.survey.inc_rad <= kwargs['header']['XY Static Gyro']['End Inc'],
sin(error.survey.azi_true_rad) * tan(error.survey.inc_rad)
/ (
error.survey.header.earth_rate
* cos(np.radians(error.survey.header.latitude))
),
np.zeros_like(error.survey.md)
)
dpde = _get_ref_init_error(dpde, error, **kwargs)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def GXY_RN(
code, error, mag=0.006981317007977318, propagation='random',
NEV=True, **kwargs
):
"""
SPE 90408 Table 4
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
dpde[:, 2] = np.where(
error.survey.inc_rad <= kwargs['header']['XY Static Gyro']['End Inc'],
1.0
* (
np.sqrt(
1 - cos(error.survey.azi_true_rad) ** 2
* sin(error.survey.inc_rad) ** 2
)
/ (
error.survey.header.earth_rate
* cos(np.radians(error.survey.header.latitude))
* cos(error.survey.inc_rad)
)
),
np.zeros_like(error.survey.md)
)
dpde = _get_ref_init_error(dpde, error, **kwargs)
dpde_systematic = np.zeros_like(dpde)
index_systematic = np.where(
error.survey.inc_rad > kwargs['header']['XY Static Gyro']['End Inc']
)
np.put(
dpde_systematic[:, 2],
index_systematic,
(
dpde[index_systematic][:, 2]
* kwargs['header']['Noise Reduction Factor']
)
)
e_DIA_systematic = dpde_systematic * mag
result_systematic = error._generate_error(
code, e_DIA_systematic, 'systematic', NEV
)
np.put(
dpde[:, 2],
index_systematic,
np.zeros(len(index_systematic))
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
result.cov_NEV += result_systematic.cov_NEV
return result
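# Added note: GXY_RN splits the gyrocompass random noise in two. Stations in
# the continuous region (inc > 'End Inc') are propagated as a systematic
# contribution scaled by the 'Noise Reduction Factor'; the remaining static
# stations are propagated as random, and the two covariances are summed via
# the cov_NEV addition above.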
def GXY_GD(
code, error, mag=0.008726646259971648, propagation='systematic',
NEV=True, **kwargs
):
"""
SPE 90408 Table 7
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.where(
error.survey.inc_rad > kwargs['header']['XY Static Gyro']['End Inc'],
np.append(
np.array([0]),
(
(error.survey.md[1:] - error.survey.md[:-1])
/ (
float(
kwargs['header']['XY Continuous Gyro']['Running Speed'].split()[0]
)
* sin(
(error.survey.inc_rad[1:] + error.survey.inc_rad[:-1])
/ 2
)
)
)
),
np.zeros_like(error.survey.md)
)
init_error = []
for i, (u, l) in enumerate(zip(
error.survey.inc_rad[1:], error.survey.inc_rad[:-1]
)):
init_error.append(0.0)
if all((
u > kwargs['header']['XY Static Gyro']['End Inc'],
l <= kwargs['header']['XY Static Gyro']['End Inc']
)):
for tool in kwargs['errors'].gyro_stationary:
temp = kwargs['errors'].errors[tool].e_DIA[i - 1][2]
if tool in ['GXY_RN']:
temp *= kwargs['header']['Noise Reduction Factor']
init_error[-1] += temp
temp = [0.0]
for i, (u, e) in enumerate(zip(dpde[1:, 2], init_error)):
temp.append(0.0)
if u != 0.0:
temp[-1] += temp[-2] + u * mag
dpde[:, 2] = temp
e_DIA = dpde
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
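# Added note: GXY_GD models continuous-gyro drift. Each continuous-mode
# interval contributes delta_md / (running_speed * sin(mean_inc)), i.e. the
# running time for the interval resolved through the gyrocompass geometry,
# and the loop above sums those contributions cumulatively so the drift term
# grows linearly along hole. init_error collects the stationary-tool errors
# present at the static-to-continuous transition.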
def GXY_GRW(
code, error, mag=0.004363323129985824, propagation='systematic',
NEV=True, **kwargs
):
"""
SPE 90408 Table 7
"""
dpde = np.full((len(error.survey_rad), 3), [0., 0., 1.])
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.where(
error.survey.inc_rad > kwargs['header']['XY Static Gyro']['End Inc'],
np.append(
np.array([0]),
(error.survey.md[1:] - error.survey.md[:-1])
/ (
float(
kwargs['header']['XY Continuous Gyro']['Running Speed'].split()[0]
)
* sin(
(error.survey.inc_rad[1:] + error.survey.inc_rad[:-1])
/ 2
) ** 2
)
),
np.zeros_like(error.survey.md)
)
init_error = []
for i, (u, l) in enumerate(zip(
error.survey.inc_rad[1:], error.survey.inc_rad[:-1]
)):
init_error.append(0.0)
if all((
u > kwargs['header']['XY Static Gyro']['End Inc'],
l <= kwargs['header']['XY Static Gyro']['End Inc']
)):
for tool in kwargs['errors'].gyro_stationary:
temp = kwargs['errors'].errors[tool].e_DIA[i - 1][2]
if tool in ['GXY_RN']:
temp *= kwargs['header']['Noise Reduction Factor']
init_error[-1] += temp
temp = [0.0]
for i, (u, e) in enumerate(zip(dpde[1:, 2], init_error)):
temp.append(0.0)
if u != 0.0:
temp[-1] += np.sqrt(temp[-2] ** 2 + u * mag)
dpde[:, 2] = temp
e_DIA = dpde
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
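# Added note: GXY_GRW is the random-walk counterpart of GXY_GD: the interval
# weighting (here with sin(mean_inc) squared in the denominator) is
# accumulated in root-sum-square fashion, sqrt(prev**2 + u * mag), rather
# than linearly.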
def MBXY_TI1(
code, error, mag=70.0, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
-cos(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad)
) / (error.survey.header.b_total * cos(error.survey.header.dip))
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
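# Added note: the MB* functions weight magnetometer bias errors into
# azimuth; for a cross-axial (XY) bias the weighting is
# -cos(inc) * sin(azi_mag) / (B_total * cos(dip)), and MBZ below swaps in
# -sin(inc) for the axial sensor. The MBI* variants additionally divide by
# _funky_denominator(error), a denominator helper defined earlier in the
# module.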
def MBIXY_TI1(
code, error, mag=70.0, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
-cos(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
) / (
error.survey.header.b_total
* cos(error.survey.header.dip)
* (
_funky_denominator(error)
)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def MBXY_TI2(
code, error, mag=70.0, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
cos(error.survey.azi_mag_rad)
/ (
error.survey.header.b_total
* cos(error.survey.header.dip)
)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def MBIXY_TI2(
code, error, mag=70.0, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
cos(error.survey.azi_mag_rad)
/ (
error.survey.header.b_total
* cos(error.survey.header.dip)
* (
_funky_denominator(error)
)
)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def MBZ(code, error, mag=70.0, propagation='systematic', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
-sin(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad)
) / (error.survey.header.b_total * cos(error.survey.header.dip))
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def MFI(
code, error, mag=70, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
-sin(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
) / (
_funky_denominator(error)
)
/ error.survey.header.b_total
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def MSXY_TI1(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
sin(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(np.array(error.survey_rad)[:, 1])
+ sin(np.array(error.survey_rad)[:, 1])
* cos(error.survey.azi_mag_rad)
) / sqrt(2)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def MSXY_TI2(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
sin(error.survey.azi_mag_rad) * (
tan(error.survey.header.dip)
* sin(np.array(error.survey_rad)[:, 1])
* cos(np.array(error.survey_rad)[:, 1])
- cos(np.array(error.survey_rad)[:, 1])
* cos(np.array(error.survey_rad)[:, 1])
* cos(error.survey.azi_mag_rad) - cos(error.survey.azi_mag_rad)
) / 2
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def MSXY_TI3(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
cos(np.array(error.survey_rad)[:, 1])
* cos(error.survey.azi_mag_rad) * cos(error.survey.azi_mag_rad)
- cos(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad) * sin(error.survey.azi_mag_rad)
- tan(error.survey.header.dip) * sin(np.array(error.survey_rad)[:, 1])
* cos(error.survey.azi_mag_rad)
) / 2
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
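# Added note: MSXY_TI1..TI3 are the three toolface-dependent terms for
# cross-axial magnetometer scale-factor error; the sqrt(2) and /2 factors
# are consistent with averaging the two XY sensors over toolface.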
def MSIXY_TI1(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
sin(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* cos(error.survey.inc_rad)
+ sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
) / (
sqrt(2)
* (
_funky_denominator(error)
)
)
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def MSIXY_TI2(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
sin(error.survey.azi_mag_rad)
* (
tan(error.survey.header.dip)
* sin(error.survey.inc_rad)
* cos(error.survey.inc_rad)
- cos(error.survey.inc_rad) ** 2
* cos(error.survey.azi_mag_rad)
- cos(error.survey.azi_mag_rad)
) / (
2 * (
_funky_denominator(error)
)
)
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def MSIXY_TI3(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
(
cos(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad) ** 2
- cos(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad) ** 2
- tan(error.survey.header.dip)
* sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
) / (
2 * (
_funky_denominator(error)
)
)
)
e_DIA = dpde * mag
result = error._generate_error(code, e_DIA, propagation, NEV)
return result
def MSZ(
code, error, mag=0.0016, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = -(
sin(np.array(error.survey_rad)[:, 1])
* cos(error.survey.azi_mag_rad)
+ tan(error.survey.header.dip) * cos(np.array(error.survey_rad)[:, 1])
) * sin(np.array(error.survey_rad)[:, 1]) * sin(error.survey.azi_mag_rad)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def AZ(code, error, mag=0.00628, propagation='systematic', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = 1
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def DBH(
code, error, mag=np.radians(0.09), propagation='systematic', NEV=True,
**kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = 1 / (
error.survey.header.b_total * cos(error.survey.header.dip)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
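# Added note: DBH scales a field-strength type error into azimuth via
# 1 / (B_total * cos(dip)). For illustration, assuming B_total = 50000 nT
# and dip = 70 deg, the sensitivity is about 1 / (50000 * cos(70 deg)),
# roughly 5.8e-5 rad per nT. DBHR below applies the same weighting with
# random propagation.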
def MDI(
code, error, mag=np.radians(5000), propagation='systematic', NEV=True,
**kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
-sin(error.survey.inc_rad)
* sin(error.survey.azi_mag_rad)
* (
cos(error.survey.inc_rad)
- tan(error.survey.header.dip)
* sin(error.survey.inc_rad)
* cos(error.survey.azi_mag_rad)
)
) / (
_funky_denominator(error)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def DBHR(
code, error, mag=np.radians(3000), propagation='random', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = 1 / (
error.survey.header.b_total * cos(error.survey.header.dip)
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def AMIL(code, error, mag=220.0, propagation='systematic', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = (
-sin(np.array(error.survey_rad)[:, 1])
* sin(error.survey.azi_mag_rad)
/ (error.survey.header.b_total * cos(error.survey.header.dip))
)
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def SAG(
code, error, mag=0.00349, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = sin(np.array(error.survey_rad)[:, 1])
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def SAGE(
code, error, mag=0.00175, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = sin(np.array(error.survey.inc_rad)) ** 0.25
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
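# Added note: SAG weights BHA sag on inclination as sin(inc), zero in
# vertical hole and maximal horizontally; SAGE applies the gentler
# sin(inc) ** 0.25 profile.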
def XYM1(
code, error, mag=0.00175, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = np.absolute(sin(np.array(error.survey.inc_rad)))
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def XYM2(
code, error, mag=0.00175, propagation='systematic', NEV=True, **kwargs
):
    propagation = 'systematic'  # override: the rev5 model tab lists this incorrectly
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 2] = -1
e_DIA = dpde * mag
return error._generate_error(code, e_DIA, propagation, NEV)
def XYM3(
code, error, mag=0.00175, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = (
np.absolute(cos(np.array(error.survey_rad)[:, 1]))
* cos(error.survey.azi_true_rad)
)
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.nan_to_num(
-(
np.absolute(cos(np.array(error.survey_rad)[:, 1]))
* sin(error.survey.azi_true_rad)
) / sin(np.array(error.survey_rad)[:, 1]),
posinf=0.0,
neginf=0.0
)
e_DIA = dpde * mag
sing = np.where(
error.survey_rad[:, 1] < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
n = np.array(0.5 * error.drdp_sing['double_delta_md'] * mag)
e = np.zeros(len(error.drdp_sing['double_delta_md']))
v = np.zeros_like(n)
e_NEV_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
n = np.array(0.5 * error.drdp_sing['delta_md'] * mag)
e = np.zeros(len(error.drdp_sing['delta_md']))
v = np.zeros_like(n)
e_NEV_star_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
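# Added note: XYM3 handles the vertical singularity in the same way as CNA
# above: inside vertical_inc_limit the DIA weighting is replaced with
# explicit NEV terms, here a pure northing offset of 0.5 * delta_md * mag
# per station.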
def XYM3E(code, error, mag=0.00524, propagation='random', NEV=True, **kwargs):
coeff = np.ones(len(error.survey.md))
coeff[1:-1] = np.amax(np.stack((
coeff[1:-1],
sqrt(
10 / error.drdp_sing['delta_md']
)
), axis=-1), axis=-1)
coeff[-1] = np.amax(np.stack((
coeff[-1],
sqrt(
10 / (error.survey.md[-1] - error.survey.md[-2])
)
), axis=-1), axis=-1)
dpde = np.zeros((len(error.survey.md), 3))
dpde[1:, 1] = np.absolute(
cos(error.survey.inc_rad[1:])
* cos(error.survey.azi_true_rad[1:])
* coeff[1:]
)
with np.errstate(divide='ignore', invalid='ignore'):
dpde[1:, 2] = (
(
-np.absolute(cos(error.survey.inc_rad[1:]))
* sin(error.survey.azi_true_rad[1:])
/ sin(error.survey.inc_rad[1:])
)
* coeff[1:]
)
dpde[1:, 2] = np.where(
error.survey.inc_rad[1:] < error.survey.header.vertical_inc_limit,
coeff[1:],
dpde[1:, 2]
)
e_DIA = dpde * mag
sing = np.where(
error.survey.inc_rad < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
e_NEV_sing = np.zeros_like(e_NEV)
e_NEV_sing[:, 0] = e_NEV[:, 0]
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
e_NEV_star_sing = np.zeros_like(e_NEV_star)
e_NEV_star_sing[:, 0] = e_NEV_star[:, 0]
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
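# Added note: the coeff factor in XYM3E (and XYM4E below) is
# max(1, sqrt(10 / delta_md)), which inflates the weighting for stations
# spaced more closely than 10 measured-depth units so that densely sampled
# misalignment errors are not under-propagated under the 'random' model.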
def XYM4(
code, error, mag=0.00175, propagation='systematic', NEV=True, **kwargs
):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[:, 1] = np.absolute(
cos(np.array(error.survey_rad)[:, 1])
) * sin(error.survey.azi_true_rad)
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.nan_to_num(
(
np.absolute(np.cos(np.array(error.survey_rad)[:, 1]))
* cos(error.survey.azi_true_rad)
)
/ sin(np.array(error.survey_rad)[:, 1]),
posinf=0.0,
neginf=0.0
)
e_DIA = dpde * mag
sing = np.where(
error.survey_rad[:, 1] < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
n = np.zeros(len(error.drdp_sing['double_delta_md']))
e = np.array(0.5 * error.drdp_sing['double_delta_md'] * mag)
v = np.zeros_like(n)
e_NEV_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
n = np.zeros(len(error.drdp_sing['delta_md']))
e = np.array(0.5 * error.drdp_sing['delta_md'] * mag)
v = np.zeros_like(n)
e_NEV_star_sing = np.vstack(
(
np.zeros((1, 3)),
np.stack((n, e, v), axis=-1),
np.zeros((1, 3))
)
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
def XYM4E(code, error, mag=0.00524, propagation='random', NEV=True, **kwargs):
coeff = np.ones(len(error.survey.md))
coeff[1:-1] = np.amax(np.stack((
coeff[1:-1],
sqrt(
10 / error.drdp_sing['delta_md']
)
), axis=-1), axis=-1)
coeff[-1] = np.amax(np.stack((
coeff[-1],
sqrt(
10 / (error.survey.md[-1] - error.survey.md[-2])
)
), axis=-1), axis=-1)
dpde = np.zeros((len(error.survey.md), 3))
dpde[1:, 1] = (
cos(error.survey.inc_rad[1:])
* sin(error.survey.azi_true_rad[1:])
* coeff[1:]
)
with np.errstate(divide='ignore', invalid='ignore'):
dpde[1:, 2] = np.nan_to_num(
(
(
cos(error.survey.inc_rad[1:])
* cos(error.survey.azi_true_rad[1:])
/ sin(error.survey.inc_rad[1:])
)
* coeff[1:]
),
posinf=0,
neginf=0
)
e_DIA = dpde * mag
sing = np.where(
error.survey.inc_rad < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
# this is a bit of a cop out way of handling these exceptions, but it's
# simple and it works...
xym3e = XYM3E(
code, error, mag=mag, propagation=propagation, NEV=NEV
)
e_NEV = error._e_NEV(e_DIA)
e_NEV_sing = np.zeros_like(e_NEV)
e_NEV_sing[:, 1] = xym3e.e_NEV[:, 0]
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
e_NEV_star_sing = np.zeros_like(e_NEV_star)
e_NEV_star_sing[:, 1] = xym3e.e_NEV_star[:, 0]
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
def XCL(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
"""
Dummy function to manage the ISCWSA workbook not correctly defining the
weighting functions.
"""
tortuosity = kwargs['tortuosity']
if code == "XCLA":
return XCLA(
code, error, mag=mag, propagation=propagation, NEV=NEV,
tortuosity=tortuosity
)
else:
return XCLH(
code, error, mag=mag, propagation=propagation, NEV=NEV,
tortuosity=tortuosity
)
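# Added note: XCL dispatches on the tool code: 'XCLA' routes to the lateral
# (azimuth) tortuosity term and anything else to XCLH, the highside /
# inclination term, forwarding the 'tortuosity' value from kwargs.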
def XCLA(code, error, mag=0.167, propagation='random', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
def manage_sing(error, kwargs):
temp = np.absolute(
sin(error.survey.inc_rad[1:])
* (((
error.survey.azi_true_rad[1:]
- error.survey.azi_true_rad[:-1]
+ pi
) % (2 * pi)) - pi)
)
temp[np.where(
error.survey.inc_rad[:-1] < error.survey.header.vertical_inc_limit
)] = 0
return temp
dpde[1:, 0] = (
(error.survey.md[1:] - error.survey.md[0:-1])
* np.amax(np.stack((
manage_sing(error, kwargs),
(
kwargs['tortuosity']
* (error.survey.md[1:] - error.survey.md[0:-1])
)
), axis=-1), axis=-1)
* -sin(error.survey.azi_true_rad[1:])
)
dpde[1:, 1] = (
(error.survey.md[1:] - error.survey.md[0:-1])
* np.amax(np.stack((
manage_sing(error, kwargs),
(
kwargs['tortuosity']
* (error.survey.md[1:] - error.survey.md[0:-1])
)
), axis=-1), axis=-1)
* cos(error.survey.azi_true_rad[1:])
)
e_DIA = dpde * mag
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV=e_DIA, e_NEV_star=e_DIA
)
def XCLH(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
dpde = np.zeros((len(error.survey_rad), 3))
dpde[1:, 0] = (
(error.survey.md[1:] - error.survey.md[0:-1])
* np.amax(np.stack((
np.absolute(
(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])
),
(
kwargs['tortuosity']
* (error.survey.md[1:] - error.survey.md[0:-1])
)
), axis=-1), axis=-1)
* cos(error.survey.inc_rad[1:])
* cos(error.survey.azi_true_rad[1:])
)
dpde[1:, 1] = (
(error.survey.md[1:] - error.survey.md[0:-1])
* np.amax(np.stack((
np.absolute(
(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])
),
(
kwargs['tortuosity']
* (error.survey.md[1:] - error.survey.md[0:-1])
)
), axis=-1), axis=-1)
* cos(error.survey.inc_rad[1:])
* sin(error.survey.azi_true_rad[1:])
)
dpde[1:, 2] = (
(error.survey.md[1:] - error.survey.md[0:-1])
* np.amax(np.stack((
np.absolute(
(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])
),
(
kwargs['tortuosity']
* (error.survey.md[1:] - error.survey.md[0:-1])
)
), axis=-1), axis=-1)
* -sin(error.survey.inc_rad[1:])
)
e_DIA = dpde * mag
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV=e_DIA, e_NEV_star=e_DIA
)
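# Added note: XCLA/XCLH weight excess tortuosity as
# delta_md * max(|observed curvature|, tortuosity * delta_md), projected
# into NEV through station inclination and azimuth. Because e_NEV and
# e_NEV_star are passed as e_DIA directly, the weighting is already
# expressed as NEV displacements rather than D/I/A sensitivities.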
def XYM3L(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
coeff = np.ones(len(error.survey.md) - 1)
coeff = np.amax(np.stack((
coeff,
sqrt(
10 / (error.survey.md[1:] - error.survey.md[:-1])
)
), axis=-1), axis=-1)
dpde = np.zeros((len(error.survey_rad), 3))
dpde[1:, 1] = np.absolute(
cos(error.survey.inc_rad[1:])
* cos(error.survey.azi_true_rad[1:])
* coeff
)
dpde[0, 1] = dpde[1, 1]
with np.errstate(divide='ignore', invalid='ignore'):
dpde[1:, 2] = np.nan_to_num(
(
-np.absolute(
cos(error.survey.inc_rad[1:])
)
* (
sin(error.survey.azi_true_rad[1:])
/ sin(error.survey.inc_rad[1:])
)
* coeff
),
posinf=0,
neginf=0
)
dpde[0, 2] = dpde[1, 2]
e_DIA = dpde * mag
sing = np.where(
error.survey_rad[:, 1] < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
e_NEV_sing = np.zeros_like(e_NEV)
e_NEV_sing[1:-1, 0] = (
coeff[:-1]
* (
error.survey.md[2:]
- error.survey.md[:-2]
) / 2
* mag
)
e_NEV_sing[1, 0] = (
coeff[1]
* (
error.survey.md[2] + error.survey.md[1]
- 2 * error.survey.md[0]
) / 2
* mag
)
e_NEV_sing[-1, 0] = (
coeff[-1]
* (
error.survey.md[-1]
- error.survey.md[-2]
) / 2
* mag
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
e_NEV_star_sing = np.zeros_like(e_NEV)
e_NEV_star_sing[1:, 0] = (
(
error.survey.md[1:]
- error.survey.md[:-1]
) / 2
* mag
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
def XYM4L(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
propagation = 'random'
coeff = np.ones(len(error.survey.md))
coeff[1:] = np.amax(np.stack((
coeff[1:],
sqrt(
10 / (error.survey.md[1:] - error.survey.md[:-1])
)
), axis=-1), axis=-1)
dpde = np.zeros((len(error.survey_rad), 3))
with np.errstate(divide='ignore', invalid='ignore'):
dpde[:, 2] = np.nan_to_num(
np.absolute(
cos(error.survey.inc_rad)
* cos(error.survey.azi_true_rad)
/ sin(error.survey.inc_rad)
* coeff
),
posinf=0,
neginf=0,
)
dpde[:, 1] = (
np.absolute(
cos(error.survey.inc_rad)
)
* (
sin(error.survey.azi_true_rad)
)
* coeff
)
e_DIA = dpde * mag
sing = np.where(
error.survey_rad[:, 1] < error.survey.header.vertical_inc_limit
)
if len(sing[0]) < 1:
return error._generate_error(code, e_DIA, propagation, NEV)
else:
e_NEV = error._e_NEV(e_DIA)
e_NEV_sing = np.zeros_like(e_NEV)
e_NEV_sing[1:-1, 1] = (
coeff[1:-1]
* (
error.survey.md[2:]
- error.survey.md[:-2]
) / 2
* mag
)
e_NEV_sing[1, 1] = (
coeff[1]
* (
error.survey.md[2] + error.survey.md[1]
- 2 * error.survey.md[0]
) / 2
* mag
)
e_NEV_sing[-1, 1] = (
coeff[-1]
* (
error.survey.md[-1]
- error.survey.md[-2]
) / 2
* mag
)
e_NEV[sing] = e_NEV_sing[sing]
e_NEV_star = error._e_NEV_star(e_DIA)
e_NEV_star_sing = np.zeros_like(e_NEV)
e_NEV_star_sing[1:, 1] = (
(
error.survey.md[1:]
- error.survey.md[:-1]
) / 2
* mag
)
e_NEV_star_sing[1, 1] = (
(
error.survey.md[1]
- error.survey.md[0]
)
* mag
)
e_NEV_star[sing] = e_NEV_star_sing[sing]
return error._generate_error(
code, e_DIA, propagation, NEV, e_NEV, e_NEV_star
)
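# ---------------------------------------------------------------------------
# Illustrative usage sketch (added commentary; the `error`, `header` and
# `errors` objects are assumed to be supplied by the surrounding error-model
# framework, with attribute names as used in the functions above):
#
#     result = GXY_B1(
#         'GXY_B1', error, propagation='random', NEV=True,
#         header=header, errors=errors,
#     )
#     cov_nev = result.cov_NEV  # per-source NEV covariance contribution
#
# Every weighting function above shares this signature, so an error model can
# look functions up by tool code and sum the resulting covariances (see
# GXY_RN, which adds a systematic cov_NEV into a random result) to build the
# total positional uncertainty.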
39711), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (39689, 39711), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39775, 39803), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (39778, 39803), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39818, 39843), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (39821, 39843), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((40435, 40461), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (40443, 40461), True, 'import numpy as np\n'), ((41338, 41363), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (41341, 41363), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((41551, 41580), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (41554, 41580), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((43809, 43835), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (43817, 43835), True, 'import numpy as np\n'), ((46214, 46248), 'numpy.sin', 'sin', (['error.survey.azi_true_rad[1:]'], {}), '(error.survey.azi_true_rad[1:])\n', (46217, 46248), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((47426, 47452), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (47434, 47452), True, 'import numpy as np\n'), ((47694, 47724), 'numpy.cos', 'cos', (['error.survey.azi_true_rad'], {}), '(error.survey.azi_true_rad)\n', (47697, 47724), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((49911, 49940), 'numpy.sin', 'sin', (['error.survey.inc_rad[1:]'], {}), '(error.survey.inc_rad[1:])\n', (49914, 49940), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((2269, 2284), 'numpy.radians', 'np.radians', (['(1.0)'], {}), '(1.0)\n', (2279, 2284), True, 'import numpy as np\n'), ((6955, 6980), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (6958, 6980), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((7000, 7029), 'numpy.sin', 'sin', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (7003, 7029), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((10521, 10555), 'numpy.tan', 'tan', (['(pi / 2 - error.survey.inc_rad)'], {}), '(pi / 2 - error.survey.inc_rad)\n', (10524, 10555), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((11364, 11392), 'numpy.cos', 'cos', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (11367, 11392), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((12256, 12284), 'numpy.cos', 'cos', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (12259, 12284), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((13289, 13317), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (13292, 13317), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((13340, 13369), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (13343, 13369), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((13998, 14026), 'numpy.cos', 'cos', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (14001, 14026), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((14988, 15016), 'numpy.cos', 'cos', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (14991, 15016), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((16365, 16391), 'numpy.array', 'np.array', 
(['error.survey_rad'], {}), '(error.survey_rad)\n', (16373, 16391), True, 'import numpy as np\n'), ((16898, 16926), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (16901, 16926), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((17390, 17415), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (17393, 17415), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((17521, 17549), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (17524, 17549), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((17564, 17589), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (17567, 17589), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((17604, 17629), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (17607, 17629), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((17644, 17673), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (17647, 17673), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18162, 18190), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (18165, 18190), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18197, 18223), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (18205, 18223), True, 'import numpy as np\n'), ((18646, 18671), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (18649, 18671), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18777, 18805), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (18780, 18805), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18820, 18845), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (18823, 18845), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18860, 18885), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (18863, 18885), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18888, 18917), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (18891, 18917), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((20222, 20248), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (20230, 20248), True, 'import numpy as np\n'), ((20773, 20798), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (20776, 20798), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((25725, 25765), 'numpy.radians', 'np.radians', (['error.survey.header.latitude'], {}), '(error.survey.header.latitude)\n', (25735, 25765), True, 'import numpy as np\n'), ((30599, 30662), 'numpy.sin', 'sin', (['((error.survey.inc_rad[1:] + error.survey.inc_rad[:-1]) / 2)'], {}), '((error.survey.inc_rad[1:] + error.survey.inc_rad[:-1]) / 2)\n', (30602, 30662), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((33703, 33729), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (33711, 33729), True, 'import numpy as np\n'), ((35430, 35456), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (35438, 35456), True, 'import numpy as np\n'), ((35821, 35846), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (35824, 35846), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((35911, 35939), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), 
'(error.survey.header.dip)\n', (35914, 35939), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((35954, 35979), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (35957, 35979), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((35994, 36019), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (35997, 36019), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((36034, 36063), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (36037, 36063), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((36452, 36478), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (36460, 36478), True, 'import numpy as np\n'), ((37306, 37335), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (37309, 37335), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((37692, 37721), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (37695, 37721), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((37812, 37841), 'numpy.sin', 'sin', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (37815, 37841), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39041, 39066), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (39044, 39066), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39126, 39155), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (39129, 39155), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39637, 39666), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (39640, 39666), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39726, 39755), 'numpy.sin', 'sin', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (39729, 39755), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((40314, 40343), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (40317, 40343), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((40354, 40382), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (40357, 40382), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((41468, 41496), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (41471, 41496), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((41511, 41536), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (41514, 41536), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((42218, 42244), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (42226, 42244), True, 'import numpy as np\n'), ((44084, 44114), 'numpy.sin', 'sin', (['error.survey.azi_true_rad'], {}), '(error.survey.azi_true_rad)\n', (44087, 44114), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((44135, 44161), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (44143, 44161), True, 'import numpy as np\n'), ((47757, 47783), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (47765, 47783), True, 'import numpy as np\n'), ((49802, 49831), 'numpy.cos', 'cos', (['error.survey.inc_rad[1:]'], {}), '(error.survey.inc_rad[1:])\n', (49805, 49831), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((49854, 49888), 'numpy.cos', 'cos', (['error.survey.azi_true_rad[1:]'], {}), 
'(error.survey.azi_true_rad[1:])\n', (49857, 49888), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((54061, 54126), 'numpy.absolute', 'np.absolute', (['(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])'], {}), '(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])\n', (54072, 54126), True, 'import numpy as np\n'), ((55244, 55278), 'numpy.sin', 'sin', (['error.survey.azi_true_rad[1:]'], {}), '(error.survey.azi_true_rad[1:])\n', (55247, 55278), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((55301, 55330), 'numpy.sin', 'sin', (['error.survey.inc_rad[1:]'], {}), '(error.survey.inc_rad[1:])\n', (55304, 55330), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((57419, 57444), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (57422, 57444), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((10418, 10446), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (10421, 10446), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((10469, 10498), 'numpy.cos', 'cos', (['error.survey.azi_mag_rad'], {}), '(error.survey.azi_mag_rad)\n', (10472, 10498), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((11202, 11230), 'numpy.sin', 'sin', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (11205, 11230), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((12097, 12125), 'numpy.sin', 'sin', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (12100, 12125), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((13836, 13864), 'numpy.sin', 'sin', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (13839, 13864), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((14829, 14857), 'numpy.sin', 'sin', (["error.drdp_sing['azi2']"], {}), "(error.drdp_sing['azi2'])\n", (14832, 14857), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((17426, 17451), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (17429, 17451), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((18117, 18143), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (18125, 18143), True, 'import numpy as np\n'), ((18682, 18707), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (18685, 18707), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((19242, 19268), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (19250, 19268), True, 'import numpy as np\n'), ((28702, 28742), 'numpy.radians', 'np.radians', (['error.survey.header.latitude'], {}), '(error.survey.header.latitude)\n', (28712, 28742), True, 'import numpy as np\n'), ((32411, 32474), 'numpy.sin', 'sin', (['((error.survey.inc_rad[1:] + error.survey.inc_rad[:-1]) / 2)'], {}), '((error.survey.inc_rad[1:] + error.survey.inc_rad[:-1]) / 2)\n', (32414, 32474), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((36597, 36623), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (36605, 36623), True, 'import numpy as np\n'), ((36649, 36675), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (36657, 36675), True, 'import numpy as np\n'), ((37055, 37083), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (37058, 37083), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((37919, 37945), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37927, 37945), True, 'import numpy as np\n'), 
((38958, 38986), 'numpy.tan', 'tan', (['error.survey.header.dip'], {}), '(error.survey.header.dip)\n', (38961, 38986), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39001, 39026), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (39004, 39026), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((39081, 39106), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (39084, 39106), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((46165, 46194), 'numpy.cos', 'cos', (['error.survey.inc_rad[1:]'], {}), '(error.survey.inc_rad[1:])\n', (46168, 46194), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((53131, 53196), 'numpy.absolute', 'np.absolute', (['(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])'], {}), '(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])\n', (53142, 53196), True, 'import numpy as np\n'), ((53596, 53661), 'numpy.absolute', 'np.absolute', (['(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])'], {}), '(error.survey.inc_rad[1:] - error.survey.inc_rad[:-1])\n', (53607, 53661), True, 'import numpy as np\n'), ((55156, 55185), 'numpy.cos', 'cos', (['error.survey.inc_rad[1:]'], {}), '(error.survey.inc_rad[1:])\n', (55159, 55185), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((57326, 57351), 'numpy.cos', 'cos', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (57329, 57351), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((57370, 57400), 'numpy.cos', 'cos', (['error.survey.azi_true_rad'], {}), '(error.survey.azi_true_rad)\n', (57373, 57400), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((28518, 28548), 'numpy.cos', 'cos', (['error.survey.azi_true_rad'], {}), '(error.survey.azi_true_rad)\n', (28521, 28548), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((28572, 28597), 'numpy.sin', 'sin', (['error.survey.inc_rad'], {}), '(error.survey.inc_rad)\n', (28575, 28597), False, 'from numpy import sin, cos, tan, pi, sqrt\n'), ((37154, 37180), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37162, 37180), True, 'import numpy as np\n'), ((37648, 37674), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37656, 37674), True, 'import numpy as np\n'), ((37768, 37794), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37776, 37794), True, 'import numpy as np\n'), ((40270, 40296), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (40278, 40296), True, 'import numpy as np\n'), ((40389, 40415), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (40397, 40415), True, 'import numpy as np\n'), ((47641, 47667), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (47649, 47667), True, 'import numpy as np\n'), ((37102, 37128), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37110, 37128), True, 'import numpy as np\n'), ((37206, 37232), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37214, 37232), True, 'import numpy as np\n'), ((37258, 37284), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (37266, 37284), True, 'import numpy as np\n'), ((44031, 44057), 'numpy.array', 'np.array', (['error.survey_rad'], {}), '(error.survey_rad)\n', (44039, 44057), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#
# Project: silx (originally pyFAI)
# https://github.com/silx-kit/silx
#
# Copyright (C) 2012-2017 European Synchrotron Radiation Facility, Grenoble, France
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "25/11/2020"
import unittest
import numpy
import logging
logger = logging.getLogger(__name__)
from ..bilinear import BilinearImage
class TestBilinear(unittest.TestCase):
"""basic maximum search test"""
N = 1000
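    # number of random starting points tried in each maximum-search test below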
def test_max_search_round(self):
"""test maximum search using random points: maximum is at the pixel center"""
a = numpy.arange(100) - 40.
b = numpy.arange(100) - 60.
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = BilinearImage(gg)
self.assertAlmostEqual(b.maxi, 1, 2, "maxi is almost 1")
self.assertLess(b.mini, 0.3, "mini should be around 0.23")
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40) > 1e-4 or abs(l - 60) > 1e-4:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
ok += 1
logger.debug("Success rate: %.1f", 100. * ok / self.N)
self.assertEqual(ok, self.N, "Maximum is always found")
def test_max_search_half(self):
"""test maximum search using random points: maximum is at a pixel edge"""
a = numpy.arange(100) - 40.5
b = numpy.arange(100) - 60.5
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = BilinearImage(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40.5) > 0.5 or abs(l - 60.5) > 0.5:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
ok += 1
logger.debug("Success rate: %.1f", 100. * ok / self.N)
self.assertEqual(ok, self.N, "Maximum is always found")
def test_map(self):
N = 6
y, x = numpy.ogrid[:N,:N + 10]
img = x + y
b = BilinearImage(img)
x2d = numpy.zeros_like(y) + x
y2d = numpy.zeros_like(x) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img).max(), 0, "images are the same (corners)")
x2d = numpy.zeros_like(y) + (x[:,:-1] + 0.5)
y2d = numpy.zeros_like(x[:,:-1]) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img[:,:-1] - 0.5).max(), 0, "images are the same (middle)")
x2d = numpy.zeros_like(y[:-1,:]) + (x[:,:-1] + 0.5)
y2d = numpy.zeros_like(x[:,:-1]) + (y[:-1,:] + 0.5)
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img[:-1, 1:]).max(), 0, "images are the same (center)")
def test_mask_grad(self):
N = 100
img = numpy.arange(N * N).reshape(N, N)
        # No masked pixels on the boundaries (that would complicate the test);
        # odd indices keep masked pixels non-adjacent
masked = 2 * numpy.random.randint(0, int((N - 1) / 2), size=(2, N)) + 1
mask = numpy.zeros((N, N), dtype=numpy.uint8)
mask[(masked[0], masked[1])] = 1
self.assertLessEqual(mask.sum(), N, "At most N pixels are masked")
b = BilinearImage(img, mask=mask)
self.assertEqual(b.has_mask, True, "interpolator has mask")
self.assertEqual(b.maxi, N * N - 1, "maxi is N²-1")
self.assertEqual(b.mini, 0, "mini is 0")
y, x = numpy.ogrid[:N,:N]
x2d = numpy.zeros_like(y) + x
y2d = numpy.zeros_like(x) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(numpy.nanmax(abs(res1 - img)), 0, "images are the same (corners), or Nan ")
x2d = numpy.zeros_like(y) + (x[:,:-1] + 0.5)
y2d = numpy.zeros_like(x[:,:-1]) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertLessEqual(numpy.max(abs(res1 - img[:, 1:] + 1 / 2.)), 0.5, "images are the same (middle) +/- 0.5")
x2d = numpy.zeros_like(y[:-1]) + (x[:,:-1] + 0.5)
y2d = numpy.zeros_like(x[:,:-1]) + (y[:-1] + 0.5)
res1 = b.map_coordinates((y2d, x2d))
exp = 0.25 * (img[:-1,:-1] + img[:-1, 1:] + img[1:,:-1] + img[1:, 1:])
self.assertLessEqual(abs(res1 - exp).max(), N / 4, "images are almost the same (center)")
def test_profile_grad(self):
N = 100
img = numpy.arange(N * N).reshape(N, N)
b = BilinearImage(img)
res1 = b.profile_line((0, 0), (N - 1, N - 1))
l = numpy.ceil(numpy.sqrt(2) * N)
self.assertEqual(len(res1), l, "Profile has correct length")
self.assertLess((res1[:-2] - res1[1:-1]).std(), 1e-3, "profile is linear (excluding last point)")
def test_profile_gaus(self):
N = 100
x = numpy.arange(N) - N // 2.0
g = numpy.exp(-x * x / (N * N))
img = numpy.outer(g, g)
b = BilinearImage(img)
res_hor = b.profile_line((N // 2, 0), (N // 2, N - 1))
res_ver = b.profile_line((0, N // 2), (N - 1, N // 2))
self.assertEqual(len(res_hor), N, "Profile has correct length")
self.assertEqual(len(res_ver), N, "Profile has correct length")
self.assertLess(abs(res_hor - g).max(), 1e-5, "correct horizontal profile")
self.assertLess(abs(res_ver - g).max(), 1e-5, "correct vertical profile")
# Profile with linewidth=3
expected_profile = img[:, N // 2 - 1:N // 2 + 2].mean(axis=1)
res_hor = b.profile_line((N // 2, 0), (N // 2, N - 1), linewidth=3)
res_ver = b.profile_line((0, N // 2), (N - 1, N // 2), linewidth=3)
self.assertEqual(len(res_hor), N, "Profile has correct length")
self.assertEqual(len(res_ver), N, "Profile has correct length")
self.assertLess(abs(res_hor - expected_profile).max(), 1e-5,
"correct horizontal profile")
self.assertLess(abs(res_ver - expected_profile).max(), 1e-5,
"correct vertical profile")
def suite():
testsuite = unittest.TestSuite()
testsuite.addTest(TestBilinear("test_max_search_round"))
testsuite.addTest(TestBilinear("test_max_search_half"))
testsuite.addTest(TestBilinear("test_map"))
testsuite.addTest(TestBilinear("test_profile_grad"))
testsuite.addTest(TestBilinear("test_profile_gaus"))
testsuite.addTest(TestBilinear("test_mask_grad"))
return testsuite
|
[
"numpy.outer",
"numpy.zeros_like",
"unittest.TestSuite",
"numpy.zeros",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"logging.getLogger",
"numpy.sqrt"
] |
[((1380, 1407), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1397, 1407), False, 'import logging\n'), ((7507, 7527), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (7525, 7527), False, 'import unittest\n'), ((1744, 1768), 'numpy.exp', 'numpy.exp', (['(-a * a / 4000)'], {}), '(-a * a / 4000)\n', (1753, 1768), False, 'import numpy\n'), ((1782, 1806), 'numpy.exp', 'numpy.exp', (['(-b * b / 6000)'], {}), '(-b * b / 6000)\n', (1791, 1806), False, 'import numpy\n'), ((1820, 1839), 'numpy.outer', 'numpy.outer', (['ga', 'gb'], {}), '(ga, gb)\n', (1831, 1839), False, 'import numpy\n'), ((2770, 2794), 'numpy.exp', 'numpy.exp', (['(-a * a / 4000)'], {}), '(-a * a / 4000)\n', (2779, 2794), False, 'import numpy\n'), ((2808, 2832), 'numpy.exp', 'numpy.exp', (['(-b * b / 6000)'], {}), '(-b * b / 6000)\n', (2817, 2832), False, 'import numpy\n'), ((2846, 2865), 'numpy.outer', 'numpy.outer', (['ga', 'gb'], {}), '(ga, gb)\n', (2857, 2865), False, 'import numpy\n'), ((4568, 4606), 'numpy.zeros', 'numpy.zeros', (['(N, N)'], {'dtype': 'numpy.uint8'}), '((N, N), dtype=numpy.uint8)\n', (4579, 4606), False, 'import numpy\n'), ((6302, 6329), 'numpy.exp', 'numpy.exp', (['(-x * x / (N * N))'], {}), '(-x * x / (N * N))\n', (6311, 6329), False, 'import numpy\n'), ((6344, 6361), 'numpy.outer', 'numpy.outer', (['g', 'g'], {}), '(g, g)\n', (6355, 6361), False, 'import numpy\n'), ((1671, 1688), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (1683, 1688), False, 'import numpy\n'), ((1707, 1724), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (1719, 1724), False, 'import numpy\n'), ((2695, 2712), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (2707, 2712), False, 'import numpy\n'), ((2732, 2749), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (2744, 2749), False, 'import numpy\n'), ((3601, 3620), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (3617, 3620), False, 'import numpy\n'), ((3639, 3658), 'numpy.zeros_like', 'numpy.zeros_like', (['x'], {}), '(x)\n', (3655, 3658), False, 'import numpy\n'), ((3807, 3826), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (3823, 3826), False, 'import numpy\n'), ((3860, 3887), 'numpy.zeros_like', 'numpy.zeros_like', (['x[:, :-1]'], {}), '(x[:, :-1])\n', (3876, 3887), False, 'import numpy\n'), ((4047, 4074), 'numpy.zeros_like', 'numpy.zeros_like', (['y[:-1, :]'], {}), '(y[:-1, :])\n', (4063, 4074), False, 'import numpy\n'), ((4107, 4134), 'numpy.zeros_like', 'numpy.zeros_like', (['x[:, :-1]'], {}), '(x[:, :-1])\n', (4123, 4134), False, 'import numpy\n'), ((4992, 5011), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (5008, 5011), False, 'import numpy\n'), ((5030, 5049), 'numpy.zeros_like', 'numpy.zeros_like', (['x'], {}), '(x)\n', (5046, 5049), False, 'import numpy\n'), ((5215, 5234), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (5231, 5234), False, 'import numpy\n'), ((5268, 5295), 'numpy.zeros_like', 'numpy.zeros_like', (['x[:, :-1]'], {}), '(x[:, :-1])\n', (5284, 5295), False, 'import numpy\n'), ((5477, 5501), 'numpy.zeros_like', 'numpy.zeros_like', (['y[:-1]'], {}), '(y[:-1])\n', (5493, 5501), False, 'import numpy\n'), ((5535, 5562), 'numpy.zeros_like', 'numpy.zeros_like', (['x[:, :-1]'], {}), '(x[:, :-1])\n', (5551, 5562), False, 'import numpy\n'), ((6263, 6278), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (6275, 6278), False, 'import numpy\n'), ((2070, 2095), 'numpy.random.randint', 'numpy.random.randint', 
(['(100)'], {}), '(100)\n', (2090, 2095), False, 'import numpy\n'), ((2097, 2122), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (2117, 2122), False, 'import numpy\n'), ((2962, 2987), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (2982, 2987), False, 'import numpy\n'), ((2989, 3014), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (3009, 3014), False, 'import numpy\n'), ((4351, 4370), 'numpy.arange', 'numpy.arange', (['(N * N)'], {}), '(N * N)\n', (4363, 4370), False, 'import numpy\n'), ((5865, 5884), 'numpy.arange', 'numpy.arange', (['(N * N)'], {}), '(N * N)\n', (5877, 5884), False, 'import numpy\n'), ((6007, 6020), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (6017, 6020), False, 'import numpy\n')]
|
"""
scatter plots of magnitudes vs various parameters... to see whether the EPD
linear fitting approach works.
"""
import numpy as np, matplotlib.pyplot as plt, pandas as pd
from glob import glob
import os
from astropy.io import fits
from plot_mag_vs_EPD_parameters import get_data
import seaborn as sns
from scipy.interpolate import splprep, splev
from numpy import array as nparr
def make_pairplots(lcpaths, lcdatalist, magtype='IRM1', frac_of_lc=0.4):
# make pairplots of X,Y,T,MAG,S,D,K, and just X,Y,T,MAG
# frac_of_lc: for fitting purposes, we want orbit-specific data.
xcols = ['FSV','FDV','FKV','XIC','YIC','CCDTEMP',magtype]
xkeys = ['<KEY>',magtype]
for lcpath, lc in zip(lcpaths, lcdatalist):
savdir = '../results/bspline_fit_xyTmag/'
savname = '{}_frac{:.1f}_pairplot_{}.png'.format(
magtype, frac_of_lc, os.path.splitext(os.path.basename(lcpath))[0])
savpath = os.path.join(savdir, savname)
if os.path.exists(savpath):
print('found {}, continue'.format(savpath))
continue
time = lc['TMID_UTC']
timeind = np.argsort(time)
d = {}
for xcol, k in zip(xcols, xkeys):
# pandas/seaborn wants little-endian
le_array = lc[xcol].byteswap().newbyteorder()
# sort everything by time
time_sorted_arr = le_array[timeind]
# take the cut so that you deal with orbit-specific data, if
# desired
d[k] = time_sorted_arr[:int(frac_of_lc*len(time_sorted_arr))]
df = pd.DataFrame(d)
if np.all(pd.isnull(df[magtype])):
print('mags are all NaN for {}, continue'.format(savname))
continue
# PLOT X,Y,T,MAG,S,D,K
plt.close('all')
g = sns.PairGrid(df)
g = g.map_diag(plt.hist)
g = g.map_offdiag(plt.scatter, rasterized=True, s=10, alpha=0.8)
plt.savefig(savpath, bbox_inches='tight', dpi=400)
print('made {}'.format(savpath))
# PLOT SUBSET: ONLY X,Y,T,MAG
savname = '{}_frac{:.1f}_pairplot_xyTmag_{}.png'.format(
magtype, frac_of_lc, os.path.splitext(os.path.basename(lcpath))[0])
savpath = os.path.join(savdir, savname)
if os.path.exists(savpath):
print('found {}, continue'.format(savpath))
continue
plt.close('all')
g = sns.PairGrid(df, vars=['x','y','T',magtype])
g = g.map_diag(plt.hist)
g = g.map_offdiag(plt.scatter, rasterized=True, s=10, alpha=0.8)
plt.savefig(savpath, bbox_inches='tight', dpi=400)
print('made {}'.format(savpath))
def do_bspline_fit_xyTmag(lcpaths, lcdatalist, magtype='IRM1', frac_of_lc=0.4,
isffi=True):
if not isffi:
        raise AssertionError('windowsize for rolling std assumes FFI cadence')
# spline parameters
s_initial = 1.0 # smoothness parameter
korder = 3 # spline order
nest = -1 # estimate of number of knots needed (-1 = maximal)
# NOTE: here "s" is a hyperparameter. We want to tune it, by
# cross-validation. (Not BIC/chi-squared, which doesn't seem to be
# well-defined here.)
magerrtype = magtype.replace('M','E')
xcols = ['XIC','YIC','CCDTEMP',magtype,magerrtype]
xkeys = ['x','y','T',magtype,magerrtype]
for lcpath, lc in zip(lcpaths, lcdatalist):
time = lc['TMID_UTC']
timeind = np.argsort(time)
d = {}
for xcol, k in zip(xcols, xkeys):
# pandas/seaborn wants little-endian
le_array = lc[xcol].byteswap().newbyteorder()
# sort everything by time
time_sorted_arr = le_array[timeind]
# take the cut so that you fit orbit-specific data
d[k] = time_sorted_arr[:int(frac_of_lc*len(time_sorted_arr))]
df = pd.DataFrame(d)
if np.all(pd.isnull(df[magtype])):
            # savname is only defined further below, so report the light-curve path
            print('mags are all NaN for {}, continue'.format(
                os.path.basename(lcpath)))
continue
savdir = '../results/bspline_fit_xyTmag/'
savname = '{}_frac{:.1f}_scatterfit_xval_xyTmag_{}.png'.format(
magtype, frac_of_lc, os.path.splitext(os.path.basename(lcpath))[0])
savpath = os.path.join(savdir, savname)
if os.path.exists(savpath):
print('found {}, continue'.format(savpath))
continue
        # assemble the data vectors (x, y, T, mag) defining the curve to fit
vec_x_list = [nparr(df['x']), nparr(df['y']), nparr(df['T']),
nparr(df[magtype])]
ndim = len(vec_x_list)
# NOTE omitting any weight vector (seems to be OK).
# # to get the weight vector, estimate uncertainty in x,y,T,mag as
# # 1-sigma standard deviation from 6-hr timescale window. Then weights =
# # 1/standard deviation.
# w = []
# for _x in x:
# windowsize = 12 # 6 hour timescale = 12 time points for FFIs.
        #     _w = pd.Series(_x).rolling(windowsize).std()  # (pd.rolling_std was removed from pandas)
# w.append(1/(np.ones_like(_x)*np.nanmean(_w)))
# Find B-spline representation of N-dimensional curve. Assumption is
# that the list of time-series vectors represent a curve in
# N-dimensional space parametrized by u, for u in [0,1]. We are trying
# to find a smooth approximating spline curve g(u). This function wraps
# the PARCUR routine from FITPACK, a FORTRAN library. Written by Paul
# Dierckx. PARCUR is for fitting of parametric open curves. (e.g.,
# Dierckx, "Curve and surface fitting with splines", (1993, pg 111, sec
# 6.3.1). The method, and statement of the optimization problem, are
# equations 6.46 and 6.47 of the above reference.
# The smoothing parameter s must be positive.
s_grid = np.logspace(-3,-1,num=5)
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score, explained_variance_score
        r_sq_means, expl_var_means, tckp_d = [], [], {}
for s in s_grid[::-1]: #FIXME
# split data into k subsets
n_splits = 4 # = k
X = np.atleast_2d(vec_x_list).T
n_data = X.shape[0]
kf = KFold(n_splits=n_splits)
r_sq_list, expl_var_list = [], []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
# supposedly necessary transformation for splprep to run
X_train_list = [ X_train[:,0],X_train[:,1],X_train[:,2],X_train[:,3] ]
X_test_list = [ X_test[:,0],X_test[:,1],X_test[:,2],X_test[:,3] ]
mag_true = X_test[:,3]
(tckp_train, u_train), metric, ier, msg = splprep(X_train_list,
s=s,
k=korder,
nest=-1,
task=0,
full_output=1)
                # tckp_train[0]: knot positions in the curve parameter u
                # tckp_train[1]: B-spline coefficients, one array per dimension (x,y,T,mag)
                # tckp_train[2]: spline order
train_knots = tckp_train[0]
# get coefficients for the TEST data, using the set of knots
# determined for the TRAINING data.
                # import IPython; IPython.embed()  # interactive breakpoint, disabled so the fit runs unattended
(tckp_test, u_test), metric_test, ier_test, msg_test = (
splprep(X_test_list, s=s, k=korder, t=train_knots, task=-1,
full_output=1)
)
# tckp contains information about the knots. answers: what are the
# spline fit coefficients? _where_ are the knots? (doesn't need to
# be x,y,T,mag). what order is the spline?
x_pred, y_pred, T_pred, mag_pred = splev(u_test, tckp_test)
r_sq_list.append(r2_score(mag_true, mag_pred))
expl_var_list.append(explained_variance_score(mag_true, mag_pred))
            # import IPython; IPython.embed()  # interactive breakpoint, disabled so the fit runs unattended
r_sq_means.append( np.mean(r_sq_list) )
expl_var_means.append( np.mean(expl_var_list) )
# SEPARATELY, get the spline fit coefficient for the entire
# dataset. (using whatever knots are found to be best. this is the
# same number, since it's the same s as above).
(tckp_full, u_full), _, _, _ = (
splprep(vec_x_list, s=s, k=korder, nest=-1, task=0,
full_output=1)
)
tckp_d[s] = {}
tckp_d[s]['tckp_full'] = tckp_full
tckp_d[s]['u_full'] = u_full
tckp_d[s]['xval_rsq'] = np.mean(r_sq_list)
tckp_d[s]['xval_explvar'] = np.mean(expl_var_list)
r_sq_means = nparr(r_sq_means)
expl_var_means = nparr(expl_var_means)
best_s_from_r_sq = s_grid[ np.argmax(r_sq_means) ]
best_s_from_explvar = s_grid[ np.argmax(expl_var_means) ]
newd_grid = {}
for s in s_grid:
newd_grid[s] = {}
            tckp = tckp_d[s]['tckp_full']  # the full-data fits are stored in tckp_d
# evaluate b-spline along the full interval u=[0,1]. use the knots
# and coefficients from the b-spline fits.
xnew, ynew, Tnew, magnew = splev(np.linspace(0,1,400), tckp)
newd_grid[s]['x'] = xnew
newd_grid[s]['y'] = ynew
newd_grid[s]['T'] = Tnew
newd_grid[s][magtype] = magnew
            newd_grid[s]['n_knots'] = len(tckp[0])  # tckp[0] is the knot vector; tckp[1] holds coefficients
newd_grid[s]['n_data'] = len(nparr(df['x']))
# 3 by 3 triangle plot
plt.close('all')
fig, axs = plt.subplots(nrows=3, ncols=3, figsize=(6,6))
axs = axs.flatten()
noplotinds = nparr([2,3,6])-1
plotinds = nparr([1,4,5,7,8,9])-1
xvals=['x','x','y','x','y','T']
yvals=['y','T','T',magtype,magtype,magtype]
noxlabelinds = nparr([1,4,5])-1
noylabelinds = nparr([5,8,9])-1
for ind, xval, yval in zip(plotinds, xvals, yvals):
ax = axs[ind]
ax.scatter(df[xval], df[yval], rasterized=True, label='data',
alpha=0.8, zorder=5, c='k', lw=0, s=3)
for s in s_grid:
labelstr = ('s={:.1e}; got {:d} knots; {:d} points; '
'xval $R^2$ {:.2f}; xval explvar {:.2f}' )
                label = labelstr.format(s, newd_grid[s]['n_knots'],
                                        newd_grid[s]['n_data'],
                                        tckp_d[s]['xval_rsq'],
                                        tckp_d[s]['xval_explvar'])  # knot/point counts live in newd_grid
ax.plot(newd_grid[s][xval], newd_grid[s][yval], label=label,
lw=1, markersize=0, zorder=6, alpha=0.6)
if ind==0:
ax.legend(bbox_to_anchor=(0.95,0.95), loc='upper right',
bbox_transform=fig.transFigure, fontsize='xx-small',
framealpha=1)
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
if ind not in noxlabelinds:
ax.set_xlabel(xval, fontsize='xx-small')
if ind not in noylabelinds:
ax.set_ylabel(yval, fontsize='xx-small')
if ind in noxlabelinds:
ax.set_xticklabels([])
if ind in noylabelinds:
ax.set_yticklabels([])
for ind in noplotinds:
ax = axs[ind]
ax.axis('off')
fig.tight_layout(h_pad=-2, w_pad=-2)
fig.savefig(savpath, dpi=400, bbox_inches='tight')
print('made {}'.format(savpath))
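def _splprep_toy_example():
    """
    Hedged aside (an added illustration, not part of the original pipeline):
    a minimal, self-contained demo of the splprep/splev calls used above, on
    a toy 2D curve. Assumes only numpy/scipy, which are imported at the top.
    """
    t = np.linspace(0, 2 * np.pi, 50)
    # parametric B-spline fit of a circle; full_output=1 mirrors the call
    # signature used in do_bspline_fit_xyTmag
    (tckp, u), fp, ier, msg = splprep([np.cos(t), np.sin(t)], s=0.01, k=3,
                                       full_output=1)
    # tckp[0]: knot vector in u; tckp[1]: coefficients per dimension; tckp[2]: order
    x_smooth, y_smooth = splev(np.linspace(0, 1, 200), tckp)
    return x_smooth, y_smooth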
if __name__=="__main__":
np.random.seed(42)
n_lcs = 10
lcpaths, lcdatalist = get_data(n_lcs=n_lcs)
make_pairplots(lcpaths, lcdatalist, magtype='IRM1', frac_of_lc=1.0)
make_pairplots(lcpaths, lcdatalist, magtype='IRM1', frac_of_lc=0.4)
do_bspline_fit_xyTmag(lcpaths, lcdatalist)
|
[
"numpy.random.seed",
"numpy.argmax",
"numpy.logspace",
"sklearn.metrics.r2_score",
"numpy.argsort",
"numpy.mean",
"seaborn.PairGrid",
"os.path.join",
"numpy.atleast_2d",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"os.path.exists",
"IPython.embed",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"os.path.basename",
"plot_mag_vs_EPD_parameters.get_data",
"sklearn.metrics.explained_variance_score",
"pandas.isnull",
"sklearn.model_selection.KFold",
"scipy.interpolate.splprep",
"numpy.array",
"scipy.interpolate.splev",
"matplotlib.pyplot.savefig"
] |
[((11965, 11983), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (11979, 11983), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((12027, 12048), 'plot_mag_vs_EPD_parameters.get_data', 'get_data', ([], {'n_lcs': 'n_lcs'}), '(n_lcs=n_lcs)\n', (12035, 12048), False, 'from plot_mag_vs_EPD_parameters import get_data\n'), ((939, 968), 'os.path.join', 'os.path.join', (['savdir', 'savname'], {}), '(savdir, savname)\n', (951, 968), False, 'import os\n'), ((981, 1004), 'os.path.exists', 'os.path.exists', (['savpath'], {}), '(savpath)\n', (995, 1004), False, 'import os\n'), ((1132, 1148), 'numpy.argsort', 'np.argsort', (['time'], {}), '(time)\n', (1142, 1148), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1582, 1597), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1594, 1597), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1774, 1790), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1783, 1790), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1804, 1820), 'seaborn.PairGrid', 'sns.PairGrid', (['df'], {}), '(df)\n', (1816, 1820), True, 'import seaborn as sns\n'), ((1936, 1986), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savpath'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "(savpath, bbox_inches='tight', dpi=400)\n", (1947, 1986), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((2230, 2259), 'os.path.join', 'os.path.join', (['savdir', 'savname'], {}), '(savdir, savname)\n', (2242, 2259), False, 'import os\n'), ((2271, 2294), 'os.path.exists', 'os.path.exists', (['savpath'], {}), '(savpath)\n', (2285, 2294), False, 'import os\n'), ((2382, 2398), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2391, 2398), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((2412, 2459), 'seaborn.PairGrid', 'sns.PairGrid', (['df'], {'vars': "['x', 'y', 'T', magtype]"}), "(df, vars=['x', 'y', 'T', magtype])\n", (2424, 2459), True, 'import seaborn as sns\n'), ((2572, 2622), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savpath'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "(savpath, bbox_inches='tight', dpi=400)\n", (2583, 2622), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((3445, 3461), 'numpy.argsort', 'np.argsort', (['time'], {}), '(time)\n', (3455, 3461), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((3863, 3878), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3875, 3878), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((4236, 4265), 'os.path.join', 'os.path.join', (['savdir', 'savname'], {}), '(savdir, savname)\n', (4248, 4265), False, 'import os\n'), ((4278, 4301), 'os.path.exists', 'os.path.exists', (['savpath'], {}), '(savpath)\n', (4292, 4301), False, 'import os\n'), ((5777, 5803), 'numpy.logspace', 'np.logspace', (['(-3)', '(-1)'], {'num': '(5)'}), '(-3, -1, num=5)\n', (5788, 5803), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((9015, 9032), 'numpy.array', 'nparr', (['r_sq_means'], {}), '(r_sq_means)\n', (9020, 9032), True, 'from numpy import array as nparr\n'), ((9058, 9079), 'numpy.array', 'nparr', (['expl_var_means'], {}), '(expl_var_means)\n', (9063, 9079), True, 'from numpy import array as nparr\n'), ((9838, 9854), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9847, 9854), True, 'import numpy as np, matplotlib.pyplot as plt, 
pandas as pd\n'), ((9875, 9921), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(3)', 'figsize': '(6, 6)'}), '(nrows=3, ncols=3, figsize=(6, 6))\n', (9887, 9921), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((1617, 1639), 'pandas.isnull', 'pd.isnull', (['df[magtype]'], {}), '(df[magtype])\n', (1626, 1639), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((3898, 3920), 'pandas.isnull', 'pd.isnull', (['df[magtype]'], {}), '(df[magtype])\n', (3907, 3920), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((4434, 4448), 'numpy.array', 'nparr', (["df['x']"], {}), "(df['x'])\n", (4439, 4448), True, 'from numpy import array as nparr\n'), ((4450, 4464), 'numpy.array', 'nparr', (["df['y']"], {}), "(df['y'])\n", (4455, 4464), True, 'from numpy import array as nparr\n'), ((4466, 4480), 'numpy.array', 'nparr', (["df['T']"], {}), "(df['T'])\n", (4471, 4480), True, 'from numpy import array as nparr\n'), ((4499, 4517), 'numpy.array', 'nparr', (['df[magtype]'], {}), '(df[magtype])\n', (4504, 4517), True, 'from numpy import array as nparr\n'), ((6204, 6228), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (6209, 6228), False, 'from sklearn.model_selection import KFold\n'), ((8654, 8720), 'scipy.interpolate.splprep', 'splprep', (['vec_x_list'], {'s': 's', 'k': 'korder', 'nest': '(-1)', 'task': '(0)', 'full_output': '(1)'}), '(vec_x_list, s=s, k=korder, nest=-1, task=0, full_output=1)\n', (8661, 8720), False, 'from scipy.interpolate import splprep, splev\n'), ((8911, 8929), 'numpy.mean', 'np.mean', (['r_sq_list'], {}), '(r_sq_list)\n', (8918, 8929), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((8970, 8992), 'numpy.mean', 'np.mean', (['expl_var_list'], {}), '(expl_var_list)\n', (8977, 8992), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((9116, 9137), 'numpy.argmax', 'np.argmax', (['r_sq_means'], {}), '(r_sq_means)\n', (9125, 9137), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((9178, 9203), 'numpy.argmax', 'np.argmax', (['expl_var_means'], {}), '(expl_var_means)\n', (9187, 9203), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((9971, 9987), 'numpy.array', 'nparr', (['[2, 3, 6]'], {}), '([2, 3, 6])\n', (9976, 9987), True, 'from numpy import array as nparr\n'), ((10007, 10032), 'numpy.array', 'nparr', (['[1, 4, 5, 7, 8, 9]'], {}), '([1, 4, 5, 7, 8, 9])\n', (10012, 10032), True, 'from numpy import array as nparr\n'), ((10146, 10162), 'numpy.array', 'nparr', (['[1, 4, 5]'], {}), '([1, 4, 5])\n', (10151, 10162), True, 'from numpy import array as nparr\n'), ((10186, 10202), 'numpy.array', 'nparr', (['[5, 8, 9]'], {}), '([5, 8, 9])\n', (10191, 10202), True, 'from numpy import array as nparr\n'), ((6125, 6150), 'numpy.atleast_2d', 'np.atleast_2d', (['vec_x_list'], {}), '(vec_x_list)\n', (6138, 6150), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((6739, 6807), 'scipy.interpolate.splprep', 'splprep', (['X_train_list'], {'s': 's', 'k': 'korder', 'nest': '(-1)', 'task': '(0)', 'full_output': '(1)'}), '(X_train_list, s=s, k=korder, nest=-1, task=0, full_output=1)\n', (6746, 6807), False, 'from scipy.interpolate import splprep, splev\n'), ((7492, 7507), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (7505, 7507), False, 'import IPython\n'), ((7625, 7699), 'scipy.interpolate.splprep', 'splprep', (['X_test_list'], {'s': 's', 'k': 'korder', 
't': 'train_knots', 'task': '(-1)', 'full_output': '(1)'}), '(X_test_list, s=s, k=korder, t=train_knots, task=-1, full_output=1)\n', (7632, 7699), False, 'from scipy.interpolate import splprep, splev\n'), ((8023, 8047), 'scipy.interpolate.splev', 'splev', (['u_test', 'tckp_test'], {}), '(u_test, tckp_test)\n', (8028, 8047), False, 'from scipy.interpolate import splprep, splev\n'), ((8228, 8243), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (8241, 8243), False, 'import IPython\n'), ((8300, 8318), 'numpy.mean', 'np.mean', (['r_sq_list'], {}), '(r_sq_list)\n', (8307, 8318), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((8356, 8378), 'numpy.mean', 'np.mean', (['expl_var_list'], {}), '(expl_var_list)\n', (8363, 8378), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((9506, 9528), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(400)'], {}), '(0, 1, 400)\n', (9517, 9528), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd\n'), ((9782, 9796), 'numpy.array', 'nparr', (["df['x']"], {}), "(df['x'])\n", (9787, 9796), True, 'from numpy import array as nparr\n'), ((891, 915), 'os.path.basename', 'os.path.basename', (['lcpath'], {}), '(lcpath)\n', (907, 915), False, 'import os\n'), ((2182, 2206), 'os.path.basename', 'os.path.basename', (['lcpath'], {}), '(lcpath)\n', (2198, 2206), False, 'import os\n'), ((4188, 4212), 'os.path.basename', 'os.path.basename', (['lcpath'], {}), '(lcpath)\n', (4204, 4212), False, 'import os\n'), ((8082, 8110), 'sklearn.metrics.r2_score', 'r2_score', (['mag_true', 'mag_pred'], {}), '(mag_true, mag_pred)\n', (8090, 8110), False, 'from sklearn.metrics import r2_score, explained_variance_score\n'), ((8149, 8193), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['mag_true', 'mag_pred'], {}), '(mag_true, mag_pred)\n', (8173, 8193), False, 'from sklearn.metrics import r2_score, explained_variance_score\n')]
|
import cv2
import numpy as np
from scipy import misc
i = misc.ascent()
import matplotlib.pyplot as plt
plt.grid(False)
i_transformed = np.copy(i)
print(i_transformed.shape)
size_x = i_transformed.shape[0]
size_y = i_transformed.shape[1]
print(i_transformed.shape)
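# a 3x3 Sobel-style edge-detection kernel; weight rescales the summed response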
filter = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]]
weight = 1
for x in range(1,size_x-1):
for y in range(1,size_y-1):
output_pixel = 0.0
output_pixel = output_pixel + (i[x - 1, y-1] * filter[0][0])
output_pixel = output_pixel + (i[x, y-1] * filter[0][1])
output_pixel = output_pixel + (i[x + 1, y-1] * filter[0][2])
output_pixel = output_pixel + (i[x-1, y] * filter[1][0])
output_pixel = output_pixel + (i[x, y] * filter[1][1])
output_pixel = output_pixel + (i[x+1, y] * filter[1][2])
output_pixel = output_pixel + (i[x-1, y+1] * filter[2][0])
output_pixel = output_pixel + (i[x, y+1] * filter[2][1])
output_pixel = output_pixel + (i[x+1, y+1] * filter[2][2])
output_pixel = output_pixel * weight
if(output_pixel<0):
output_pixel=0
if(output_pixel>255):
output_pixel=255
i_transformed[x, y] = output_pixel
plt.gray()
plt.axis('off')
plt.imshow(i_transformed)
plt.show()
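# Hedged aside (an addition, not the original code): on the interior pixels the
# hand-written loop above matches a single vectorized call; the .T compensates
# for the loop indexing the kernel by (column offset, row offset), and borders
# differ (scipy reflects at the edges, the loop leaves them untouched).
from scipy import ndimage
fast = np.clip(ndimage.correlate(i.astype(float), np.array(filter).T), 0, 255)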
# pooling: keep the most significant contributor (the maximum) within each
# 2x2 block, halving the image in each dimension
# new_x = int(size_x/2)
# new_y = int(size_y/2)
# newImage = np.zeros((new_x, new_y))
# for x in range(0, size_x, 2):
# for y in range(0, size_y, 2):
# pixels = []
# pixels.append(i_transformed[x, y])
# pixels.append(i_transformed[x+1, y])
# pixels.append(i_transformed[x, y+1])
# pixels.append(i_transformed[x+1, y+1])
# pixels.sort(reverse=True)
# newImage[int(x/2),int(y/2)] = pixels[0]
# # Plot the image. Note the size of the axes -- now 256 pixels instead of 512
# plt.gray()
# plt.grid(False)
# plt.imshow(newImage)
# #plt.axis('off')
# plt.show()
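# Hedged aside (an addition): the commented 2x2 max-pooling above can be done
# with a block reshape instead of loops (assumes even dimensions, as with the
# 512x512 ascent image):
pooled = i_transformed.reshape(size_x // 2, 2, size_y // 2, 2).max(axis=(1, 3))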
|
[
"matplotlib.pyplot.gray",
"matplotlib.pyplot.show",
"numpy.copy",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"scipy.misc.ascent",
"matplotlib.pyplot.grid"
] |
[((57, 70), 'scipy.misc.ascent', 'misc.ascent', ([], {}), '()\n', (68, 70), False, 'from scipy import misc\n'), ((104, 119), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (112, 119), True, 'import matplotlib.pyplot as plt\n'), ((137, 147), 'numpy.copy', 'np.copy', (['i'], {}), '(i)\n', (144, 147), True, 'import numpy as np\n'), ((1173, 1183), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (1181, 1183), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1199), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1192, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1225), 'matplotlib.pyplot.imshow', 'plt.imshow', (['i_transformed'], {}), '(i_transformed)\n', (1210, 1225), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1234, 1236), True, 'import matplotlib.pyplot as plt\n')]
|
"""
:author:
<NAME> (<EMAIL>)
2021
:License:
This package is published under Simplified BSD License.
"""
from array import array
import numpy as np
FORMAT = {1: "b", 2: "h", 4: "i"}
def pad(data, max_len, type):
if type == "Silence":
return pad_with_silence(data, max_len)
elif type == "Data":
return pad_with_data(data, max_len)
else:
return pad_with_noise(data, max_len)
def pad_with_silence(data, max_len):
to_add = max(max_len - len(data), 0)
padded = np.pad(data, (0, to_add), mode='constant', constant_values=0)
return padded
def pad_with_data(data, max_len):
to_add = max(max_len - len(data), 0)
padded = np.zeros(shape=(max_len,), dtype="int16")
if to_add:
repeat = int(max_len / len(data))
rest = max_len % len(data)
for i in range(repeat):
start = i * len(data)
end = (i+1) * len(data)
padded[start:end] = data[:]
        # fill the remaining tail with the start of the data
        padded[repeat*len(data):] = data[:rest]
return padded
return data
def pad_with_noise(data, max_len):
print("padding with noise not implemented yet... padding with silence")
return pad_with_silence(data, max_len)
def separate_channels(data, fmt, channels):
all_channels = array(fmt, data)
mono_channels = [
array(fmt, all_channels[ch::channels]) for ch in range(channels)
]
return mono_channels
def to_array(data, sample_width, channels):
fmt = FORMAT[sample_width]
if channels == 1:
return np.array(array(fmt, data))
return separate_channels(data, fmt, channels)
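# Example (hypothetical values): pad_with_data repeats the signal to fill the
# target length, so
# pad_with_data(np.array([1, 2, 3], dtype="int16"), 7) -> [1 2 3 1 2 3 1]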
|
[
"numpy.pad",
"numpy.zeros",
"array.array"
] |
[((480, 541), 'numpy.pad', 'np.pad', (['data', '(0, to_add)'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(data, (0, to_add), mode='constant', constant_values=0)\n", (486, 541), True, 'import numpy as np\n'), ((641, 682), 'numpy.zeros', 'np.zeros', ([], {'shape': '(max_len,)', 'dtype': '"""int16"""'}), "(shape=(max_len,), dtype='int16')\n", (649, 682), True, 'import numpy as np\n'), ((1190, 1206), 'array.array', 'array', (['fmt', 'data'], {}), '(fmt, data)\n', (1195, 1206), False, 'from array import array\n'), ((1228, 1266), 'array.array', 'array', (['fmt', 'all_channels[ch::channels]'], {}), '(fmt, all_channels[ch::channels])\n', (1233, 1266), False, 'from array import array\n'), ((1429, 1445), 'array.array', 'array', (['fmt', 'data'], {}), '(fmt, data)\n', (1434, 1445), False, 'from array import array\n')]
|
'''
cmbmap.py
Plot intensity skymap from Planck, and get power spectrum.
NOTE that I have not been able to install healpy in Canopy on Windows :(
'''
import numpy as np
import matplotlib.pyplot as plt
# from pylab import *
import healpy as hp
# here are a bunch of different maps; the last one seems to work best
# http://healpy.readthedocs.org/en/latest/tutorial.html
# http://irsa.ipac.caltech.edu/data/Planck/release_1/all-sky-maps/previews/COM_CompMap_CMB-commrul_2048_R1.00/index.html
# fn = '/Users/ajw/Downloads/COM_CompMap_CMB-commrul_2048_R1.00.fits'
# http://irsa.ipac.caltech.edu/data/Planck/release_1/all-sky-maps/previews/COM_CompMap_CMB-smica_2048_R1.20/index.html
# fn = '/Users/ajw/Downloads/COM_CompMap_CMB-smica_2048_R1.20.fits'
# http://irsa.ipac.caltech.edu/data/Planck/release_1/all-sky-maps/previews/COM_CompMap_CMB-nilc_2048_R1.20/index.html
fn = '/Users/ajw/Downloads/COM_CompMap_CMB-nilc_2048_R1.20.fits'
map_I = hp.read_map(fn)
# plot the Planck T map
#figure()
hp.mollview(map_I, coord='G', title='Planck R1, Histogram equalized Galactic', unit='mK', norm='hist')
#hp.graticule(dpar=30,dmer=30)
# analyze the map, get Cl power spectrum:
LMAX = 2048
cl = hp.anafast(map_I, lmax=LMAX)
#hp.write_cl('cl_Planck.fits', cl)
ell = np.arange(len(cl))
ecl = ell * (ell+1) * cl/(2.*np.pi)
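# (the conventional band-power D_l = l(l+1) C_l / 2pi, matching the plot axis labels below)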
# Get the Planck fit result: http://pla.esac.esa.int/pla/aio/planckResults.jsp?
# http://irsa.ipac.caltech.edu/data/Planck/release_1/ancillary-data/previews/COM_PowerSpect_CMB_R1.10/index.html
fn = 'COM_PowerSpect_CMB_R1.10.txt'
f = open(fn)
lines = f.readlines()
f.close()
# extract low-ell and high-ell power spectra
a = np.genfromtxt(lines[10:58],delimiter=",").transpose()
b = np.genfromtxt(lines[68:],delimiter=",").transpose()
# I don't know how to get the 2nd (high-ell) table from the fits file
#fn = 'COM_PowerSpect_CMB_R1.10.fits'
#cls = hp.read_cl(fn)
# "fit" from CAMB: http://lambda.gsfc.nasa.gov/toolbox/tb_camb_form.cfm
fn = 'test_scalCls.dat'
fn = 'camb_53319956_scalcls.dat.txt'
f = open(fn)
lines = f.readlines()
f.close()
c = np.genfromtxt(lines).transpose()
# here we see a significant unexplained discrepancy mid-ell.
plt.figure()
ax = plt.subplot()
plt.plot(ell,ecl,'k',label='anafast fit')
plt.errorbar(a[0],a[1],yerr=a[3],fmt='bo',label='COM_PowerSpect_CMB_R1.10')
plt.errorbar(b[0],b[3],xerr=b[2]-b[0],yerr=b[4],fmt='bo',label='COM_PowerSpect_CMB_R1.10')
plt.plot(c[0],c[1],'r',label='camb_53319956_scalcls')
#ax.set_xscale("log", nonposx='clip')
plt.xlim([2.,2500.])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\ell(\ell+1) C_\ell/2\pi$')
plt.title('Data from Planck, prediction from CAMB')
plt.legend()
plt.grid()
# part of the discrepancy come from masking, so reanalyze map with mask:
mask = hp.read_map('HFI_PowerSpect_Mask_2048_R1.10.fits').astype(bool)
map_I_masked = hp.ma(map_I)
map_I_masked.mask = np.logical_not(mask)
clm = hp.anafast(map_I_masked, lmax=LMAX)
eclm = ell * (ell+1) * clm/(2.*np.pi) * 2.5 # note the norm fudge-factor; should come from the mask
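# A sketch of deriving that factor from the mask itself, via the common f_sky
# approximation cl_masked ~ fsky * cl_true (an approximation, not an exact correction):
# fsky = mask.sum() / mask.size
# eclm = ell * (ell+1) * clm/(2.*np.pi) / fsky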
# plot results:
plt.figure()
ax = plt.subplot()
plt.plot(ell,eclm,'k',label='anafast fit - masked')
plt.errorbar(a[0],a[1],yerr=a[3],fmt='bo',label='COM_PowerSpect_CMB_R1.10')
plt.errorbar(b[0],b[3],xerr=b[2]-b[0],yerr=b[4],fmt='bo',label='COM_PowerSpect_CMB_R1.10')
plt.plot(c[0],c[1],'r',label='camb_53319956_scalcls')
#ax.set_xscale("log", nonposx='clip')
plt.xlim([2.,2500.])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\ell(\ell+1) C_\ell/2\pi$')
plt.title('Data from Planck - masked')
plt.legend()
plt.grid()
plt.show()
'''
# for fun, make a simulated map
nside = hp.get_nside(pl)
smap = hp.synfast(cl,nside)
hp.mollview(smap, coord='G', title='simulated data', unit='mK', norm='hist')
hp.graticule(dpar=30,dmer=30)
LMAX = 2048
cl1 = hp.anafast(smap, lmax=LMAX)
ell = arange(len(cl))
ecl1 = ell * (ell+1) * cl1/(2.*np.pi)
plt.figure()
ax = plt.subplot()
plt.plot(ell,ecl,'b',label="into to synfast")
plt.plot(ell,ecl1,'r',label="output from synfast")
plt.xlim([2.,2500.])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$\ell(\ell+1) C_\ell/2\pi$')
plt.legend()
plt.grid()
plt.show()
'''
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"healpy.mollview",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.logical_not",
"matplotlib.pyplot.ylabel",
"numpy.genfromtxt",
"healpy.ma",
"matplotlib.pyplot.figure",
"healpy.read_map",
"healpy.anafast",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.errorbar"
] |
[((942, 957), 'healpy.read_map', 'hp.read_map', (['fn'], {}), '(fn)\n', (953, 957), True, 'import healpy as hp\n'), ((993, 1100), 'healpy.mollview', 'hp.mollview', (['map_I'], {'coord': '"""G"""', 'title': '"""Planck R1, Histogram equalized Galactic"""', 'unit': '"""mK"""', 'norm': '"""hist"""'}), "(map_I, coord='G', title=\n 'Planck R1, Histogram equalized Galactic', unit='mK', norm='hist')\n", (1004, 1100), True, 'import healpy as hp\n'), ((1187, 1215), 'healpy.anafast', 'hp.anafast', (['map_I'], {'lmax': 'LMAX'}), '(map_I, lmax=LMAX)\n', (1197, 1215), True, 'import healpy as hp\n'), ((2157, 2169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2167, 2169), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2188), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (2186, 2188), True, 'import matplotlib.pyplot as plt\n'), ((2189, 2233), 'matplotlib.pyplot.plot', 'plt.plot', (['ell', 'ecl', '"""k"""'], {'label': '"""anafast fit"""'}), "(ell, ecl, 'k', label='anafast fit')\n", (2197, 2233), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2310), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['a[0]', 'a[1]'], {'yerr': 'a[3]', 'fmt': '"""bo"""', 'label': '"""COM_PowerSpect_CMB_R1.10"""'}), "(a[0], a[1], yerr=a[3], fmt='bo', label='COM_PowerSpect_CMB_R1.10')\n", (2243, 2310), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2409), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['b[0]', 'b[3]'], {'xerr': '(b[2] - b[0])', 'yerr': 'b[4]', 'fmt': '"""bo"""', 'label': '"""COM_PowerSpect_CMB_R1.10"""'}), "(b[0], b[3], xerr=b[2] - b[0], yerr=b[4], fmt='bo', label=\n 'COM_PowerSpect_CMB_R1.10')\n", (2319, 2409), True, 'import matplotlib.pyplot as plt\n'), ((2398, 2454), 'matplotlib.pyplot.plot', 'plt.plot', (['c[0]', 'c[1]', '"""r"""'], {'label': '"""camb_53319956_scalcls"""'}), "(c[0], c[1], 'r', label='camb_53319956_scalcls')\n", (2406, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2513), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[2.0, 2500.0]'], {}), '([2.0, 2500.0])\n', (2498, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\ell$"""'], {}), "('$\\\\ell$')\n", (2521, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2577), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\ell(\\\\ell+1) C_\\\\ell/2\\\\pi$"""'], {}), "('$\\\\ell(\\\\ell+1) C_\\\\ell/2\\\\pi$')\n", (2543, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2575, 2626), 'matplotlib.pyplot.title', 'plt.title', (['"""Data from Planck, prediction from CAMB"""'], {}), "('Data from Planck, prediction from CAMB')\n", (2584, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2639), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2637, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2640, 2650), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2648, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2826), 'healpy.ma', 'hp.ma', (['map_I'], {}), '(map_I)\n', (2819, 2826), True, 'import healpy as hp\n'), ((2847, 2867), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (2861, 2867), True, 'import numpy as np\n'), ((2874, 2909), 'healpy.anafast', 'hp.anafast', (['map_I_masked'], {'lmax': 'LMAX'}), '(map_I_masked, lmax=LMAX)\n', (2884, 2909), True, 'import healpy as hp\n'), ((3029, 3041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3039, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3060), 'matplotlib.pyplot.subplot', 'plt.subplot', 
([], {}), '()\n', (3058, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3061, 3115), 'matplotlib.pyplot.plot', 'plt.plot', (['ell', 'eclm', '"""k"""'], {'label': '"""anafast fit - masked"""'}), "(ell, eclm, 'k', label='anafast fit - masked')\n", (3069, 3115), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3192), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['a[0]', 'a[1]'], {'yerr': 'a[3]', 'fmt': '"""bo"""', 'label': '"""COM_PowerSpect_CMB_R1.10"""'}), "(a[0], a[1], yerr=a[3], fmt='bo', label='COM_PowerSpect_CMB_R1.10')\n", (3125, 3192), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3291), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['b[0]', 'b[3]'], {'xerr': '(b[2] - b[0])', 'yerr': 'b[4]', 'fmt': '"""bo"""', 'label': '"""COM_PowerSpect_CMB_R1.10"""'}), "(b[0], b[3], xerr=b[2] - b[0], yerr=b[4], fmt='bo', label=\n 'COM_PowerSpect_CMB_R1.10')\n", (3201, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3280, 3336), 'matplotlib.pyplot.plot', 'plt.plot', (['c[0]', 'c[1]', '"""r"""'], {'label': '"""camb_53319956_scalcls"""'}), "(c[0], c[1], 'r', label='camb_53319956_scalcls')\n", (3288, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3395), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[2.0, 2500.0]'], {}), '([2.0, 2500.0])\n', (3380, 3395), True, 'import matplotlib.pyplot as plt\n'), ((3393, 3414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\ell$"""'], {}), "('$\\\\ell$')\n", (3403, 3414), True, 'import matplotlib.pyplot as plt\n'), ((3415, 3459), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\ell(\\\\ell+1) C_\\\\ell/2\\\\pi$"""'], {}), "('$\\\\ell(\\\\ell+1) C_\\\\ell/2\\\\pi$')\n", (3425, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3495), 'matplotlib.pyplot.title', 'plt.title', (['"""Data from Planck - masked"""'], {}), "('Data from Planck - masked')\n", (3466, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3496, 3508), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3506, 3508), True, 'import matplotlib.pyplot as plt\n'), ((3509, 3519), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3517, 3519), True, 'import matplotlib.pyplot as plt\n'), ((3520, 3530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3528, 3530), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1679), 'numpy.genfromtxt', 'np.genfromtxt', (['lines[10:58]'], {'delimiter': '""","""'}), "(lines[10:58], delimiter=',')\n", (1650, 1679), True, 'import numpy as np\n'), ((1695, 1735), 'numpy.genfromtxt', 'np.genfromtxt', (['lines[68:]'], {'delimiter': '""","""'}), "(lines[68:], delimiter=',')\n", (1708, 1735), True, 'import numpy as np\n'), ((2062, 2082), 'numpy.genfromtxt', 'np.genfromtxt', (['lines'], {}), '(lines)\n', (2075, 2082), True, 'import numpy as np\n'), ((2732, 2782), 'healpy.read_map', 'hp.read_map', (['"""HFI_PowerSpect_Mask_2048_R1.10.fits"""'], {}), "('HFI_PowerSpect_Mask_2048_R1.10.fits')\n", (2743, 2782), True, 'import healpy as hp\n')]
|
"""
Function:Feedforward Neural Network (base class)
Author:lzb
Date:2021.01.07
"""
import numpy as np
import time
from gl import errorcode
from gl.array_string import array_2_string
from activation.normal_activation import Sigmoid
from activation.last_hop_activation import DichotomyLHA
from loss.loss import MSELoss
"""
class: FNN neural network (base class)
Special note:
    What is semantically a vector is, in the code, actually an [n, 1] matrix
"""
class FNN:
    # dimension of an input sample vector
    _sx_dim = 0
    # dimension of an output sample vector
    _sy_dim = 0
    # number of network layers
    _layer_count = 0
    # number of neurons in each layer
    _neuron_count_list = None
    # per-layer w parameter; w is a matrix (BP network) or a 3-D array (convolutional network)
    _w_layer = None
    # per-layer b parameter; b is a vector (BP network) or a 2-D array (convolutional network)
    _b_layer = None
    # shape list of each layer's w parameter (meaningless except for convolutional networks)
    _w_shape_layer = None
    # number of samples
    _sample_count = 0
    # sample input list (Sample X list); each input sample is a vector
    _sx_list = None
    # sample output list (Sample Y list); each output sample is a vector
    _sy_list = None
    # maximum number of training loops
    _loop_max = 1
    # learning rate
    _rate = 0
    # activation function object (an instance of class Activation)
    _activation = Sigmoid()
    # last-hop activation function object (an instance of class LastHopActivation)
    _last_hop_activation = DichotomyLHA()
    # loss function
    _loss = MSELoss()
def __init__(self, activation=None, last_hop_activation=None, loss=None):
"""
        Constructor
        :param activation: activation function object
        :param last_hop_activation: last-hop activation function object
        :param loss: loss function object
"""
if activation is not None:
self._activation = activation
if last_hop_activation is not None:
self._last_hop_activation = last_hop_activation
if loss is not None:
self._loss = loss
''''''
def train(self, sx_list, sy_list, loop_max, neuron_count_list, rate, w_shape_list=None):
"""
        Purpose: train the neural network\n
        Parameters:\n
        sx_list: list of training sample inputs\n
        sy_list: list of training sample outputs\n
        loop_max: maximum number of training loops\n
        neuron_count_list: number of neurons in each layer (meaningless for convolutional networks)\n
        rate: learning rate\n
        activation: activation function object\n
        last_hop_activation: last-hop activation function object\n
        loss: loss function object\n
        w_shape_list: shape list of each layer's w parameter (meaningless except for convolutional networks)\n
        Return value: error code\n
"""
        # 1. assign member variables
        self._sx_list = sx_list
        self._sy_list = sy_list
        self._loop_max = loop_max
        self._rate = rate
        # meaningless for a convolutional network (for one, simply pass None)
        self._neuron_count_list = neuron_count_list
        # meaningless unless this is a convolutional network (otherwise simply pass the default)
        self._w_shape_layer = w_shape_list
        # 2. validate
        err = self._valid()
        if errorcode.SUCCESS != err:
            print("\nvalid error, errcode = %d\n" % err)
            return err
        # 3. initialize w, b and the other parameters
        self._init_other_para()
        # 4. train
        return self._train()
"""
    Purpose: parameter validation
    Parameters: NULL
    Return value: error code
"""
def _valid(self):
        # 1. validate each layer's neurons
        err = self._valid_layer_neuron()
        if errorcode.SUCCESS != err:
            return err
        # 2. input samples and output samples
        err = self._valid_sample()
        if errorcode.SUCCESS != err:
            return err
        # 3. the maximum number of training loops must be >= 1
        if 1 > self._loop_max:
            return errorcode.FAILED
return errorcode.SUCCESS
"""
    Purpose: validate the neurons of each layer
    Parameters: NULL
    Return value: error code
"""
def _valid_layer_neuron(self):
        # 1. the number of network layers must be >= 1
        layer_count = len(self._neuron_count_list)
        if 1 > layer_count:
            return errorcode.FAILED
        # 2. the neuron count of every layer must be >= 1
        for layer in range(0, layer_count):
            count = self._neuron_count_list[layer]
            if 1 > count:
                return errorcode.FAILED
        return errorcode.SUCCESS
"""
    Purpose: validate the samples
    Parameters: NULL
    Return value: error code
"""
def _valid_sample(self):
        # 1. the number of input samples must equal the number of output samples
        len1 = len(self._sx_list)
        len2 = len(self._sy_list)
        if len1 != len2:
            return errorcode.FAILED
        # 2. the sample count must be >= 1
        sample_count = len(self._sx_list)
        if 1 > sample_count:
            return errorcode.FAILED
        # 3. sample vector dimensions
        # input vector dimension
        sx_dim = len(self._sx_list[0])
        # output vector dimension
        layer_count = len(self._neuron_count_list)
        sy_dim = self._neuron_count_list[layer_count - 1]
        # 3.1 the input/output sample vector dimensions must be >= 1
        if (1 > sx_dim) or (1 > sy_dim):
            return errorcode.FAILED
        # 3.2 the vector dimension of every input/output sample
        for i in range(0, sample_count):
            shape_in = self._sx_list[i].shape
            shape_out = self._sy_list[i].shape
            # vector dimension of the input sample
            if shape_in[0] != sx_dim:
                return errorcode.FAILED
            # an input sample may only have 1 column (it is a vector)
            if shape_in[1] != 1:
                return errorcode.FAILED
            # vector dimension of the output sample
            if shape_out[0] != sy_dim:
                return errorcode.FAILED
            # an output sample may only have 1 column (it is a vector)
            if shape_out[1] != 1:
                return errorcode.FAILED
return errorcode.SUCCESS
"""
    Purpose: initialize the other parameters
    Parameters: NULL
    Return value: error code
"""
def _init_other_para(self):
        # per-layer w and b parameters; w is a 2-D array, b is a 2-D array
        self._w_layer = list()
        self._b_layer = list()
        # number of samples
        self._sample_count = len(self._sx_list)
        # dimension of the network input vector
        self._sx_dim = len(self._sx_list[0])
        # number of network layers
        self._layer_count = len(self._neuron_count_list)
        # dimension of the network output vector
        self._sy_dim = self._neuron_count_list[self._layer_count - 1]
        # layer-1 w parameter; w is a 2-D array
        w = np.random.random((self._neuron_count_list[0], self._sx_dim))
        self._w_layer.append(w)
        # w parameters of layer 2 ~ layer layer-1; each w is a 2-D array
        for i in range(1, self._layer_count):
            w = np.random.random((self._neuron_count_list[i], self._neuron_count_list[i - 1]))
            self._w_layer.append(w)
        # b parameters of layer 1 ~ layer layer-1; each b is a vector
        for i in range(0, self._layer_count):
            b = np.zeros([self._neuron_count_list[i], 1])
self._b_layer.append(b)
return errorcode.SUCCESS
"""
    Purpose: training (protected method)
    Parameters: NULL
    Return value: error code
"""
def _train(self):
        # number of completed training loops
        loop = 0
        # print the start time
        localtime = time.asctime(time.localtime(time.time()))
        print("\nbegin time = " + localtime + "\n")
        while 1:
            if loop >= self._loop_max:
                # print the end time
                localtime = time.asctime(time.localtime(time.time()))
                print("\nend time = " + localtime + "\n")
                # print the parameters of the final round
                self._print_w_b_loop(loop)
                break
            # 1. preparation before each training round
            self._pre_train()
            loop = loop + 1
            # 2. train on every sample
            for i in range(0, self._sample_count):
                # the i-th training sample
                sx = self._sx_list[i]
                sy = self._sy_list[i]
                # 2.1 run the i-th training sample through the (multi-layer) network
                nn_y_list = self._calc_nn(sx)
                # 2.2 last-hop activation
                nn_y = nn_y_list[len(nn_y_list) - 1]
                last_hop_y = self._last_hop_activation.active_array(nn_y)
                nn_y_list.append(last_hop_y)
                # 2.3 revise the network parameters (e.g. W, B) from the computed results
                self._modify_fnn_para(nn_y_list, sx, sy)
return errorcode.SUCCESS
    # preparation work before each training round
    def _pre_train(self):
        """
        Preparation work before each training round (usually, nothing needs to be done)
:return: NULL
"""
pass
    # compute the output of the whole network
    def _calc_nn(self, sx):
        """
        Compute the output of the whole network
        :param sx: the input of the neural network
        :return: the output of every layer of the whole network
        """
        x = sx
        nn_y_list = list()
        # compute layer by layer
        for layer in range(0, self._layer_count):
            # compute this layer's output
            y = self._calc_layer(x, layer)
            # record this layer's output
            nn_y_list.append(y)
            # this layer's output is the next layer's input
            x = y
        # return the layer-by-layer results
return nn_y_list
''''''
def _calc_layer(self, x, layer):
"""
        Compute the output of one layer of the neural network
        :param x: the input of this layer; x is a vector
        :param layer: the current layer index
        :return: y, the output of this layer; y is a vector
        """
        # fetch this layer's parameters: w, b
w = self._w_layer[layer]
b = self._b_layer[layer]
y = np.matmul(w, x) + b
y = y + self._calc_recurrent(layer)
        # apply the activation function to every element
row = len(y)
for i in range(0, row):
y[i, 0] = self._activation.active(y[i, 0])
return y
''''''
def _calc_recurrent(self, layer):
"""
        Compute the recurrent-network term u * h(t - 1); the default value is 0
        :param layer: the layer index
:return: u * h(t - 1)
"""
return 0
''''''
def _modify_fnn_para(self, nn_y_list, sx, sy):
"""
        Purpose: revise W, B
        Parameters:
        nn_y_list: the per-layer results computed by the network; each nn_y is a vector
        sx: the input of the training sample; sx is a vector
        sy: the output of the training sample; sy is a vector
        Return value: NULL
"""
pass
''''''
def predict(self, sx_list, sy_list, revise_strong=False):
"""
        Purpose: prediction
        Parameters:
        sx_list: the list of samples to predict; each sx is a vector
        Return value: the prediction results
        """
        count = len(sx_list)
        py_list = list()
        for i in range(0, count):
            sx = sx_list[i]
            nn_y_list = self._calc_nn(sx)
            # the last layer's nn_y is the final output of the network
            nn_y = nn_y_list[len(nn_y_list) - 1]
            # last-hop activation
            lha_y = self._last_hop_activation.active_array(nn_y)
            # last-hop revision
            lhr_y = self._last_hop_activation.predict_revise(lha_y, revise_strong)
            # then append it to the prediction list
py_list.append(lhr_y)
return py_list
"""
    Purpose: print W, B, loop
    Parameters:
    loop: the number of training loops of the network
    Return value: NULL
"""
def _print_w_b_loop(self, loop):
print("\n")
print("训练次数 = %d\n" % loop)
for layer in range(0, self._layer_count):
print("层数 = %d" % layer)
print("W:")
# print(self.W[layer])
print(array_2_string(self._w_layer[layer]))
print("\nB:")
# print(self.B[layer])
print(array_2_string(self._b_layer[layer]))
if layer < self._layer_count - 1:
print("\n")
"""
    Purpose: stub function, set the parameters directly (without training)
    Parameters:
    sx_dim: dimension of the network input vector
    layer_count: number of network layers
    neuron_count_list: number of neurons in each layer (Neuron Count)
    W: list of per-layer w parameters; each w is a matrix
    B: list of per-layer b parameters; each b is a vector
    Return value: NULL
"""
def stub_set_para(self, sx_dim, neuron_count_list, W, B, activation):
        # dimension of the network input vector
        self._sx_dim = sx_dim
        # number of neurons in each layer (Neuron Count)
        self._neuron_count_list = neuron_count_list
        # number of network layers
        self._layer_count = len(W)
        # per-layer w parameters; each w is a matrix
        self._w_layer = W
        # per-layer b parameters; each b is a vector
        self._b_layer = B
        # activation function object
        self._activation = activation
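# A minimal usage sketch (hypothetical shapes; relies on the Sigmoid and
# DichotomyLHA defaults above). stub_set_para skips training and sets the
# parameters directly:
# fnn = FNN()
# W = [np.random.random((4, 2)), np.random.random((1, 4))]  # a 2 -> 4 -> 1 network
# B = [np.zeros((4, 1)), np.zeros((1, 1))]
# fnn.stub_set_para(sx_dim=2, neuron_count_list=[4, 1], W=W, B=B, activation=Sigmoid())
# py_list = fnn.predict([np.random.random((2, 1))], None)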
|
[
"loss.loss.MSELoss",
"activation.normal_activation.Sigmoid",
"numpy.zeros",
"time.time",
"numpy.random.random",
"activation.last_hop_activation.DichotomyLHA",
"numpy.matmul",
"gl.array_string.array_2_string"
] |
[((1065, 1074), 'activation.normal_activation.Sigmoid', 'Sigmoid', ([], {}), '()\n', (1072, 1074), False, 'from activation.normal_activation import Sigmoid\n'), ((1149, 1163), 'activation.last_hop_activation.DichotomyLHA', 'DichotomyLHA', ([], {}), '()\n', (1161, 1163), False, 'from activation.last_hop_activation import DichotomyLHA\n'), ((1188, 1197), 'loss.loss.MSELoss', 'MSELoss', ([], {}), '()\n', (1195, 1197), False, 'from loss.loss import MSELoss\n'), ((5448, 5508), 'numpy.random.random', 'np.random.random', (['(self._neuron_count_list[0], self._sx_dim)'], {}), '((self._neuron_count_list[0], self._sx_dim))\n', (5464, 5508), True, 'import numpy as np\n'), ((5643, 5721), 'numpy.random.random', 'np.random.random', (['(self._neuron_count_list[i], self._neuron_count_list[i - 1])'], {}), '((self._neuron_count_list[i], self._neuron_count_list[i - 1]))\n', (5659, 5721), True, 'import numpy as np\n'), ((5860, 5901), 'numpy.zeros', 'np.zeros', (['[self._neuron_count_list[i], 1]'], {}), '([self._neuron_count_list[i], 1])\n', (5868, 5901), True, 'import numpy as np\n'), ((8149, 8164), 'numpy.matmul', 'np.matmul', (['w', 'x'], {}), '(w, x)\n', (8158, 8164), True, 'import numpy as np\n'), ((6163, 6174), 'time.time', 'time.time', ([], {}), '()\n', (6172, 6174), False, 'import time\n'), ((9844, 9880), 'gl.array_string.array_2_string', 'array_2_string', (['self._w_layer[layer]'], {}), '(self._w_layer[layer])\n', (9858, 9880), False, 'from gl.array_string import array_2_string\n'), ((9962, 9998), 'gl.array_string.array_2_string', 'array_2_string', (['self._b_layer[layer]'], {}), '(self._b_layer[layer])\n', (9976, 9998), False, 'from gl.array_string import array_2_string\n'), ((6367, 6378), 'time.time', 'time.time', ([], {}), '()\n', (6376, 6378), False, 'import time\n')]
|
#Pandas is used to open csv files and convert to lists
import pandas as pd
#Used for making plots
import matplotlib.pyplot as plt
#Library used to do maths
import numpy as np
#Load data to pandas
data = pd.read_csv('data.csv')
#Give time from data
time = data['Time']
light_values = data['Light']
#Use len function to find the number of values
number_of_readings = len(light_values)
print(number_of_readings)
#Add all the values using numpy's sum function, then print the result
sum_of_all_light_values = np.sum( light_values )
print(sum_of_all_light_values)
#Divide the sum of all the readings by the number of readings to get the mean
mean_value_of_the_light = sum_of_all_light_values / number_of_readings
print(mean_value_of_the_light)
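#Numpy can also compute the mean directly in one call: np.mean(light_values)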
#We use np.full to repeat the mean value 968 times to be able to visualise it on the graph
mean_value_of_the_light = np.full(shape=number_of_readings, fill_value=mean_value_of_the_light)
#We plot the light values and label them
plt.plot( light_values, label="light")
#We plot the mean value and label it
plt.plot( mean_value_of_the_light, label="mean")
#Put title for the graph
plt.title("HAB-LAB: Lights Graph")
#Legend creates the box for the key for the graph
plt.legend()
#Labels the x-axis
plt.xlabel("Number of readings")
#Labels the y-axis
plt.ylabel("Light Intensity (KB)")
#Show the graph
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.full",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((213, 236), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (224, 236), True, 'import pandas as pd\n'), ((529, 549), 'numpy.sum', 'np.sum', (['light_values'], {}), '(light_values)\n', (535, 549), True, 'import numpy as np\n'), ((883, 952), 'numpy.full', 'np.full', ([], {'shape': 'number_of_readings', 'fill_value': 'mean_value_of_the_light'}), '(shape=number_of_readings, fill_value=mean_value_of_the_light)\n', (890, 952), True, 'import numpy as np\n'), ((999, 1036), 'matplotlib.pyplot.plot', 'plt.plot', (['light_values'], {'label': '"""light"""'}), "(light_values, label='light')\n", (1007, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1125), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_value_of_the_light'], {'label': '"""mean"""'}), "(mean_value_of_the_light, label='mean')\n", (1086, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1190), 'matplotlib.pyplot.title', 'plt.title', (['"""HAB-LAB: Lights Graph"""'], {}), "('HAB-LAB: Lights Graph')\n", (1165, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1255), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1253, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1277, 1309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of readings"""'], {}), "('Number of readings')\n", (1287, 1309), True, 'import matplotlib.pyplot as plt\n'), ((1331, 1365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Light Intensity (KB)"""'], {}), "('Light Intensity (KB)')\n", (1341, 1365), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1392, 1394), True, 'import matplotlib.pyplot as plt\n')]
|
from keras.models import load_model
from sklearn.externals import joblib
import numpy as np
from keras import backend as K
# Set learning phase (disable dropout)
K.set_learning_phase(0)
# Load model, char mapping, encoder, and raw text from disk
model = load_model('shakespeare/model_1-2.h5')
char_mapping = joblib.load('shakespeare/char_mapping.sav')
ohe = joblib.load('shakespeare/ohe.sav')
raw_text = open('shakespeare.txt').read().lower()
n_vocab = len(char_mapping)
# Create an inverse map of char mapping
reverse_mapping = {}
for x in char_mapping:
reverse_mapping[char_mapping[x]] = x
def select(output):
# Probabilistically determine output based on softmax output
from random import uniform
letter = uniform(0., 1.)
added = 0
    for i in range(len(output)):
        if added + output[i] >= letter:
            return i
        else:
            added += output[i]
    # guard against floating-point round-off in the cumulative sum
    return len(output) - 1
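# An equivalent one-liner (assumes `output` is a valid probability vector):
# def select(output):
#     return np.random.choice(len(output), p=output / np.sum(output))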
def generate_text(seed, steps):
seed = seed.lower()
print(seed, end='')
last = list(seed)
for i in range(steps):
# Get input sequence and encode it
input_seq = [char_mapping[char] for char in last]
input_seq = np.reshape(input_seq, (len(input_seq), 1))
input_seq = ohe.transform(input_seq).toarray()[:,:-1]
input_seq = np.expand_dims(input_seq, axis=0)
# Predict output character and add to input sequence
y = model.predict(input_seq).flatten()
y = select(y)
del last[0]
last.append(reverse_mapping[y])
print(reverse_mapping[y], end='')
# Select a random 100-character seed
start = np.random.randint(0, len(raw_text)-100)
seed = raw_text[start:start+100]
print('Seed: ')
print(seed)
print('-----------------------------')
# Predict 1000 characters
generate_text(seed, 1000)
|
[
"keras.models.load_model",
"random.uniform",
"numpy.expand_dims",
"keras.backend.set_learning_phase",
"sklearn.externals.joblib.load"
] |
[((163, 186), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(0)'], {}), '(0)\n', (183, 186), True, 'from keras import backend as K\n'), ((256, 294), 'keras.models.load_model', 'load_model', (['"""shakespeare/model_1-2.h5"""'], {}), "('shakespeare/model_1-2.h5')\n", (266, 294), False, 'from keras.models import load_model\n'), ((310, 353), 'sklearn.externals.joblib.load', 'joblib.load', (['"""shakespeare/char_mapping.sav"""'], {}), "('shakespeare/char_mapping.sav')\n", (321, 353), False, 'from sklearn.externals import joblib\n'), ((360, 394), 'sklearn.externals.joblib.load', 'joblib.load', (['"""shakespeare/ohe.sav"""'], {}), "('shakespeare/ohe.sav')\n", (371, 394), False, 'from sklearn.externals import joblib\n'), ((729, 746), 'random.uniform', 'uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (736, 746), False, 'from random import uniform\n'), ((1286, 1319), 'numpy.expand_dims', 'np.expand_dims', (['input_seq'], {'axis': '(0)'}), '(input_seq, axis=0)\n', (1300, 1319), True, 'import numpy as np\n')]
|
"""
Implements the Gradient aligned adversarial "subspace" (GAAS) of [tra17].
REFERENCES:
[tra17] Tramer et al. "The Space of Transferable Adversarial
Examples," arXiv 2017.
[gvl96] Golub and <NAME> "Matrix Computations" 1996.
"""
__author__ = "mjp"
__date__ = 'november, 2017'
import numpy as np
from numpy.linalg import norm
import unittest
import pdb
def gaas(g, k, sanity_check=True):
"""
g : the gradient of the loss (a tensor)
k : the GAAS dimensionality (a scalar)
Returns:
R : a dxk matrix of k orthogonal vectors satisfying the GAAS conditions
"""
tol = 1e-5 # our tolerance for error
g = g.flatten().astype(np.float64) # TF gives float32 and we want more precision here...
d = g.size
R = np.zeros((d,k)) # columns of R are the GAAS r_i
z = np.zeros((d,)); z[:k] = 1/np.sqrt(k); # this is z from proof of lemma 1 in [tra17]
#--------------------------------------------------
# SPECIAL CASE: if k is 1, just return the trivial result g / ||g||
#
#--------------------------------------------------
if k == 1:
R[:,0] = g / norm(g,2)
return R
v_s, beta_s = householder_vec(z)
v_r, beta_r = householder_vec(g)
#--------------------------------------------------
# To calculate the r_i we use:
#
# r_i := Q' e_i
# = R' S e_i
# = R S e_i
#
# where R = R' from the symmetry of Householder matrices
# (follows from symmetry of I and vv').
#--------------------------------------------------
for ii in range(k):
e_i = np.zeros((d,)); e_i[ii] = 1;
sei = apply_householder_to_vector(v_s, beta_s, e_i)
r_i = apply_householder_to_vector(v_r, beta_r, sei)
R[:,ii] = r_i
#--------------------------------------------------
# (optional) check the solution for correctness
#--------------------------------------------------
if sanity_check:
# the r_i should be orthonormal
RtR = np.dot(R.T, R)
assert(norm(RtR-np.eye(k,k), 'fro') < tol)
# make sure Qg = ||g||_2 z
#
# Note: the transpose on R below is because I stored
# the r_i as columns in R.
#
err = np.dot(R.T, g) - norm(g,2) * z[:k]
assert(norm(err,2) < tol)
# make sure <g,r_i> behaves as expected.
for ii in range(k):
gtr = np.dot(g, R[:,ii])
      err = gtr - norm(g, 2) / np.sqrt(k)
assert(abs(err) < tol)
return R
def householder_vec(x):
""" Returns elements needed to construct Householder
reflection matrix for the vector x, i.e.
H = I - \beta v v'
where H x = ||x||_2 e_1
See Algorithm 5.1.1 in Golub and Van Loan.
"""
n = x.size
v = np.ones((n,)); v[1:] = x[1:]
sigma = np.dot(x[1:], x[1:])
if sigma == 0:
beta = 0
else:
mu = np.sqrt(x[0] ** 2 + sigma)
if x[0] <= 0:
v[0] = x[0] - mu
else:
v[0] = -sigma / (x[0] + mu)
beta = 2 * (v[0] ** 2) / (sigma + v[0]**2)
v = v / v[0]
return v, beta
def apply_householder_to_vector(v, beta, x):
""" Computes Householder reflection of vector x.
Applying a Householder transformation H to a vector x
does not require the explict construction of H since
H x = (I - \beta vv') x = x - \beta v (v' x)
In particular, this avoids the deadly outer product vv'.
See also 5.1.4 in Golub and Van Loan.
"""
return x - beta * v * np.dot(v, x)
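# Example check (hypothetical x): with v, beta = householder_vec(x),
# apply_householder_to_vector(v, beta, x) should equal norm(x, 2) * e_1,
# matching the H x = ||x||_2 e_1 property stated in the docstring above.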
if __name__ == "__main__":
# example usage
for n_trials in range(10):
g = np.random.rand(300,1)
k = 20
R = gaas(g,k)
print('[info]: GAAS calculation looks ok!')
|
[
"numpy.eye",
"numpy.zeros",
"numpy.ones",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt"
] |
[((805, 821), 'numpy.zeros', 'np.zeros', (['(d, k)'], {}), '((d, k))\n', (813, 821), True, 'import numpy as np\n'), ((883, 897), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (891, 897), True, 'import numpy as np\n'), ((2755, 2768), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (2762, 2768), True, 'import numpy as np\n'), ((2796, 2816), 'numpy.dot', 'np.dot', (['x[1:]', 'x[1:]'], {}), '(x[1:], x[1:])\n', (2802, 2816), True, 'import numpy as np\n'), ((910, 920), 'numpy.sqrt', 'np.sqrt', (['k'], {}), '(k)\n', (917, 920), True, 'import numpy as np\n'), ((1628, 1642), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (1636, 1642), True, 'import numpy as np\n'), ((2013, 2027), 'numpy.dot', 'np.dot', (['R.T', 'R'], {}), '(R.T, R)\n', (2019, 2027), True, 'import numpy as np\n'), ((2864, 2890), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + sigma)'], {}), '(x[0] ** 2 + sigma)\n', (2871, 2890), True, 'import numpy as np\n'), ((3569, 3591), 'numpy.random.rand', 'np.random.rand', (['(300)', '(1)'], {}), '(300, 1)\n', (3583, 3591), True, 'import numpy as np\n'), ((1181, 1191), 'numpy.linalg.norm', 'norm', (['g', '(2)'], {}), '(g, 2)\n', (1185, 1191), False, 'from numpy.linalg import norm\n'), ((2226, 2240), 'numpy.dot', 'np.dot', (['R.T', 'g'], {}), '(R.T, g)\n', (2232, 2240), True, 'import numpy as np\n'), ((2272, 2284), 'numpy.linalg.norm', 'norm', (['err', '(2)'], {}), '(err, 2)\n', (2276, 2284), False, 'from numpy.linalg import norm\n'), ((2376, 2395), 'numpy.dot', 'np.dot', (['g', 'R[:, ii]'], {}), '(g, R[:, ii])\n', (2382, 2395), True, 'import numpy as np\n'), ((3471, 3483), 'numpy.dot', 'np.dot', (['v', 'x'], {}), '(v, x)\n', (3477, 3483), True, 'import numpy as np\n'), ((2243, 2253), 'numpy.linalg.norm', 'norm', (['g', '(2)'], {}), '(g, 2)\n', (2247, 2253), False, 'from numpy.linalg import norm\n'), ((2409, 2423), 'numpy.dot', 'np.dot', (['g', 'r_i'], {}), '(g, r_i)\n', (2415, 2423), True, 'import numpy as np\n'), ((2048, 2060), 'numpy.eye', 'np.eye', (['k', 'k'], {}), '(k, k)\n', (2054, 2060), True, 'import numpy as np\n'), ((2426, 2436), 'numpy.linalg.norm', 'norm', (['g', '(2)'], {}), '(g, 2)\n', (2430, 2436), False, 'from numpy.linalg import norm\n'), ((2438, 2448), 'numpy.sqrt', 'np.sqrt', (['k'], {}), '(k)\n', (2445, 2448), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 21:55:54 2020
@author: mostafamousavi
last update: 05/27/2021
"""
from __future__ import print_function
from __future__ import division
import os
os.environ['KERAS_BACKEND']='tensorflow'
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import csv
from tensorflow import keras
import time
import h5py
from os import listdir
import platform
import shutil
from tqdm import tqdm
from datetime import datetime, timedelta
import contextlib
import sys
import warnings
from scipy import signal
from matplotlib.lines import Line2D
from obspy import read
from os.path import join
import json
import pickle
import faulthandler; faulthandler.enable()
import obspy
import logging
from obspy.signal.trigger import trigger_onset
from .EqT_utils import f1, SeqSelfAttention, FeedForward, LayerNormalization
warnings.filterwarnings("ignore")
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
try:
    with open('setup.py') as f:
        for li, l in enumerate(f):
            if li == 8:
                EQT_VERSION = l.split('"')[1]
except Exception:
EQT_VERSION = "0.1.61"
def mseed_predictor(input_dir='downloads_mseeds',
input_model="sampleData&Model/EqT1D8pre_048.h5",
stations_json= "station_list.json",
output_dir="detections",
detection_threshold=0.3,
P_threshold=0.1,
S_threshold=0.1,
number_of_plots=10,
plot_mode='time',
loss_weights=[0.03, 0.40, 0.58],
loss_types=['binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'],
normalization_mode='std',
batch_size=500,
overlap = 0.3,
gpuid=None,
gpu_limit=None,
overwrite=False,
output_probabilities=False):
"""
To perform fast detection directly on mseed data.
Parameters
----------
input_dir: str
        Directory name containing station subfolders with downloaded miniseed files.
input_model: str
Path to a trained model.
stations_json: str
Path to a JSON file containing station information.
output_dir: str
Output directory that will be generated.
detection_threshold: float, default=0.3
        A value above which detection probabilities are considered an event.
    P_threshold: float, default=0.1
        A value above which P probabilities are considered a P arrival.
    S_threshold: float, default=0.1
        A value above which S probabilities are considered an S arrival.
    number_of_plots: float, default=10
        The number of plots of detected events output for each station's data.
    plot_mode: str, default=time
        The type of plots: 'time' for time series only, or 'time_frequency' for time series plus spectrograms.
    loss_weights: list, default=[0.03, 0.40, 0.58]
        Loss weights for detection, P picking, and S picking, respectively.
    loss_types: list, default=['binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy']
        Loss types for detection, P picking, and S picking, respectively.
    normalization_mode: str, default=std
        Mode of normalization for data preprocessing: 'max' (maximum amplitude among the three components) or 'std' (standard deviation).
    batch_size: int, default=500
        Batch size. This won't affect the speed much but can affect the performance. A value between 200 and 1000 is recommended.
overlap: float, default=0.3
        If set, detection and picking are performed on overlapping windows.
gpuid: int
Id of GPU used for the prediction. If using CPU set to None.
gpu_limit: int
Set the maximum percentage of memory usage for the GPU.
overwrite: Boolean, default=False
Overwrite your results automatically.
output_probabilities: Boolean, default=False
        Write probabilities to output_dir/prediction_probabilities.hdf5 for future plotting
Structure: prediction_probabilities.hdf5{begintime: {Earthquake: probability, P_arrival: probability, S_arrival: probability}}
        Notice: if you turn this parameter on, it will generate large files (a test produced a ~150 MB file for a three-component station over 3 months)
Returns
--------
    output_dir/STATION_OUTPUT/X_prediction_results.csv: A table containing all the detection and picking results. Duplicated events are already removed.
output_dir/STATION_OUTPUT/X_report.txt: A summary of the parameters used for prediction and performance.
output_dir/STATION_OUTPUT/figures: A folder containing plots detected events and picked arrival times.
    time_tracks.pkl: A file containing the time track of the continuous data and its type.
Note
--------
    This does not allow uncertainty estimation.
"""
args = {
"input_dir": input_dir,
"input_model": input_model,
"stations_json": stations_json,
"output_dir": output_dir,
"detection_threshold": detection_threshold,
"P_threshold": P_threshold,
"S_threshold": S_threshold,
"number_of_plots": number_of_plots,
"plot_mode": plot_mode,
"loss_weights": loss_weights,
"loss_types": loss_types,
"normalization_mode": normalization_mode,
"overlap": overlap,
"batch_size": batch_size,
"gpuid": gpuid,
"gpu_limit": gpu_limit,
"output_probabilities": output_probabilities
}
if args['gpuid']:
os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(args['gpuid'])
tf.Session(config=tf.ConfigProto(log_device_placement=True))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = float(args['gpu_limit'])
K.tensorflow_backend.set_session(tf.Session(config=config))
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] [%(name)s] %(message)s',
datefmt='%m-%d %H:%M')
class DummyFile(object):
file = None
def __init__(self, file):
self.file = file
def write(self, x):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
tqdm.write(x, file=self.file)
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile(sys.stdout)
yield
sys.stdout = save_stdout
eqt_logger = logging.getLogger("EQTransformer")
eqt_logger.info(f"Running EqTransformer {EQT_VERSION}")
eqt_logger.info(f"*** Loading the model ...")
model = load_model(args['input_model'],
custom_objects={'SeqSelfAttention': SeqSelfAttention,
'FeedForward': FeedForward,
'LayerNormalization': LayerNormalization,
'f1': f1
})
model.compile(loss = args['loss_types'],
loss_weights = args['loss_weights'],
optimizer = Adam(lr = 0.001),
metrics = [f1])
eqt_logger.info(f"*** Loading is complete!")
out_dir = os.path.join(os.getcwd(), str(args['output_dir']))
if os.path.isdir(out_dir):
eqt_logger.info(f"*** {out_dir} already exists!")
if overwrite == True:
inp = "y"
eqt_logger.info(f"Overwriting your previous results")
else:
inp = input(" --> Type (Yes or y) to create a new empty directory! This will erase your previous results so make a copy if you want them.")
if inp.lower() == "yes" or inp.lower() == "y":
shutil.rmtree(out_dir)
os.makedirs(out_dir)
else:
print("Okay.")
return
if platform.system() == 'Windows':
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("\\")[-1] != ".DS_Store"];
else:
station_list = [ev.split(".")[0] for ev in listdir(args['input_dir']) if ev.split("/")[-1] != ".DS_Store"];
station_list = sorted(set(station_list))
data_track = dict()
eqt_logger.info(f"There are files for {len(station_list)} stations in {args['input_dir']} directory.")
for ct, st in enumerate(station_list):
save_dir = os.path.join(out_dir, str(st)+'_outputs')
out_probs = os.path.join(save_dir, 'prediction_probabilities.hdf5')
save_figs = os.path.join(save_dir, 'figures')
if os.path.isdir(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
try:
os.remove(out_probs)
except Exception:
pass
if args['number_of_plots']:
os.makedirs(save_figs)
if args['output_probabilities']:
HDF_PROB = h5py.File(out_probs, 'a')
plt_n = 0
csvPr_gen = open(os.path.join(save_dir,'X_prediction_results.csv'), 'w')
predict_writer = csv.writer(csvPr_gen, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
predict_writer.writerow(['file_name',
'network',
'station',
'instrument_type',
'station_lat',
'station_lon',
'station_elv',
'event_start_time',
'event_end_time',
'detection_probability',
'detection_uncertainty',
'p_arrival_time',
'p_probability',
'p_uncertainty',
'p_snr',
's_arrival_time',
's_probability',
's_uncertainty',
's_snr'
])
csvPr_gen.flush()
eqt_logger.info(f"Started working on {st}, {ct+1} out of {len(station_list)} ...")
start_Predicting = time.time()
if platform.system() == 'Windows':
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"\\"+st) if ev.split("\\")[-1].split(".")[-1].lower() == "mseed"];
else:
file_list = [join(st, ev) for ev in listdir(args["input_dir"]+"/"+st) if ev.split("/")[-1].split(".")[-1].lower() == "mseed"];
mon = [ev.split('__')[1]+'__'+ev.split('__')[2] for ev in file_list ];
uni_list = list(set(mon))
uni_list.sort()
time_slots, comp_types = [], []
for _, month in enumerate(uni_list):
eqt_logger.info(f"{month}")
matching = [s for s in file_list if month in s]
meta, time_slots, comp_types, data_set = _mseed2nparry(args, matching, time_slots, comp_types, st)
params_pred = {'batch_size': args['batch_size'],
'norm_mode': args['normalization_mode']}
pred_generator = PreLoadGeneratorTest(meta["trace_start_time"], data_set, **params_pred)
predD, predP, predS = model.predict_generator(pred_generator)
detection_memory = []
for ix in range(len(predD)):
matches, pick_errors, yh3 = _picker(args, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0])
if (len(matches) >= 1) and ((matches[list(matches)[0]][3] or matches[list(matches)[0]][6])):
snr = [_get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][3], window = 100), _get_snr(data_set[meta["trace_start_time"][ix]], matches[list(matches)[0]][6], window = 100)]
pre_write = len(detection_memory)
detection_memory=_output_writter_prediction(meta, predict_writer, csvPr_gen, matches, snr, detection_memory, ix)
post_write = len(detection_memory)
if args['output_probabilities']:
HDF_PROB.create_dataset(f'{meta["trace_start_time"][ix]}/Earthquake', data=predD[ix][:, 0], dtype= np.float32)
HDF_PROB.create_dataset(f'{meta["trace_start_time"][ix]}/P_arrival', data=predP[ix][:, 0], dtype= np.float32)
HDF_PROB.create_dataset(f'{meta["trace_start_time"][ix]}/S_arrival', data=predS[ix][:, 0], dtype= np.float32)
HDF_PROB.flush()
if plt_n < args['number_of_plots'] and post_write > pre_write:
_plotter_prediction(data_set[meta["trace_start_time"][ix]], args, save_figs, predD[ix][:, 0], predP[ix][:, 0], predS[ix][:, 0], meta["trace_start_time"][ix], matches)
plt_n += 1
end_Predicting = time.time()
data_track[st]=[time_slots, comp_types]
delta = (end_Predicting - start_Predicting)
hour = int(delta / 3600)
delta -= hour * 3600
minute = int(delta / 60)
delta -= minute * 60
seconds = delta
if args['output_probabilities']:
HDF_PROB.close()
dd = pd.read_csv(os.path.join(save_dir,'X_prediction_results.csv'))
print(f'\n', flush=True)
eqt_logger.info(f"Finished the prediction in: {hour} hours and {minute} minutes and {round(seconds, 2)} seconds.")
eqt_logger.info(f'*** Detected: '+str(len(dd))+' events.')
eqt_logger.info(f' *** Wrote the results into --> " ' + str(save_dir)+' "')
# print(' *** Finished the prediction in: {} hours and {} minutes and {} seconds.'.format(hour, minute, round(seconds, 2)), flush=True)
# print(' *** Detected: '+str(len(dd))+' events.', flush=True)
# print(' *** Wrote the results into --> " ' + str(save_dir)+' "', flush=True)
with open(os.path.join(save_dir,'X_report.txt'), 'a') as the_file:
the_file.write('================== PREDICTION FROM MSEED ===================='+'\n')
the_file.write('================== Overal Info =============================='+'\n')
the_file.write('date of report: '+str(datetime.now())+'\n')
the_file.write('input_model: '+str(args['input_model'])+'\n')
the_file.write('input_dir: '+str(args['input_dir'])+'\n')
the_file.write('output_dir: '+str(save_dir)+'\n')
the_file.write('================== Prediction Parameters ====================='+'\n')
the_file.write('finished the prediction in: {} hours and {} minutes and {} seconds \n'.format(hour, minute, round(seconds, 2)))
the_file.write('detected: '+str(len(dd))+' events.'+'\n')
the_file.write('loss_types: '+str(args['loss_types'])+'\n')
the_file.write('loss_weights: '+str(args['loss_weights'])+'\n')
the_file.write('================== Other Parameters =========================='+'\n')
the_file.write('normalization_mode: '+str(args['normalization_mode'])+'\n')
the_file.write('overlap: '+str(args['overlap'])+'\n')
the_file.write('batch_size: '+str(args['batch_size'])+'\n')
the_file.write('detection_threshold: '+str(args['detection_threshold'])+'\n')
the_file.write('P_threshold: '+str(args['P_threshold'])+'\n')
the_file.write('S_threshold: '+str(args['S_threshold'])+'\n')
the_file.write('number_of_plots: '+str(args['number_of_plots'])+'\n')
the_file.write('gpuid: '+str(args['gpuid'])+'\n')
the_file.write('gpu_limit: '+str(args['gpu_limit'])+'\n')
with open('time_tracks.pkl', 'wb') as f:
pickle.dump(data_track, f, pickle.HIGHEST_PROTOCOL)
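# A minimal usage sketch (the arguments shown are the documented defaults):
# mseed_predictor(input_dir='downloads_mseeds',
#                 input_model='sampleData&Model/EqT1D8pre_048.h5',
#                 stations_json='station_list.json',
#                 output_dir='detections')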
def _mseed2nparry(args, matching, time_slots, comp_types, st_name):
    'read miniseed files from a list of names and return the metadata dict, time-slot and component-type lists, and a dict of numpy arrays'
json_file = open(args['stations_json'])
stations_ = json.load(json_file)
st = obspy.core.Stream()
tsw = False
for m in matching:
temp_st = read(os.path.join(str(args['input_dir']), m),debug_headers=True)
if tsw == False and temp_st:
tsw = True
for tr in temp_st:
time_slots.append((tr.stats.starttime, tr.stats.endtime))
try:
temp_st.merge(fill_value=0)
except Exception:
temp_st =_resampling(temp_st)
temp_st.merge(fill_value=0)
temp_st.detrend('demean')
st += temp_st
st.filter(type='bandpass', freqmin = 1.0, freqmax = 45, corners=2, zerophase=True)
st.taper(max_percentage=0.001, type='cosine', max_length=2)
if len([tr for tr in st if tr.stats.sampling_rate != 100.0]) != 0:
try:
st.interpolate(100, method="linear")
except Exception:
st=_resampling(st)
st.trim(min([tr.stats.starttime for tr in st]), max([tr.stats.endtime for tr in st]), pad=True, fill_value=0)
start_time = st[0].stats.starttime
end_time = st[0].stats.endtime
meta = {"start_time":start_time,
"end_time": end_time,
"trace_name":m
}
chanL = [tr.stats.channel[-1] for tr in st]
comp_types.append(len(chanL))
tim_shift = int(60-(args['overlap']*60))
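    # e.g. overlap=0.3 gives int(60 - 0.3*60) = 42 s hop between consecutive 60 s windows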
next_slice = start_time+60
data_set={}
sl = 0; st_times = []
while next_slice <= end_time:
npz_data = np.zeros([6000, 3])
st_times.append(str(start_time).replace('T', ' ').replace('Z', ''))
w = st.slice(start_time, next_slice)
if 'Z' in chanL:
npz_data[:,2] = w[chanL.index('Z')].data[:6000]
if ('E' in chanL) or ('1' in chanL):
try:
npz_data[:,0] = w[chanL.index('E')].data[:6000]
except Exception:
npz_data[:,0] = w[chanL.index('1')].data[:6000]
if ('N' in chanL) or ('2' in chanL):
try:
npz_data[:,1] = w[chanL.index('N')].data[:6000]
except Exception:
npz_data[:,1] = w[chanL.index('2')].data[:6000]
data_set.update( {str(start_time).replace('T', ' ').replace('Z', '') : npz_data})
start_time = start_time+tim_shift
next_slice = next_slice+tim_shift
sl += 1
meta["trace_start_time"] = st_times
try:
meta["receiver_code"]=st[0].stats.station
meta["instrument_type"]=st[0].stats.channel[:2]
meta["network_code"]=stations_[st[0].stats.station]['network']
meta["receiver_latitude"]=stations_[st[0].stats.station]['coords'][0]
meta["receiver_longitude"]=stations_[st[0].stats.station]['coords'][1]
meta["receiver_elevation_m"]=stations_[st[0].stats.station]['coords'][2]
except Exception:
meta["receiver_code"]=st_name
meta["instrument_type"]=stations_[st_name]['channels'][0][:2]
meta["network_code"]=stations_[st_name]['network']
meta["receiver_latitude"]=stations_[st_name]['coords'][0]
meta["receiver_longitude"]=stations_[st_name]['coords'][1]
meta["receiver_elevation_m"]=stations_[st_name]['coords'][2]
return meta, time_slots, comp_types, data_set
class PreLoadGeneratorTest(keras.utils.Sequence):
"""
Keras generator with preprocessing. For testing. Pre-load version.
Parameters
----------
    list_IDs: str
        List of trace names.
    inp_data: dict
        Dictionary of pre-loaded waveform arrays, keyed by trace start time.
    batch_size: int, default=32.
        Batch size.
    norm_mode: str, default=std
        The mode of normalization, 'max' or 'std'
Returns
--------
    Batches of one dictionary: {'input': X}: pre-processed waveforms as input (this test generator yields no labels).
"""
def __init__(self,
list_IDs,
inp_data,
batch_size=32,
norm_mode = 'std'):
'Initialization'
self.batch_size = batch_size
self.list_IDs = list_IDs
self.inp_data = inp_data
self.on_epoch_end()
self.norm_mode = norm_mode
def __len__(self):
'Denotes the number of batches per epoch'
try:
return int(np.floor(len(self.list_IDs) / self.batch_size))
except ZeroDivisionError:
print("Your data duration in mseed file is too short! Try either longer files or reducing batch_size. ")
def __getitem__(self, index):
'Generate one batch of data'
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indexes]
X = self.__data_generation(list_IDs_temp)
return ({'input': X})
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
def _normalize(self, data, mode = 'max'):
data -= np.mean(data, axis=0, keepdims=True)
if mode == 'max':
max_data = np.max(data, axis=0, keepdims=True)
assert(max_data.shape[-1] == data.shape[-1])
max_data[max_data == 0] = 1
data /= max_data
elif mode == 'std':
std_data = np.std(data, axis=0, keepdims=True)
assert(std_data.shape[-1] == data.shape[-1])
std_data[std_data == 0] = 1
data /= std_data
return data
def __data_generation(self, list_IDs_temp):
        'reading the waveforms'
X = np.zeros((self.batch_size, 6000, 3))
# Generate data
for i, ID in enumerate(list_IDs_temp):
data = self.inp_data[ID]
data = self._normalize(data, self.norm_mode)
X[i, :, :] = data
return X
def _output_writter_prediction(meta, predict_writer, csvPr, matches, snr, detection_memory, idx):
"""
Writes the detection & picking results into a CSV file.
Parameters
----------
dataset: hdf5 obj
Dataset object of the trace.
predict_writer: obj
For writing out the detection/picking results in the CSV file.
csvPr: obj
For writing out the detection/picking results in the CSV file.
matches: dic
It contains the information for the detected and picked event.
snr: list of two floats
Estimated signal to noise ratios for picked P and S phases.
detection_memory : list
Keep the track of detected events.
Returns
-------
detection_memory : list
Keep the track of detected events.
"""
station_name = meta["receiver_code"]
station_lat = meta["receiver_latitude"]
station_lon = meta["receiver_longitude"]
station_elv = meta["receiver_elevation_m"]
start_time = meta["trace_start_time"][idx]
station_name = "{:<4}".format(station_name)
network_name = meta["network_code"]
network_name = "{:<2}".format(network_name)
instrument_type = meta["instrument_type"]
instrument_type = "{:<2}".format(instrument_type)
try:
start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f')
except Exception:
start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
def _date_convertor(r):
if isinstance(r, str):
mls = r.split('.')
if len(mls) == 1:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S')
else:
new_t = datetime.strptime(r, '%Y-%m-%d %H:%M:%S.%f')
else:
new_t = r
return new_t
for match, match_value in matches.items():
ev_strt = start_time+timedelta(seconds= match/100)
ev_end = start_time+timedelta(seconds= match_value[0]/100)
doublet = [ st for st in detection_memory if abs((st-ev_strt).total_seconds()) < 2]
if len(doublet) == 0:
det_prob = round(match_value[1], 2)
if match_value[3]:
p_time = start_time+timedelta(seconds= match_value[3]/100)
else:
p_time = None
p_prob = match_value[4]
if p_prob:
p_prob = round(p_prob, 2)
if match_value[6]:
s_time = start_time+timedelta(seconds= match_value[6]/100)
else:
s_time = None
s_prob = match_value[7]
if s_prob:
s_prob = round(s_prob, 2)
predict_writer.writerow([meta["trace_name"],
network_name,
station_name,
instrument_type,
station_lat,
station_lon,
station_elv,
_date_convertor(ev_strt),
_date_convertor(ev_end),
det_prob,
None,
_date_convertor(p_time),
p_prob,
None,
snr[0],
_date_convertor(s_time),
s_prob,
None,
snr[1]
])
csvPr.flush()
detection_memory.append(ev_strt)
return detection_memory
def _get_snr(data, pat, window=200):
"""
Estimates SNR.
Parameters
----------
data : numpy array
3 component data.
pat: positive integer
Sample point where a specific phase arrives.
window: positive integer, default=200
        The length of the window for calculating the SNR (in samples).
Returns
--------
snr : {float, None}
Estimated SNR in db.
"""
snr = None
if pat:
try:
if int(pat) >= window and (int(pat)+window) < len(data):
nw1 = data[int(pat)-window : int(pat)]
sw1 = data[int(pat) : int(pat)+window]
snr = round(10*math.log10((np.percentile(sw1,95)/np.percentile(nw1,95))**2), 1)
elif int(pat) < window and (int(pat)+window) < len(data):
window = int(pat)
nw1 = data[int(pat)-window : int(pat)]
sw1 = data[int(pat) : int(pat)+window]
snr = round(10*math.log10((np.percentile(sw1,95)/np.percentile(nw1,95))**2), 1)
elif (int(pat)+window) > len(data):
window = len(data)-int(pat)
nw1 = data[int(pat)-window : int(pat)]
sw1 = data[int(pat) : int(pat)+window]
snr = round(10*math.log10((np.percentile(sw1,95)/np.percentile(nw1,95))**2), 1)
except Exception:
pass
return snr
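# Hedged usage sketch: SNR of a synthetic arrival embedded in noise; the
# arrival sample (pat=3000) and window length are illustrative values.
import numpy as np
rng = np.random.default_rng(0)
trace = rng.normal(0.0, 1.0, 6000)
trace[3000:3200] += 10.0                     # boost amplitude after the "arrival"
print(_get_snr(trace, pat=3000, window=200))  # a positive value, roughly 17 dB here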
def _detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False):
"""
Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, default=None
detect peaks that are greater than minimum peak height.
mpd : int, default=1
detect peaks that are at least separated by minimum peak distance (in number of data).
threshold : int, default=0
detect peaks (valleys) that are greater (smaller) than `threshold` in relation to their immediate neighbors.
edge : str, default=rising
for a flat peak, keep only the rising edge ('rising'), only the falling edge ('falling'), both edges ('both'), or don't detect a flat peak (None).
kpsh : bool, default=False
keep peaks with same height even if they are closer than `mpd`.
valley : bool, default=False
if True (1), detect valleys (local minima) instead of peaks.
Returns
-------
ind : 1D array_like
indices of the peaks in `x`.
Modified from
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
return ind
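# Hedged usage sketch: peak picking on a toy probability curve; the mph/mpd
# values are illustrative.
import numpy as np
y = np.array([0.0, 0.2, 0.9, 0.3, 0.1, 0.5, 0.95, 0.4, 0.0])
print(_detect_peaks(y, mph=0.8, mpd=1))       # -> [2 6]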
def _picker(args, yh1, yh2, yh3):
"""
Performs detection and picking.
Parameters
----------
args : dic
A dictionary containing all of the input parameters.
yh1 : 1D array
Detection probabilities.
yh2 : 1D array
P arrival probabilities.
yh3 : 1D array
S arrival probabilities.
Returns
-------
matches : dic
Contains the information for the detected and picked event:
{detection start-time:[ detection end-time, detection probability, detection uncertainty, P arrival, P probability, P uncertainty, S arrival, S probability, S uncertainty]}
pick_errors : dic
Pick uncertainties (kept as an empty placeholder here).
yh3 : 1D array
Normalized S probability.
"""
detection = trigger_onset(yh1, args['detection_threshold'], args['detection_threshold'])
pp_arr = _detect_peaks(yh2, mph=args['P_threshold'], mpd=1)
ss_arr = _detect_peaks(yh3, mph=args['S_threshold'], mpd=1)
P_PICKS = {}
S_PICKS = {}
EVENTS = {}
matches = {}
pick_errors = {}
if len(pp_arr) > 0:
P_uncertainty = None
for pick in range(len(pp_arr)):
pauto = pp_arr[pick]
if pauto:
P_prob = np.round(yh2[int(pauto)], 3)
P_PICKS.update({pauto : [P_prob, P_uncertainty]})
if len(ss_arr) > 0:
S_uncertainty = None
for pick in range(len(ss_arr)):
sauto = ss_arr[pick]
if sauto:
S_prob = np.round(yh3[int(sauto)], 3)
S_PICKS.update({sauto : [S_prob, S_uncertainty]})
if len(detection) > 0:
D_uncertainty = None
for ev in range(len(detection)):
D_prob = np.mean(yh1[detection[ev][0]:detection[ev][1]])
D_prob = np.round(D_prob, 3)
EVENTS.update({ detection[ev][0] : [D_prob, D_uncertainty, detection[ev][1]]})
# matching the detection and picks
def pair_PS(l1, l2, dist):
l1.sort()
l2.sort()
b = 0
e = 0
ans = []
for a in l1:
while b < len(l2) and l2[b] and a - l2[b] > dist:
b += 1
while e < len(l2) and l2[e] and l2[e] - a <= dist:
e += 1
ans.extend([[a,x] for x in l2[b:e]])
best_pair = None
for pr in ans:
ds = pr[1]-pr[0]
if abs(ds) < dist:
best_pair = pr
dist = ds
return best_pair
for ev in EVENTS:
bg = ev
ed = EVENTS[ev][2]
if int(ed-bg) >= 10:
candidate_Ss = {}
for Ss, S_val in S_PICKS.items():
if Ss > bg and Ss < ed:
candidate_Ss.update({Ss : S_val})
if len(candidate_Ss) > 1:
candidate_Ss = {list(candidate_Ss.keys())[0] : candidate_Ss[list(candidate_Ss.keys())[0]]}
if len(candidate_Ss) == 0:
candidate_Ss = {None:[None, None]}
candidate_Ps = {}
for Ps, P_val in P_PICKS.items():
if list(candidate_Ss)[0]:
if Ps > bg-100 and Ps < list(candidate_Ss)[0]-10:
candidate_Ps.update({Ps : P_val})
else:
if Ps > bg-100 and Ps < ed:
candidate_Ps.update({Ps : P_val})
if len(candidate_Ps) > 1:
Pr_st = 0
buffer = {}
for PsCan, P_valCan in candidate_Ps.items():
if P_valCan[0] > Pr_st:
buffer = {PsCan : P_valCan}
Pr_st = P_valCan[0]
candidate_Ps = buffer
if len(candidate_Ps) == 0:
candidate_Ps = {None:[None, None]}
if list(candidate_Ss)[0] or list(candidate_Ps)[0]:
matches.update({
bg:[ed,
EVENTS[ev][0],
EVENTS[ev][1],
list(candidate_Ps)[0],
candidate_Ps[list(candidate_Ps)[0]][0],
candidate_Ps[list(candidate_Ps)[0]][1],
list(candidate_Ss)[0],
candidate_Ss[list(candidate_Ss)[0]][0],
candidate_Ss[list(candidate_Ss)[0]][1],
] })
return matches, pick_errors, yh3
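# Hedged usage sketch: running the picker on synthetic probability traces.
# The threshold keys follow the args dict referenced above; all values are
# illustrative.
import numpy as np
n = 6000
yh1 = np.zeros(n); yh1[1000:2000] = 0.9          # detection window
yh2 = np.zeros(n); yh2[1100] = 0.95              # P spike
yh3 = np.zeros(n); yh3[1600] = 0.85              # S spike
args_demo = {'detection_threshold': 0.3, 'P_threshold': 0.3, 'S_threshold': 0.3}
matches, pick_errors, _ = _picker(args_demo, yh1, yh2, yh3)
print(matches)  # e.g. {1000: [2000, 0.9, None, 1100, 0.95, None, 1600, 0.85, None]}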
def _resampling(st):
'Performs resampling on ObsPy Stream objects'
need_resampling = [tr for tr in st if tr.stats.sampling_rate != 100.0]
if len(need_resampling) > 0:
# print('resampling ...', flush=True)
for indx, tr in enumerate(need_resampling):
if tr.stats.delta < 0.01:
tr.filter('lowpass',freq=45,zerophase=True)
tr.resample(100)
tr.stats.sampling_rate = 100
tr.stats.delta = 0.01
tr.data.dtype = 'int32'
st.remove(tr)
st.append(tr)
return st
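# Hedged usage sketch: resampling a synthetic 200 Hz ObsPy trace to 100 Hz;
# the trace content is random and purely illustrative.
import numpy as np
from obspy import Trace, Stream
tr = Trace(data=np.random.randn(12000).astype('float32'))
tr.stats.sampling_rate = 200.0             # delta = 0.005 < 0.01, so it is low-passed first
st = _resampling(Stream(traces=[tr]))
print(st[0].stats.sampling_rate)             # 100.0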
def _normalize(data, mode = 'max'):
"""
Normalize 3D arrays.
Parameters
----------
data : 3D numpy array
3 component traces.
mode : str, default='max'
Mode of normalization: 'max' or 'std'.
Returns
-------
data : 3D numpy array
normalized data.
"""
data -= np.mean(data, axis=0, keepdims=True)
if mode == 'max':
max_data = np.max(data, axis=0, keepdims=True)
assert(max_data.shape[-1] == data.shape[-1])
max_data[max_data == 0] = 1
data /= max_data
elif mode == 'std':
std_data = np.std(data, axis=0, keepdims=True)
assert(std_data.shape[-1] == data.shape[-1])
std_data[std_data == 0] = 1
data /= std_data
return data
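# Hedged usage sketch: demeaning and max-normalizing a random 3-component
# window; the per-channel scales are arbitrary.
import numpy as np
w = np.random.randn(6000, 3) * np.array([1.0, 5.0, 0.1])
w_norm = _normalize(w, mode='max')
print(w_norm.max(axis=0))                    # ~[1. 1. 1.] after scaling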
def _plotter_prediction(data, args, save_figs, yh1, yh2, yh3, evi, matches):
"""
Generates plots of detected events with the prediction probabilities and arrival picks.
Parameters
----------
data: NumPy array
3 component raw waveform.
evi: str
Trace name.
args: dic
A dictionary containing all of the input parameters.
save_figs: str
Path to the folder for saving the plots.
yh1: 1D array
Detection probabilities.
yh2: 1D array
P arrival probabilities.
yh3: 1D array
S arrival probabilities.
matches: dic
Contains the information for the detected and picked event.
"""
font0 = {'family': 'serif',
'color': 'white',
'stretch': 'condensed',
'weight': 'normal',
'size': 12,
}
spt, sst, detected_events = [], [], []
for match, match_value in matches.items():
detected_events.append([match, match_value[0]])
if match_value[3]:
spt.append(match_value[3])
else:
spt.append(None)
if match_value[6]:
sst.append(match_value[6])
else:
sst.append(None)
if args['plot_mode'] == 'time_frequency':
fig = plt.figure(constrained_layout=False)
widths = [6, 1]
heights = [1, 1, 1, 1, 1, 1, 1.8]
spec5 = fig.add_gridspec(ncols=2, nrows=7, width_ratios=widths,
height_ratios=heights, left=0.1, right=0.9, hspace=0.1)
ax = fig.add_subplot(spec5[0, 0])
plt.plot(data[:, 0], 'k')
plt.xlim(0, 6000)
x = np.arange(6000)
if platform.system() == 'Windows':
plt.title(save_figs.split("\\")[-2].split("_")[0]+":"+str(evi))
else:
plt.title(save_figs.split("/")[-2].split("_")[0]+":"+str(evi))
ax.set_xticks([])
plt.rcParams["figure.figsize"] = (10, 10)
legend_properties = {'weight':'bold'}
pl = None
sl = None
if len(spt) > 0 and np.count_nonzero(data[:, 0]) > 10:
ymin, ymax = ax.get_ylim()
for ipt, pt in enumerate(spt):
if pt and ipt == 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2, label='Picked P')
elif pt and ipt > 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2)
if len(sst) > 0 and np.count_nonzero(data[:, 0]) > 10:
ymin, ymax = ax.get_ylim()  # recompute here so S picks can be drawn even when no P picks exist
for ist, st in enumerate(sst):
if st and ist == 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2, label='Picked S')
elif st and ist > 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2)
ax = fig.add_subplot(spec5[0, 1])
if pl or sl:
custom_lines = [Line2D([0], [0], color='k', lw=0),
Line2D([0], [0], color='c', lw=2),
Line2D([0], [0], color='m', lw=2)]
plt.legend(custom_lines, ['E', 'Picked P', 'Picked S'], fancybox=True, shadow=True)
plt.axis('off')
ax = fig.add_subplot(spec5[1, 0])
f, t, Pxx = signal.stft(data[:, 0], fs=100, nperseg=80)
Pxx = np.abs(Pxx)
plt.pcolormesh(t, f, Pxx, alpha=None, cmap='hot', shading='flat', antialiased=True)
plt.ylim(0, 40)
plt.text(1, 1, 'STFT', fontdict=font0)
plt.ylabel('Hz', fontsize=12)
ax.set_xticks([])
ax = fig.add_subplot(spec5[2, 0])
plt.plot(data[:, 1] , 'k')
plt.xlim(0, 6000)
ax.set_xticks([])
if len(spt) > 0 and np.count_nonzero(data[:, 1]) > 10:
ymin, ymax = ax.get_ylim()
for ipt, pt in enumerate(spt):
if pt and ipt == 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2, label='Picked P')
elif pt and ipt > 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2)
if len(sst) > 0 and np.count_nonzero(data[:, 1]) > 10:
ymin, ymax = ax.get_ylim()
for ist, st in enumerate(sst):
if st and ist == 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2, label='Picked S')
elif st and ist > 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2)
ax = fig.add_subplot(spec5[2, 1])
if pl or sl:
custom_lines = [Line2D([0], [0], color='k', lw=0),
Line2D([0], [0], color='c', lw=2),
Line2D([0], [0], color='m', lw=2)]
plt.legend(custom_lines, ['N', 'Picked P', 'Picked S'], fancybox=True, shadow=True)
plt.axis('off')
ax = fig.add_subplot(spec5[3, 0])
f, t, Pxx = signal.stft(data[:, 1], fs=100, nperseg=80)
Pxx = np.abs(Pxx)
plt.pcolormesh(t, f, Pxx, alpha=None, cmap='hot', shading='flat', antialiased=True)
plt.ylim(0, 40)
plt.text(1, 1, 'STFT', fontdict=font0)
plt.ylabel('Hz', fontsize=12)
ax.set_xticks([])
ax = fig.add_subplot(spec5[4, 0])
plt.plot(data[:, 2], 'k')
plt.xlim(0, 6000)
ax.set_xticks([])
if len(spt) > 0 and np.count_nonzero(data[:, 2]) > 10:
ymin, ymax = ax.get_ylim()
for ipt, pt in enumerate(spt):
if pt and ipt == 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2, label='Picked P')
elif pt and ipt > 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2)
if len(sst) > 0 and np.count_nonzero(data[:, 2]) > 10:
ymin, ymax = ax.get_ylim()
for ist, st in enumerate(sst):
if st and ist == 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2, label='Picked S')
elif st and ist > 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2)
ax = fig.add_subplot(spec5[4, 1])
if pl or sl:
custom_lines = [Line2D([0], [0], color='k', lw=0),
Line2D([0], [0], color='c', lw=2),
Line2D([0], [0], color='m', lw=2)]
plt.legend(custom_lines, ['Z', 'Picked P', 'Picked S'], fancybox=True, shadow=True)
plt.axis('off')
ax = fig.add_subplot(spec5[5, 0])
f, t, Pxx = signal.stft(data[:, 2], fs=100, nperseg=80)
Pxx = np.abs(Pxx)
plt.pcolormesh(t, f, Pxx, alpha=None, cmap='hot', shading='flat', antialiased=True)
plt.ylim(0, 40)
plt.text(1, 1, 'STFT', fontdict=font0)
plt.ylabel('Hz', fontsize=12)
ax.set_xticks([])
ax = fig.add_subplot(spec5[6, 0])
x = np.linspace(0, data.shape[0], data.shape[0], endpoint=True)
plt.plot(x, yh1, '--', color='g', alpha = 0.5, linewidth=2, label='Earthquake')
plt.plot(x, yh2, '--', color='b', alpha = 0.5, linewidth=2, label='P_arrival')
plt.plot(x, yh3, '--', color='r', alpha = 0.5, linewidth=2, label='S_arrival')
plt.tight_layout()
plt.ylim((-0.1, 1.1))
plt.xlim(0, 6000)
plt.ylabel('Probability', fontsize=12)
plt.xlabel('Sample', fontsize=12)
plt.yticks(np.arange(0, 1.1, step=0.2))
axes = plt.gca()
axes.yaxis.grid(color='lightgray')
ax = fig.add_subplot(spec5[6, 1])
custom_lines = [Line2D([0], [0], linestyle='--', color='g', lw=2),
Line2D([0], [0], linestyle='--', color='b', lw=2),
Line2D([0], [0], linestyle='--', color='r', lw=2)]
plt.legend(custom_lines, ['Earthquake', 'P_arrival', 'S_arrival'], fancybox=True, shadow=True)
plt.axis('off')
font = {'family': 'serif',
'color': 'dimgrey',
'style': 'italic',
'stretch': 'condensed',
'weight': 'normal',
'size': 12,
}
plt.text(1, 0.2, 'EQTransformer', fontdict=font)
if EQT_VERSION:
plt.text(2000, 0.05, str(EQT_VERSION), fontdict=font)
plt.xlim(0, 6000)
fig.tight_layout()
fig.savefig(os.path.join(save_figs, str(evi).replace(':', '-')+'.png'))
plt.close(fig)
plt.clf()
else:
########################################## plotting only in time domain
fig = plt.figure(constrained_layout=True)
widths = [1]
heights = [1.6, 1.6, 1.6, 2.5]
spec5 = fig.add_gridspec(ncols=1, nrows=4, width_ratios=widths,
height_ratios=heights)
ax = fig.add_subplot(spec5[0, 0])
plt.plot(data[:, 0], 'k')
x = np.arange(6000)
plt.xlim(0, 6000)
if platform.system() == 'Windows':
plt.title(save_figs.split("\\")[-2].split("_")[0]+":"+str(evi))
else:
plt.title(save_figs.split("/")[-2].split("_")[0]+":"+str(evi))
plt.ylabel('Amplitude\nCounts')
plt.rcParams["figure.figsize"] = (8,6)
legend_properties = {'weight':'bold'}
pl = sl = None
if len(spt) > 0 and np.count_nonzero(data[:, 0]) > 10:
ymin, ymax = ax.get_ylim()
for ipt, pt in enumerate(spt):
if pt and ipt == 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2, label='Picked P')
elif pt and ipt > 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2)
if len(sst) > 0 and np.count_nonzero(data[:, 0]) > 10:
ymin, ymax = ax.get_ylim()
for ist, st in enumerate(sst):
if st and ist == 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2, label='Picked S')
elif st and ist > 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2)
if pl or sl:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
custom_lines = [Line2D([0], [0], color='k', lw=0),
Line2D([0], [0], color='c', lw=2),
Line2D([0], [0], color='m', lw=2)]
plt.legend(custom_lines, ['E', 'Picked P', 'Picked S'],
loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
ax = fig.add_subplot(spec5[1, 0])
plt.plot(data[:, 1] , 'k')
plt.xlim(0, 6000)
plt.ylabel('Amplitude\nCounts')
if len(spt) > 0 and np.count_nonzero(data[:, 1]) > 10:
ymin, ymax = ax.get_ylim()
for ipt, pt in enumerate(spt):
if pt and ipt == 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2, label='Picked P')
elif pt and ipt > 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2)
if len(sst) > 0 and np.count_nonzero(data[:, 1]) > 10:
ymin, ymax = ax.get_ylim()
for ist, st in enumerate(sst):
if st and ist == 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2, label='Picked S')
elif st and ist > 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2)
if pl or sl:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
custom_lines = [Line2D([0], [0], color='k', lw=0),
Line2D([0], [0], color='c', lw=2),
Line2D([0], [0], color='m', lw=2)]
plt.legend(custom_lines, ['N', 'Picked P', 'Picked S'],
loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
ax = fig.add_subplot(spec5[2, 0])
plt.plot(data[:, 2], 'k')
plt.xlim(0, 6000)
plt.ylabel('Amplitude\nCounts')
ax.set_xticks([])
if len(spt) > 0 and np.count_nonzero(data[:, 2]) > 10:
ymin, ymax = ax.get_ylim()
for ipt, pt in enumerate(spt):
if pt and ipt == 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2, label='Picked P')
elif pt and ipt > 0:
pl = plt.vlines(int(pt), ymin, ymax, color='c', linewidth=2)
if len(sst) > 0 and np.count_nonzero(data[:, 2]) > 10:
ymin, ymax = ax.get_ylim()
for ist, st in enumerate(sst):
if st and ist == 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2, label='Picked S')
elif st and ist > 0:
sl = plt.vlines(int(st), ymin, ymax, color='m', linewidth=2)
if pl or sl:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
custom_lines = [Line2D([0], [0], color='k', lw=0),
Line2D([0], [0], color='c', lw=2),
Line2D([0], [0], color='m', lw=2)]
plt.legend(custom_lines, ['Z', 'Picked P', 'Picked S'],
loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
ax = fig.add_subplot(spec5[3, 0])
x = np.linspace(0, data.shape[0], data.shape[0], endpoint=True)
plt.plot(x, yh1, '--', color='g', alpha = 0.5, linewidth=1.5, label='Earthquake')
plt.plot(x, yh2, '--', color='b', alpha = 0.5, linewidth=1.5, label='P_arrival')
plt.plot(x, yh3, '--', color='r', alpha = 0.5, linewidth=1.5, label='S_arrival')
plt.tight_layout()
plt.ylim((-0.1, 1.1))
plt.xlim(0, 6000)
plt.ylabel('Probability')
plt.xlabel('Sample')
plt.legend(loc='lower center', bbox_to_anchor=(0., 1.17, 1., .102), ncol=3, mode="expand",
prop=legend_properties, borderaxespad=0., fancybox=True, shadow=True)
plt.yticks(np.arange(0, 1.1, step=0.2))
axes = plt.gca()
axes.yaxis.grid(color='lightgray')
font = {'family': 'serif',
'color': 'dimgrey',
'style': 'italic',
'stretch': 'condensed',
'weight': 'normal',
'size': 12,
}
plt.text(6500, 0.5, 'EQTransformer', fontdict=font)
if EQT_VERSION:
plt.text(7000, 0.1, str(EQT_VERSION), fontdict=font)
fig.tight_layout()
fig.savefig(os.path.join(save_figs, str(evi).replace(':', '-')+'.png'))
plt.close(fig)
plt.clf()
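# Hedged usage sketch: rendering a synthetic prediction in plain time-domain
# mode; the output folder, trace name, and match tuple are hypothetical.
import os
import numpy as np
save_figs = os.path.join('demo_out', 'figures')  # needs >= 2 path components for the title split
os.makedirs(save_figs, exist_ok=True)
data = np.random.randn(6000, 3)
yh1 = np.zeros(6000); yh1[1000:2000] = 0.9
yh2 = np.zeros(6000); yh2[1100] = 0.95
yh3 = np.zeros(6000); yh3[1600] = 0.85
matches = {1000: [2000, 0.9, None, 1100, 0.95, None, 1600, 0.85, None]}
_plotter_prediction(data, {'plot_mode': 'time'}, save_figs,
                    yh1, yh2, yh3, 'demo_trace', matches)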
|
[
"pickle.dump",
"os.remove",
"numpy.abs",
"matplotlib.pyplot.clf",
"numpy.isnan",
"numpy.argsort",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"numpy.mean",
"obspy.core.Stream",
"numpy.arange",
"matplotlib.pyplot.gca",
"shutil.rmtree",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"matplotlib.lines.Line2D",
"numpy.std",
"matplotlib.pyplot.close",
"numpy.max",
"tensorflow.keras.optimizers.Adam",
"datetime.timedelta",
"numpy.linspace",
"datetime.datetime.now",
"h5py.File",
"tensorflow.keras.models.load_model",
"csv.writer",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"tensorflow.Session",
"numpy.hstack",
"matplotlib.pyplot.text",
"numpy.sort",
"datetime.datetime.strptime",
"matplotlib.use",
"numpy.percentile",
"matplotlib.pyplot.pcolormesh",
"platform.system",
"matplotlib.pyplot.ylabel",
"faulthandler.enable",
"os.listdir",
"numpy.vstack",
"matplotlib.pyplot.xlim",
"tqdm.tqdm.write",
"json.load",
"obspy.signal.trigger.trigger_onset",
"os.makedirs",
"logging.basicConfig",
"warnings.filterwarnings",
"os.path.isdir",
"os.getcwd",
"matplotlib.pyplot.plot",
"numpy.count_nonzero",
"numpy.zeros",
"matplotlib.pyplot.axis",
"time.time",
"numpy.where",
"numpy.array",
"numpy.atleast_1d",
"matplotlib.pyplot.xlabel",
"scipy.signal.stft",
"logging.getLogger"
] |
[((440, 461), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (454, 461), False, 'import matplotlib\n'), ((926, 947), 'faulthandler.enable', 'faulthandler.enable', ([], {}), '()\n', (945, 947), False, 'import faulthandler\n'), ((1100, 1133), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1123, 1133), False, 'import warnings\n'), ((6491, 6625), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s [%(levelname)s] [%(name)s] %(message)s"""', 'datefmt': '"""%m-%d %H:%M"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s [%(levelname)s] [%(name)s] %(message)s', datefmt='%m-%d %H:%M'\n )\n", (6510, 6625), False, 'import logging\n'), ((7147, 7181), 'logging.getLogger', 'logging.getLogger', (['"""EQTransformer"""'], {}), "('EQTransformer')\n", (7164, 7181), False, 'import logging\n'), ((7318, 7492), 'tensorflow.keras.models.load_model', 'load_model', (["args['input_model']"], {'custom_objects': "{'SeqSelfAttention': SeqSelfAttention, 'FeedForward': FeedForward,\n 'LayerNormalization': LayerNormalization, 'f1': f1}"}), "(args['input_model'], custom_objects={'SeqSelfAttention':\n SeqSelfAttention, 'FeedForward': FeedForward, 'LayerNormalization':\n LayerNormalization, 'f1': f1})\n", (7328, 7492), False, 'from tensorflow.keras.models import load_model\n'), ((8074, 8096), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (8087, 8096), False, 'import os\n'), ((17314, 17334), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (17323, 17334), False, 'import json\n'), ((17349, 17368), 'obspy.core.Stream', 'obspy.core.Stream', ([], {}), '()\n', (17366, 17368), False, 'import obspy\n'), ((31178, 31211), 'numpy.array', 'np.array', (['[[], [], []]'], {'dtype': 'int'}), '([[], [], []], dtype=int)\n', (31186, 31211), True, 'import numpy as np\n'), ((33883, 33959), 'obspy.signal.trigger.trigger_onset', 'trigger_onset', (['yh1', "args['detection_threshold']", "args['detection_threshold']"], {}), "(yh1, args['detection_threshold'], args['detection_threshold'])\n", (33896, 33959), False, 'from obspy.signal.trigger import trigger_onset\n'), ((39225, 39261), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (39232, 39261), True, 'import numpy as np\n'), ((6257, 6273), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (6271, 6273), True, 'import tensorflow as tf\n'), ((8029, 8040), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8038, 8040), False, 'import os\n'), ((8638, 8655), 'platform.system', 'platform.system', ([], {}), '()\n', (8653, 8655), False, 'import platform\n'), ((9230, 9285), 'os.path.join', 'os.path.join', (['save_dir', '"""prediction_probabilities.hdf5"""'], {}), "(save_dir, 'prediction_probabilities.hdf5')\n", (9242, 9285), False, 'import os\n'), ((9306, 9339), 'os.path.join', 'os.path.join', (['save_dir', '"""figures"""'], {}), "(save_dir, 'figures')\n", (9318, 9339), False, 'import os\n'), ((9352, 9375), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (9365, 9375), False, 'import os\n'), ((9423, 9444), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (9434, 9444), False, 'import os\n'), ((9856, 9934), 'csv.writer', 'csv.writer', (['csvPr_gen'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csvPr_gen, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (9866, 9934), False, 'import csv\n'), 
((11066, 11077), 'time.time', 'time.time', ([], {}), '()\n', (11075, 11077), False, 'import time\n'), ((13897, 13908), 'time.time', 'time.time', ([], {}), '()\n', (13906, 13908), False, 'import time\n'), ((16968, 17019), 'pickle.dump', 'pickle.dump', (['data_track', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(data_track, f, pickle.HIGHEST_PROTOCOL)\n', (16979, 17019), False, 'import pickle\n'), ((18893, 18912), 'numpy.zeros', 'np.zeros', (['[6000, 3]'], {}), '([6000, 3])\n', (18901, 18912), True, 'import numpy as np\n'), ((22825, 22861), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (22832, 22861), True, 'import numpy as np\n'), ((23453, 23489), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 6000, 3)'], {}), '((self.batch_size, 6000, 3))\n', (23461, 23489), True, 'import numpy as np\n'), ((25226, 25279), 'datetime.datetime.strptime', 'datetime.strptime', (['start_time', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(start_time, '%Y-%m-%d %H:%M:%S.%f')\n", (25243, 25279), False, 'from datetime import datetime, timedelta\n'), ((30897, 30920), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (30905, 30920), True, 'import numpy as np\n'), ((31601, 31627), 'numpy.hstack', 'np.hstack', (['(ine, ire, ife)'], {}), '((ine, ire, ife))\n', (31610, 31627), True, 'import numpy as np\n'), ((32500, 32530), 'numpy.zeros', 'np.zeros', (['ind.size'], {'dtype': 'bool'}), '(ind.size, dtype=bool)\n', (32508, 32530), True, 'import numpy as np\n'), ((32940, 32959), 'numpy.sort', 'np.sort', (['ind[~idel]'], {}), '(ind[~idel])\n', (32947, 32959), True, 'import numpy as np\n'), ((39303, 39338), 'numpy.max', 'np.max', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (39309, 39338), True, 'import numpy as np\n'), ((41081, 41117), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(False)'}), '(constrained_layout=False)\n', (41091, 41117), True, 'import matplotlib.pyplot as plt\n'), ((41419, 41444), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', '"""k"""'], {}), "(data[:, 0], 'k')\n", (41427, 41444), True, 'import matplotlib.pyplot as plt\n'), ((41453, 41470), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (41461, 41470), True, 'import matplotlib.pyplot as plt\n'), ((41483, 41498), 'numpy.arange', 'np.arange', (['(6000)'], {}), '(6000)\n', (41492, 41498), True, 'import numpy as np\n'), ((43186, 43229), 'scipy.signal.stft', 'signal.stft', (['data[:, 0]'], {'fs': '(100)', 'nperseg': '(80)'}), '(data[:, 0], fs=100, nperseg=80)\n', (43197, 43229), False, 'from scipy import signal\n'), ((43244, 43255), 'numpy.abs', 'np.abs', (['Pxx'], {}), '(Pxx)\n', (43250, 43255), True, 'import numpy as np\n'), ((43287, 43374), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['t', 'f', 'Pxx'], {'alpha': 'None', 'cmap': '"""hot"""', 'shading': '"""flat"""', 'antialiased': '(True)'}), "(t, f, Pxx, alpha=None, cmap='hot', shading='flat',\n antialiased=True)\n", (43301, 43374), True, 'import matplotlib.pyplot as plt\n'), ((43379, 43394), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(40)'], {}), '(0, 40)\n', (43387, 43394), True, 'import matplotlib.pyplot as plt\n'), ((43403, 43441), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(1)', '"""STFT"""'], {'fontdict': 'font0'}), "(1, 1, 'STFT', fontdict=font0)\n", (43411, 43441), True, 'import matplotlib.pyplot as plt\n'), ((43450, 43479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hz"""'], {'fontsize': '(12)'}), 
"('Hz', fontsize=12)\n", (43460, 43479), True, 'import matplotlib.pyplot as plt\n'), ((43573, 43598), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 1]', '"""k"""'], {}), "(data[:, 1], 'k')\n", (43581, 43598), True, 'import matplotlib.pyplot as plt\n'), ((43608, 43625), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (43616, 43625), True, 'import matplotlib.pyplot as plt\n'), ((44927, 44970), 'scipy.signal.stft', 'signal.stft', (['data[:, 1]'], {'fs': '(100)', 'nperseg': '(80)'}), '(data[:, 1], fs=100, nperseg=80)\n', (44938, 44970), False, 'from scipy import signal\n'), ((44985, 44996), 'numpy.abs', 'np.abs', (['Pxx'], {}), '(Pxx)\n', (44991, 44996), True, 'import numpy as np\n'), ((45028, 45115), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['t', 'f', 'Pxx'], {'alpha': 'None', 'cmap': '"""hot"""', 'shading': '"""flat"""', 'antialiased': '(True)'}), "(t, f, Pxx, alpha=None, cmap='hot', shading='flat',\n antialiased=True)\n", (45042, 45115), True, 'import matplotlib.pyplot as plt\n'), ((45120, 45135), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(40)'], {}), '(0, 40)\n', (45128, 45135), True, 'import matplotlib.pyplot as plt\n'), ((45144, 45182), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(1)', '"""STFT"""'], {'fontdict': 'font0'}), "(1, 1, 'STFT', fontdict=font0)\n", (45152, 45182), True, 'import matplotlib.pyplot as plt\n'), ((45191, 45220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hz"""'], {'fontsize': '(12)'}), "('Hz', fontsize=12)\n", (45201, 45220), True, 'import matplotlib.pyplot as plt\n'), ((45339, 45364), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 2]', '"""k"""'], {}), "(data[:, 2], 'k')\n", (45347, 45364), True, 'import matplotlib.pyplot as plt\n'), ((45374, 45391), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (45382, 45391), True, 'import matplotlib.pyplot as plt\n'), ((46741, 46784), 'scipy.signal.stft', 'signal.stft', (['data[:, 2]'], {'fs': '(100)', 'nperseg': '(80)'}), '(data[:, 2], fs=100, nperseg=80)\n', (46752, 46784), False, 'from scipy import signal\n'), ((46799, 46810), 'numpy.abs', 'np.abs', (['Pxx'], {}), '(Pxx)\n', (46805, 46810), True, 'import numpy as np\n'), ((46842, 46929), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['t', 'f', 'Pxx'], {'alpha': 'None', 'cmap': '"""hot"""', 'shading': '"""flat"""', 'antialiased': '(True)'}), "(t, f, Pxx, alpha=None, cmap='hot', shading='flat',\n antialiased=True)\n", (46856, 46929), True, 'import matplotlib.pyplot as plt\n'), ((46934, 46949), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(40)'], {}), '(0, 40)\n', (46942, 46949), True, 'import matplotlib.pyplot as plt\n'), ((46958, 46996), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(1)', '"""STFT"""'], {'fontdict': 'font0'}), "(1, 1, 'STFT', fontdict=font0)\n", (46966, 46996), True, 'import matplotlib.pyplot as plt\n'), ((47005, 47034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hz"""'], {'fontsize': '(12)'}), "('Hz', fontsize=12)\n", (47015, 47034), True, 'import matplotlib.pyplot as plt\n'), ((47147, 47206), 'numpy.linspace', 'np.linspace', (['(0)', 'data.shape[0]', 'data.shape[0]'], {'endpoint': '(True)'}), '(0, data.shape[0], data.shape[0], endpoint=True)\n', (47158, 47206), True, 'import numpy as np\n'), ((47247, 47324), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yh1', '"""--"""'], {'color': '"""g"""', 'alpha': '(0.5)', 'linewidth': '(2)', 'label': '"""Earthquake"""'}), "(x, yh1, '--', color='g', alpha=0.5, linewidth=2, label='Earthquake')\n", (47255, 47324), 
True, 'import matplotlib.pyplot as plt\n'), ((47335, 47411), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yh2', '"""--"""'], {'color': '"""b"""', 'alpha': '(0.5)', 'linewidth': '(2)', 'label': '"""P_arrival"""'}), "(x, yh2, '--', color='b', alpha=0.5, linewidth=2, label='P_arrival')\n", (47343, 47411), True, 'import matplotlib.pyplot as plt\n'), ((47422, 47498), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yh3', '"""--"""'], {'color': '"""r"""', 'alpha': '(0.5)', 'linewidth': '(2)', 'label': '"""S_arrival"""'}), "(x, yh3, '--', color='r', alpha=0.5, linewidth=2, label='S_arrival')\n", (47430, 47498), True, 'import matplotlib.pyplot as plt\n'), ((47509, 47527), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (47525, 47527), True, 'import matplotlib.pyplot as plt\n'), ((47543, 47564), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1, 1.1)'], {}), '((-0.1, 1.1))\n', (47551, 47564), True, 'import matplotlib.pyplot as plt\n'), ((47574, 47591), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (47582, 47591), True, 'import matplotlib.pyplot as plt\n'), ((47600, 47638), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {'fontsize': '(12)'}), "('Probability', fontsize=12)\n", (47610, 47638), True, 'import matplotlib.pyplot as plt\n'), ((47648, 47681), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample"""'], {'fontsize': '(12)'}), "('Sample', fontsize=12)\n", (47658, 47681), True, 'import matplotlib.pyplot as plt\n'), ((47746, 47755), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (47753, 47755), True, 'import matplotlib.pyplot as plt\n'), ((48089, 48188), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['Earthquake', 'P_arrival', 'S_arrival']"], {'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['Earthquake', 'P_arrival', 'S_arrival'], fancybox\n =True, shadow=True)\n", (48099, 48188), True, 'import matplotlib.pyplot as plt\n'), ((48192, 48207), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (48200, 48207), True, 'import matplotlib.pyplot as plt\n'), ((48490, 48538), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(0.2)', '"""EQTransformer"""'], {'fontdict': 'font'}), "(1, 0.2, 'EQTransformer', fontdict=font)\n", (48498, 48538), True, 'import matplotlib.pyplot as plt\n'), ((48650, 48667), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (48658, 48667), True, 'import matplotlib.pyplot as plt\n'), ((48784, 48798), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (48793, 48798), True, 'import matplotlib.pyplot as plt\n'), ((48807, 48816), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (48814, 48816), True, 'import matplotlib.pyplot as plt\n'), ((48943, 48978), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (48953, 48978), True, 'import matplotlib.pyplot as plt\n'), ((49232, 49257), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', '"""k"""'], {}), "(data[:, 0], 'k')\n", (49240, 49257), True, 'import matplotlib.pyplot as plt\n'), ((49270, 49285), 'numpy.arange', 'np.arange', (['(6000)'], {}), '(6000)\n', (49279, 49285), True, 'import numpy as np\n'), ((49294, 49311), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (49302, 49311), True, 'import matplotlib.pyplot as plt\n'), ((49541, 49572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude\nCounts"""'], {}), "('Amplitude\\nCounts')\n", (49551, 49572), True, 'import 
matplotlib.pyplot as plt\n'), ((51162, 51187), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 1]', '"""k"""'], {}), "(data[:, 1], 'k')\n", (51170, 51187), True, 'import matplotlib.pyplot as plt\n'), ((51197, 51214), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (51205, 51214), True, 'import matplotlib.pyplot as plt\n'), ((51235, 51266), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude\nCounts"""'], {}), "('Amplitude\\nCounts')\n", (51245, 51266), True, 'import matplotlib.pyplot as plt\n'), ((52669, 52694), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 2]', '"""k"""'], {}), "(data[:, 2], 'k')\n", (52677, 52694), True, 'import matplotlib.pyplot as plt\n'), ((52704, 52721), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (52712, 52721), True, 'import matplotlib.pyplot as plt\n'), ((52750, 52781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude\nCounts"""'], {}), "('Amplitude\\nCounts')\n", (52760, 52781), True, 'import matplotlib.pyplot as plt\n'), ((54235, 54294), 'numpy.linspace', 'np.linspace', (['(0)', 'data.shape[0]', 'data.shape[0]'], {'endpoint': '(True)'}), '(0, data.shape[0], data.shape[0], endpoint=True)\n', (54246, 54294), True, 'import numpy as np\n'), ((54332, 54411), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yh1', '"""--"""'], {'color': '"""g"""', 'alpha': '(0.5)', 'linewidth': '(1.5)', 'label': '"""Earthquake"""'}), "(x, yh1, '--', color='g', alpha=0.5, linewidth=1.5, label='Earthquake')\n", (54340, 54411), True, 'import matplotlib.pyplot as plt\n'), ((54422, 54500), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yh2', '"""--"""'], {'color': '"""b"""', 'alpha': '(0.5)', 'linewidth': '(1.5)', 'label': '"""P_arrival"""'}), "(x, yh2, '--', color='b', alpha=0.5, linewidth=1.5, label='P_arrival')\n", (54430, 54500), True, 'import matplotlib.pyplot as plt\n'), ((54511, 54589), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yh3', '"""--"""'], {'color': '"""r"""', 'alpha': '(0.5)', 'linewidth': '(1.5)', 'label': '"""S_arrival"""'}), "(x, yh3, '--', color='r', alpha=0.5, linewidth=1.5, label='S_arrival')\n", (54519, 54589), True, 'import matplotlib.pyplot as plt\n'), ((54613, 54631), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (54629, 54631), True, 'import matplotlib.pyplot as plt\n'), ((54647, 54668), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1, 1.1)'], {}), '((-0.1, 1.1))\n', (54655, 54668), True, 'import matplotlib.pyplot as plt\n'), ((54678, 54695), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(6000)'], {}), '(0, 6000)\n', (54686, 54695), True, 'import matplotlib.pyplot as plt\n'), ((54748, 54773), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (54758, 54773), True, 'import matplotlib.pyplot as plt\n'), ((54783, 54803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample"""'], {}), "('Sample')\n", (54793, 54803), True, 'import matplotlib.pyplot as plt\n'), ((54814, 54988), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""', 'bbox_to_anchor': '(0.0, 1.17, 1.0, 0.102)', 'ncol': '(3)', 'mode': '"""expand"""', 'prop': 'legend_properties', 'borderaxespad': '(0.0)', 'fancybox': '(True)', 'shadow': '(True)'}), "(loc='lower center', bbox_to_anchor=(0.0, 1.17, 1.0, 0.102), ncol\n =3, mode='expand', prop=legend_properties, borderaxespad=0.0, fancybox=\n True, shadow=True)\n", (54824, 54988), True, 'import matplotlib.pyplot as plt\n'), ((55062, 55071), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), 
'()\n', (55069, 55071), True, 'import matplotlib.pyplot as plt\n'), ((55393, 55444), 'matplotlib.pyplot.text', 'plt.text', (['(6500)', '(0.5)', '"""EQTransformer"""'], {'fontdict': 'font'}), "(6500, 0.5, 'EQTransformer', fontdict=font)\n", (55401, 55444), True, 'import matplotlib.pyplot as plt\n'), ((55663, 55677), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (55672, 55677), True, 'import matplotlib.pyplot as plt\n'), ((55686, 55695), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (55693, 55695), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6474), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (6459, 6474), True, 'import tensorflow as tf\n'), ((7900, 7914), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (7904, 7914), False, 'from tensorflow.keras.optimizers import Adam\n'), ((8507, 8529), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (8520, 8529), False, 'import shutil\n'), ((8544, 8564), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (8555, 8564), False, 'import os\n'), ((9389, 9412), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (9402, 9412), False, 'import shutil\n'), ((9471, 9491), 'os.remove', 'os.remove', (['out_probs'], {}), '(out_probs)\n', (9480, 9491), False, 'import os\n'), ((9584, 9606), 'os.makedirs', 'os.makedirs', (['save_figs'], {}), '(save_figs)\n', (9595, 9606), False, 'import os\n'), ((9683, 9708), 'h5py.File', 'h5py.File', (['out_probs', '"""a"""'], {}), "(out_probs, 'a')\n", (9692, 9708), False, 'import h5py\n'), ((9765, 9815), 'os.path.join', 'os.path.join', (['save_dir', '"""X_prediction_results.csv"""'], {}), "(save_dir, 'X_prediction_results.csv')\n", (9777, 9815), False, 'import os\n'), ((11096, 11113), 'platform.system', 'platform.system', ([], {}), '()\n', (11111, 11113), False, 'import platform\n'), ((14281, 14331), 'os.path.join', 'os.path.join', (['save_dir', '"""X_prediction_results.csv"""'], {}), "(save_dir, 'X_prediction_results.csv')\n", (14293, 14331), False, 'import os\n'), ((22911, 22946), 'numpy.max', 'np.max', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (22917, 22946), True, 'import numpy as np\n'), ((25323, 25373), 'datetime.datetime.strptime', 'datetime.strptime', (['start_time', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(start_time, '%Y-%m-%d %H:%M:%S')\n", (25340, 25373), False, 'from datetime import datetime, timedelta\n'), ((25817, 25847), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(match / 100)'}), '(seconds=match / 100)\n', (25826, 25847), False, 'from datetime import datetime, timedelta\n'), ((25875, 25914), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(match_value[0] / 100)'}), '(seconds=match_value[0] / 100)\n', (25884, 25914), False, 'from datetime import datetime, timedelta\n'), ((30828, 30844), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (30841, 30844), True, 'import numpy as np\n'), ((31048, 31059), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (31056, 31059), True, 'import numpy as np\n'), ((32213, 32266), 'numpy.vstack', 'np.vstack', (['[x[ind] - x[ind - 1], x[ind] - x[ind + 1]]'], {}), '([x[ind] - x[ind - 1], x[ind] - x[ind + 1]])\n', (32222, 32266), True, 'import numpy as np\n'), ((35031, 35078), 'numpy.mean', 'np.mean', (['yh1[detection[ev][0]:detection[ev][1]]'], {}), '(yh1[detection[ev][0]:detection[ev][1]])\n', (35038, 35078), True, 'import numpy as np\n'), ((35100, 35119), 'numpy.round', 
'np.round', (['D_prob', '(3)'], {}), '(D_prob, 3)\n', (35108, 35119), True, 'import numpy as np\n'), ((39526, 39561), 'numpy.std', 'np.std', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (39532, 39561), True, 'import numpy as np\n'), ((41510, 41527), 'platform.system', 'platform.system', ([], {}), '()\n', (41525, 41527), False, 'import platform\n'), ((42994, 43081), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['E', 'Picked P', 'Picked S']"], {'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['E', 'Picked P', 'Picked S'], fancybox=True,\n shadow=True)\n", (43004, 43081), True, 'import matplotlib.pyplot as plt\n'), ((43090, 43105), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (43098, 43105), True, 'import matplotlib.pyplot as plt\n'), ((44742, 44829), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['N', 'Picked P', 'Picked S']"], {'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['N', 'Picked P', 'Picked S'], fancybox=True,\n shadow=True)\n", (44752, 44829), True, 'import matplotlib.pyplot as plt\n'), ((44838, 44853), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (44846, 44853), True, 'import matplotlib.pyplot as plt\n'), ((46545, 46632), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['Z', 'Picked P', 'Picked S']"], {'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['Z', 'Picked P', 'Picked S'], fancybox=True,\n shadow=True)\n", (46555, 46632), True, 'import matplotlib.pyplot as plt\n'), ((46641, 46656), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (46649, 46656), True, 'import matplotlib.pyplot as plt\n'), ((47702, 47729), 'numpy.arange', 'np.arange', (['(0)', '(1.1)'], {'step': '(0.2)'}), '(0, 1.1, step=0.2)\n', (47711, 47729), True, 'import numpy as np\n'), ((47880, 47929), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'linestyle': '"""--"""', 'color': '"""g"""', 'lw': '(2)'}), "([0], [0], linestyle='--', color='g', lw=2)\n", (47886, 47929), False, 'from matplotlib.lines import Line2D\n'), ((47955, 48004), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'linestyle': '"""--"""', 'color': '"""b"""', 'lw': '(2)'}), "([0], [0], linestyle='--', color='b', lw=2)\n", (47961, 48004), False, 'from matplotlib.lines import Line2D\n'), ((48030, 48079), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'linestyle': '"""--"""', 'color': '"""r"""', 'lw': '(2)'}), "([0], [0], linestyle='--', color='r', lw=2)\n", (48036, 48079), False, 'from matplotlib.lines import Line2D\n'), ((49333, 49350), 'platform.system', 'platform.system', ([], {}), '()\n', (49348, 49350), False, 'import platform\n'), ((50889, 51020), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['E', 'Picked P', 'Picked S']"], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)', 'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['E', 'Picked P', 'Picked S'], loc='center left',\n bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)\n", (50899, 51020), True, 'import matplotlib.pyplot as plt\n'), ((52416, 52547), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['N', 'Picked P', 'Picked S']"], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)', 'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['N', 'Picked P', 'Picked S'], loc='center left',\n bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)\n", (52426, 52547), True, 'import matplotlib.pyplot as plt\n'), ((53978, 
54109), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['Z', 'Picked P', 'Picked S']"], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)', 'fancybox': '(True)', 'shadow': '(True)'}), "(custom_lines, ['Z', 'Picked P', 'Picked S'], loc='center left',\n bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)\n", (53988, 54109), True, 'import matplotlib.pyplot as plt\n'), ((55018, 55045), 'numpy.arange', 'np.arange', (['(0)', '(1.1)'], {'step': '(0.2)'}), '(0, 1.1, step=0.2)\n', (55027, 55045), True, 'import numpy as np\n'), ((6197, 6238), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(True)'}), '(log_device_placement=True)\n', (6211, 6238), True, 'import tensorflow as tf\n'), ((6916, 6945), 'tqdm.tqdm.write', 'tqdm.write', (['x'], {'file': 'self.file'}), '(x, file=self.file)\n', (6926, 6945), False, 'from tqdm import tqdm\n'), ((8721, 8747), 'os.listdir', 'listdir', (["args['input_dir']"], {}), "(args['input_dir'])\n", (8728, 8747), False, 'from os import listdir\n'), ((8853, 8879), 'os.listdir', 'listdir', (["args['input_dir']"], {}), "(args['input_dir'])\n", (8860, 8879), False, 'from os import listdir\n'), ((11153, 11165), 'os.path.join', 'join', (['st', 'ev'], {}), '(st, ev)\n', (11157, 11165), False, 'from os.path import join\n'), ((11309, 11321), 'os.path.join', 'join', (['st', 'ev'], {}), '(st, ev)\n', (11313, 11321), False, 'from os.path import join\n'), ((14977, 15015), 'os.path.join', 'os.path.join', (['save_dir', '"""X_report.txt"""'], {}), "(save_dir, 'X_report.txt')\n", (14989, 15015), False, 'import os\n'), ((23154, 23189), 'numpy.std', 'np.std', (['data'], {'axis': '(0)', 'keepdims': '(True)'}), '(data, axis=0, keepdims=True)\n', (23160, 23189), True, 'import numpy as np\n'), ((25529, 25570), 'datetime.datetime.strptime', 'datetime.strptime', (['r', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(r, '%Y-%m-%d %H:%M:%S')\n", (25546, 25570), False, 'from datetime import datetime, timedelta\n'), ((25613, 25657), 'datetime.datetime.strptime', 'datetime.strptime', (['r', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(r, '%Y-%m-%d %H:%M:%S.%f')\n", (25630, 25657), False, 'from datetime import datetime, timedelta\n'), ((32297, 32321), 'numpy.where', 'np.where', (['(dx < threshold)'], {}), '(dx < threshold)\n', (32305, 32321), True, 'import numpy as np\n'), ((32432, 32450), 'numpy.argsort', 'np.argsort', (['x[ind]'], {}), '(x[ind])\n', (32442, 32450), True, 'import numpy as np\n'), ((41946, 41974), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 0]'], {}), '(data[:, 0])\n', (41962, 41974), True, 'import numpy as np\n'), ((42365, 42393), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 0]'], {}), '(data[:, 0])\n', (42381, 42393), True, 'import numpy as np\n'), ((42821, 42854), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'lw': '(0)'}), "([0], [0], color='k', lw=0)\n", (42827, 42854), False, 'from matplotlib.lines import Line2D\n'), ((42884, 42917), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""c"""', 'lw': '(2)'}), "([0], [0], color='c', lw=2)\n", (42890, 42917), False, 'from matplotlib.lines import Line2D\n'), ((42947, 42980), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""m"""', 'lw': '(2)'}), "([0], [0], color='m', lw=2)\n", (42953, 42980), False, 'from matplotlib.lines import Line2D\n'), ((43695, 43723), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 1]'], {}), '(data[:, 1])\n', (43711, 43723), True, 'import numpy as np\n'), ((44115, 44143), 'numpy.count_nonzero', 
'np.count_nonzero', (['data[:, 1]'], {}), '(data[:, 1])\n', (44131, 44143), True, 'import numpy as np\n'), ((44569, 44602), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'lw': '(0)'}), "([0], [0], color='k', lw=0)\n", (44575, 44602), False, 'from matplotlib.lines import Line2D\n'), ((44632, 44665), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""c"""', 'lw': '(2)'}), "([0], [0], color='c', lw=2)\n", (44638, 44665), False, 'from matplotlib.lines import Line2D\n'), ((44695, 44728), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""m"""', 'lw': '(2)'}), "([0], [0], color='m', lw=2)\n", (44701, 44728), False, 'from matplotlib.lines import Line2D\n'), ((45477, 45505), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 2]'], {}), '(data[:, 2])\n', (45493, 45505), True, 'import numpy as np\n'), ((45897, 45925), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 2]'], {}), '(data[:, 2])\n', (45913, 45925), True, 'import numpy as np\n'), ((46372, 46405), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'lw': '(0)'}), "([0], [0], color='k', lw=0)\n", (46378, 46405), False, 'from matplotlib.lines import Line2D\n'), ((46435, 46468), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""c"""', 'lw': '(2)'}), "([0], [0], color='c', lw=2)\n", (46441, 46468), False, 'from matplotlib.lines import Line2D\n'), ((46498, 46531), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""m"""', 'lw': '(2)'}), "([0], [0], color='m', lw=2)\n", (46504, 46531), False, 'from matplotlib.lines import Line2D\n'), ((49779, 49807), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 0]'], {}), '(data[:, 0])\n', (49795, 49807), True, 'import numpy as np\n'), ((50198, 50226), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 0]'], {}), '(data[:, 0])\n', (50214, 50226), True, 'import numpy as np\n'), ((50716, 50749), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'lw': '(0)'}), "([0], [0], color='k', lw=0)\n", (50722, 50749), False, 'from matplotlib.lines import Line2D\n'), ((50779, 50812), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""c"""', 'lw': '(2)'}), "([0], [0], color='c', lw=2)\n", (50785, 50812), False, 'from matplotlib.lines import Line2D\n'), ((50842, 50875), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""m"""', 'lw': '(2)'}), "([0], [0], color='m', lw=2)\n", (50848, 50875), False, 'from matplotlib.lines import Line2D\n'), ((51326, 51354), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 1]'], {}), '(data[:, 1])\n', (51342, 51354), True, 'import numpy as np\n'), ((51745, 51773), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 1]'], {}), '(data[:, 1])\n', (51761, 51773), True, 'import numpy as np\n'), ((52243, 52276), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'lw': '(0)'}), "([0], [0], color='k', lw=0)\n", (52249, 52276), False, 'from matplotlib.lines import Line2D\n'), ((52306, 52339), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""c"""', 'lw': '(2)'}), "([0], [0], color='c', lw=2)\n", (52312, 52339), False, 'from matplotlib.lines import Line2D\n'), ((52369, 52402), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""m"""', 'lw': '(2)'}), "([0], [0], color='m', lw=2)\n", (52375, 52402), False, 'from matplotlib.lines import Line2D\n'), ((52869, 52897), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 2]'], {}), '(data[:, 2])\n', 
(52885, 52897), True, 'import numpy as np\n'), ((53288, 53316), 'numpy.count_nonzero', 'np.count_nonzero', (['data[:, 2]'], {}), '(data[:, 2])\n', (53304, 53316), True, 'import numpy as np\n'), ((53805, 53838), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'lw': '(0)'}), "([0], [0], color='k', lw=0)\n", (53811, 53838), False, 'from matplotlib.lines import Line2D\n'), ((53868, 53901), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""c"""', 'lw': '(2)'}), "([0], [0], color='c', lw=2)\n", (53874, 53901), False, 'from matplotlib.lines import Line2D\n'), ((53931, 53964), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""m"""', 'lw': '(2)'}), "([0], [0], color='m', lw=2)\n", (53937, 53964), False, 'from matplotlib.lines import Line2D\n'), ((11176, 11214), 'os.listdir', 'listdir', (["(args['input_dir'] + '\\\\' + st)"], {}), "(args['input_dir'] + '\\\\' + st)\n", (11183, 11214), False, 'from os import listdir\n'), ((11332, 11369), 'os.listdir', 'listdir', (["(args['input_dir'] + '/' + st)"], {}), "(args['input_dir'] + '/' + st)\n", (11339, 11369), False, 'from os import listdir\n'), ((26195, 26234), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(match_value[3] / 100)'}), '(seconds=match_value[3] / 100)\n', (26204, 26234), False, 'from datetime import datetime, timedelta\n'), ((26480, 26519), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(match_value[6] / 100)'}), '(seconds=match_value[6] / 100)\n', (26489, 26519), False, 'from datetime import datetime, timedelta\n'), ((31131, 31143), 'numpy.isnan', 'np.isnan', (['dx'], {}), '(dx)\n', (31139, 31143), True, 'import numpy as np\n'), ((31780, 31823), 'numpy.hstack', 'np.hstack', (['(indnan, indnan - 1, indnan + 1)'], {}), '((indnan, indnan - 1, indnan + 1))\n', (31789, 31823), True, 'import numpy as np\n'), ((31253, 31271), 'numpy.hstack', 'np.hstack', (['(dx, 0)'], {}), '((dx, 0))\n', (31262, 31271), True, 'import numpy as np\n'), ((31280, 31298), 'numpy.hstack', 'np.hstack', (['(0, dx)'], {}), '((0, dx))\n', (31289, 31298), True, 'import numpy as np\n'), ((15309, 15323), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15321, 15323), False, 'from datetime import datetime, timedelta\n'), ((31393, 31411), 'numpy.hstack', 'np.hstack', (['(dx, 0)'], {}), '((dx, 0))\n', (31402, 31411), True, 'import numpy as np\n'), ((31421, 31439), 'numpy.hstack', 'np.hstack', (['(0, dx)'], {}), '((0, dx))\n', (31430, 31439), True, 'import numpy as np\n'), ((31525, 31543), 'numpy.hstack', 'np.hstack', (['(dx, 0)'], {}), '((dx, 0))\n', (31534, 31543), True, 'import numpy as np\n'), ((31552, 31570), 'numpy.hstack', 'np.hstack', (['(0, dx)'], {}), '((0, dx))\n', (31561, 31570), True, 'import numpy as np\n'), ((28786, 28808), 'numpy.percentile', 'np.percentile', (['sw1', '(95)'], {}), '(sw1, 95)\n', (28799, 28808), True, 'import numpy as np\n'), ((28808, 28830), 'numpy.percentile', 'np.percentile', (['nw1', '(95)'], {}), '(nw1, 95)\n', (28821, 28830), True, 'import numpy as np\n'), ((29109, 29131), 'numpy.percentile', 'np.percentile', (['sw1', '(95)'], {}), '(sw1, 95)\n', (29122, 29131), True, 'import numpy as np\n'), ((29131, 29153), 'numpy.percentile', 'np.percentile', (['nw1', '(95)'], {}), '(nw1, 95)\n', (29144, 29153), True, 'import numpy as np\n'), ((29409, 29431), 'numpy.percentile', 'np.percentile', (['sw1', '(95)'], {}), '(sw1, 95)\n', (29422, 29431), True, 'import numpy as np\n'), ((29431, 29453), 'numpy.percentile', 'np.percentile', (['nw1', '(95)'], {}), '(nw1, 95)\n', 
(29444, 29453), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import os
import torch
from torch.autograd import Variable
from skimage import io
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from geotnf.transformation import GeometricTnf
from geotnf.flow import read_flo_file
class TSSDataset(Dataset):
"""
TSS image pair dataset
http://taniai.space/projects/cvpr16_dccs/
Args:
csv_file (string): Path to the csv file with image names and annotation files.
dataset_path (string): Directory with the images.
output_size (2-tuple): Desired output size
transform (callable): Transformation for post-processing the training pair (e.g. image normalization)
"""
def __init__(self, csv_file, dataset_path,output_size=(240,240),transform=None):
self.out_h, self.out_w = output_size
self.pairs = pd.read_csv(csv_file)
self.img_A_names = self.pairs.iloc[:,0]
self.img_B_names = self.pairs.iloc[:,1]
self.flow_direction = self.pairs.iloc[:, 2].values.astype('int')
self.flip_img_A = self.pairs.iloc[:, 3].values.astype('int')
self.pair_category = self.pairs.iloc[:, 4].values.astype('int')
self.dataset_path = dataset_path
self.transform = transform
# no CUDA here: this dataset is used from CPU worker threads in the DataLoader, and initializing CUDA there causes conflicts
self.affineTnf = GeometricTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
def __len__(self):
return len(self.pairs)
def __getitem__(self, idx):
# get pre-processed images
flip_img_A = self.flip_img_A[idx]
image_A,im_size_A = self.get_image(self.img_A_names,idx,flip_img_A)
image_B,im_size_B = self.get_image(self.img_B_names,idx)
# get flow output path
flow_path = self.get_GT_flow_relative_path(idx)
sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'flow_path': flow_path}
# # get ground-truth flow
# flow = self.get_GT_flow(idx)
# sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'flow_GT': flow}
if self.transform:
sample = self.transform(sample)
return sample
def get_image(self,img_name_list,idx,flip=False):
img_name = os.path.join(self.dataset_path, img_name_list[idx])
image = io.imread(img_name)
# if grayscale convert to 3-channel image
if image.ndim==2:
image=np.repeat(np.expand_dims(image,2),axis=2,repeats=3)
# flip horizontally if needed
if flip:
image=np.flip(image,1)
# get image size
im_size = np.asarray(image.shape)
# convert to torch Variable
image = np.expand_dims(image.transpose((2,0,1)),0)
image = torch.Tensor(image.astype(np.float32))
image_var = Variable(image,requires_grad=False)
# Resize image using bilinear sampling with identity affine tnf
image = self.affineTnf(image_var).data.squeeze(0)
im_size = torch.Tensor(im_size.astype(np.float32))
return (image, im_size)
def get_GT_flow(self,idx):
img_folder = os.path.dirname(self.img_A_names[idx])
flow_dir = self.flow_direction[idx]
flow_file = 'flow'+str(flow_dir)+'.flo'
flow_file_path = os.path.join(self.dataset_path, img_folder , flow_file)
flow = torch.FloatTensor(read_flo_file(flow_file_path))
return flow
def get_GT_flow_relative_path(self,idx):
img_folder = os.path.dirname(self.img_A_names[idx])
flow_dir = self.flow_direction[idx]
flow_file = 'flow'+str(flow_dir)+'.flo'
flow_file_path = os.path.join(img_folder , flow_file)
return flow_file_path
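# Minimal usage sketch (illustrative only; the csv path and dataset directory
# below are placeholders, not part of this sample):
# from torch.utils.data import DataLoader
# tss = TSSDataset(csv_file='test_pairs_tss.csv', dataset_path='datasets/tss')
# loader = DataLoader(tss, batch_size=4, shuffle=False, num_workers=4)
# batch = next(iter(loader))
# print(batch['source_image'].shape, batch['flow_path'])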
|
[
"numpy.flip",
"pandas.read_csv",
"torch.autograd.Variable",
"numpy.asarray",
"os.path.dirname",
"numpy.expand_dims",
"geotnf.flow.read_flo_file",
"geotnf.transformation.GeometricTnf",
"os.path.join",
"skimage.io.imread"
] |
[((906, 927), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (917, 927), True, 'import pandas as pd\n'), ((1440, 1504), 'geotnf.transformation.GeometricTnf', 'GeometricTnf', ([], {'out_h': 'self.out_h', 'out_w': 'self.out_w', 'use_cuda': '(False)'}), '(out_h=self.out_h, out_w=self.out_w, use_cuda=False)\n', (1452, 1504), False, 'from geotnf.transformation import GeometricTnf\n'), ((2480, 2531), 'os.path.join', 'os.path.join', (['self.dataset_path', 'img_name_list[idx]'], {}), '(self.dataset_path, img_name_list[idx])\n', (2492, 2531), False, 'import os\n'), ((2548, 2567), 'skimage.io.imread', 'io.imread', (['img_name'], {}), '(img_name)\n', (2557, 2567), False, 'from skimage import io\n'), ((2883, 2906), 'numpy.asarray', 'np.asarray', (['image.shape'], {}), '(image.shape)\n', (2893, 2906), True, 'import numpy as np\n'), ((3086, 3122), 'torch.autograd.Variable', 'Variable', (['image'], {'requires_grad': '(False)'}), '(image, requires_grad=False)\n', (3094, 3122), False, 'from torch.autograd import Variable\n'), ((3427, 3465), 'os.path.dirname', 'os.path.dirname', (['self.img_A_names[idx]'], {}), '(self.img_A_names[idx])\n', (3442, 3465), False, 'import os\n'), ((3583, 3637), 'os.path.join', 'os.path.join', (['self.dataset_path', 'img_folder', 'flow_file'], {}), '(self.dataset_path, img_folder, flow_file)\n', (3595, 3637), False, 'import os\n'), ((3804, 3842), 'os.path.dirname', 'os.path.dirname', (['self.img_A_names[idx]'], {}), '(self.img_A_names[idx])\n', (3819, 3842), False, 'import os\n'), ((3960, 3995), 'os.path.join', 'os.path.join', (['img_folder', 'flow_file'], {}), '(img_folder, flow_file)\n', (3972, 3995), False, 'import os\n'), ((2810, 2827), 'numpy.flip', 'np.flip', (['image', '(1)'], {}), '(image, 1)\n', (2817, 2827), True, 'import numpy as np\n'), ((3681, 3710), 'geotnf.flow.read_flo_file', 'read_flo_file', (['flow_file_path'], {}), '(flow_file_path)\n', (3694, 3710), False, 'from geotnf.flow import read_flo_file\n'), ((2682, 2706), 'numpy.expand_dims', 'np.expand_dims', (['image', '(2)'], {}), '(image, 2)\n', (2696, 2706), True, 'import numpy as np\n')]
|
# ~~~
# This file is part of the PhD-thesis:
#
# "Adaptive Reduced Basis Methods for Multiscale Problems
# and Large-scale PDE-constrained Optimization"
#
# by: <NAME>
#
# https://github.com/TiKeil/Supplementary-Material-for-PhD-thesis
#
# Copyright 2019-2022 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# <NAME> (2022)
# ~~~
import numpy as np
from pymor.discretizers.builtin import discretize_stationary_cg
from pymor.analyticalproblems.functions import ConstantFunction, LincombFunction
from pymor.discretizers.builtin.grids.referenceelements import square
from pymor.operators.constructions import VectorOperator, ComponentProjectionOperator
from pymor.operators.numpy import NumpyMatrixOperator
from pymor.discretizers.builtin.cg import (BoundaryDirichletFunctional, L2ProductFunctionalQ1,
L2ProductP1, L2ProductQ1, InterpolationOperator,
BoundaryL2ProductFunctional)
from pymor.operators.constructions import LincombOperator, ZeroOperator
from pymor.parameters.functionals import ConstantParameterFunctional
from pymor.parameters.base import ParametricObject
from pymor.vectorarrays.numpy import NumpyVectorSpace
from pymor.discretizers.builtin.grids.rect import RectGrid
from pdeopt.model import QuadraticPdeoptStationaryModel
from pdeopt.gridlod_model import FEMGridlodModel, GridlodModel
from gridlod import util
from gridlod.world import World
def _construct_mu_bar(problem):
mu_bar = []
for key, size in sorted(problem.parameter_space.parameters.items()):
range_ = problem.parameter_space.ranges[key]
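        # mu_bar component: the geometric mean of the parameter range,
        # computed in log10 space; for a lower bound of 0 this degenerates
        # to sqrt(upper bound)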
if range_[0] == 0:
value = 10**(np.log10(range_[1])/2)
else:
value = 10**((np.log10(range_[0]) + np.log10(range_[1]))/2)
for i in range(size):
mu_bar.append(value)
return problem.parameters.parse(mu_bar)
def discretize_gridlod_fem(problem, fine_diameter):
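    # pymor's 'diameter' denotes the cell diagonal of a square element, so the
    # number of fine intervals per axis is sqrt(2)/fine_diameter (assumption
    # based on the RectGrid convention used throughout this file)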
n = int(1/fine_diameter * np.sqrt(2))
assert n % 2 == 0
N = 2
NFine = np.array([n, n])
NWorldCoarse = np.array([N, N])
g = problem.dirichlet_data
dom = problem.domain
assert not dom.has_robin
a = 0 if dom.left == "dirichlet" else 1
b = 0 if dom.right == "dirichlet" else 1
c = 0 if dom.top == "dirichlet" else 1
d = 0 if dom.bottom == "dirichlet" else 1
boundaryConditions = np.array([[a, b], [c, d]])
NCoarseElement = NFine // NWorldCoarse
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
xt = util.tCoordinates(NFine)
NtFine = xt.shape[0]
xp = util.pCoordinates(NFine)
NpFine = xp.shape[0]
# simplify the data structure
if isinstance(problem.diffusion, LincombFunction):
data = [ZeroOperator(NumpyVectorSpace(1), NumpyVectorSpace(NtFine))]
coefficients = [1.]
for (func, coef) in zip(problem.diffusion.functions, problem.diffusion.coefficients):
if isinstance(coef, ParametricObject):
data.append(NumpyMatrixOperator(func(xt)))
coefficients.append(coef)
else:
data[0] += coef * NumpyMatrixOperator(func(xt))
else:
data = [NumpyMatrixOperator(problem.diffusion(xt))]
coefficients = [1.]
data[0] = data[0].assemble()
lhs_data = LincombOperator(data, coefficients)
if isinstance(problem.rhs, LincombFunction):
data = [ZeroOperator(NumpyVectorSpace(1), NumpyVectorSpace(NpFine))]
coefficients = [1.]
for (func, coef) in zip(problem.rhs.functions, problem.rhs.coefficients):
if isinstance(coef, ParametricObject):
data.append(NumpyMatrixOperator(func(xp)))
coefficients.append(coef)
else:
data[0] += coef * NumpyMatrixOperator(func(xp))
else:
data = [NumpyMatrixOperator(problem.rhs(xp))]
coefficients = [1.]
data[0] = data[0].assemble()
rhs_data = LincombOperator(data, coefficients)
fem_with_gridlod = FEMGridlodModel(lhs_data, rhs_data, boundaryConditions, world, g)
return fem_with_gridlod
def discretize_gridlod(problem, fine_diameter, coarse_elements, pool=None, counter=None, save_correctors=True,
store_in_tmp=False, mu_energy_product=None, use_fine_mesh=True, aFine_constructor=None,
print_on_ranks=True, construct_aFine_globally=False):
n = int(1/fine_diameter * np.sqrt(2))
assert n % coarse_elements == 0
N = coarse_elements
coarse_diameter = 1./N * np.sqrt(2) + 1e-8
coarse_pymor_model, coarse_data = discretize_stationary_cg(problem, diameter=coarse_diameter, grid_type=RectGrid,
preassemble=False)
coarse_grid = coarse_data['grid']
assert coarse_grid.num_intervals[0] == N
coarse_pymor_rhs = coarse_pymor_model.rhs
ops, coefs = [], []
for op, coef in zip(coarse_pymor_rhs.operators, coarse_pymor_rhs.coefficients):
if isinstance(op, L2ProductFunctionalQ1):
ops.append(op.with_(dirichlet_clear_dofs=False))
coefs.append(coef)
elif isinstance(op, BoundaryDirichletFunctional):
pass
elif isinstance(op, BoundaryL2ProductFunctional):
pass
else:
assert 0, "this should not happen!"
filtered_coarse_pymor_rhs = LincombOperator(ops, coefs)
NFine = np.array([n, n])
NWorldCoarse = np.array([N, N])
# g = problem.dirichlet_data
g = None
dom = problem.domain
assert not dom.has_robin
a = 0 if dom.left == "dirichlet" else 1
b = 0 if dom.right == "dirichlet" else 1
c = 0 if dom.top == "dirichlet" else 1
d = 0 if dom.bottom == "dirichlet" else 1
boundaryConditions = np.array([[a, b], [c, d]])
assert np.sum(boundaryConditions) == 0, 'The other cases are not tested at the moment!!'
NCoarseElement = NFine // NWorldCoarse
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
middle_coarse_index = np.prod(world.NWorldCoarse) // 2 + world.NWorldCoarse[0] // 2
from gridlod.world import Patch
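    # localization (oversampling) parameter of the LOD method:
    # k = ceil(|log(H)|) with H = sqrt(2 * (1/N)^2) the coarse cell diagonal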
k = int(np.ceil(np.abs(np.log(np.sqrt(2 * (1.0 / world.NWorldCoarse[0] ** 2))))))
print(f"max fine dofs per patch: {Patch(world, k, middle_coarse_index).len_fine} with k={k}\n")
# assert 0
if use_fine_mesh:
xt = util.tCoordinates(NFine)
NtFine = xt.shape[0]
xp = util.pCoordinates(NFine)
NpFine = xp.shape[0]
if construct_aFine_globally:
# extract data for gridlod model
if isinstance(problem.diffusion, LincombFunction):
data = []
for func in problem.diffusion.functions:
op = NumpyMatrixOperator(func(xt))
data.append(op)
coefs = problem.diffusion.coefficients
else:
data = [NumpyMatrixOperator(problem.diffusion(xt))]
coefs = [1.]
lhs_data = LincombOperator(data, coefs)
else:
lhs_data = None
if isinstance(problem.rhs, LincombFunction):
data = []
for func in problem.rhs.functions:
data.append(NumpyMatrixOperator(func(xp)))
coefs = problem.rhs.coefficients
else:
data = [NumpyMatrixOperator(problem.rhs(xp))]
coefs = [1.]
rhs_data = LincombOperator(data, coefs)
else:
lhs_data, rhs_data = None, None
gridlod_model = GridlodModel(lhs_data, rhs_data, boundaryConditions, world,
g, pool, counter, save_correctors=save_correctors,
coarse_pymor_rhs=filtered_coarse_pymor_rhs,
store_in_tmp=store_in_tmp, use_fine_mesh=use_fine_mesh,
aFine_local_constructor=aFine_constructor,
parameters=problem.parameters,
aFineCoefficients=problem.diffusion.coefficients,
print_on_ranks=print_on_ranks)
if mu_energy_product:
# we have to do this one more time with preassemble=True. it is not too expensive since it is a coarse discretizer
coarse_pymor_model, _ = discretize_stationary_cg(problem, diameter=coarse_diameter,
grid_type=RectGrid,
preassemble=True,
mu_energy_product=mu_energy_product)
coarse_product = coarse_pymor_model.products['energy']
else:
coarse_product = None
return gridlod_model, coarse_grid, coarse_pymor_model, coarse_product, coarse_data['boundary_info']
def discretize_quadratic_pdeopt_with_gridlod(problem, diameter=np.sqrt(2)/200., coarse_elements=2, weights=None,
domain_of_interest=None, desired_temperature=None, mu_for_u_d=None,
mu_for_tikhonov=None, pool=None, counter=None, save_correctors=True,
store_in_tmp=False, coarse_J=False, use_fine_mesh=True,
aFine_constructor=None, u_d=None, print_on_ranks=True):
mu_bar = _construct_mu_bar(problem)
if use_fine_mesh:
primal_fom, data = discretize_stationary_cg(problem, diameter=diameter,
grid_type=RectGrid, mu_energy_product=mu_bar)
gridlod_fom, coarse_grid, coarse_model, coarse_opt_product, coarse_bi = discretize_gridlod(
problem, diameter, coarse_elements, pool, counter, save_correctors,
store_in_tmp=store_in_tmp, mu_energy_product=mu_bar, use_fine_mesh=use_fine_mesh,
aFine_constructor=aFine_constructor, print_on_ranks=print_on_ranks)
coarse_space = coarse_model.solution_space
if use_fine_mesh:
grid = data['grid']
else:
grid = coarse_grid
data = {'grid': coarse_grid}
d = grid.dim
# prepare data functions
domain_of_interest = domain_of_interest or ConstantFunction(1., d)
if u_d is None:
u_desired = ConstantFunction(desired_temperature, d) if desired_temperature is not None else None
if mu_for_u_d is not None:
            modified_mu = mu_for_u_d.copy()
            for key in mu_for_u_d.keys():
                if len(mu_for_u_d[key]) == 0:
                    modified_mu.pop(key)
            if use_fine_mesh:
                u_d = primal_fom.solve(modified_mu)
            else:
                u_d = gridlod_fom.solve(modified_mu)
else:
assert desired_temperature is not None
u_d = InterpolationOperator(grid, u_desired).as_vector()
if grid.reference_element is square:
L2_OP = L2ProductQ1
else:
L2_OP = L2ProductP1
Restricted_L2_OP_on_coarse = L2_OP(coarse_grid, coarse_bi, dirichlet_clear_rows=False,
coefficient_function=domain_of_interest)
if use_fine_mesh:
coarse_proj = ComponentProjectionOperator(gridlod_fom.CoarseDofsInFine, primal_fom.solution_space,
range_id=coarse_space.id)
Restricted_L2_OP_coarse = ComponentProjectionFromBothSides(Restricted_L2_OP_on_coarse, coarse_proj)
u_d_on_coarse = coarse_proj.apply(u_d)
else:
coarse_proj = None
if coarse_J:
if use_fine_mesh:
Restricted_L2_OP = Restricted_L2_OP_coarse
else:
Restricted_L2_OP = Restricted_L2_OP_on_coarse
else:
Restricted_L2_OP = L2_OP(grid, data['boundary_info'], dirichlet_clear_rows=False,
coefficient_function=domain_of_interest)
coarse_proj = None
l2_u_d_squared = Restricted_L2_OP.apply2(u_d, u_d)[0][0]
constant_part = 0.5 * l2_u_d_squared
# assemble output functional
from pdeopt.theta import build_output_coefficient
if weights is not None:
weight_for_J = weights.pop('sigma_u')
else:
weight_for_J = 1.
state_functional = ConstantParameterFunctional(weight_for_J)
if mu_for_tikhonov:
if mu_for_u_d is not None:
mu_for_tikhonov = mu_for_u_d
else:
assert isinstance(mu_for_tikhonov, dict)
output_coefficient = build_output_coefficient(gridlod_fom.parameters, weights, mu_for_tikhonov,
None, state_functional, constant_part)
output_functional = {}
output_functional['output_coefficient'] = output_coefficient
output_functional['linear_part'] = LincombOperator(
[VectorOperator(Restricted_L2_OP.apply(u_d))],[-state_functional]) # j(.)
output_functional['bilinear_part'] = LincombOperator(
[Restricted_L2_OP],[0.5*state_functional]) # k(.,.)
output_functional['d_u_linear_part'] = LincombOperator(
[VectorOperator(Restricted_L2_OP.apply(u_d))],[-state_functional]) # j(.)
output_functional['d_u_bilinear_part'] = LincombOperator(
[Restricted_L2_OP], [state_functional]) # 2k(.,.)
if use_fine_mesh:
output_functional['linear_part_coarse'] = LincombOperator(
[VectorOperator(Restricted_L2_OP_coarse.apply(u_d))],[-state_functional]) # j(.)
output_functional['bilinear_part_coarse'] = LincombOperator(
[Restricted_L2_OP_coarse],[0.5*state_functional]) # k(.,.)
output_functional['d_u_linear_part_coarse'] = LincombOperator(
[VectorOperator(Restricted_L2_OP_coarse.apply(u_d))],[-state_functional]) # j(.)
output_functional['d_u_bilinear_part_coarse'] = LincombOperator(
[Restricted_L2_OP_coarse], [state_functional]) # 2k(.,.)
output_functional['linear_part_coarse_full'] = LincombOperator(
[VectorOperator(Restricted_L2_OP_on_coarse.apply(u_d_on_coarse))],[-state_functional]) # j(.)
output_functional['bilinear_part_coarse_full'] = LincombOperator(
[Restricted_L2_OP_on_coarse],[0.5*state_functional]) # k(.,.)
output_functional['d_u_linear_part_coarse_full'] = LincombOperator(
[VectorOperator(Restricted_L2_OP_on_coarse.apply(u_d_on_coarse))],[-state_functional]) # j(.)
output_functional['d_u_bilinear_part_coarse_full'] = LincombOperator(
[Restricted_L2_OP_on_coarse], [state_functional]) # 2k(.,.)
output_functional['coarse_opt_product'] = coarse_opt_product
C = domain_of_interest(grid.centers(2)) # <== these are the vertices!
C = np.nonzero(C)[0]
doI = ComponentProjectionOperator(C, Restricted_L2_OP.source)
output_functional['sigma_u'] = state_functional
output_functional['u_d'] = u_d
output_functional['DoI'] = doI
if use_fine_mesh:
opt_product = primal_fom.energy_product # energy w.r.t. mu_bar (see above)
primal_fom = primal_fom.with_(products=dict(opt=opt_product, **primal_fom.products))
else:
primal_fom = None
opt_product = coarse_opt_product
fom = primal_fom or coarse_model
pde_opt_fom = QuadraticPdeoptStationaryModel(fom, output_functional, opt_product=opt_product,
use_corrected_functional=False, adjoint_approach=False,
optional_forward_model=gridlod_fom,
coarse_projection=coarse_proj,
fine_prolongation=None
)
return pde_opt_fom, data, mu_bar
from pymor.operators.constructions import ConcatenationOperator
class ComponentProjectionFromBothSides(ConcatenationOperator):
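    """Apply `comp_proj` to both sides of `operator`: plain application
    projects U via the concatenation, while apply2/apply_adjoint additionally
    project V, so the product can be evaluated on coarse-space vectors."""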
def __init__(self, operator, comp_proj):
super().__init__([operator, comp_proj])
self.range = comp_proj.source
self.__auto_init(locals())
def apply2(self, V, U, mu=None):
return super().apply2(self.comp_proj.apply(V), U, mu)
def apply_adjoint(self, V, mu=None):
return super().apply_adjoint(self.comp_proj.apply(V), mu)
|
[
"numpy.sum",
"pymor.discretizers.builtin.discretize_stationary_cg",
"pymor.vectorarrays.numpy.NumpyVectorSpace",
"numpy.prod",
"pymor.parameters.functionals.ConstantParameterFunctional",
"pdeopt.gridlod_model.GridlodModel",
"pdeopt.model.QuadraticPdeoptStationaryModel",
"numpy.log10",
"pdeopt.gridlod_model.FEMGridlodModel",
"pymor.analyticalproblems.functions.ConstantFunction",
"gridlod.world.World",
"pymor.operators.constructions.LincombOperator",
"gridlod.world.Patch",
"pdeopt.theta.build_output_coefficient",
"gridlod.util.pCoordinates",
"pymor.discretizers.builtin.cg.InterpolationOperator",
"gridlod.util.tCoordinates",
"numpy.nonzero",
"numpy.array",
"pymor.operators.constructions.ComponentProjectionOperator",
"numpy.sqrt"
] |
[((2150, 2166), 'numpy.array', 'np.array', (['[n, n]'], {}), '([n, n])\n', (2158, 2166), True, 'import numpy as np\n'), ((2186, 2202), 'numpy.array', 'np.array', (['[N, N]'], {}), '([N, N])\n', (2194, 2202), True, 'import numpy as np\n'), ((2493, 2519), 'numpy.array', 'np.array', (['[[a, b], [c, d]]'], {}), '([[a, b], [c, d]])\n', (2501, 2519), True, 'import numpy as np\n'), ((2576, 2631), 'gridlod.world.World', 'World', (['NWorldCoarse', 'NCoarseElement', 'boundaryConditions'], {}), '(NWorldCoarse, NCoarseElement, boundaryConditions)\n', (2581, 2631), False, 'from gridlod.world import World\n'), ((2642, 2666), 'gridlod.util.tCoordinates', 'util.tCoordinates', (['NFine'], {}), '(NFine)\n', (2659, 2666), False, 'from gridlod import util\n'), ((2701, 2725), 'gridlod.util.pCoordinates', 'util.pCoordinates', (['NFine'], {}), '(NFine)\n', (2718, 2725), False, 'from gridlod import util\n'), ((3420, 3455), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['data', 'coefficients'], {}), '(data, coefficients)\n', (3435, 3455), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((4067, 4102), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['data', 'coefficients'], {}), '(data, coefficients)\n', (4082, 4102), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((4127, 4192), 'pdeopt.gridlod_model.FEMGridlodModel', 'FEMGridlodModel', (['lhs_data', 'rhs_data', 'boundaryConditions', 'world', 'g'], {}), '(lhs_data, rhs_data, boundaryConditions, world, g)\n', (4142, 4192), False, 'from pdeopt.gridlod_model import FEMGridlodModel, GridlodModel\n'), ((4711, 4814), 'pymor.discretizers.builtin.discretize_stationary_cg', 'discretize_stationary_cg', (['problem'], {'diameter': 'coarse_diameter', 'grid_type': 'RectGrid', 'preassemble': '(False)'}), '(problem, diameter=coarse_diameter, grid_type=\n RectGrid, preassemble=False)\n', (4735, 4814), False, 'from pymor.discretizers.builtin import discretize_stationary_cg\n'), ((5497, 5524), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['ops', 'coefs'], {}), '(ops, coefs)\n', (5512, 5524), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((5538, 5554), 'numpy.array', 'np.array', (['[n, n]'], {}), '([n, n])\n', (5546, 5554), True, 'import numpy as np\n'), ((5574, 5590), 'numpy.array', 'np.array', (['[N, N]'], {}), '([N, N])\n', (5582, 5590), True, 'import numpy as np\n'), ((5896, 5922), 'numpy.array', 'np.array', (['[[a, b], [c, d]]'], {}), '([[a, b], [c, d]])\n', (5904, 5922), True, 'import numpy as np\n'), ((6072, 6127), 'gridlod.world.World', 'World', (['NWorldCoarse', 'NCoarseElement', 'boundaryConditions'], {}), '(NWorldCoarse, NCoarseElement, boundaryConditions)\n', (6077, 6127), False, 'from gridlod.world import World\n'), ((7639, 8027), 'pdeopt.gridlod_model.GridlodModel', 'GridlodModel', (['lhs_data', 'rhs_data', 'boundaryConditions', 'world', 'g', 'pool', 'counter'], {'save_correctors': 'save_correctors', 'coarse_pymor_rhs': 'filtered_coarse_pymor_rhs', 'store_in_tmp': 'store_in_tmp', 'use_fine_mesh': 'use_fine_mesh', 'aFine_local_constructor': 'aFine_constructor', 'parameters': 'problem.parameters', 'aFineCoefficients': 'problem.diffusion.coefficients', 'print_on_ranks': 'print_on_ranks'}), '(lhs_data, rhs_data, boundaryConditions, world, g, pool,\n counter, save_correctors=save_correctors, coarse_pymor_rhs=\n filtered_coarse_pymor_rhs, store_in_tmp=store_in_tmp, use_fine_mesh=\n use_fine_mesh, 
aFine_local_constructor=aFine_constructor, parameters=\n problem.parameters, aFineCoefficients=problem.diffusion.coefficients,\n print_on_ranks=print_on_ranks)\n', (7651, 8027), False, 'from pdeopt.gridlod_model import FEMGridlodModel, GridlodModel\n'), ((12379, 12420), 'pymor.parameters.functionals.ConstantParameterFunctional', 'ConstantParameterFunctional', (['weight_for_J'], {}), '(weight_for_J)\n', (12406, 12420), False, 'from pymor.parameters.functionals import ConstantParameterFunctional\n'), ((12614, 12731), 'pdeopt.theta.build_output_coefficient', 'build_output_coefficient', (['gridlod_fom.parameters', 'weights', 'mu_for_tikhonov', 'None', 'state_functional', 'constant_part'], {}), '(gridlod_fom.parameters, weights, mu_for_tikhonov,\n None, state_functional, constant_part)\n', (12638, 12731), False, 'from pdeopt.theta import build_output_coefficient\n'), ((13056, 13117), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['[Restricted_L2_OP]', '[0.5 * state_functional]'], {}), '([Restricted_L2_OP], [0.5 * state_functional])\n', (13071, 13117), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((13354, 13409), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['[Restricted_L2_OP]', '[state_functional]'], {}), '([Restricted_L2_OP], [state_functional])\n', (13369, 13409), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((15088, 15143), 'pymor.operators.constructions.ComponentProjectionOperator', 'ComponentProjectionOperator', (['C', 'Restricted_L2_OP.source'], {}), '(C, Restricted_L2_OP.source)\n', (15115, 15143), False, 'from pymor.operators.constructions import VectorOperator, ComponentProjectionOperator\n'), ((15631, 15870), 'pdeopt.model.QuadraticPdeoptStationaryModel', 'QuadraticPdeoptStationaryModel', (['fom', 'output_functional'], {'opt_product': 'opt_product', 'use_corrected_functional': '(False)', 'adjoint_approach': '(False)', 'optional_forward_model': 'gridlod_fom', 'coarse_projection': 'coarse_proj', 'fine_prolongation': 'None'}), '(fom, output_functional, opt_product=\n opt_product, use_corrected_functional=False, adjoint_approach=False,\n optional_forward_model=gridlod_fom, coarse_projection=coarse_proj,\n fine_prolongation=None)\n', (15661, 15870), False, 'from pdeopt.model import QuadraticPdeoptStationaryModel\n'), ((5934, 5960), 'numpy.sum', 'np.sum', (['boundaryConditions'], {}), '(boundaryConditions)\n', (5940, 5960), True, 'import numpy as np\n'), ((6492, 6516), 'gridlod.util.tCoordinates', 'util.tCoordinates', (['NFine'], {}), '(NFine)\n', (6509, 6516), False, 'from gridlod import util\n'), ((6559, 6583), 'gridlod.util.pCoordinates', 'util.pCoordinates', (['NFine'], {}), '(NFine)\n', (6576, 6583), False, 'from gridlod import util\n'), ((7539, 7567), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['data', 'coefs'], {}), '(data, coefs)\n', (7554, 7567), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((8418, 8557), 'pymor.discretizers.builtin.discretize_stationary_cg', 'discretize_stationary_cg', (['problem'], {'diameter': 'coarse_diameter', 'grid_type': 'RectGrid', 'preassemble': '(True)', 'mu_energy_product': 'mu_energy_product'}), '(problem, diameter=coarse_diameter, grid_type=\n RectGrid, preassemble=True, mu_energy_product=mu_energy_product)\n', (8442, 8557), False, 'from pymor.discretizers.builtin import discretize_stationary_cg\n'), ((9027, 9037), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), 
'(2)\n', (9034, 9037), True, 'import numpy as np\n'), ((9596, 9698), 'pymor.discretizers.builtin.discretize_stationary_cg', 'discretize_stationary_cg', (['problem'], {'diameter': 'diameter', 'grid_type': 'RectGrid', 'mu_energy_product': 'mu_bar'}), '(problem, diameter=diameter, grid_type=RectGrid,\n mu_energy_product=mu_bar)\n', (9620, 9698), False, 'from pymor.discretizers.builtin import discretize_stationary_cg\n'), ((10352, 10376), 'pymor.analyticalproblems.functions.ConstantFunction', 'ConstantFunction', (['(1.0)', 'd'], {}), '(1.0, d)\n', (10368, 10376), False, 'from pymor.analyticalproblems.functions import ConstantFunction, LincombFunction\n'), ((11330, 11445), 'pymor.operators.constructions.ComponentProjectionOperator', 'ComponentProjectionOperator', (['gridlod_fom.CoarseDofsInFine', 'primal_fom.solution_space'], {'range_id': 'coarse_space.id'}), '(gridlod_fom.CoarseDofsInFine, primal_fom.\n solution_space, range_id=coarse_space.id)\n', (11357, 11445), False, 'from pymor.operators.constructions import VectorOperator, ComponentProjectionOperator\n'), ((13701, 13769), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['[Restricted_L2_OP_coarse]', '[0.5 * state_functional]'], {}), '([Restricted_L2_OP_coarse], [0.5 * state_functional])\n', (13716, 13769), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((14043, 14105), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['[Restricted_L2_OP_coarse]', '[state_functional]'], {}), '([Restricted_L2_OP_coarse], [state_functional])\n', (14058, 14105), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((14402, 14473), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['[Restricted_L2_OP_on_coarse]', '[0.5 * state_functional]'], {}), '([Restricted_L2_OP_on_coarse], [0.5 * state_functional])\n', (14417, 14473), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((14780, 14845), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['[Restricted_L2_OP_on_coarse]', '[state_functional]'], {}), '([Restricted_L2_OP_on_coarse], [state_functional])\n', (14795, 14845), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((15061, 15074), 'numpy.nonzero', 'np.nonzero', (['C'], {}), '(C)\n', (15071, 15074), True, 'import numpy as np\n'), ((2093, 2103), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2100, 2103), True, 'import numpy as np\n'), ((4553, 4563), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4560, 4563), True, 'import numpy as np\n'), ((4655, 4665), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4662, 4665), True, 'import numpy as np\n'), ((6155, 6182), 'numpy.prod', 'np.prod', (['world.NWorldCoarse'], {}), '(world.NWorldCoarse)\n', (6162, 6182), True, 'import numpy as np\n'), ((7125, 7153), 'pymor.operators.constructions.LincombOperator', 'LincombOperator', (['data', 'coefs'], {}), '(data, coefs)\n', (7140, 7153), False, 'from pymor.operators.constructions import LincombOperator, ZeroOperator\n'), ((10417, 10457), 'pymor.analyticalproblems.functions.ConstantFunction', 'ConstantFunction', (['desired_temperature', 'd'], {}), '(desired_temperature, d)\n', (10433, 10457), False, 'from pymor.analyticalproblems.functions import ConstantFunction, LincombFunction\n'), ((2870, 2889), 'pymor.vectorarrays.numpy.NumpyVectorSpace', 'NumpyVectorSpace', (['(1)'], {}), '(1)\n', (2886, 2889), False, 'from pymor.vectorarrays.numpy import 
NumpyVectorSpace\n'), ((2891, 2915), 'pymor.vectorarrays.numpy.NumpyVectorSpace', 'NumpyVectorSpace', (['NtFine'], {}), '(NtFine)\n', (2907, 2915), False, 'from pymor.vectorarrays.numpy import NumpyVectorSpace\n'), ((3535, 3554), 'pymor.vectorarrays.numpy.NumpyVectorSpace', 'NumpyVectorSpace', (['(1)'], {}), '(1)\n', (3551, 3554), False, 'from pymor.vectorarrays.numpy import NumpyVectorSpace\n'), ((3556, 3580), 'pymor.vectorarrays.numpy.NumpyVectorSpace', 'NumpyVectorSpace', (['NpFine'], {}), '(NpFine)\n', (3572, 3580), False, 'from pymor.vectorarrays.numpy import NumpyVectorSpace\n'), ((1794, 1813), 'numpy.log10', 'np.log10', (['range_[1]'], {}), '(range_[1])\n', (1802, 1813), True, 'import numpy as np\n'), ((6287, 6334), 'numpy.sqrt', 'np.sqrt', (['(2 * (1.0 / world.NWorldCoarse[0] ** 2))'], {}), '(2 * (1.0 / world.NWorldCoarse[0] ** 2))\n', (6294, 6334), True, 'import numpy as np\n'), ((6380, 6416), 'gridlod.world.Patch', 'Patch', (['world', 'k', 'middle_coarse_index'], {}), '(world, k, middle_coarse_index)\n', (6385, 6416), False, 'from gridlod.world import Patch\n'), ((10955, 10993), 'pymor.discretizers.builtin.cg.InterpolationOperator', 'InterpolationOperator', (['grid', 'u_desired'], {}), '(grid, u_desired)\n', (10976, 10993), False, 'from pymor.discretizers.builtin.cg import BoundaryDirichletFunctional, L2ProductFunctionalQ1, L2ProductP1, L2ProductQ1, InterpolationOperator, BoundaryL2ProductFunctional\n'), ((1857, 1876), 'numpy.log10', 'np.log10', (['range_[0]'], {}), '(range_[0])\n', (1865, 1876), True, 'import numpy as np\n'), ((1879, 1898), 'numpy.log10', 'np.log10', (['range_[1]'], {}), '(range_[1])\n', (1887, 1898), True, 'import numpy as np\n')]
|
# Code by <NAME>
# TODO
# [X] - Generate a random sample with mean = 5, std. dev. = 2.
# [X] - Plot the distribution.
# [X] - Give the summary statistics
import numpy as np
import matplotlib.pyplot as plt
import random
# Input number of samples
numberSamples = int(input("Enter number of samples in the sample list: "))
# Generate sample list
sampleList = [random.normalvariate(5, 2) for x in range(numberSamples)]
# Printing details
print('\nGenerated sample list containing {} elements: '.format(numberSamples))
print(sampleList)
print('\n\nCalculated Mean = {:.3f}\nRounded Calculated Mean = {}\n\nCalculated Std. Deviation = {:.3f}\nRounded Calculated Std. Deviation = {}'.format(
np.mean(sampleList), round(np.mean(sampleList)), np.std(sampleList), round(np.std(sampleList))
)
)
plt.hist(sampleList)
plt.show()
# Reference:
# https://stackoverflow.com/questions/51515423/generate-sample-data-with-an-exact-mean-and-standard-deviation
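# A minimal sketch of the technique discussed in the reference above
# (rescaling a sample to an *exact* mean and standard deviation);
# illustrative only, not part of the original script:
# sample = np.array(sampleList)
# exact = (sample - sample.mean()) / sample.std() * 2 + 5
# print(np.mean(exact), np.std(exact))  # 5.0 and 2.0 up to float rounding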
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"random.normalvariate",
"numpy.std",
"numpy.mean"
] |
[((800, 820), 'matplotlib.pyplot.hist', 'plt.hist', (['sampleList'], {}), '(sampleList)\n', (808, 820), True, 'import matplotlib.pyplot as plt\n'), ((821, 831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (829, 831), True, 'import matplotlib.pyplot as plt\n'), ((362, 388), 'random.normalvariate', 'random.normalvariate', (['(5)', '(2)'], {}), '(5, 2)\n', (382, 388), False, 'import random\n'), ((695, 714), 'numpy.mean', 'np.mean', (['sampleList'], {}), '(sampleList)\n', (702, 714), True, 'import numpy as np\n'), ((744, 762), 'numpy.std', 'np.std', (['sampleList'], {}), '(sampleList)\n', (750, 762), True, 'import numpy as np\n'), ((722, 741), 'numpy.mean', 'np.mean', (['sampleList'], {}), '(sampleList)\n', (729, 741), True, 'import numpy as np\n'), ((770, 788), 'numpy.std', 'np.std', (['sampleList'], {}), '(sampleList)\n', (776, 788), True, 'import numpy as np\n')]
|
import cv2
import glob
import numpy as np
np.set_printoptions(threshold=np.inf)  # np.nan is not a valid threshold
images = []
images_name = glob.glob('sample/*')
for index, image in enumerate(images_name):
print('Reading ' + str(index) + ' of ' + str(len(images_name)))
img = cv2.imread(image)
img = cv2.resize(img, (1024, 768), interpolation=cv2.INTER_AREA)
height, width, depth = img.shape
norm = img.copy()
    b, g, r = cv2.split(img)
    # cast to float so the per-pixel channel sum does not overflow uint8
    channel_sum = b.astype(np.float32) + g + r
    channel_sum[channel_sum == 0] = 1  # avoid division by zero on black pixels
    norm[:, :, 0] = b / channel_sum * 255.0
    norm[:, :, 1] = g / channel_sum * 255.0
    norm[:, :, 2] = r / channel_sum * 255.0
# if(index == len(images_name) - 1):
cv2.imshow(image, norm)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"numpy.set_printoptions",
"cv2.waitKey",
"cv2.imshow",
"cv2.imread",
"cv2.split",
"glob.glob",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((43, 80), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (62, 80), True, 'import numpy as np\n'), ((121, 142), 'glob.glob', 'glob.glob', (['"""sample/*"""'], {}), "('sample/*')\n", (130, 142), False, 'import glob\n'), ((625, 639), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (636, 639), False, 'import cv2\n'), ((640, 663), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (661, 663), False, 'import cv2\n'), ((266, 283), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (276, 283), False, 'import cv2\n'), ((294, 352), 'cv2.resize', 'cv2.resize', (['img', '(1024, 768)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (1024, 768), interpolation=cv2.INTER_AREA)\n', (304, 352), False, 'import cv2\n'), ((428, 442), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (437, 442), False, 'import cv2\n'), ((600, 623), 'cv2.imshow', 'cv2.imshow', (['image', 'norm'], {}), '(image, norm)\n', (610, 623), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime
import os
import re
import h5py
from lib.pb_io import attrs2dict
from lib.pb_sat import planck_r2t
from lib.read_base import ReadL1
import numpy as np
import pandas as pd
__description__ = 'MERSI sensor reader class'
__author__ = 'wangpeng'
__date__ = '2018-08-28'
__version__ = '1.0.0_beat'
g_main_path, g_main_file = os.path.split(os.path.realpath(__file__))
class ReadMersiL1(ReadL1):
"""
    Read L1 data from the MERSI sensor
    Resolution: 1000 m
    Satellites: [FY3A FY3B FY3C]
    Number of channels: 20
    Visible channels: 1, 2, 3, 4, 6-20
    Infrared channel: 5
    Resolution: 1000 m
    Satellite: [FY3D]
    Number of channels: 25
    Visible channels: 1-19
    Infrared channels: 20-25
    Resolution: 250
    Satellites:
    Number of channels:
    Visible channels:
    Infrared channels:
"""
def __init__(self, in_file, geo_file=None, cloud_file=None, in_ir_file=None, in_vis_file=None, coef_txt_flag=None):
sensor = 'MERSI'
self.in_ir_file = in_ir_file
self.in_vis_file = in_vis_file
self.coef_txt_flag = coef_txt_flag
super(ReadMersiL1, self).__init__(in_file, sensor)
self.geo_file = geo_file
self.cloud_file = cloud_file
def set_resolution(self):
"""
use filename set self.resolution
:return:
"""
file_name = os.path.basename(self.in_file)
if '1000M' in file_name:
self.resolution = 1000
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
def set_satellite(self):
"""
use filename set self.satellite
:return:
"""
file_name = os.path.basename(self.in_file)
pattern = r'([A-Z0-9]+)_%s.*' % self.sensor
m = re.match(pattern, file_name)
if m:
self.satellite = m.groups()[0]
else:
raise ValueError('Cant get the satellite name from file name.')
def set_ymd_hms(self):
"""
use filename set self.ymd self.hms
"""
file_name = os.path.basename(self.in_file)
        pat = r'\w{4}_\w{5}_\w{4}_L1_(\d{8})_(\d{4})_\w{5}_MS\.HDF$'
g = re.match(pat, file_name)
if g:
self.ymd = g.group(1)
self.hms = g.group(2) + '00'
else:
raise ValueError('Cant get the ymdhms from file name.')
def set_file_attr(self):
"""
get hdf5 file attrs self.file_attr
:return:
"""
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C', 'FY3D']
if self.satellite in satellite_type1:
with h5py.File(self.in_file, 'r') as h5r:
self.file_attr = attrs2dict(h5r.attrs)
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
"Cant handle this resolution: ".format(self.resolution))
def set_data_shape(self):
"""
use dataset set self.data_shape
:return:
"""
        # if the resolution is 1000 m
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C', 'FY3D']
if self.satellite in satellite_type1:
self.data_shape = (2000, 2048)
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
# elif self.resolution == 250:
else:
raise ValueError(
"Cant handle this resolution: ".format(self.resolution))
def set_channels(self):
"""
return sensor channels
"""
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
satellite_type2 = ['FY3D']
if self.satellite in satellite_type1:
self.channels = 20
elif self.satellite in satellite_type2:
self.channels = 25
# elif self.resolution == 250:
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
def __get_geo_file(self):
"""
        return the geolocation file
"""
if self.geo_file is not None:
return self.geo_file
else:
if self.resolution == 1000:
satellite_type1 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
geo_file = self.in_file[:-12] + 'GEO1K_MS.HDF'
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
"Cant handle this resolution: ".format(self.resolution))
return geo_file
def __get_clm_file(self):
"""
        return the cloud mask (CLM) file
"""
if self.cloud_file is not None:
return self.cloud_file
else:
if self.resolution == 1000:
satellite_type1 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
clm_file = self.in_file.replace(
'GBAL_L1', 'ORBT_L2_CLM_MLT_NUL')
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
"Cant handle this resolution: ".format(self.resolution))
return clm_file
def get_cloudmask(self):
data = None
clm_flag = np.full(self.data_shape, -999)
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C', 'FY3D']
if self.satellite in satellite_type1:
in_file = self.__get_clm_file()
with h5py.File(in_file, 'r') as h5r:
data_pre = h5r.get('Cloud_Mask')[:]
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
        # filter invalid values
        z = data_pre[0, :, :]
        # 0 means invalid
z0 = z & 0b1
z12 = (z >> 1) & 0b11
z4 = (z >> 4) & 0b1
z67 = (z >> 6) & 0b11
# Invalid 0
mask = (z == 0)
idx = np.where(mask)
clm_flag[idx] = 0
# Coastlines
mask = (z67 == 0b01)
idx = np.where(mask)
clm_flag[idx] = 1
# Uncertain
mask = (z12 == 0b01) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 2
# Cloud
mask = (z12 == 0b00) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 3
# Poss Land Clear
mask = ((z67 == 0b11) | (z67 == 0b10)) & (
z12 == 0b10) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 4
# Land Clear
mask = ((z67 == 0b11) | (z67 == 0b10)) & (
z12 == 0b11) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 5
# Poss Sea Clear
mask = (z67 == 0b00) & (z12 == 0b10) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 6
# Sea Clear
mask = (z67 == 0b00) & (z12 == 0b11) & (z4 == 0b1) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 7
# Sun Glint
mask = (z67 == 0b00) & (z12 == 0b11) & (z4 == 0b0) & (z0 == 0b1)
idx = np.where(mask)
clm_flag[idx] = 8
data = clm_flag
return data
def get_dn(self):
"""
return DN
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C']
satellite_type3 = ['FY3D']
if self.satellite in satellite_type1:
data_file = self.in_file
with h5py.File(data_file, 'r') as h5r:
ary_ch1 = h5r.get('/EV_250_Aggr.1KM_RefSB')[:]
ary_ch5 = h5r.get('/EV_250_Aggr.1KM_Emissive')[:]
ary_ch6 = h5r.get('/EV_1KM_RefSB')[:]
vmin = 0
vmax = 10000
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 4:
k = i
data_pre = ary_ch1[k]
                    # start processing
elif i == 4:
data_pre = ary_ch5
else:
k = i - 5
data_pre = ary_ch6[k]
data_pre = data_pre.astype(np.float32)
invalid_index = np.logical_or(
data_pre <= vmin, data_pre > vmax)
data_pre[invalid_index] = np.nan
data[band] = data_pre
elif self.satellite in satellite_type2:
data_file = self.in_file
with h5py.File(data_file, 'r') as h5r:
ary_ch1 = h5r.get('/Data/EV_250_Aggr.1KM_RefSB')[:]
ary_ch5 = h5r.get('/Data/EV_250_Aggr.1KM_Emissive')[:]
ary_ch6 = h5r.get('/Data/EV_1KM_RefSB')[:]
vmin = 0
vmax = 10000
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 4:
k = i
data_pre = ary_ch1[k]
                    # start processing
elif i == 4:
data_pre = ary_ch5
else:
k = i - 5
data_pre = ary_ch6[k]
data_pre = data_pre.astype(np.float32)
invalid_index = np.logical_or(
data_pre <= vmin, data_pre > vmax)
data_pre[invalid_index] = np.nan
data[band] = data_pre
elif self.satellite in satellite_type3:
data_file = self.in_file
with h5py.File(data_file, 'r') as h5r:
ary_ch1_4 = h5r.get('/Data/EV_250_Aggr.1KM_RefSB')[:]
ary_ch5_19 = h5r.get('/Data/EV_1KM_RefSB')[:]
ary_ch20_23 = h5r.get('/Data/EV_1KM_Emissive')[:]
ary_ch24_25 = h5r.get('/Data/EV_250_Aggr.1KM_Emissive')[:]
vmin = 0
vmax = 65000
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 4:
k = i
data_pre = ary_ch1_4[k]
                    # start processing
elif i >= 4 and i < 19:
k = i - 4
data_pre = ary_ch5_19[k]
elif i >= 19 and i < 23:
k = i - 19
data_pre = ary_ch20_23[k]
else:
k = i - 23
data_pre = ary_ch24_25[k]
data_pre = data_pre.astype(np.float32)
invalid_index = np.logical_or(
data_pre <= vmin, data_pre > vmax)
data_pre[invalid_index] = np.nan
data[band] = data_pre
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_k0_from_txt(self):
k0_vis = k0_ir = None
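        # assumed layout of the coefficient txt files (inferred from the
        # column indices used here and in get_k1/get_k2_from_txt):
        # tab-separated columns [channel_name, k0, k1, k2]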
if self.in_vis_file is not None:
k0_k1_vis_df = pd.read_table(self.in_vis_file, sep='\t')
k0_vis = k0_k1_vis_df.iloc[:, [0, 1]].to_numpy()
if self.in_ir_file is not None:
k0_k1_ir_df = pd.read_table(self.in_ir_file, sep='\t')
k0_ir = k0_k1_ir_df.iloc[:, [0, 1]].to_numpy()
return k0_vis, k0_ir
def get_k1_from_txt(self):
k1_vis = k1_ir = None
if self.in_vis_file is not None:
k0_k1_vis_df = pd.read_table(self.in_vis_file, sep='\t')
k1_vis = k0_k1_vis_df.iloc[:, [0, 2]].to_numpy()
if self.in_ir_file is not None:
k0_k1_ir_df = pd.read_table(self.in_ir_file, sep='\t')
k1_ir = k0_k1_ir_df.iloc[:, [0, 2]].to_numpy()
return k1_vis, k1_ir
def get_k2_from_txt(self):
k2_vis = k2_ir = None
if self.in_vis_file is not None:
k0_k1_vis_df = pd.read_table(self.in_vis_file, sep='\t')
k2_vis = k0_k1_vis_df.iloc[:, [0, 3]].to_numpy()
if self.in_ir_file is not None:
k0_k1_ir_df = pd.read_table(self.in_ir_file, sep='\t')
k2_ir = k0_k1_ir_df.iloc[:, [0, 3]].to_numpy()
return k2_vis, k2_ir
def get_k0(self):
"""
return K0
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C']
satellite_type3 = ['FY3D']
# FY3AB
if self.satellite in satellite_type1:
                # vis_k: 57 = 19*3 (19 channels, 3 coefficients each)
ary_vis_coeff = self.file_attr['VIR_Cal_Coeff']
K = np.full((19, 3), 0.)
for i in range(19):
for j in range(3):
K[i, j] = ary_vis_coeff[i * 3 + j]
                # expand to 20*3 (k0, k1, k2)
values = np.array([0, 1, 0])
K = np.insert(K, 4, values, 0)
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
# k0
channel_data = np.full(
self.data_shape, K[i, 0], dtype=np.float32)
data[band] = channel_data
# FY3C
elif self.satellite in satellite_type2:
with h5py.File(self.in_file, 'r') as h5r:
ary_vis_coeff = h5r.get('/Calibration/VIS_Cal_Coeff')[:]
                # 19*3 becomes 20*3; the inserted IR-channel values leave the original DN unchanged
values = np.array([0, 1, 0])
K = np.insert(ary_vis_coeff, 4, values, 0)
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
# k0
channel_data = np.full(
self.data_shape, K[i, 0], dtype=np.float32)
data[band] = channel_data
# FY3D
elif self.satellite in satellite_type3:
with h5py.File(self.in_file, 'r') as h5r:
ary_ir_coeff = h5r.get('/Calibration/IR_Cal_Coeff')[:]
ary_vis_coeff = h5r.get('/Calibration/VIS_Cal_Coeff')[:]
                # expand dimensions
s = self.data_shape
ary_vis_coeff1 = np.repeat(
ary_vis_coeff[:, 0], s[0] * s[1])
ary_ir_coeff1 = np.repeat(
ary_ir_coeff[:, 0, :], 10 * s[1], axis=1)
                # expand to 19*2000*2048 and 6*2000*2048
ary_vis_coeff2 = ary_vis_coeff1.reshape(
(-1,) + self.data_shape)
ary_ir_coeff2 = ary_ir_coeff1.reshape(
(-1,) + self.data_shape)
                # process channel by channel
s = self.data_shape
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 19:
k = i
data[band] = ary_vis_coeff2[k]
else:
k = i - 19
data[band] = ary_ir_coeff2[k]
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
            if self.coef_txt_flag:  # handled here so that channels that need no recalibration are still processed
k0_vis, k0_ir = self.get_k0_from_txt()
if k0_vis is not None:
for channel_name, k0 in k0_vis:
if channel_name in data:
data[channel_name][:] = k0
if k0_ir is not None:
for channel_name, k0 in k0_vis:
if channel_name in data:
data[channel_name][:] = k0
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_k1(self):
"""
return K1
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C']
satellite_type3 = ['FY3D']
# FY3AB
if self.satellite in satellite_type1:
                # vis_k: 57 = 19*3 (19 channels, 3 coefficients each)
ary_vis_coeff = self.file_attr['VIR_Cal_Coeff']
K = np.full((19, 3), 0.)
for i in range(19):
for j in range(3):
K[i, j] = ary_vis_coeff[i * 3 + j]
                # expand to 20*3 (k0, k1, k2)
values = np.array([0, 1, 0])
K = np.insert(K, 4, values, 0)
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
                    # k1
channel_data = np.full(
self.data_shape, K[i, 1], dtype=np.float32)
data[band] = channel_data
# FY3C
elif self.satellite in satellite_type2:
with h5py.File(self.in_file, 'r') as h5r:
ary_vis_coeff = h5r.get('/Calibration/VIS_Cal_Coeff')[:]
                # 19*3 becomes 20*3; the inserted IR-channel values leave the original DN unchanged
values = np.array([0, 1, 0])
K = np.insert(ary_vis_coeff, 4, values, 0)
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
                    # k1
channel_data = np.full(
self.data_shape, K[i, 1], dtype=np.float32)
data[band] = channel_data
# FY3D
elif self.satellite in satellite_type3:
with h5py.File(self.in_file, 'r') as h5r:
ary_ir_coeff = h5r.get('/Calibration/IR_Cal_Coeff')[:]
ary_vis_coeff = h5r.get('/Calibration/VIS_Cal_Coeff')[:]
                # expand dimensions
s = self.data_shape
ary_vis_coeff1 = np.repeat(
ary_vis_coeff[:, 1], s[0] * s[1])
ary_ir_coeff1 = np.repeat(
ary_ir_coeff[:, 1, :], 10 * s[1], axis=1)
                # expand to 19*2000*2048 and 6*2000*2048
ary_vis_coeff2 = ary_vis_coeff1.reshape(
(-1,) + self.data_shape)
ary_ir_coeff2 = ary_ir_coeff1.reshape(
(-1,) + self.data_shape)
                # process channel by channel
s = self.data_shape
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 19:
k = i
data[band] = ary_vis_coeff2[k]
else:
k = i - 19
data[band] = ary_ir_coeff2[k]
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
            if self.coef_txt_flag:  # handled here so that channels that need no recalibration are still processed
k1_vis, k1_ir = self.get_k1_from_txt()
if k1_vis is not None:
for channel_name, k1 in k1_vis:
if channel_name in data:
data[channel_name][:] = k1
if k1_ir is not None:
                    for channel_name, k1 in k1_ir:
if channel_name in data:
data[channel_name][:] = k1
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_k2(self):
"""
return K2
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C']
satellite_type3 = ['FY3D']
# FY3AB
if self.satellite in satellite_type1:
                # vis_k: 57 = 19*3 (19 channels, 3 coefficients each)
ary_vis_coeff = self.file_attr['VIR_Cal_Coeff']
K = np.full((19, 3), 0.)
for i in range(19):
for j in range(3):
K[i, j] = ary_vis_coeff[i * 3 + j]
                # expand to 20*3 (k0, k1, k2)
values = np.array([0, 1, 0])
K = np.insert(K, 4, values, 0)
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
                    # k2
channel_data = np.full(
self.data_shape, K[i, 2], dtype=np.float32)
data[band] = channel_data
# FY3C
elif self.satellite in satellite_type2:
with h5py.File(self.in_file, 'r') as h5r:
ary_vis_coeff = h5r.get('/Calibration/VIS_Cal_Coeff')[:]
                # 19*3 becomes 20*3; the inserted IR-channel values leave the original DN unchanged
values = np.array([0, 1, 0])
K = np.insert(ary_vis_coeff, 4, values, 0)
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
                    # k2
channel_data = np.full(
self.data_shape, K[i, 2], dtype=np.float32)
data[band] = channel_data
# FY3D
elif self.satellite in satellite_type3:
with h5py.File(self.in_file, 'r') as h5r:
ary_ir_coeff = h5r.get('/Calibration/IR_Cal_Coeff')[:]
ary_vis_coeff = h5r.get('/Calibration/VIS_Cal_Coeff')[:]
                # expand dimensions
s = self.data_shape
ary_vis_coeff1 = np.repeat(
ary_vis_coeff[:, 2], s[0] * s[1])
ary_ir_coeff1 = np.repeat(
ary_ir_coeff[:, 2, :], 10 * s[1], axis=1)
                # expand to 19*2000*2048 and 6*2000*2048
ary_vis_coeff2 = ary_vis_coeff1.reshape(
(-1,) + self.data_shape)
ary_ir_coeff2 = ary_ir_coeff1.reshape(
(-1,) + self.data_shape)
                # process channel by channel
s = self.data_shape
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 19:
k = i
data[band] = ary_vis_coeff2[k]
else:
k = i - 19
data[band] = ary_ir_coeff2[k]
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
            if self.coef_txt_flag:  # handled here so that channels that need no recalibration are still processed
k2_vis, k2_ir = self.get_k2_from_txt()
if k2_vis is not None:
for channel_name, k2 in k2_vis:
if channel_name in data:
data[channel_name][:] = k2
if k2_ir is not None:
                    for channel_name, k2 in k2_ir:
if channel_name in data:
data[channel_name][:] = k2
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_k3(self):
pass
def get_ref(self):
"""
return Ref
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
satellite_type2 = ['FY3D']
# FY3A/B/C
if self.satellite in satellite_type1:
dn = self.get_dn()
k0 = self.get_k0()
k1 = self.get_k1()
k2 = self.get_k2()
if 'FY3B' in self.satellite:
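                    # the FY3B DN scaling changed on 2013-03-06 00:15 (the
                    # threshold below), so the divisor depends on the
                    # observation time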
if int(self.ymd + self.hms) <= 20130306001500:
scales = 100.
else:
scales = 10000.
else:
scales = 100.
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if 'CH_05' in band:
continue
channel_data = dn[band] ** 2 * k2[band] + dn[band] * \
k1[band] + k0[band]
pre_data = channel_data / scales
idx = np.where(pre_data < 0.)
if len(idx[0] > 0):
pre_data[idx] = np.nan
data[band] = pre_data
# FY3D
elif self.satellite in satellite_type2:
dn = self.get_dn()
k0 = self.get_k0()
k1 = self.get_k1()
k2 = self.get_k2()
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i < 19:
pre_data = dn[band] ** 2 * k2[band] + dn[band] * \
k1[band] + k0[band]
idx = np.where(pre_data < 0.)
if len(idx[0] > 0):
pre_data[idx] = np.nan
data[band] = pre_data / 100.
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_rad(self):
"""
return rad
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
satellite_type2 = ['FY3D']
if self.satellite in satellite_type1:
dn = self.get_dn()
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if 'CH_05' in band:
data[band] = dn[band] / 100.
elif self.satellite in satellite_type2:
dn = self.get_dn()
with h5py.File(self.in_file, 'r') as h5r:
ary_a1 = h5r.get('/Data/EV_1KM_Emissive').attrs['Slope']
ary_b1 = h5r.get(
'/Data/EV_1KM_Emissive').attrs['Intercept']
ary_a2 = h5r.get(
'/Data/EV_250_Aggr.1KM_Emissive').attrs['Slope']
ary_b2 = h5r.get(
'/Data/EV_250_Aggr.1KM_Emissive').attrs['Intercept']
a = np.concatenate((ary_a1, ary_a2))
b = np.concatenate((ary_b1, ary_b2))
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if i >= 19:
k = i - 19
data[band] = dn[band] * a[k] + b[k]
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_tbb_k1(self):
"""
return tbb_k1 dict one value
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
satellite_type2 = ['FY3D']
if self.satellite in satellite_type1:
data['CH_05'] = 1
elif self.satellite in satellite_type2:
data['CH_20'] = 1.00103
data['CH_21'] = 1.00085
data['CH_22'] = 1.00125
data['CH_23'] = 1.00030
data['CH_24'] = 1.00133
data['CH_25'] = 1.00065
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_tbb_k0(self):
"""
return tbb_k0 dict one value
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
satellite_type2 = ['FY3D']
if self.satellite in satellite_type1:
data['CH_05'] = 0
elif self.satellite in satellite_type2:
data['CH_20'] = -0.4759
data['CH_21'] = -0.3139
data['CH_22'] = -0.2662
data['CH_23'] = -0.0513
data['CH_24'] = -0.0734
data['CH_25'] = 0.0875
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_tbb(self):
"""
return tbb
"""
data = dict()
        if self.resolution == 1000:  # resolution is 1000 m
satellite_type1 = ['FY3A', 'FY3B', 'FY3C', 'FY3D']
if self.satellite in satellite_type1:
                # correction coefficients for the rad-to-tbb conversion; fixed for all observation times
tbb_k0 = self.get_tbb_k0()
tbb_k1 = self.get_tbb_k1()
rads = self.get_rad()
central_wave_numbers = self.get_central_wave_number()
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
if band in list(rads.keys()):
k0 = tbb_k0[band]
k1 = tbb_k1[band]
central_wave_number = central_wave_numbers[band]
rad = rads[band]
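                        # invert the Planck function at the channel's central
                        # wave number, then apply the per-channel linear
                        # correction: tbb_corrected = k1 * tbb + k0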
tbb = planck_r2t(rad, central_wave_number)
data[band] = tbb * k1 + k0
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_sv(self):
"""
return sv
"""
data = dict()
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
                # s = self.data_shape  # FY3A data is irregular: some files are 1810x2048, use 1800x2048
with h5py.File(self.in_file, 'r') as h5r:
try:
data_pre = h5r.get('/SV_DN_average')[:]
                        # filter invalid values
invalid_index = np.logical_or(
data_pre <= 0, data_pre > 4095)
data_pre = data_pre.astype(np.float32)
data_pre[invalid_index] = np.nan
                        # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
channel_data = np.full(
self.data_shape, np.nan, dtype=np.float32)
channel_data[:] = data_pre[i, :].reshape(-1, 1)
data[band] = channel_data
except Exception as e:
print(str(e))
elif self.satellite in satellite_type2:
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get(
'/Calibration/SV_DN_average')[:]
                # filter invalid values
                invalid_index = np.logical_or(data_pre <= 0, data_pre > 4095)
                data_pre = data_pre.astype(np.float32)
                data_pre[invalid_index] = np.nan
                s0 = data_pre.shape[1]
                # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
channel_data = np.full(
self.data_shape, np.nan, dtype=np.float32)
                    # expand the 200 values to 2000
if s0 == 200:
data_pre_new = np.repeat(data_pre[i, :], 10)
elif s0 == 2000:
data_pre_new = data_pre[i, :]
else:
raise ValueError(
'Cant read this satellite`s dataset sv .: {}'.format(self.satellite))
channel_data[:] = data_pre_new.reshape(-1, 1)
data[band] = channel_data
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
else:
raise ValueError(
'Cant read this data, please check its resolution: {}'.format(self.in_file))
return data
def get_bb(self):
"""
return bb
"""
data = dict()
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
                # s = self.data_shape  # FY3A data is irregular: some files are 1810x2048, use 1800x2048
with h5py.File(self.in_file, 'r') as h5r:
try:
data_pre = h5r.get('/BB_DN_average')[:]
                        # filter invalid values
invalid_index = np.logical_or(
data_pre <= 0, data_pre > 4095)
data_pre = data_pre.astype(np.float32)
data_pre[invalid_index] = np.nan
                        # process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
channel_data = np.full(
self.data_shape, np.nan, dtype=np.float32)
channel_data[:] = data_pre[i, :].reshape(-1, 1)
data[band] = channel_data
except Exception as e:
print(str(e))
elif self.satellite in satellite_type2:
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get(
'/Calibration/BB_DN_average')[:]
                # filter invalid values
invalid_index = np.logical_or(data_pre <= 0, data_pre > 4095)
data_pre = data_pre.astype(np.float32)
data_pre[invalid_index] = np.nan
s0 = data_pre.shape[1]
# process channel by channel
for i in range(self.channels):
band = 'CH_{:02d}'.format(i + 1)
channel_data = np.full(
self.data_shape, np.nan, dtype=np.float32)
# expand the 200 per-scan values to 2000 image rows
if s0 == 200:
data_pre_new = np.repeat(data_pre[i, :], 10)
elif s0 == 2000:
data_pre_new = data_pre[i, :]
else:
raise ValueError(
"Can't read this satellite's BB dataset: {}".format(self.satellite))
channel_data[:] = data_pre_new.reshape(-1, 1)
data[band] = channel_data
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
else:
raise ValueError(
"Can't read this data, please check its resolution: {}".format(self.in_file))
return data
def get_longitude(self):
"""
return longitude
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/Longitude')[:]
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/Longitude')[:]
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < -180, data_pre > 180)
data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_latitude(self):
"""
return latitude
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/Latitude')[:]
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/Latitude')[:]
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < -90, data_pre > 90)
data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_land_sea_mask(self):
"""
return land_sea_mask
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/LandSeaMask')[:]
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/LandSeaMask')[:]
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < 0, data_pre > 7)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_height(self):
"""
return height (DEM)
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/DEM')[:]
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/DEM')[:]
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < -400, data_pre > 10000)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_land_cover(self):
"""
return land_cover
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/LandCover')[:]
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/LandCover')[:]
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < 0, data_pre > 17)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_sensor_azimuth(self):
"""
return sensor_azimuth
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/SensorAzimuth')[:]
vmin = -18000
vmax = 18000
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get(
'/Geolocation/SensorAzimuth')[:]
if 'FY3D' in self.satellite:
vmin = 0
vmax = 36000
else:
vmin = -18000
vmax = 18000
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < vmin, data_pre > vmax)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre / 100.
return data
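# Illustrative note (not part of the original module): the angle datasets are
# stored as integers in hundredths of a degree (e.g. vmax = 18000 -> 180.0
# degrees, vmax = 36000 -> 360.0 degrees for FY3D azimuths), which is why this
# getter and the ones below all end with `data = data_pre / 100.`.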
def get_sensor_zenith(self):
"""
return sensor_zenith
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/SensorZenith')[:]
vmin = 0
vmax = 18000
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/SensorZenith')[:]
vmin = 0
vmax = 18000
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < vmin, data_pre > vmax)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre / 100.
return data
def get_solar_azimuth(self):
"""
return solar_azimuth
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/SolarAzimuth')[:]
vmin = -18000
vmax = 18000
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/SolarAzimuth')[:]
if 'FY3D' in self.satellite:
vmin = 0
vmax = 36000
else:
vmin = -18000
vmax = 18000
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < vmin, data_pre > vmax)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre / 100.
return data
def get_solar_zenith(self):
"""
return solar_zenith
"""
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/SolarZenith')[:]
vmin = 0
vmax = 18000
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/SolarZenith')[:]
vmin = 0
vmax = 18000
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < vmin, data_pre > vmax)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre / 100.
return data
def get_hight(self):
"""
return height (DEM)
"""
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
# s = self.data_shape  # FY3A data is irregular: some granules are 1810 x 2048, so use 1800 x 2048
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/DEM')[:]
vmin = -400
vmax = 10000
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Geolocation/DEM')[:]
vmin = -400
vmax = 10000
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
# invalid_index = np.logical_or(data_pre < vmin, data_pre > vmax)
# data_pre = data_pre.astype(np.float32)
# data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_day_night_flag(self):
"""
Nadir Day(0) Night(1) or Mix(2) Flag
return day_night_flag
"""
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
return
elif self.satellite in satellite_type2:
geo_file = self.__get_geo_file()
with h5py.File(geo_file, 'r') as h5r:
data_pre = h5r.get('/Timedata/DayNightFlag')[:]
vmin = 0
vmax = 2
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
invalid_index = np.logical_or(data_pre < vmin, data_pre > vmax)
data_pre = data_pre.astype(np.float32)  # np.float is deprecated; use an explicit dtype
data_pre[invalid_index] = np.nan
data = data_pre
return data
def get_mirror_side(self):
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C', 'FY3D']
if self.satellite in satellite_type1:
return
elif self.satellite in satellite_type2:
with h5py.File(self.in_file, 'r') as h5r:
data_pre = h5r.get('/Calibration/Kmirror_Side')[:]
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
# filter out invalid values
data = data_pre
return data
def get_timestamp(self):
"""
return from 1970-01-01 00:00:00 seconds
"""
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C', 'FY3D']
if self.satellite in satellite_type1:
seconds_of_file = 300  # each granule spans 300 seconds
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
file_date = datetime.strptime(self.ymd + self.hms, '%Y%m%d%H%M%S')
timestamp = (
file_date - datetime(1970, 1, 1, 0, 0, 0)).total_seconds()
row_length = self.data_shape[0]
delta = np.linspace(0, seconds_of_file - 1, row_length)
data = np.full(self.data_shape, np.nan, dtype=np.float64)
data[:] = (delta + timestamp).reshape(-1, 1)
data = data.astype(np.int32)
else:
raise ValueError(
"Can't read this data, please check its resolution: {}".format(self.in_file))
return data
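# Illustrative sketch (not part of the original module): each 300-second
# granule is spread linearly across the image rows, so for a 2000-row file
# the per-row timestamps come out as:
#     delta = np.linspace(0, 299, 2000)      # seconds from granule start
#     timestamps = file_start_epoch + delta  # file_start_epoch stands in for the computed value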
def get_central_wave_number(self):
"""
return central wave numbers (cm-1)
wn(cm-1) = 10 ** 7 / wave_length(nm)
"""
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
satellite_type2 = ['FY3D']
if self.satellite in satellite_type1:
data = {'CH_05': 869.565}
elif self.satellite in satellite_type2:
data = {'CH_20': 2634.359, 'CH_21': 2471.654, 'CH_22':
1382.621, 'CH_23': 1168.182, 'CH_24': 933.364, 'CH_25': 836.941}
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
else:
raise ValueError(
"Can't read this data, please check its resolution: {}".format(self.in_file))
return data
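# Worked example of the wave number relation above (illustrative only):
# wn = 10 ** 7 / wave_length, so CH_05 at 869.565 cm-1 corresponds to
# 10 ** 7 / 869.565 ≈ 11500 nm, i.e. the 11.5 um thermal infrared band.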
def get_spectral_response(self):
"""
return spectral wave numbers and responses, as two dicts
"""
data1 = dict()
data2 = dict()
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B', 'FY3C', 'FY3D']
if self.satellite in satellite_type1:
dtype = {
'names': ('wave_length', 'response'), 'formats': ('f4', 'f4')}
for i in range(self.channels):
k = i + 1
band = "CH_{:02d}".format(k)
file_name = '{}_{}_SRF_CH{:02d}_Pub.txt'.format(
self.satellite, self.sensor, k)
data_file = os.path.join(g_main_path, 'SRF', file_name)
if not os.path.isfile(data_file):
continue
datas = np.loadtxt(data_file, dtype=dtype)
# convert wavelength to wave number
wave_length = datas['wave_length'][::-1]
wave_number = 10 ** 7 / wave_length
# response
response = datas['response'][::-1]
data1[band] = wave_number
data2[band] = response
else:
raise ValueError(
"Can't read this satellite's data: {}".format(self.satellite))
else:
raise ValueError(
"Can't read this data, please check its resolution: {}".format(self.in_file))
return data1, data2
if __name__ == '__main__':
# L1File = 'D:/data/MERSI/FY3A_MERSI_GBAL_L1_20141230_1145_1000M_MS.HDF'
# L1File = 'D:/data/MERSI/FY3B_MERSI_GBAL_L1_20130101_0005_1000M_MS.HDF'
# L1File = 'D:/data/MERSI/FY3D_MERSI_GBAL_L1_20181001_0020_1000M_MS.HDF'
L1File = 'D:/data/MERSI/FY3D_MERSI_GBAL_L1_20190825_1755_1000M_MS.HDF'
mersi = ReadMersiL1(L1File)
print(mersi.satellite)  # satellite name
print(mersi.sensor)  # sensor name
print(mersi.ymd)  # L1 file date YYYYMMDD
print(mersi.hms)  # L1 file time HHMMSS
print(mersi.resolution)  # resolution
print(mersi.channels)  # number of channels
print(mersi.data_shape)
print(type(mersi.file_attr))
def print_data_status(datas, name=None):
data_shape = datas.shape
data_min = np.nanmin(datas)
data_max = np.nanmax(datas)
data_mean = np.nanmean(datas)
data_median = np.nanmedian(datas)
print("{}: shape: {}, min: {}, max: {}, mean: {}, median: {}".format(
name, data_shape, data_min, data_max, data_mean, data_median))
def print_channel_data(datas):
if not isinstance(datas, dict):
return
keys = list(datas.keys())
keys.sort()
for t_channel_name in keys:
channel_data = datas[t_channel_name]
print_data_status(channel_data, name=t_channel_name)
# print('cloud mask')
# t_data = mersi.get_cloudmask()
# print('dn:')
# t_data = mersi.get_dn()
# print_channel_data(t_data)
# print('k0:')
# t_data = mersi.get_k0()
# print_channel_data(t_data)
# print('k1:')
# t_data = mersi.get_k1()
# print_channel_data(t_data)
# print('k2:')
# t_data = mersi.get_k2()
# print_channel_data(t_data)
# print('ref:')
# t_data = mersi.get_ref()
# print_channel_data(t_data)
#
# print('rad:')
# t_data = mersi.get_rad()
# print_channel_data(t_data)
#
# print('tbb:')
# t_data = mersi.get_tbb()
# print_channel_data(t_data)
# print(t_data['CH_24'][1000, 1000])
#
# print('sv:')
# t_data = mersi.get_sv()
# print_channel_data(t_data)
#
# print('bb:')
# t_data = mersi.get_bb()
# print_channel_data(t_data)
#
# print('longitude:')
# t_data = mersi.get_longitude()
# print_data_status(t_data)
#
# print('latitude:')
# t_data = mersi.get_latitude()
# print_data_status(t_data)
#
# print('land_sea_mask:')
# t_data = mersi.get_land_sea_mask()
# print_data_status(t_data)
#
# print('land_cover:')
# t_data = mersi.get_land_cover()
# print_data_status(t_data)
#
# print('sensor_azimuth:')
# t_data = mersi.get_sensor_azimuth()
# print_data_status(t_data)
# print('sensor_zenith:')
# t_data = mersi.get_sensor_zenith()
# print_data_status(t_data)
# print('solar_azimuth:')
# t_data = mersi.get_solar_azimuth()
# print_data_status(t_data)
# print('solar_zenith:')
# t_data = mersi.get_solar_zenith()
# print_data_status(t_data)
# print('timestamp:')
# t_data = mersi.get_timestamp()
# print_data_status(t_data)
#
# print('get_spectral_response:')
# wavenums, wave_spec = mersi.get_spectral_response()
# print_channel_data(wavenums)
# print_channel_data(wave_spec)
print('ref:')
t_data = mersi.get_ref()
for key in sorted(t_data.keys()):
print("%s, %0.6f %0.6f" % (key, np.nanmin(t_data[key]), np.nanmax(t_data[key])))
print('rad:')
t_data = mersi.get_rad()
for key in sorted(t_data.keys()):
print("%s, %0.6f %0.6f" % (key, np.nanmin(t_data[key]), np.nanmax(t_data[key])))
print('tbb:')
t_data = mersi.get_tbb()
for key in sorted(t_data.keys()):
print("%s, %0.6f %0.6f" % (key, np.nanmin(t_data[key]), np.nanmax(t_data[key])))
|
[
"numpy.nanmedian",
"os.path.isfile",
"pandas.read_table",
"os.path.join",
"numpy.nanmean",
"numpy.full",
"numpy.insert",
"numpy.loadtxt",
"numpy.linspace",
"numpy.repeat",
"h5py.File",
"os.path.basename",
"os.path.realpath",
"re.match",
"datetime.datetime",
"datetime.datetime.strptime",
"numpy.nanmax",
"numpy.concatenate",
"numpy.nanmin",
"numpy.where",
"numpy.array",
"numpy.logical_or",
"lib.pb_sat.planck_r2t",
"lib.pb_io.attrs2dict"
] |
[((378, 404), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (394, 404), False, 'import os\n'), ((1211, 1241), 'os.path.basename', 'os.path.basename', (['self.in_file'], {}), '(self.in_file)\n', (1227, 1241), False, 'import os\n'), ((1578, 1608), 'os.path.basename', 'os.path.basename', (['self.in_file'], {}), '(self.in_file)\n', (1594, 1608), False, 'import os\n'), ((1673, 1701), 're.match', 're.match', (['pattern', 'file_name'], {}), '(pattern, file_name)\n', (1681, 1701), False, 'import re\n'), ((1965, 1995), 'os.path.basename', 'os.path.basename', (['self.in_file'], {}), '(self.in_file)\n', (1981, 1995), False, 'import os\n'), ((2075, 2099), 're.match', 're.match', (['pat', 'file_name'], {}), '(pat, file_name)\n', (2083, 2099), False, 'import re\n'), ((5557, 5587), 'numpy.full', 'np.full', (['self.data_shape', '(-999)'], {}), '(self.data_shape, -999)\n', (5564, 5587), True, 'import numpy as np\n'), ((55000, 55016), 'numpy.nanmin', 'np.nanmin', (['datas'], {}), '(datas)\n', (55009, 55016), True, 'import numpy as np\n'), ((55036, 55052), 'numpy.nanmax', 'np.nanmax', (['datas'], {}), '(datas)\n', (55045, 55052), True, 'import numpy as np\n'), ((55073, 55090), 'numpy.nanmean', 'np.nanmean', (['datas'], {}), '(datas)\n', (55083, 55090), True, 'import numpy as np\n'), ((55113, 55132), 'numpy.nanmedian', 'np.nanmedian', (['datas'], {}), '(datas)\n', (55125, 55132), True, 'import numpy as np\n'), ((6305, 6319), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (6313, 6319), True, 'import numpy as np\n'), ((6427, 6441), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (6435, 6441), True, 'import numpy as np\n'), ((6562, 6576), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (6570, 6576), True, 'import numpy as np\n'), ((6693, 6707), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (6701, 6707), True, 'import numpy as np\n'), ((6889, 6903), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (6897, 6903), True, 'import numpy as np\n'), ((7080, 7094), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (7088, 7094), True, 'import numpy as np\n'), ((7236, 7250), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (7244, 7250), True, 'import numpy as np\n'), ((7401, 7415), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (7409, 7415), True, 'import numpy as np\n'), ((7566, 7580), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (7574, 7580), True, 'import numpy as np\n'), ((12208, 12249), 'pandas.read_table', 'pd.read_table', (['self.in_vis_file'], {'sep': '"""\t"""'}), "(self.in_vis_file, sep='\\t')\n", (12221, 12249), True, 'import pandas as pd\n'), ((12378, 12418), 'pandas.read_table', 'pd.read_table', (['self.in_ir_file'], {'sep': '"""\t"""'}), "(self.in_ir_file, sep='\\t')\n", (12391, 12418), True, 'import pandas as pd\n'), ((12638, 12679), 'pandas.read_table', 'pd.read_table', (['self.in_vis_file'], {'sep': '"""\t"""'}), "(self.in_vis_file, sep='\\t')\n", (12651, 12679), True, 'import pandas as pd\n'), ((12808, 12848), 'pandas.read_table', 'pd.read_table', (['self.in_ir_file'], {'sep': '"""\t"""'}), "(self.in_ir_file, sep='\\t')\n", (12821, 12848), True, 'import pandas as pd\n'), ((13068, 13109), 'pandas.read_table', 'pd.read_table', (['self.in_vis_file'], {'sep': '"""\t"""'}), "(self.in_vis_file, sep='\\t')\n", (13081, 13109), True, 'import pandas as pd\n'), ((13238, 13278), 'pandas.read_table', 'pd.read_table', (['self.in_ir_file'], {'sep': '"""\t"""'}), "(self.in_ir_file, sep='\\t')\n", (13251, 13278), True, 'import 
pandas as pd\n'), ((49987, 50034), 'numpy.logical_or', 'np.logical_or', (['(data_pre < vmin)', '(data_pre > vmax)'], {}), '(data_pre < vmin, data_pre > vmax)\n', (50000, 50034), True, 'import numpy as np\n'), ((51287, 51341), 'datetime.datetime.strptime', 'datetime.strptime', (['(self.ymd + self.hms)', '"""%Y%m%d%H%M%S"""'], {}), "(self.ymd + self.hms, '%Y%m%d%H%M%S')\n", (51304, 51341), False, 'from datetime import datetime\n'), ((51511, 51558), 'numpy.linspace', 'np.linspace', (['(0)', '(seconds_of_file - 1)', 'row_length'], {}), '(0, seconds_of_file - 1, row_length)\n', (51522, 51558), True, 'import numpy as np\n'), ((51578, 51628), 'numpy.full', 'np.full', (['self.data_shape', 'np.nan'], {'dtype': 'np.float64'}), '(self.data_shape, np.nan, dtype=np.float64)\n', (51585, 51628), True, 'import numpy as np\n'), ((13833, 13854), 'numpy.full', 'np.full', (['(19, 3)', '(0.0)'], {}), '((19, 3), 0.0)\n', (13840, 13854), True, 'import numpy as np\n'), ((14048, 14067), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (14056, 14067), True, 'import numpy as np\n'), ((14088, 14114), 'numpy.insert', 'np.insert', (['K', '(4)', 'values', '(0)'], {}), '(K, 4, values, 0)\n', (14097, 14114), True, 'import numpy as np\n'), ((17632, 17653), 'numpy.full', 'np.full', (['(19, 3)', '(0.0)'], {}), '((19, 3), 0.0)\n', (17639, 17653), True, 'import numpy as np\n'), ((17847, 17866), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (17855, 17866), True, 'import numpy as np\n'), ((17887, 17913), 'numpy.insert', 'np.insert', (['K', '(4)', 'values', '(0)'], {}), '(K, 4, values, 0)\n', (17896, 17913), True, 'import numpy as np\n'), ((21431, 21452), 'numpy.full', 'np.full', (['(19, 3)', '(0.0)'], {}), '((19, 3), 0.0)\n', (21438, 21452), True, 'import numpy as np\n'), ((21646, 21665), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (21654, 21665), True, 'import numpy as np\n'), ((21686, 21712), 'numpy.insert', 'np.insert', (['K', '(4)', 'values', '(0)'], {}), '(K, 4, values, 0)\n', (21695, 21712), True, 'import numpy as np\n'), ((2555, 2583), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (2564, 2583), False, 'import h5py\n'), ((2629, 2650), 'lib.pb_io.attrs2dict', 'attrs2dict', (['h5r.attrs'], {}), '(h5r.attrs)\n', (2639, 2650), False, 'from lib.pb_io import attrs2dict\n'), ((5806, 5829), 'h5py.File', 'h5py.File', (['in_file', '"""r"""'], {}), "(in_file, 'r')\n", (5815, 5829), False, 'import h5py\n'), ((8034, 8059), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (8043, 8059), False, 'import h5py\n'), ((14300, 14351), 'numpy.full', 'np.full', (['self.data_shape', 'K[i, 0]'], {'dtype': 'np.float32'}), '(self.data_shape, K[i, 0], dtype=np.float32)\n', (14307, 14351), True, 'import numpy as np\n'), ((14700, 14719), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (14708, 14719), True, 'import numpy as np\n'), ((14740, 14778), 'numpy.insert', 'np.insert', (['ary_vis_coeff', '(4)', 'values', '(0)'], {}), '(ary_vis_coeff, 4, values, 0)\n', (14749, 14778), True, 'import numpy as np\n'), ((18099, 18150), 'numpy.full', 'np.full', (['self.data_shape', 'K[i, 1]'], {'dtype': 'np.float32'}), '(self.data_shape, K[i, 1], dtype=np.float32)\n', (18106, 18150), True, 'import numpy as np\n'), ((18499, 18518), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (18507, 18518), True, 'import numpy as np\n'), ((18539, 18577), 'numpy.insert', 'np.insert', (['ary_vis_coeff', '(4)', 'values', 
'(0)'], {}), '(ary_vis_coeff, 4, values, 0)\n', (18548, 18577), True, 'import numpy as np\n'), ((21898, 21949), 'numpy.full', 'np.full', (['self.data_shape', 'K[i, 2]'], {'dtype': 'np.float32'}), '(self.data_shape, K[i, 2], dtype=np.float32)\n', (21905, 21949), True, 'import numpy as np\n'), ((22298, 22317), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (22306, 22317), True, 'import numpy as np\n'), ((22338, 22376), 'numpy.insert', 'np.insert', (['ary_vis_coeff', '(4)', 'values', '(0)'], {}), '(ary_vis_coeff, 4, values, 0)\n', (22347, 22376), True, 'import numpy as np\n'), ((25930, 25954), 'numpy.where', 'np.where', (['(pre_data < 0.0)'], {}), '(pre_data < 0.0)\n', (25938, 25954), True, 'import numpy as np\n'), ((32316, 32344), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (32325, 32344), False, 'import h5py\n'), ((33463, 33508), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= 0)', '(data_pre > 4095)'], {}), '(data_pre <= 0, data_pre > 4095)\n', (33476, 33508), True, 'import numpy as np\n'), ((35095, 35123), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (35104, 35123), False, 'import h5py\n'), ((36240, 36285), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= 0)', '(data_pre > 4095)'], {}), '(data_pre <= 0, data_pre > 4095)\n', (36253, 36285), True, 'import numpy as np\n'), ((37869, 37897), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (37878, 37897), False, 'import h5py\n'), ((38944, 38972), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (38953, 38972), False, 'import h5py\n'), ((40025, 40053), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (40034, 40053), False, 'import h5py\n'), ((41104, 41132), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (41113, 41132), False, 'import h5py\n'), ((42175, 42203), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (42184, 42203), False, 'import h5py\n'), ((43260, 43288), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (43269, 43288), False, 'import h5py\n'), ((44644, 44672), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (44653, 44672), False, 'import h5py\n'), ((45854, 45882), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (45863, 45882), False, 'import h5py\n'), ((47211, 47239), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (47220, 47239), False, 'import h5py\n'), ((48384, 48412), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (48393, 48412), False, 'import h5py\n'), ((53440, 53483), 'os.path.join', 'os.path.join', (['g_main_path', '"""SRF"""', 'file_name'], {}), "(g_main_path, 'SRF', file_name)\n", (53452, 53483), False, 'import os\n'), ((53600, 53634), 'numpy.loadtxt', 'np.loadtxt', (['data_file'], {'dtype': 'dtype'}), '(data_file, dtype=dtype)\n', (53610, 53634), True, 'import numpy as np\n'), ((57684, 57706), 'numpy.nanmin', 'np.nanmin', (['t_data[key]'], {}), '(t_data[key])\n', (57693, 57706), True, 'import numpy as np\n'), ((57708, 57730), 'numpy.nanmax', 'np.nanmax', (['t_data[key]'], {}), '(t_data[key])\n', (57717, 57730), True, 'import numpy as np\n'), ((57859, 57881), 'numpy.nanmin', 'np.nanmin', (['t_data[key]'], {}), '(t_data[key])\n', (57868, 57881), 
True, 'import numpy as np\n'), ((57883, 57905), 'numpy.nanmax', 'np.nanmax', (['t_data[key]'], {}), '(t_data[key])\n', (57892, 57905), True, 'import numpy as np\n'), ((58034, 58056), 'numpy.nanmin', 'np.nanmin', (['t_data[key]'], {}), '(t_data[key])\n', (58043, 58056), True, 'import numpy as np\n'), ((58058, 58080), 'numpy.nanmax', 'np.nanmax', (['t_data[key]'], {}), '(t_data[key])\n', (58067, 58080), True, 'import numpy as np\n'), ((8923, 8971), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= vmin)', '(data_pre > vmax)'], {}), '(data_pre <= vmin, data_pre > vmax)\n', (8936, 8971), True, 'import numpy as np\n'), ((9219, 9244), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (9228, 9244), False, 'import h5py\n'), ((14515, 14543), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (14524, 14543), False, 'import h5py\n'), ((14964, 15015), 'numpy.full', 'np.full', (['self.data_shape', 'K[i, 0]'], {'dtype': 'np.float32'}), '(self.data_shape, K[i, 0], dtype=np.float32)\n', (14971, 15015), True, 'import numpy as np\n'), ((18314, 18342), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (18323, 18342), False, 'import h5py\n'), ((18763, 18814), 'numpy.full', 'np.full', (['self.data_shape', 'K[i, 1]'], {'dtype': 'np.float32'}), '(self.data_shape, K[i, 1], dtype=np.float32)\n', (18770, 18814), True, 'import numpy as np\n'), ((22113, 22141), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (22122, 22141), False, 'import h5py\n'), ((22562, 22613), 'numpy.full', 'np.full', (['self.data_shape', 'K[i, 2]'], {'dtype': 'np.float32'}), '(self.data_shape, K[i, 2], dtype=np.float32)\n', (22569, 22613), True, 'import numpy as np\n'), ((27722, 27750), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (27731, 27750), False, 'import h5py\n'), ((28194, 28226), 'numpy.concatenate', 'np.concatenate', (['(ary_a1, ary_a2)'], {}), '((ary_a1, ary_a2))\n', (28208, 28226), True, 'import numpy as np\n'), ((28251, 28283), 'numpy.concatenate', 'np.concatenate', (['(ary_b1, ary_b2)'], {}), '((ary_b1, ary_b2))\n', (28265, 28283), True, 'import numpy as np\n'), ((31567, 31603), 'lib.pb_sat.planck_r2t', 'planck_r2t', (['rad', 'central_wave_number'], {}), '(rad, central_wave_number)\n', (31577, 31603), False, 'from lib.pb_sat import planck_r2t\n'), ((32515, 32560), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= 0)', '(data_pre > 4095)'], {}), '(data_pre <= 0, data_pre > 4095)\n', (32528, 32560), True, 'import numpy as np\n'), ((33272, 33300), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (33281, 33300), False, 'import h5py\n'), ((33851, 33901), 'numpy.full', 'np.full', (['self.data_shape', 'np.nan'], {'dtype': 'np.float32'}), '(self.data_shape, np.nan, dtype=np.float32)\n', (33858, 33901), True, 'import numpy as np\n'), ((35293, 35338), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= 0)', '(data_pre > 4095)'], {}), '(data_pre <= 0, data_pre > 4095)\n', (35306, 35338), True, 'import numpy as np\n'), ((36049, 36077), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (36058, 36077), False, 'import h5py\n'), ((36614, 36664), 'numpy.full', 'np.full', (['self.data_shape', 'np.nan'], {'dtype': 'np.float32'}), '(self.data_shape, np.nan, dtype=np.float32)\n', (36621, 36664), True, 'import numpy as np\n'), ((38084, 38108), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], 
{}), "(geo_file, 'r')\n", (38093, 38108), False, 'import h5py\n'), ((39158, 39182), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (39167, 39182), False, 'import h5py\n'), ((40242, 40266), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (40251, 40266), False, 'import h5py\n'), ((41313, 41337), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (41322, 41337), False, 'import h5py\n'), ((42390, 42414), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (42399, 42414), False, 'import h5py\n'), ((43538, 43562), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (43547, 43562), False, 'import h5py\n'), ((44917, 44941), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (44926, 44941), False, 'import h5py\n'), ((46133, 46157), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (46142, 46157), False, 'import h5py\n'), ((47483, 47507), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (47492, 47507), False, 'import h5py\n'), ((48651, 48675), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (48660, 48675), False, 'import h5py\n'), ((49650, 49674), 'h5py.File', 'h5py.File', (['geo_file', '"""r"""'], {}), "(geo_file, 'r')\n", (49659, 49674), False, 'import h5py\n'), ((50510, 50538), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (50519, 50538), False, 'import h5py\n'), ((51400, 51429), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0, 0)\n', (51408, 51429), False, 'from datetime import datetime\n'), ((53512, 53537), 'os.path.isfile', 'os.path.isfile', (['data_file'], {}), '(data_file)\n', (53526, 53537), False, 'import os\n'), ((10123, 10171), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= vmin)', '(data_pre > vmax)'], {}), '(data_pre <= vmin, data_pre > vmax)\n', (10136, 10171), True, 'import numpy as np\n'), ((10419, 10444), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (10428, 10444), False, 'import h5py\n'), ((15180, 15208), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (15189, 15208), False, 'import h5py\n'), ((15474, 15517), 'numpy.repeat', 'np.repeat', (['ary_vis_coeff[:, 0]', '(s[0] * s[1])'], {}), '(ary_vis_coeff[:, 0], s[0] * s[1])\n', (15483, 15517), True, 'import numpy as np\n'), ((15579, 15630), 'numpy.repeat', 'np.repeat', (['ary_ir_coeff[:, 0, :]', '(10 * s[1])'], {'axis': '(1)'}), '(ary_ir_coeff[:, 0, :], 10 * s[1], axis=1)\n', (15588, 15630), True, 'import numpy as np\n'), ((18979, 19007), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (18988, 19007), False, 'import h5py\n'), ((19273, 19316), 'numpy.repeat', 'np.repeat', (['ary_vis_coeff[:, 1]', '(s[0] * s[1])'], {}), '(ary_vis_coeff[:, 1], s[0] * s[1])\n', (19282, 19316), True, 'import numpy as np\n'), ((19378, 19429), 'numpy.repeat', 'np.repeat', (['ary_ir_coeff[:, 1, :]', '(10 * s[1])'], {'axis': '(1)'}), '(ary_ir_coeff[:, 1, :], 10 * s[1], axis=1)\n', (19387, 19429), True, 'import numpy as np\n'), ((22778, 22806), 'h5py.File', 'h5py.File', (['self.in_file', '"""r"""'], {}), "(self.in_file, 'r')\n", (22787, 22806), False, 'import h5py\n'), ((23072, 23115), 'numpy.repeat', 'np.repeat', (['ary_vis_coeff[:, 2]', '(s[0] * s[1])'], {}), '(ary_vis_coeff[:, 2], s[0] * s[1])\n', 
(23081, 23115), True, 'import numpy as np\n'), ((23177, 23228), 'numpy.repeat', 'np.repeat', (['ary_ir_coeff[:, 2, :]', '(10 * s[1])'], {'axis': '(1)'}), '(ary_ir_coeff[:, 2, :], 10 * s[1], axis=1)\n', (23186, 23228), True, 'import numpy as np\n'), ((26614, 26638), 'numpy.where', 'np.where', (['(pre_data < 0.0)'], {}), '(pre_data < 0.0)\n', (26622, 26638), True, 'import numpy as np\n'), ((32902, 32952), 'numpy.full', 'np.full', (['self.data_shape', 'np.nan'], {'dtype': 'np.float32'}), '(self.data_shape, np.nan, dtype=np.float32)\n', (32909, 32952), True, 'import numpy as np\n'), ((34034, 34063), 'numpy.repeat', 'np.repeat', (['data_pre[i, :]', '(10)'], {}), '(data_pre[i, :], 10)\n', (34043, 34063), True, 'import numpy as np\n'), ((35680, 35730), 'numpy.full', 'np.full', (['self.data_shape', 'np.nan'], {'dtype': 'np.float32'}), '(self.data_shape, np.nan, dtype=np.float32)\n', (35687, 35730), True, 'import numpy as np\n'), ((36797, 36826), 'numpy.repeat', 'np.repeat', (['data_pre[i, :]', '(10)'], {}), '(data_pre[i, :], 10)\n', (36806, 36826), True, 'import numpy as np\n'), ((11604, 11652), 'numpy.logical_or', 'np.logical_or', (['(data_pre <= vmin)', '(data_pre > vmax)'], {}), '(data_pre <= vmin, data_pre > vmax)\n', (11617, 11652), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 15:59:00 2019
@author: hsteffens
"""
import json, os, time, warnings
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
from copy import deepcopy
from enum import Enum
from scipy import special
from scipy.optimize import curve_fit
# Enum constants
class FILE_ID(Enum):
JITTER = {"type":"Jitter", "version":"2021-04-10"}
_SEP = "."
@staticmethod
# use this method to retrieve a file id, e.g. FILE_ID.get_id(FILE_ID.JITTER)
def get_id(fid):
return fid.value["type"] + FILE_ID._SEP.value + fid.value["version"]
@staticmethod
def get_sep():
return FILE_ID._SEP.value
class JsonEnc(json.JSONEncoder):
"""
Extends the standard JSONEncoder to support additional datatypes.
Keywords strings as dict keys are used to identify instances of the
additional types.
Additional datatype | keyword
---------------------|------------
pandas DataFrame | @DataFrame
pandas Series | @Series
numpy array | @np.array
datetime.datetime | @datetime
datetime.timedelta | @timedelta
Of course, the regular JSON datatypes are supported, too:
int, float, str, bool, None, list, (tuple), dict
Example usage:
# Encode data object to json_str
json_str = json.dumps(data, cls=JsonEnc)
# Decode json_str to a data object
data_copy = json.loads(json_str, cls=JsonDec)
"""
def default(self, obj):
if isinstance(obj, pd.DataFrame):
return {"@DataFrame": {"columns": list(obj.columns),
"index": list(obj.index),
"data": obj.values.tolist()}}
if isinstance(obj, pd.Series):
return {"@Series": {"name": obj.name,
"index": list(obj.index),
"data": obj.values.tolist()}}
if isinstance(obj, np.ndarray):
return {"@np.array": obj.tolist()}
if isinstance(obj, dt.datetime):
return {"@datetime": obj.strftime('%Y-%m-%d %H:%M:%S.%f')}
if isinstance(obj, dt.timedelta):
return {"@timedelta": obj.total_seconds()}
return json.JSONEncoder.default(self, obj)
class JsonDec(json.JSONDecoder):
"""
Extends the standard JSONDecoder to support additional datatypes.
Additional types are recognized by dict key keywords, which are injected
by the JsonEnc.
Additional datatype | keyword
---------------------|------------
pandas DataFrame | @DataFrame
pandas Series | @Series
numpy array | @np.array
datetime.datetime | @datetime
datetime.timedelta | @timedelta
Of course, the regular JSON datatypes are supported, too:
int, float, str, bool, None, list, (tuple), dict
Example usage:
# Encode data object to json_str
json_str = json.dumps(data, cls=JsonEnc)
# Decode json_str to a data object
data_copy = json.loads(json_str, cls=JsonDec)
"""
def __init__(self, *args, **kwargs):
super().__init__(object_hook=JsonDec.custom_hook, *args, **kwargs)
@staticmethod
def custom_hook(dct):
if len(dct) == 1: # additional datatypes are encoded as a dict of length 1
if "@np.array" in dct:
return np.array(dct["@np.array"])
if "@DataFrame" in dct:
return pd.DataFrame(data=dct["@DataFrame"]["data"],
columns=dct["@DataFrame"]["columns"],
index=dct["@DataFrame"]["index"])
if "@Series" in dct:
return pd.Series(data=dct["@Series"]["data"],
name=dct["@Series"]["name"],
index=dct["@Series"]["index"])
if "@datetime" in dct:
return dt.datetime.strptime(dct["@datetime"],
'%Y-%m-%d %H:%M:%S.%f')
if "@timedelta" in dct:
return dt.timedelta(seconds=dct["@timedelta"])
return dct
class JitterEstimator():
@staticmethod
def jitter_func_inv(ber, sigma, mu):
"""
Jitter model function based on the scipy inverse complementary error function
input <np.array> ber: bit error ratio data from the HTester
input <float> sigma: Width of the gaussian distribution.
input <float> mu: Offset of the gaussian distribution.
return <np.array> horz_offs: sample points within the unit interval
scipy inverse complementary error function doc:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfcinv.html
"""
return special.erfcinv(4*ber) * np.sqrt(2) * sigma + mu
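# Sanity check (illustrative, not part of the original class): at
# ber = 0.25, erfcinv(4 * 0.25) = erfcinv(1) = 0, so jitter_func_inv
# returns mu exactly -- the model crosses BER = 1/4 at the Gaussian mean.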
@staticmethod
def jitter_func(x, sigma, mu):
"""
Jitter model function based on the scipy complementary error function
input <np.array> x: sample points within the unit interval (-0.5 ... 0.5)
input <float> sigma: Width of the gaussian distribution.
input <float> mu: Offset of the gaussian distribution.
return <np.array> BER: bit error ratio
scipy complementary error function doc:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erfc.html
"""
return special.erfc((x - mu) / (np.sqrt(2)*sigma)) / 4
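# Round-trip check (illustrative, assuming example values sigma=0.02, mu=0.0):
# jitter_func is the inverse of jitter_func_inv, so for any ber in (0, 0.5):
#     x = JitterEstimator.jitter_func_inv(ber, 0.02, 0.0)
#     assert np.isclose(JitterEstimator.jitter_func(x, 0.02, 0.0), ber)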
def __init__(self, d):
"""
JitterEstimator class
:input <dict> d, having at least two items:
'setup': {'chs': [8, 9], 'horStepCnt': 32, 'verStepSize': 0, 'verScale': 2, 'targetBER': 1e-06}
'eyescan': {8: DataFrame8, 9: DataFrame9}
"""
self.d = d
def __repr__(self):
s = "JitterEstimator\nsetup:"
for key, value in self.d["setup"].items():
if "BER" in key or "linerate" in key:
s += "\n- {}: {}".format(key, value)
if "jitter" in self.d:
jtable = pd.DataFrame(self.d["jitter"])
if len(jtable):
s += "\njitter fit table:\n" + str(jtable)
else:
s += "\nEmpty jitter fit table"
return s
def fit(self, specifiedBER=None, thresholdBER=None):
"""
Apply Gaussian tail fit to internal eyescan data (self.d["eyescan"])
:input <float> specifiedBER: BER (bit error ratio) to which the TJ (total jitter) is estimated, typically 1e-12
:input <float> thresholdBER: BER values below thresholdBER are used for fitting, typically 1e-3
"""
if specifiedBER is not None:
self.d["setup"]["BERs"] = specifiedBER
if thresholdBER is not None:
self.d["setup"]["BERt"] = thresholdBER
self.d["fit"] = {}
self.d["jitter"] = {}
for ch, ber in self.d["eyescan"].items():
fit = {"success": True}
for side in ("left", "right"):
if side == "left":
mask = ber.index < 0
initial = ( 0.02, -0.3) # initial guess for sigma and mu
bounds = ((0, -0.5), (1, 0)) # bounds (min, max)
else:
mask = ber.index >= 0
initial = (-0.02, 0.3) # sigma is fitted to negative values on the right
bounds = ((-1, 0), (0, 0.5)) # bounds (min, max)
ss_ber = ber[mask] # single sided BER
if sum(ss_ber < self.d["setup"]["BERt"]) >= 2: # at least two samples required for fitting
ss_ber = ss_ber[ss_ber < self.d["setup"]["BERt"]]
else: # force a fit by using the two samples with the lowest BER
ss_ber.sort_values(inplace=True)
ss_ber = ss_ber.iloc[:2]
try:
with warnings.catch_warnings(): # suppress the OptimizeWarning that curve_fit sometimes raises
warnings.simplefilter("ignore")
(sigma, mu), _ = curve_fit(self.jitter_func_inv, ss_ber.values, ss_ber.index,
p0=initial, bounds=bounds)
except Exception as e:
print(e)
fit["success"] = False
else:
fit[side] = {"sigma": sigma, "mu": mu}
jitter = {}
if fit["success"]:
jitter["RJrms"] = fit["left"]["sigma"] - fit["right"]["sigma"] # '-' because right sigma is negative
jitter["RJpp"] = self.jitter_func_inv(self.d["setup"]["BERs"], sigma=jitter["RJrms"], mu=0)
jitter["DJpp"] = 0.5 + fit["left"]["mu"] + 0.5 - fit["right"]["mu"]
jitter["TJpp"] = jitter["RJpp"] + jitter["DJpp"]
eye_left = self.jitter_func_inv(self.d["setup"]["BERs"], sigma=fit["left"]["sigma"], mu=fit["left"]["mu"])
eye_right= self.jitter_func_inv(self.d["setup"]["BERs"], sigma=fit["right"]["sigma"], mu=fit["right"]["mu"])
jitter["center"] = (eye_left + eye_right) /2
self.d["jitter"][ch] = jitter
self.d["fit"][ch] = fit
def to_json(self, path_=None):
""" Stores the JitterEstimator instance into a json using the custom JsonEnc """
if path_ is None:
fn = time.strftime('%Y%m%d_%H%M%S') + "_jitter.json"
path_ = os.path.join(fn)
dct = deepcopy(FILE_ID.JITTER.value) # file meta data, like type and version
dct["content"] = self.d
with open(path_, "w") as f:
json.dump(dct, f, cls=JsonEnc)
@classmethod
def from_json(class_, path_):
""" Alternative constructor from json file using the custom JsonDev """
with open(path_, "r") as f:
dct = json.load(f, cls=JsonDec)
# check meta
if dct["type"] != FILE_ID.JITTER.value["type"]:
msg = "Can't open file type '{}' when '{}' is expected".format(
dct["type"], FILE_ID.JITTER.value["type"])
raise Exception(msg)
return class_(dct["content"])
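# Persistence round trip (illustrative; the file name is hypothetical):
# to_json/from_json use the custom encoder/decoder pair above, so the
# DataFrames in the eyescan dict survive the trip:
#     est.to_json("jitter_example.json")
#     est2 = JitterEstimator.from_json("jitter_example.json")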
def plot_jitter_fit(fns, filesDir, exclude_chs=None, refit=False, figsize=(12, 3.5)):
"""
Creates 'plt' plots from jitter.json files
:input fns <str> filename or <list> of <str> filenames
:input filesDir <str> directory of the fns
:input exclude_chs <list> of <int>: excludes channels from the plot
:input refit <bool> refits the Gaussian to the eyescan data if True
:input figsize <tuple>
"""
if isinstance(fns, str): # this allows the fns to be a <str> filename or <list> of filenames
fns = [fns]
if exclude_chs is None:
exclude_chs = []
else:
exclude_chs = [str(ch) for ch in exclude_chs] # make sure list entries are str type
for fn in fns:
jitter = JitterEstimator.from_json(os.path.join(filesDir, fn))
if refit:
jitter.fit()
BERs = jitter.d["setup"]["BERs"]
BERt = jitter.d["setup"]["BERt"]
y_scale = (BERs, 1)
x_scale = (-0.5, 0.5)
for ch, linerate in jitter.d["setup"]["linerates"].items():
if ch not in exclude_chs:
try:
tj = jitter.d["jitter"][ch]["TJpp"]
dj = jitter.d["jitter"][ch]["DJpp"]
rj = jitter.d["jitter"][ch]["RJpp"]
rj_rms = jitter.d["jitter"][ch]["RJrms"]
center = jitter.d["jitter"][ch]["center"]
meas = jitter.d["eyescan"][ch]
fit = jitter.d["fit"][ch]
plt.figure(figsize=figsize)
plt.semilogy(meas.index, meas.values, "bo", label="measurements")
plt.semilogy(x_scale, [BERt]*2, ":m", label="threshold={:.0e}".format(BERt))
plt.semilogy(x_scale, [BERs]*2, ":c", label="BERs={:.0e}".format(BERs))
for key, values in fit.items():
if key in ("left", "right"):
label = {"left": None, "right": "fitted Gaussian"}[key]
x = {"left": np.linspace(-0.5, 0), "right": np.linspace(0, 0.5)}[key]
y = JitterEstimator.jitter_func(x, sigma=values["sigma"], mu=values["mu"])
plt.semilogy(x, y, "b:", label=label)
label = {"left": None, "right": "DJpp={:.3f}UI".format(dj)}[key]
x = {"left": [-0.5, values["mu"]], "right": [values["mu"], 0.5]}[key]
plt.fill_between(x, [0.25]*len(x), [BERt]*len(x), color="m", linewidth=0, label=label)
label = {"left": None, "right": "RJpp={:.3f}UI".format(rj)}[key]
x_start = JitterEstimator.jitter_func_inv(BERs, sigma=values["sigma"], mu=values["mu"])
x = np.linspace(x_start, values["mu"])
y = np.minimum(BERt, JitterEstimator.jitter_func(x, sigma=values["sigma"], mu=values["mu"]))
plt.fill_between(x, y, color="c", linewidth=0, label=label)
plt.fill_between([10], [1], color="white", label="RJrms={:.3f}UI".format(rj_rms))
plt.fill_between([10], [1], color="white", label="TJpp={:.3f}UI".format(tj))
plt.semilogy([center]*2, y_scale, "k", label="center={:.3f}UI".format(center))
plt.ylim(y_scale), plt.xlim(x_scale)
plt.ylabel("BER"), plt.xlabel("x [UI]")
plt.title("C{} {}Mb/s {}".format(ch, linerate, fn))
plt.legend(loc='upper center'), plt.grid();
except Exception as e:
print("Exception during {}, C{} occured:\n{}".format(fn, ch, e))
def plot_jitter_overlay(fns, filesDir, exclude_chs=None, refit=False, figsize=(12, 5)):
"""
Creates 'plt' plots from jitter.json files
:input fns <str> filename or <list> of <str> filenames
:input filesDir <str> directory of the fns
:input exclude_chs <list> of <int>: excludes channels from the plot
:input refit <bool> refits the Gaussian to the eyescan data if True
:input figsize <tuple>
"""
class ColorNames():
COLORS = ['orangered', 'orange', 'blue', 'skyblue', 'limegreen', 'lime', 'blueviolet', 'magenta', 'navy', 'royalblue', 'red', 'gold', 'green', 'yellowgreen', 'maroon', 'salmon', 'darkgrey', 'silver', 'peru', 'cyan', 'teal']
def __init__(self):
self.i = -1
def same(self):
"""Returns the color name <str> of the current index"""
if self.i == -1:
self.i = 0
return self.COLORS[self.i % len(self.COLORS)]
def next(self):
"""Inclrements the index and returns the color name <str> of the current index"""
self.i += 1
return self.same()
plt.figure(figsize=figsize)
colors = ColorNames()
if isinstance(fns, str): # this allows the fns to be a <str> filename or <list> of filenames
fns = [fns]
if exclude_chs is None:
exclude_chs = []
else:
exclude_chs = [str(ch) for ch in exclude_chs] # make sure list entries are str type
for fn in fns:
jitter = JitterEstimator.from_json(os.path.join(filesDir, fn))
if refit:
jitter.fit()
BERs = jitter.d["setup"]["BERs"]
BERt = jitter.d["setup"]["BERt"]
y_scale = (BERs, 1)
x_scale = (-0.5, 0.5)
for ch, linerate in jitter.d["setup"]["linerates"].items():
if ch not in exclude_chs:
try:
label = "{} C{} {}Mb/s".format(fn[:15], ch, linerate)
meas = jitter.d["eyescan"][ch]
high = meas[meas > BERt]
plt.semilogy(high.index, high.values, marker=".", color=colors.next(), linewidth=0)
low = meas[meas <= BERt]
plt.semilogy(low.index, low.values, marker="x", markersize=8, color=colors.same(), linewidth=0, label=label)
for key, values in jitter.d["fit"][ch].items():
if key in ("left", "right"):
x = {"left": np.linspace(-0.5, 0), "right": np.linspace(0, 0.5)}[key]
y = JitterEstimator.jitter_func(x, sigma=values["sigma"], mu=values["mu"])
plt.semilogy(x, y, ":", color=colors.same())
except Exception as e:
print("Exception during {}, C{} occured:\n{}".format(fn, ch, e))
plt.ylim(y_scale), plt.xlim(x_scale)
plt.ylabel("BER"), plt.xlabel("x [UI]")
plt.title("C{} {}Mb/s {}".format(ch, linerate, fn))
plt.legend(loc='upper center'), plt.grid();
if __name__ == "__main__":
pass
|
[
"time.strftime",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.fill_between",
"os.path.join",
"pandas.DataFrame",
"warnings.simplefilter",
"datetime.timedelta",
"warnings.catch_warnings",
"numpy.linspace",
"matplotlib.pyplot.semilogy",
"json.JSONEncoder.default",
"json.dump",
"copy.deepcopy",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"scipy.optimize.curve_fit",
"scipy.special.erfcinv",
"datetime.datetime.strptime",
"pandas.Series",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"json.load",
"numpy.array",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((15408, 15435), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (15418, 15435), True, 'import matplotlib.pyplot as plt\n'), ((2360, 2395), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (2384, 2395), False, 'import json, os, time, warnings\n'), ((9838, 9868), 'copy.deepcopy', 'deepcopy', (['FILE_ID.JITTER.value'], {}), '(FILE_ID.JITTER.value)\n', (9846, 9868), False, 'from copy import deepcopy\n'), ((17171, 17188), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""BER"""'], {}), "('BER')\n", (17181, 17188), True, 'import matplotlib.pyplot as plt\n'), ((17190, 17210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [UI]"""'], {}), "('x [UI]')\n", (17200, 17210), True, 'import matplotlib.pyplot as plt\n'), ((17271, 17301), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""'}), "(loc='upper center')\n", (17281, 17301), True, 'import matplotlib.pyplot as plt\n'), ((17303, 17313), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (17311, 17313), True, 'import matplotlib.pyplot as plt\n'), ((6262, 6292), 'pandas.DataFrame', 'pd.DataFrame', (["self.d['jitter']"], {}), "(self.d['jitter'])\n", (6274, 6292), True, 'import pandas as pd\n'), ((9806, 9822), 'os.path.join', 'os.path.join', (['fn'], {}), '(fn)\n', (9818, 9822), False, 'import json, os, time, warnings\n'), ((9991, 10021), 'json.dump', 'json.dump', (['dct', 'f'], {'cls': 'JsonEnc'}), '(dct, f, cls=JsonEnc)\n', (10000, 10021), False, 'import json, os, time, warnings\n'), ((10209, 10234), 'json.load', 'json.load', (['f'], {'cls': 'JsonDec'}), '(f, cls=JsonDec)\n', (10218, 10234), False, 'import json, os, time, warnings\n'), ((11303, 11329), 'os.path.join', 'os.path.join', (['filesDir', 'fn'], {}), '(filesDir, fn)\n', (11315, 11329), False, 'import json, os, time, warnings\n'), ((15799, 15825), 'os.path.join', 'os.path.join', (['filesDir', 'fn'], {}), '(filesDir, fn)\n', (15811, 15825), False, 'import json, os, time, warnings\n'), ((17130, 17147), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_scale'], {}), '(y_scale)\n', (17138, 17147), True, 'import matplotlib.pyplot as plt\n'), ((17149, 17166), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_scale'], {}), '(x_scale)\n', (17157, 17166), True, 'import matplotlib.pyplot as plt\n'), ((3535, 3561), 'numpy.array', 'np.array', (["dct['@np.array']"], {}), "(dct['@np.array'])\n", (3543, 3561), True, 'import numpy as np\n'), ((3634, 3755), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "dct['@DataFrame']['data']", 'columns': "dct['@DataFrame']['columns']", 'index': "dct['@DataFrame']['index']"}), "(data=dct['@DataFrame']['data'], columns=dct['@DataFrame'][\n 'columns'], index=dct['@DataFrame']['index'])\n", (3646, 3755), True, 'import pandas as pd\n'), ((3892, 3995), 'pandas.Series', 'pd.Series', ([], {'data': "dct['@Series']['data']", 'name': "dct['@Series']['name']", 'index': "dct['@Series']['index']"}), "(data=dct['@Series']['data'], name=dct['@Series']['name'], index=\n dct['@Series']['index'])\n", (3901, 3995), True, 'import pandas as pd\n'), ((4128, 4190), 'datetime.datetime.strptime', 'dt.datetime.strptime', (["dct['@datetime']", '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(dct['@datetime'], '%Y-%m-%d %H:%M:%S.%f')\n", (4148, 4190), True, 'import datetime as dt\n'), ((4307, 4346), 'datetime.timedelta', 'dt.timedelta', ([], {'seconds': "dct['@timedelta']"}), "(seconds=dct['@timedelta'])\n", (4319, 4346), True, 'import datetime as dt\n'), ((9738, 9768), 'time.strftime', 
'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (9751, 9768), False, 'import json, os, time, warnings\n'), ((5003, 5027), 'scipy.special.erfcinv', 'special.erfcinv', (['(4 * ber)'], {}), '(4 * ber)\n', (5018, 5027), False, 'from scipy import special\n'), ((5028, 5038), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5035, 5038), True, 'import numpy as np\n'), ((12051, 12078), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (12061, 12078), True, 'import matplotlib.pyplot as plt\n'), ((12099, 12164), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['meas.index', 'meas.values', '"""bo"""'], {'label': '"""measurements"""'}), "(meas.index, meas.values, 'bo', label='measurements')\n", (12111, 12164), True, 'import matplotlib.pyplot as plt\n'), ((5644, 5654), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5651, 5654), True, 'import numpy as np\n'), ((8186, 8211), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8209, 8211), False, 'import json, os, time, warnings\n'), ((8299, 8330), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (8320, 8330), False, 'import json, os, time, warnings\n'), ((8372, 8463), 'scipy.optimize.curve_fit', 'curve_fit', (['self.jitter_func_inv', 'ss_ber.values', 'ss_ber.index'], {'p0': 'initial', 'bounds': 'bounds'}), '(self.jitter_func_inv, ss_ber.values, ss_ber.index, p0=initial,\n bounds=bounds)\n', (8381, 8463), False, 'from scipy.optimize import curve_fit\n'), ((13925, 13942), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_scale'], {}), '(y_scale)\n', (13933, 13942), True, 'import matplotlib.pyplot as plt\n'), ((13944, 13961), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_scale'], {}), '(x_scale)\n', (13952, 13961), True, 'import matplotlib.pyplot as plt\n'), ((13982, 13999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""BER"""'], {}), "('BER')\n", (13992, 13999), True, 'import matplotlib.pyplot as plt\n'), ((14001, 14021), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [UI]"""'], {}), "('x [UI]')\n", (14011, 14021), True, 'import matplotlib.pyplot as plt\n'), ((14114, 14144), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""'}), "(loc='upper center')\n", (14124, 14144), True, 'import matplotlib.pyplot as plt\n'), ((14146, 14156), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (14154, 14156), True, 'import matplotlib.pyplot as plt\n'), ((12774, 12811), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'y', '"""b:"""'], {'label': 'label'}), "(x, y, 'b:', label=label)\n", (12786, 12811), True, 'import matplotlib.pyplot as plt\n'), ((13361, 13395), 'numpy.linspace', 'np.linspace', (['x_start', "values['mu']"], {}), "(x_start, values['mu'])\n", (13372, 13395), True, 'import numpy as np\n'), ((13545, 13604), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'y'], {'color': '"""c"""', 'linewidth': '(0)', 'label': 'label'}), "(x, y, color='c', linewidth=0, label=label)\n", (13561, 13604), True, 'import matplotlib.pyplot as plt\n'), ((12585, 12605), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0)'], {}), '(-0.5, 0)\n', (12596, 12605), True, 'import numpy as np\n'), ((12616, 12635), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (12627, 12635), True, 'import numpy as np\n'), ((16762, 16782), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0)'], {}), '(-0.5, 0)\n', (16773, 16782), True, 'import numpy as np\n'), ((16793, 16812), 'numpy.linspace', 'np.linspace', (['(0)', 
'(0.5)'], {}), '(0, 0.5)\n', (16804, 16812), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# fnirt.py - Functions for working with FNIRT non-linear transformations.
#
# Author: <NAME> <<EMAIL>>
#
"""This module contains functions for working with FNIRT non-linear
transformations. The following functions are available:
.. autosummary::
:nosignatures:
readFnirt
toFnirt
fromFnirt
Non-linear registration using FNIRT
-----------------------------------
FNIRT is used to calculate a non-linear registration from a source image to a
reference image. FNIRT outputs the resulting non-linear transformation as
either:
- A deformation/warp field which contains displacements or coordinates.
- A coefficient field which can be used to generate a warp field.
Non-linear registration using FNIRT generally follows the process depicted
here:
.. image:: images/nonlinear_registration_process.png
:width: 80%
:align: center
First, an initial linear registration is performed from the source image to
the reference image using FLIRT; this provides an initial global alignment
which can be used as the starting point for the non-linear registration. Next,
FNIRT is used to non-linearly register the aligned source image to the
reference image. Importantly, both of these steps are performed using FSL
coordinates.
Note that we have three spaces, and three sets of coordinate systems, to
consider:
1. Source image space - the source image, before initial linear registration
to the reference image
2. Aligned-source image space - the source image, after it has been linearly
transformed to the reference image space
3. Reference image space
The initial affine registration calculates a transformation between spaces 1
and 2, and the non-linear registration calculates a transformation between
spaces 2 and 3. Note that the fields-of-view for spaces 2 and 3 are
equivalent.
The non-linear transformation file generated by FNIRT will contain the initial
linear registration, with it either being encoded directly into the warps (for
a warp field), or being stored in the NIfTI header (for a coefficient field).
FNIRT warp fields
^^^^^^^^^^^^^^^^^
A FNIRT deformation field (a.k.a. warp field) is defined in the same space as
the reference image, and may contain:
- *relative displacements*, where each voxel in the warp field contains an
offset which can be added to the reference image coordinates for that
voxel, in order to calculate the corresponding source image coordinates.
- *absolute coordinates*, where each voxel in the warp field simply contains
the source image coordinates which correspond to those reference image
coordinates.
.. note:: FNIRT deformation field files give no indication as to whether they
contain relative displacements or absolute coordinates, so heuristics
          must be used to infer what is stored in a particular file. The
:func:`.nonlinear.detectDeformationType` function can be used to
determine whether a file contains relative displacements or absolute
coordinates.
If an initial linear registration was used as the starting point for FNIRT,
this is encoded into the displacements/coordinates themselves, so they can be
used to transform from the reference image to the *original* source image
space.
.. image:: images/fnirt_warp_field.png
:width: 80%
:align: center
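For example, ignoring the voxel-to-FSL coordinate scaling that a real
implementation must handle, a relative displacement field can be converted
into absolute coordinates by adding each voxel's reference grid coordinates
to its displacement (a minimal numpy sketch, with an assumed field shape)::

    import numpy as np

    shape    = (91, 109, 91)                # reference image voxel grid
    relfield = np.zeros(shape + (3,))      # relative displacements
    # coordinates of every voxel in the reference grid
    coords   = np.stack(np.meshgrid(*[np.arange(d) for d in shape],
                                  indexing='ij'), axis=-1)
    absfield = relfield + coords            # absolute source coordinates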
FNIRT coefficient fields
^^^^^^^^^^^^^^^^^^^^^^^^
A FNIRT coefficient field contains the coefficients of a set of quadratic or
cubic B-spline functions defined on a regular 3D grid overlaid on the
reference image voxel coordinate system. Each coefficient in this grid may be
referred to as a *control point* or a *knot*.
Evaluating the spline functions at a particular location in the grid will
result in a relative displacement which can be added to that location's
reference image coordinates, in order to determine the corresponding source
image coordinates.
If an initial linear registration was used as the starting point for FNIRT,
the generated displacement field will encode a transformation to *aligned*
source image coordinates, and the initial affine will be stored in the NIfTI
header of the coefficient field file.
.. image:: images/fnirt_coefficient_field.png
:width: 80%
:align: center
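As a rough illustration of how the coefficients encode displacements, here
is a one-dimensional sketch of evaluating a uniform cubic B-spline field at
a reference coordinate. This is a simplification for intuition only; the
real fields are three-dimensional, and evaluation is handled by the
:class:`.CoefficientField` class::

    import numpy as np

    def bspline3(t):
        # uniform cubic B-spline basis, non-zero on |t| < 2
        t = np.abs(t)
        return np.where(t < 1, (4 - 6 * t ** 2 + 3 * t ** 3) / 6,
                        np.where(t < 2, (2 - t) ** 3 / 6, 0.0))

    def displacement(coefs, x, knotSpacing):
        # sum the contribution of every control point at coordinate x
        ks = np.arange(len(coefs))
        return np.sum(coefs * bspline3(x / knotSpacing - ks))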
"""
import logging
import nibabel as nib
import numpy as np
import fsl.data.constants as constants
import fsl.data.image as fslimage
from . import affine
from . import nonlinear
log = logging.getLogger(__name__)
def _readFnirtDeformationField(fname, img, src, ref, defType=None):
"""Loads ``fname``, assumed to be a FNIRT deformation field.
:arg fname: File name of FNIRT deformation field
:arg img: ``fname`` loaded as an :class:`.Image`
:arg src: Source image
:arg ref: Reference image
:arg defType: Deformation type - either ``'absolute'`` or ``'relative'``.
If not provided, is automatically inferred from the data.
:return: A :class:`.DeformationField` object
"""
return nonlinear.DeformationField(fname,
src,
ref,
srcSpace='fsl',
refSpace='fsl',
defType=defType)
def _readFnirtCoefficientField(fname, img, src, ref):
"""Loads ``fname``, assumed to be a FNIRT coefficient field.
:arg fname: File name of FNIRT coefficient field
:arg img: ``fname`` loaded as an :class:`.Image`
:arg src: Source image
:arg ref: Reference image
:return: A :class:`.CoefficientField`
"""
# FNIRT uses NIFTI header fields in
# non-standard ways to store some
# additional information about the
# coefficient field. See
# $FSLDIR/src/fnirt/fnirt_file_writer.cpp
# for more details.
# The field type (quadratic, cubic,
# or discrete-cosine-transform) is
# inferred from the intent. There is
# no support in this implementation
# for DCT fields
cubics = (constants.FSL_CUBIC_SPLINE_COEFFICIENTS,
constants.FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS)
quads = (constants.FSL_QUADRATIC_SPLINE_COEFFICIENTS,
constants.FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS)
if img.intent in cubics:
fieldType = 'cubic'
elif img.intent in quads:
fieldType = 'quadratic'
else:
fieldType = 'cubic'
log.warning('Unrecognised/unsupported coefficient '
'field type (assuming cubic b-spline): '
'{}'.format(img.intent))
# Knot spacing (in voxels) is
# stored in the pixdims
knotSpacing = img.pixdim[:3]
# The sform contains an initial
# global src-to-ref affine
# (the starting point for the
# non-linear registration). This
# is encoded as a flirt matrix,
# i.e. it transforms from
# source-scaled-voxels to
# ref-scaled-voxels
srcToRefMat = img.header.get_sform()
# The fieldToRefMat affine tells
# the CoefficientField class how
# to transform coefficient field
# voxel coordinates into
# displacement field/reference
# image voxel coordinates.
fieldToRefMat = affine.scaleOffsetXform(knotSpacing, 0)
# But if the provided reference has
# different resolution to the
# reference that was originally
# used to generate the warp field,
# we need to adjust the field
# accordingly. We assume that the
# references are aligned in the FSL
# coordinate system, so simply apply
# a scaling factor calculated by
# dividing the original reference
# pixdims by the provided reference
# pixdims.
refPixdims = np.array([img.header['intent_p1'],
img.header['intent_p2'],
img.header['intent_p3']])
if not np.all(np.isclose(ref.pixdim[:3], refPixdims)):
fieldToRefMat = affine.concat(
affine.scaleOffsetXform(refPixdims / ref.pixdim[:3], 0),
fieldToRefMat)
return nonlinear.CoefficientField(fname,
src,
ref,
srcSpace='fsl',
refSpace='fsl',
fieldType=fieldType,
knotSpacing=knotSpacing,
srcToRefMat=srcToRefMat,
fieldToRefMat=fieldToRefMat)
def readFnirt(fname, src, ref, defType=None, intent=None):
"""Reads a non-linear FNIRT transformation image, returning
a :class:`.DeformationField` or :class:`.CoefficientField` depending
on the file type.
:arg fname: File name of FNIRT transformation
:arg src: Source image
:arg ref: Reference image
:arg defType: Deformation type - either ``'absolute'`` or ``'relative'``.
Only used if the file is a deformation field. If not
provided, is automatically inferred from the data.
:arg intent: NIFTI intent code of ``fname``. e.g.
:attr:`.constants.FSL_FNIRT_DISPLACEMENT_FIELD`. If not
provided, the intent is read from the image header.
"""
if defType not in (None, 'absolute', 'relative'):
raise ValueError('defType must be None, "absolute" or "relative" '
'(passed in as {})'.format(defType))
# Figure out whether the file is a
# deformation field or a coefficient
# field by checking the intent code.
# If the intent is provided, assume
# that the caller knows the type of
# the field.
img = fslimage.Image(fname, loadData=False)
intent = intent or img.intent
disps = (constants.FSL_FNIRT_DISPLACEMENT_FIELD,
constants.FSL_TOPUP_FIELD)
coefs = (constants.FSL_CUBIC_SPLINE_COEFFICIENTS,
constants.FSL_DCT_COEFFICIENTS,
constants.FSL_QUADRATIC_SPLINE_COEFFICIENTS,
constants.FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS,
constants.FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS)
if intent in disps:
return _readFnirtDeformationField(fname, img, src, ref, defType)
elif intent in coefs:
return _readFnirtCoefficientField(fname, img, src, ref)
else:
raise ValueError('Cannot determine type of nonlinear warp field '
'{} (intent code: {})'.format(fname, intent))
def toFnirt(field):
"""Convert a :class:`.NonLinearTransform` to a FNIRT-compatible
:class:`.DeformationField` or :class:`.CoefficientField`.
:arg field: :class:`.NonLinearTransform` to convert
:return: A FNIRT-compatible :class:`.DeformationField` or
:class:`.CoefficientField`.
"""
# If we have a coefficient field
# which transforms between fsl
# space, we can just create a copy.
if isinstance(field, nonlinear.CoefficientField) and \
(field.srcSpace == 'fsl' and field.refSpace == 'fsl'):
# We start with a nibabel image,
# as we need to mess with the header
# fields to store all of the FNIRT
# coefficient field information
fieldBase = nib.nifti1.Nifti1Image(field.data, None)
# Set the intent
if field.fieldType == 'cubic':
intent = constants.FSL_CUBIC_SPLINE_COEFFICIENTS
elif field.fieldType == 'quadratic':
intent = constants.FSL_QUADRATIC_SPLINE_COEFFICIENTS
fieldBase.header['intent_code'] = intent
# Reference image pixdims are
# stored in the intent code
# parameters.
fieldBase.header['intent_p1'] = field.ref.pixdim[0]
fieldBase.header['intent_p2'] = field.ref.pixdim[1]
fieldBase.header['intent_p3'] = field.ref.pixdim[2]
# Pixdims are used to
# store the knot spacing,
pixdims = list(field.knotSpacing) + [1]
qform = np.diag(pixdims)
# The sform is used to store the
# initial src-to-ref affine
if field.srcToRefMat is not None: sform = field.srcToRefMat
else: sform = np.eye(4)
# The qform offsets are
# used to store the
# reference image shape
qform[:3, 3] = field.ref.shape[:3]
fieldBase.header.set_zooms(pixdims)
fieldBase.set_sform(sform, 1)
fieldBase.set_qform(qform, 1)
fieldBase.update_header()
field = nonlinear.CoefficientField(
fieldBase,
src=field.src,
ref=field.ref,
srcSpace='fsl',
refSpace='fsl',
fieldType=field.fieldType,
knotSpacing=field.knotSpacing,
fieldToRefMat=field.fieldToRefMat,
srcToRefMat=field.srcToRefMat)
# Otherwise we have a non-FSL coefficient
# field, or a deformation field.
else:
# We can't convert a CoefficientField
# which doesn't transform in FSL
# coordinates, because the coefficients
# will have been calculated between some
# other source/reference coordinate
# systems, and we can't adjust the
# coefficients to encode an FSL->FSL
# deformation.
if isinstance(field, nonlinear.CoefficientField):
field = nonlinear.coefficientFieldToDeformationField(field)
# Again, if we have a displacement
# field which transforms between
# fsl spaces, we can just take a copy
if field.srcSpace == 'fsl' and field.refSpace == 'fsl':
field = nonlinear.DeformationField(
field.data,
header=field.header,
src=field.src,
ref=field.ref,
defType=field.deformationType)
# Otherwise we have to adjust the
# displacements so they transform
# between fsl coordinates.
field = nonlinear.convertDeformationSpace(
field, from_='fsl', to='fsl')
field.header['intent_code'] = constants.FSL_FNIRT_DISPLACEMENT_FIELD
return field
def fromFnirt(field, from_='world', to='world'):
"""Convert a FNIRT-style :class:`.NonLinearTransform` to a generic
:class:`.DeformationField`.
:arg field: A FNIRT-style :class:`.CoefficientField` or
:class:`.DeformationField`
:arg from_: Desired reference image coordinate system
:arg to: Desired source image coordinate system
:return: A :class:`.DeformationField` which contains displacements
                 from the reference image ``from_`` coordinate system to the
                 source image ``to`` coordinate system.
"""
if isinstance(field, nonlinear.CoefficientField):
field = nonlinear.coefficientFieldToDeformationField(field)
return nonlinear.convertDeformationSpace(field, from_=from_, to=to)
|
[
"nibabel.nifti1.Nifti1Image",
"numpy.eye",
"fsl.data.image.Image",
"numpy.isclose",
"numpy.array",
"numpy.diag",
"logging.getLogger"
] |
[((4495, 4522), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4512, 4522), False, 'import logging\n'), ((7771, 7861), 'numpy.array', 'np.array', (["[img.header['intent_p1'], img.header['intent_p2'], img.header['intent_p3']]"], {}), "([img.header['intent_p1'], img.header['intent_p2'], img.header[\n 'intent_p3']])\n", (7779, 7861), True, 'import numpy as np\n'), ((9794, 9831), 'fsl.data.image.Image', 'fslimage.Image', (['fname'], {'loadData': '(False)'}), '(fname, loadData=False)\n', (9808, 9831), True, 'import fsl.data.image as fslimage\n'), ((11340, 11380), 'nibabel.nifti1.Nifti1Image', 'nib.nifti1.Nifti1Image', (['field.data', 'None'], {}), '(field.data, None)\n', (11362, 11380), True, 'import nibabel as nib\n'), ((12084, 12100), 'numpy.diag', 'np.diag', (['pixdims'], {}), '(pixdims)\n', (12091, 12100), True, 'import numpy as np\n'), ((7930, 7968), 'numpy.isclose', 'np.isclose', (['ref.pixdim[:3]', 'refPixdims'], {}), '(ref.pixdim[:3], refPixdims)\n', (7940, 7968), True, 'import numpy as np\n'), ((12297, 12306), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12303, 12306), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import clusterking as ck
import clusterking_physics.models.bdlnu.distribution as bdlnu
import numpy as np
from numpy import sqrt
from wilson.run.smeft.smpar import p
from wilson import Wilson
v = sqrt(1 / (sqrt(2) * p["GF"]))
Yb = 4.2 / v
Ytau = 1.776 / v
def dGEl(bm, el):
w = Wilson(
wcdict={"CSR_bctaunutau": -sqrt(2) * Yb * Ytau / 4 / p["GF"] * bm**2},
scale=5,
eft="WET",
basis="flavio",
)
return bdlnu.dGEl(w, el)
s = ck.scan.Scanner()
s.set_dfunction(
dGEl, binning=np.linspace(-bdlnu.Elmin, bdlnu.Elmaxval, 11), normalize=True
)
s.set_spoints_equidist(
{
"p": (0, 1, 100),
}
)
s.set_no_workers(20)
d = ck.Data()
r = s.run(d)
r.write()
d.write("output/el.sql", overwrite="overwrite")
|
[
"clusterking_physics.models.bdlnu.distribution.dGEl",
"clusterking.scan.Scanner",
"clusterking.Data",
"numpy.linspace",
"numpy.sqrt"
] |
[((497, 514), 'clusterking.scan.Scanner', 'ck.scan.Scanner', ([], {}), '()\n', (512, 514), True, 'import clusterking as ck\n'), ((703, 712), 'clusterking.Data', 'ck.Data', ([], {}), '()\n', (710, 712), True, 'import clusterking as ck\n'), ((473, 490), 'clusterking_physics.models.bdlnu.distribution.dGEl', 'bdlnu.dGEl', (['w', 'el'], {}), '(w, el)\n', (483, 490), True, 'import clusterking_physics.models.bdlnu.distribution as bdlnu\n'), ((550, 595), 'numpy.linspace', 'np.linspace', (['(-bdlnu.Elmin)', 'bdlnu.Elmaxval', '(11)'], {}), '(-bdlnu.Elmin, bdlnu.Elmaxval, 11)\n', (561, 595), True, 'import numpy as np\n'), ((231, 238), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (235, 238), False, 'from numpy import sqrt\n'), ((352, 359), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (356, 359), False, 'from numpy import sqrt\n')]
|
import click
import os
import numpy as np
import tensorflow as tf
from math import ceil
from BNN_functions import (normalizeData, build_input_pipeline, createNeuralNet,
percentError, setupOptimization)
@click.command()
@click.option('--hidden', default=3, help='Number of hidden layers')
@click.option('--width', default=50, help='Width of the hidden layers')
@click.option('--epochs', default=60, help='Number of epochs to train for')
@click.option('--tb', default=None, help='Folder for Tensorboard')
@click.option('--name', default=None, help='Name of network')
def main(hidden, width, epochs, tb, name):
"""This script creates a Bayesian Neural Network using Dense Flipout Layers from
    TensorFlow-Probability. The network is built with the number of hidden
    layers and the layer width that the user specifies, and is trained for the
    requested number of epochs. The hidden layers use a PReLU activation.
The optimizer used is the Adam Optimizer with a learning rate of 0.001 and an
epsilon of 1E-08.
    This script will connect to Tensorboard and create plots of validation error
    and validation percent difference vs. epoch if the user supplies a folder
    for it via --tb.
Additionally, if the user gives the network a name in --name this program
will save the network using that name.
"""
#Load training and validation data
trainIn=np.loadtxt("fullTrainInput.txt",delimiter="\t",skiprows=1)
trainOut=np.loadtxt("fullTrainOutput.txt",delimiter="\t",skiprows=1)
valIn=np.loadtxt("fullValidateInput.txt",delimiter="\t",skiprows=0)
valOut=np.loadtxt("fullValidateOutput.txt",delimiter="\t",skiprows=0)
#Normalize the training and output data and collect the values used to do so
normInfo, data = normalizeData(trainIn, trainOut, valIn, valOut)
graph1=tf.Graph()
with graph1.as_default():
#Create the iterators used for training and validation
#Path for tensorboard to save data
if(tb is not None):
STORE_PATH = os.path.join(os.getcwd(),tb)
#hyper paramaters
batch_size=128
learning_rate=0.001
#dropout paramaters
dropoutPercent=0.0
rate=tf.placeholder(dtype=tf.float32, shape=(), name="rate")
#size of data
train_size=len(trainIn[:,1])
val_size=len(valIn[:,1])
data_size=train_size+val_size
#setup data pipelines
(x_input, y_output, handle, training_iterator, validation_iterator) = build_input_pipeline(
data, batch_size)
#Create the neural network
neural_net, logits = createNeuralNet(width, hidden, x_input, rate)
#Print a network summary
neural_net.summary()
#Create the percent difference metric
percentErr = percentError(normInfo[0][0], normInfo[0][1], y_output, logits)
#Create the loss function and optimizer
loss, train_op = setupOptimization(normInfo[0][0], normInfo[0][1], learning_rate, y_output, logits)
init_op= tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
#merge outputs for tensorboard
if(tb is not None):
merged = tf.summary.merge_all()
with tf.Session(graph=graph1) as sess:
if(tb is not None):
writer = tf.summary.FileWriter(STORE_PATH, sess.graph) #Tensorboard writer
sess.run(init_op)
train_handle = sess.run(training_iterator.string_handle())
validate_handle = sess.run(validation_iterator.string_handle())
steps=ceil(train_size/batch_size) #Number of batches to get through all the data
for j in range(epochs):
averageLoss=0
averageError=0
#Run the training cycle
for i in range(steps):
loss_value, error_value, _ = sess.run([loss, percentErr, train_op],
feed_dict={handle: train_handle, rate: dropoutPercent})
averageLoss+=loss_value
averageError+=error_value
print("Epoch: {:>3d} Training loss: {:.5f} Training Error: {:.3f}".format(
j+1, averageLoss/steps, averageError/steps))
#Run the validation cycle
            valid_iters=1 #Number of runs through the validation data. Note:
#adjusting this value will scale the output to
#Tensorboard by the same amount
averageLoss=0
averageError=0
if(tb is not None): #when writing to tensorboard
for i in range(valid_iters):
loss_value, error_value, summary = sess.run([loss, percentErr, merged],
feed_dict={handle: validate_handle, rate: 0.0})
averageLoss+=loss_value
averageError+=error_value
writer.add_summary(summary, j+1)
else: #when not writing to tensorboard
for i in range(valid_iters):
loss_value, error_value = sess.run([loss, percentErr],
feed_dict={handle: validate_handle, rate: 0.0})
averageLoss+=loss_value
averageError+=error_value
print("Validation loss: {:.5f} Validation Percent Error: {:.3f} Iterations: {}".format(
averageLoss/valid_iters, averageError/valid_iters, valid_iters))
#save the network
if(name is not None):
saver = tf.train.Saver()
print('\nSaving...')
saver.save(sess, "./"+name)
if(__name__=="__main__"):
main()
|
[
"BNN_functions.build_input_pipeline",
"tensorflow.train.Saver",
"BNN_functions.setupOptimization",
"tensorflow.global_variables_initializer",
"math.ceil",
"os.getcwd",
"click.option",
"tensorflow.Session",
"BNN_functions.createNeuralNet",
"click.command",
"tensorflow.local_variables_initializer",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"BNN_functions.percentError",
"numpy.loadtxt",
"tensorflow.Graph",
"BNN_functions.normalizeData",
"tensorflow.summary.merge_all"
] |
[((244, 259), 'click.command', 'click.command', ([], {}), '()\n', (257, 259), False, 'import click\n'), ((262, 329), 'click.option', 'click.option', (['"""--hidden"""'], {'default': '(3)', 'help': '"""Number of hidden layers"""'}), "('--hidden', default=3, help='Number of hidden layers')\n", (274, 329), False, 'import click\n'), ((332, 402), 'click.option', 'click.option', (['"""--width"""'], {'default': '(50)', 'help': '"""Width of the hidden layers"""'}), "('--width', default=50, help='Width of the hidden layers')\n", (344, 402), False, 'import click\n'), ((405, 479), 'click.option', 'click.option', (['"""--epochs"""'], {'default': '(60)', 'help': '"""Number of epochs to train for"""'}), "('--epochs', default=60, help='Number of epochs to train for')\n", (417, 479), False, 'import click\n'), ((482, 547), 'click.option', 'click.option', (['"""--tb"""'], {'default': 'None', 'help': '"""Folder for Tensorboard"""'}), "('--tb', default=None, help='Folder for Tensorboard')\n", (494, 547), False, 'import click\n'), ((550, 610), 'click.option', 'click.option', (['"""--name"""'], {'default': 'None', 'help': '"""Name of network"""'}), "('--name', default=None, help='Name of network')\n", (562, 610), False, 'import click\n'), ((1484, 1544), 'numpy.loadtxt', 'np.loadtxt', (['"""fullTrainInput.txt"""'], {'delimiter': '"""\t"""', 'skiprows': '(1)'}), "('fullTrainInput.txt', delimiter='\\t', skiprows=1)\n", (1494, 1544), True, 'import numpy as np\n'), ((1557, 1618), 'numpy.loadtxt', 'np.loadtxt', (['"""fullTrainOutput.txt"""'], {'delimiter': '"""\t"""', 'skiprows': '(1)'}), "('fullTrainOutput.txt', delimiter='\\t', skiprows=1)\n", (1567, 1618), True, 'import numpy as np\n'), ((1628, 1691), 'numpy.loadtxt', 'np.loadtxt', (['"""fullValidateInput.txt"""'], {'delimiter': '"""\t"""', 'skiprows': '(0)'}), "('fullValidateInput.txt', delimiter='\\t', skiprows=0)\n", (1638, 1691), True, 'import numpy as np\n'), ((1702, 1766), 'numpy.loadtxt', 'np.loadtxt', (['"""fullValidateOutput.txt"""'], {'delimiter': '"""\t"""', 'skiprows': '(0)'}), "('fullValidateOutput.txt', delimiter='\\t', skiprows=0)\n", (1712, 1766), True, 'import numpy as np\n'), ((1873, 1920), 'BNN_functions.normalizeData', 'normalizeData', (['trainIn', 'trainOut', 'valIn', 'valOut'], {}), '(trainIn, trainOut, valIn, valOut)\n', (1886, 1920), False, 'from BNN_functions import normalizeData, build_input_pipeline, createNeuralNet, percentError, setupOptimization\n'), ((1940, 1950), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1948, 1950), True, 'import tensorflow as tf\n'), ((2356, 2411), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '()', 'name': '"""rate"""'}), "(dtype=tf.float32, shape=(), name='rate')\n", (2370, 2411), True, 'import tensorflow as tf\n'), ((2676, 2714), 'BNN_functions.build_input_pipeline', 'build_input_pipeline', (['data', 'batch_size'], {}), '(data, batch_size)\n', (2696, 2714), False, 'from BNN_functions import normalizeData, build_input_pipeline, createNeuralNet, percentError, setupOptimization\n'), ((2796, 2841), 'BNN_functions.createNeuralNet', 'createNeuralNet', (['width', 'hidden', 'x_input', 'rate'], {}), '(width, hidden, x_input, rate)\n', (2811, 2841), False, 'from BNN_functions import normalizeData, build_input_pipeline, createNeuralNet, percentError, setupOptimization\n'), ((2979, 3041), 'BNN_functions.percentError', 'percentError', (['normInfo[0][0]', 'normInfo[0][1]', 'y_output', 'logits'], {}), '(normInfo[0][0], normInfo[0][1], y_output, logits)\n', (2991, 3041), False, 
'from BNN_functions import normalizeData, build_input_pipeline, createNeuralNet, percentError, setupOptimization\n'), ((3119, 3205), 'BNN_functions.setupOptimization', 'setupOptimization', (['normInfo[0][0]', 'normInfo[0][1]', 'learning_rate', 'y_output', 'logits'], {}), '(normInfo[0][0], normInfo[0][1], learning_rate, y_output,\n logits)\n', (3136, 3205), False, 'from BNN_functions import normalizeData, build_input_pipeline, createNeuralNet, percentError, setupOptimization\n'), ((3233, 3266), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3264, 3266), True, 'import tensorflow as tf\n'), ((3294, 3326), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3324, 3326), True, 'import tensorflow as tf\n'), ((3421, 3443), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3441, 3443), True, 'import tensorflow as tf\n'), ((3460, 3484), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph1'}), '(graph=graph1)\n', (3470, 3484), True, 'import tensorflow as tf\n'), ((3848, 3877), 'math.ceil', 'ceil', (['(train_size / batch_size)'], {}), '(train_size / batch_size)\n', (3852, 3877), False, 'from math import ceil\n'), ((2168, 2179), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2177, 2179), False, 'import os\n'), ((3553, 3598), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['STORE_PATH', 'sess.graph'], {}), '(STORE_PATH, sess.graph)\n', (3574, 3598), True, 'import tensorflow as tf\n'), ((6048, 6064), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6062, 6064), True, 'import tensorflow as tf\n')]
|
import numpy as np
from mahjong.shanten import Shanten
from itertools import combinations
from utils import *
np.random.seed(0)
def is_tinroto(hand):
    # Chinroutou (all terminals): every tile in the hand is a 1 or a 9
    # (tile34 indices 0, 8, 9, 17, 18, 26).
    terminals = [0, 8, 9, 17, 18, 26]
    return all(tile in terminals for tile, count in enumerate(hand) if count > 0)
shanten = Shanten()
cases = []
for key in combinations(range(34), 5):
hand = [0] * 34
for tile in key[:4]:
hand[tile] = 3
hand[key[4]] = 2
flatten = flatten_tile34(hand)
win_tile = np.random.choice(flatten)
is_established = is_tinroto(hand)
cases.append((flatten, win_tile, is_established))
with open(TESTCASE_DIR / "test_score_tinroto.txt", "w") as f:
for hand, win_tile, is_established in cases:
hand_str = " ".join(str(x) for x in hand)
f.write(f"{hand_str} {win_tile} {int(is_established)}\n")
|
[
"mahjong.shanten.Shanten",
"numpy.random.seed",
"numpy.random.choice"
] |
[((112, 129), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (126, 129), True, 'import numpy as np\n'), ((224, 233), 'mahjong.shanten.Shanten', 'Shanten', ([], {}), '()\n', (231, 233), False, 'from mahjong.shanten import Shanten\n'), ((425, 450), 'numpy.random.choice', 'np.random.choice', (['flatten'], {}), '(flatten)\n', (441, 450), True, 'import numpy as np\n')]
|
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from sanitize_data import read_from_tar, TORCH_FILENAME
from weather_format import WeatherDataset, WeatherRow
from model import WeatherLSTM
from config import WINDOW_SIZE, DEVICE, DTYPE, TRAIN_END, VALIDATE_END, BATCH_SIZE, HIDDEN_DIM, TOTAL_POINTS, REPRODUCIBLE
if __name__ == '__main__':
if not os.path.isfile(f'{TORCH_FILENAME}.tar.xz'):
print('Run preprocessing script first')
exit()
'''
t = np.arange(0, len(weather))
linear_component = np.polyfit(t, thermometer, 1)[0]
thermometer_without_linear = thermometer - linear_component * t
amplitudes = np.fft.fft(thermometer_without_linear)
freqs = np.fft.fftfreq(len(amplitudes))
indices = np.argsort(-amplitudes)
t = np.arange(0, len(weather) + 100000)
restored = np.zeros(len(t))
for i in indices[: 21]:
amplitude = np.absolute(amplitudes[i]) / len(t)
phase = np.angle(amplitudes[i])
restored += amplitude * np.cos(2 * np.pi * freqs[i] * t + phase)
restored += linear_component * t
plt.plot(t, restored)
plt.plot(np.arange(0, len(thermometer)), thermometer)
plt.show()
'''
torch_tar, torch_binary = read_from_tar(TORCH_FILENAME)
data = torch.load(torch_binary)
torch_tar.close()
# Needed to obtain reproducible results for debugging
if REPRODUCIBLE:
np.random.seed(2)
torch.manual_seed(2)
time = data[:TRAIN_END,1]
TARGET_FEATURES = [3] + list(range(5, 10)) + list(range(11, 13)) + list(range(14, 15)) + list(range(16,18))
training_data = WeatherDataset(torch.from_numpy(data[:TRAIN_END, TARGET_FEATURES]).to(DEVICE, dtype=DTYPE))
validation_data = WeatherDataset(torch.from_numpy(data[TRAIN_END:VALIDATE_END,TARGET_FEATURES]).to(DEVICE, dtype=DTYPE), training_data.scaler)
train_loader = torch.utils.data.DataLoader(training_data, batch_size=BATCH_SIZE, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=(VALIDATE_END-TRAIN_END) // 8, shuffle=False)
model = WeatherLSTM(input_dim=len(TARGET_FEATURES), hidden_dim=HIDDEN_DIM, output_dim=len(TARGET_FEATURES))
model.to(DEVICE, dtype=DTYPE)
loss_func = nn.MSELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)#torch.optim.LBFGS(model.parameters(), lr=0.7)
previous_validation_loss = float('inf')
for epoch in range(100):
for step, batch in enumerate(train_loader):
def step_closure():
optimizer.zero_grad()
# model.initialize()
out = model(batch[:,:-1,:])
loss = loss_func(out, batch[:,-1,:])
print(f'Epoch {epoch+1}, Step {step+1} Loss: {loss.item()}')
loss.backward()
return loss
optimizer.step(step_closure)
with torch.no_grad():
print('Evaluating model against validation set')
model.eval()
current_validation_loss = 0.0
for batch in validation_loader:
out = model(batch[:,:-1,:])
current_validation_loss += loss_func(out, batch[:,-1,:]).item() * len(batch)
current_validation_loss = current_validation_loss / (VALIDATE_END - TRAIN_END)
model.train()
should_stop_early = current_validation_loss > previous_validation_loss
if should_stop_early:
print(f'Stopping early, current validation loss {current_validation_loss} compared to previous validation loss {previous_validation_loss}')
else:
print(f'Current validation loss is {current_validation_loss}, down from previous {previous_validation_loss}')
previous_validation_loss = current_validation_loss
if should_stop_early:
break
print('Done training, now testing')
with torch.no_grad():
model.eval()
feature_names = list(WeatherRow.__annotations__.keys())
test_data = WeatherDataset(torch.from_numpy(data[VALIDATE_END:TOTAL_POINTS,TARGET_FEATURES]).to(device=DEVICE, dtype=DTYPE), training_data.scaler)
test_results = [] #[model(test_data[idx][:-1,:].reshape((1, WINDOW_SIZE-1, len(TARGET_FEATURES))))[0,:] for idx in range(len(test_data))]
print('Running model on test dataset')
test_loader = torch.utils.data.DataLoader(test_data, batch_size=(TOTAL_POINTS-VALIDATE_END) // 8, shuffle=False)
for step, batch in enumerate(test_loader):
test_batch_results = test_data.scaler.inverse_transform(model(batch[:,:-1,:]).cpu().numpy())
for i in range(len(test_batch_results)):
test_results.append(test_batch_results[i])
print(f'{step*test_loader.batch_size * 100.0 / len(test_data)}% done')
'''
print('Plotting test actual and predicted')
for i, feature in enumerate(TARGET_FEATURES):
plt.title(feature_names[feature])
plt.plot(data[VALIDATE_END: TOTAL_POINTS - WINDOW_SIZE, 1], data[VALIDATE_END+WINDOW_SIZE: TOTAL_POINTS, TARGET_FEATURES[i]])
plt.plot(data[VALIDATE_END: TOTAL_POINTS - WINDOW_SIZE, 1], [test_results[idx][i] for idx in range(len(test_data))])
# plt.plot(data[VALIDATE_END: TOTAL_POINTS - WINDOW_SIZE, 1], [np.average(data[idx+VALIDATE_END:idx+VALIDATE_END+WINDOW_SIZE-1, feature], axis=0) for idx in range(len(test_results))])
plt.savefig(f'test-{feature_names[feature]}.png')
plt.clf()
'''
model_errors = []
last_errors = []
avg_errors = []
for i, feature in enumerate(TARGET_FEATURES):
error = sum([abs(data[idx+VALIDATE_END+WINDOW_SIZE-1, feature] - test_results[idx][i]) for idx in range(len(test_results))])
        avg_error = sum([abs(data[idx+VALIDATE_END+WINDOW_SIZE-1, feature] - np.average(data[idx+VALIDATE_END:idx+VALIDATE_END+WINDOW_SIZE-1, feature], axis=0)) for idx in range(len(test_results))])
        last_error = sum([abs(data[idx+VALIDATE_END+WINDOW_SIZE-1, feature] - data[idx+VALIDATE_END+WINDOW_SIZE-2, feature]) for idx in range(len(test_results))])
        print("The error for {} was {}".format(feature_names[feature], error/len(test_data)))
        print("Average error: {}. This is {}% better than the average metric".format(avg_error/len(test_data), avg_error/error*100-100))
        print("Last error: {}. This is {}% better than the last value metric".format(last_error/len(test_data), last_error/error*100-100))
model_errors.append(error)
last_errors.append(last_error)
avg_errors.append(avg_error)
    usable_features = [feature_names[feature] for feature in TARGET_FEATURES]
    y_pos = np.arange(len(usable_features))
fig, ax = plt.subplots()
for i in range(len(model_errors)):
        max_val = max(model_errors[i], last_errors[i], avg_errors[i])
        model_errors[i] = model_errors[i] / max_val
        avg_errors[i] = avg_errors[i] / max_val
        last_errors[i] = last_errors[i] / max_val
    plt.bar(y_pos, avg_errors, width=0.25, alpha=0.8, color='g', label='Average Sample Model')
    plt.bar(y_pos+0.25, model_errors, width=0.25, alpha=0.8, color='b', label='LSTM Model')
    plt.bar(y_pos-0.25, last_errors, width=0.25, alpha=0.8, color='r', label='Last Sample Model')
ticklabels = [usable_features[i][0:4]+usable_features[i][-1] for i in range(len(usable_features))]
plt.xticks(y_pos+0.25, ticklabels)
plt.xlabel('Feature')
plt.ylabel('Scaled Average L1 Error')
plt.title('Average Test Prediction Method Errors')
plt.legend()
plt.tight_layout()
    plt.savefig('errors.png')
plt.clf()
|
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.bar",
"os.path.isfile",
"torch.no_grad",
"matplotlib.pyplot.tight_layout",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"torch.load",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"numpy.average",
"sanitize_data.read_from_tar",
"torch.manual_seed",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"weather_format.WeatherRow.__annotations__.keys",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1259, 1288), 'sanitize_data.read_from_tar', 'read_from_tar', (['TORCH_FILENAME'], {}), '(TORCH_FILENAME)\n', (1272, 1288), False, 'from sanitize_data import read_from_tar, TORCH_FILENAME\n'), ((1300, 1324), 'torch.load', 'torch.load', (['torch_binary'], {}), '(torch_binary)\n', (1310, 1324), False, 'import torch\n'), ((1905, 1984), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['training_data'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(training_data, batch_size=BATCH_SIZE, shuffle=True)\n', (1932, 1984), False, 'import torch\n'), ((2009, 2116), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validation_data'], {'batch_size': '((VALIDATE_END - TRAIN_END) // 8)', 'shuffle': '(False)'}), '(validation_data, batch_size=(VALIDATE_END -\n TRAIN_END) // 8, shuffle=False)\n', (2036, 2116), False, 'import torch\n'), ((2275, 2287), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2285, 2287), False, 'from torch import nn\n'), ((400, 442), 'os.path.isfile', 'os.path.isfile', (['f"""{TORCH_FILENAME}.tar.xz"""'], {}), "(f'{TORCH_FILENAME}.tar.xz')\n", (414, 442), False, 'import os\n'), ((1435, 1452), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1449, 1452), True, 'import numpy as np\n'), ((1461, 1481), 'torch.manual_seed', 'torch.manual_seed', (['(2)'], {}), '(2)\n', (1478, 1481), False, 'import torch\n'), ((3947, 3962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3960, 3962), False, 'import torch\n'), ((4423, 4527), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': '((TOTAL_POINTS - VALIDATE_END) // 8)', 'shuffle': '(False)'}), '(test_data, batch_size=(TOTAL_POINTS -\n VALIDATE_END) // 8, shuffle=False)\n', (4450, 4527), False, 'import torch\n'), ((6912, 6926), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6924, 6926), True, 'import matplotlib.pyplot as plt\n'), ((7220, 7315), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'avg_errors'], {'width': '(0.25)', 'alpha': '(0.8)', 'color': '"""g"""', 'label': '"""Average Sample Model"""'}), "(y_pos, avg_errors, width=0.25, alpha=0.8, color='g', label=\n 'Average Sample Model')\n", (7227, 7315), True, 'import matplotlib.pyplot as plt\n'), ((7320, 7414), 'matplotlib.pyplot.bar', 'plt.bar', (['(y_pos + 0.25)', 'model_errors'], {'width': '(0.25)', 'alpha': '(0.8)', 'color': '"""b"""', 'label': '"""LSTM Model"""'}), "(y_pos + 0.25, model_errors, width=0.25, alpha=0.8, color='b', label\n ='LSTM Model')\n", (7327, 7414), True, 'import matplotlib.pyplot as plt\n'), ((7417, 7517), 'matplotlib.pyplot.bar', 'plt.bar', (['(y_pos - 0.25)', 'last_errors'], {'width': '(0.25)', 'alpha': '(0.8)', 'color': '"""r"""', 'label': '"""Last Sample Model"""'}), "(y_pos - 0.25, last_errors, width=0.25, alpha=0.8, color='r', label=\n 'Last Sample Model')\n", (7424, 7517), True, 'import matplotlib.pyplot as plt\n'), ((7628, 7664), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(y_pos + 0.25)', 'ticklabels'], {}), '(y_pos + 0.25, ticklabels)\n', (7638, 7664), True, 'import matplotlib.pyplot as plt\n'), ((7671, 7692), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature"""'], {}), "('Feature')\n", (7681, 7692), True, 'import matplotlib.pyplot as plt\n'), ((7701, 7738), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scaled Average L1 Error"""'], {}), "('Scaled Average L1 Error')\n", (7711, 7738), True, 'import matplotlib.pyplot as plt\n'), ((7747, 7797), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Test Prediction Method Errors"""'], 
{}), "('Average Test Prediction Method Errors')\n", (7756, 7797), True, 'import matplotlib.pyplot as plt\n'), ((7806, 7818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7816, 7818), True, 'import matplotlib.pyplot as plt\n'), ((7827, 7845), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7843, 7845), True, 'import matplotlib.pyplot as plt\n'), ((7855, 7880), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""errors.png"""'], {}), "('errors.png')\n", (7866, 7880), True, 'import matplotlib.pyplot as plt\n'), ((7890, 7899), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7897, 7899), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2933), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2931, 2933), False, 'import torch\n'), ((4015, 4048), 'weather_format.WeatherRow.__annotations__.keys', 'WeatherRow.__annotations__.keys', ([], {}), '()\n', (4046, 4048), False, 'from weather_format import WeatherDataset, WeatherRow\n'), ((1661, 1712), 'torch.from_numpy', 'torch.from_numpy', (['data[:TRAIN_END, TARGET_FEATURES]'], {}), '(data[:TRAIN_END, TARGET_FEATURES])\n', (1677, 1712), False, 'import torch\n'), ((1775, 1838), 'torch.from_numpy', 'torch.from_numpy', (['data[TRAIN_END:VALIDATE_END, TARGET_FEATURES]'], {}), '(data[TRAIN_END:VALIDATE_END, TARGET_FEATURES])\n', (1791, 1838), False, 'import torch\n'), ((4086, 4152), 'torch.from_numpy', 'torch.from_numpy', (['data[VALIDATE_END:TOTAL_POINTS, TARGET_FEATURES]'], {}), '(data[VALIDATE_END:TOTAL_POINTS, TARGET_FEATURES])\n', (4102, 4152), False, 'import torch\n'), ((5946, 6040), 'numpy.average', 'np.average', (['data[idx + VALIDATE_END:idx + VALIDATE_END + WINDOW_SIZE - 1, feature]'], {'axis': '(0)'}), '(data[idx + VALIDATE_END:idx + VALIDATE_END + WINDOW_SIZE - 1,\n feature], axis=0)\n', (5956, 6040), True, 'import numpy as np\n')]
|
import logging
import os
import pickle
import numpy as np
from termcolor import colored
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.utils.logger import setup_logger
logger = setup_logger(name=__name__)
def load_semantic_embeddings(semantic_corpus, classes, precomputed_semantic_embs=None):
"""
Load precomputed semantic embeddings if it exists. Otherwise, extract it from corpus.
Args:
semantic_corpus (str)
classes (List[str])
precomputed_semantic_embs (str)
Returns:
class_embs_dict (Dict[str: np.array])
"""
# Prepare the semantic embeddings
to_compute_semantic_embs = True
    # precomputed_semantic_embs defaults to None, which os.path.isfile rejects
    if precomputed_semantic_embs and os.path.isfile(precomputed_semantic_embs):
with open(precomputed_semantic_embs, "rb") as f:
precomputed_embs_dict = pickle.load(f)
# Check if novel classes exist in precomputed embs
if all(x in precomputed_embs_dict.keys() for x in classes):
return precomputed_embs_dict
if to_compute_semantic_embs:
# We take the average for classes e.g. "hot dog", "parking meter".
word_embs_dict = {x: None for cls in classes for x in cls.split(" ")}
with open(semantic_corpus, "r", encoding="utf-8") as f:
for line in f.readlines():
line = line.split("\n")[0].split(" ")
word = line[0]
if word in word_embs_dict:
emb = np.asarray([float(x) for x in line[1:]])
word_embs_dict[word] = emb
if all([v is not None for k, v in word_embs_dict.items()]):
# Break if all words have found its embedding.
break
# check all words have a corresponding semantic embeddings
none_embs = [x for x, emb in word_embs_dict.items() if emb is None]
if len(none_embs) > 0:
msg = "Some classes (words) are not in the corpus and will be skipped in inference:\n"
msg += "\n".join(" " + colored(x, "blue") for x in none_embs)
logger.info(msg)
# Remove none classes
def is_valid(cls, none_embs):
for x in cls.split(" "):
if x in none_embs:
return False
return True
classes = [x for x in classes if is_valid(x, none_embs)]
class_embs_dict = {}
for cls in classes:
emb = [word_embs_dict[x] for x in cls.split(" ") if word_embs_dict[x] is not None]
emb = np.stack(emb, axis=0).mean(axis=0)
class_embs_dict[cls] = emb
# Save semantic embeddings to avoid repeated computations.
    if precomputed_semantic_embs and os.path.isfile(precomputed_semantic_embs):
with open(precomputed_semantic_embs, "rb") as f:
precomputed_embs_dict = pickle.load(f)
precomputed_embs_dict.update(class_embs_dict)
with open(precomputed_semantic_embs, "wb") as f:
pickle.dump(precomputed_embs_dict, f)
else:
with open("./datasets/precomputed_semantic_embeddings.pkl", "wb") as f:
pickle.dump(class_embs_dict, f)
return class_embs_dict
class ZeroShotPredictor(nn.Module):
"""
Zero-shot predictors for discovering objects from novel categories.
"""
def __init__(self, cfg, known_classes, novel_classes):
super(ZeroShotPredictor, self).__init__()
# fmt: off
self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
self.pre_inference_thresh = cfg.ZERO_SHOT.PRE_INFERENCE_THRESH
self.post_inference_thresh = cfg.ZERO_SHOT.POST_INFERENCE_THRESH
self.topk_known_classes = cfg.ZERO_SHOT.TOPK_KNOWN_CLASSES
self.detections_per_image = cfg.ZERO_SHOT.DETECTIONS_PER_IMAGE
self.precomputed_semantic_embs = cfg.ZERO_SHOT.PRECOMPUTED_SEMANTIC_EMBEDDINGS
self.semantic_corpus = cfg.ZERO_SHOT.SEMANTIC_CORPUS
# fmt: on
self._init_embs(known_classes, novel_classes)
def _init_embs(self, known_classes, novel_classes):
"""
        Initialize semantic embeddings for classes.
        """
        # Load semantic word embeddings.
class_embs_dict = load_semantic_embeddings(
self.semantic_corpus,
known_classes + novel_classes,
self.precomputed_semantic_embs,
)
assert all([x in class_embs_dict for x in known_classes])
self.known_classes = known_classes
self.novel_classes = [x for x in novel_classes if x in class_embs_dict]
self.known_class_embs = torch.stack([
torch.as_tensor(class_embs_dict[x]) for x in known_classes
], dim=0)
if len(self.novel_classes) == 0:
return
self.novel_class_embs = torch.stack([
torch.as_tensor(class_embs_dict[x]) for x in novel_classes if x in class_embs_dict
], dim=0)
def inference(self, scores, proposal_deltas, proposals):
"""
Args:
scores: predicted probability of known classes.
proposal_deltas: predicted box deltas. If `CLS_AGNOSTIC_BBOX_REG` = True, it has
shape (N, 4), otherwise its shape is (N, C * 4), where N is the number of
instances and C is the number of known classes.
"""
device = scores.device
num_novel_classes = len(self.novel_classes)
num_instances = len(scores)
if num_instances == 0 or num_novel_classes == 0:
return scores, proposal_deltas
known_class_embs = self.known_class_embs.to(device)
novel_class_embs = self.novel_class_embs.to(device)
novel_scores = torch.zeros(
(num_instances, num_novel_classes), dtype=scores.dtype, device=device
)
# 1. For the boxes whose score of known classes is less than threshold, we perform
# zero-shot inference to reason its score of being the given novel classes.
known_scores = scores[:, :-1] # excluding background scores
max_known_scores = torch.max(known_scores, dim=1)[0]
enable = torch.nonzero(
(max_known_scores < self.pre_inference_thresh) & (max_known_scores > 1e-3)
).squeeze(1)
# 2. Obtain the scores of top K known classes.
known_scores, kept_idxs = torch.sort(known_scores[enable], dim=-1, descending=True)
known_scores = known_scores[:, :self.topk_known_classes]
kept_idxs = kept_idxs[:, :self.topk_known_classes]
# 3. Estimate the semantic embeddings of boxes
base_embs = known_class_embs[kept_idxs]
norm_factors = known_scores.sum(dim=-1, keepdim=True)
base_wgts = known_scores / norm_factors.repeat(1, self.topk_known_classes)
pred_embs = base_embs * base_wgts.unsqueeze(-1).repeat(1, 1, base_embs.size(-1))
pred_embs = torch.sum(pred_embs, dim=1)
# 4. Predict scores for novel classes by computing cosine similarity.
emb_norms = torch.norm(pred_embs, p=2, dim=1, keepdim=True)
pred_embs = pred_embs.div(emb_norms.expand_as(pred_embs))
emb_norms = torch.norm(novel_class_embs, p=2, dim=1, keepdim=True)
novel_class_embs = novel_class_embs.div(emb_norms.expand_as(novel_class_embs))
novel_scores[enable, :] = torch.mm(
pred_embs, novel_class_embs.permute(1, 0)
).to(novel_scores.dtype)
# Reweight interactness scores
interactness_scores = torch.sigmoid(proposals[0].interactness_logits)
novel_scores = novel_scores * interactness_scores.unsqueeze(1).repeat(1, num_novel_classes)
# 5. Post processing. Remove predictions whose score < post_inference_thresh.
novel_scores[novel_scores < self.post_inference_thresh] = 0.
novel_scores[proposals[0].is_person == 1, :] = 0.
# Maximum number of detections to keep
thresh = torch.topk(novel_scores.reshape(-1), self.detections_per_image)[0][-1]
novel_scores[novel_scores <= thresh] = 0.
novel_scores = torch.clamp(novel_scores * 3, min=0., max=1.)
# Always keep the background as the last.
scores = torch.cat([scores[:, :-1], novel_scores, scores[:, -1:]], dim=-1)
if not self.cls_agnostic_bbox_reg:
proposal_deltas = torch.cat([
proposal_deltas,
torch.zeros((num_instances, num_novel_classes * 4), device=device)
], dim=-1)
return scores, proposal_deltas
|
[
"numpy.stack",
"pickle.dump",
"torch.norm",
"torch.nonzero",
"torch.cat",
"detectron2.utils.logger.setup_logger",
"termcolor.colored",
"os.path.isfile",
"torch.sigmoid",
"torch.clamp",
"pickle.load",
"torch.max",
"torch.zeros",
"torch.as_tensor",
"torch.sum",
"torch.sort"
] |
[((219, 246), 'detectron2.utils.logger.setup_logger', 'setup_logger', ([], {'name': '__name__'}), '(name=__name__)\n', (231, 246), False, 'from detectron2.utils.logger import setup_logger\n'), ((691, 732), 'os.path.isfile', 'os.path.isfile', (['precomputed_semantic_embs'], {}), '(precomputed_semantic_embs)\n', (705, 732), False, 'import os\n'), ((2683, 2724), 'os.path.isfile', 'os.path.isfile', (['precomputed_semantic_embs'], {}), '(precomputed_semantic_embs)\n', (2697, 2724), False, 'import os\n'), ((5739, 5826), 'torch.zeros', 'torch.zeros', (['(num_instances, num_novel_classes)'], {'dtype': 'scores.dtype', 'device': 'device'}), '((num_instances, num_novel_classes), dtype=scores.dtype, device=\n device)\n', (5750, 5826), False, 'import torch\n'), ((6377, 6434), 'torch.sort', 'torch.sort', (['known_scores[enable]'], {'dim': '(-1)', 'descending': '(True)'}), '(known_scores[enable], dim=-1, descending=True)\n', (6387, 6434), False, 'import torch\n'), ((6925, 6952), 'torch.sum', 'torch.sum', (['pred_embs'], {'dim': '(1)'}), '(pred_embs, dim=1)\n', (6934, 6952), False, 'import torch\n'), ((7052, 7099), 'torch.norm', 'torch.norm', (['pred_embs'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(pred_embs, p=2, dim=1, keepdim=True)\n', (7062, 7099), False, 'import torch\n'), ((7195, 7249), 'torch.norm', 'torch.norm', (['novel_class_embs'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(novel_class_embs, p=2, dim=1, keepdim=True)\n', (7205, 7249), False, 'import torch\n'), ((7546, 7593), 'torch.sigmoid', 'torch.sigmoid', (['proposals[0].interactness_logits'], {}), '(proposals[0].interactness_logits)\n', (7559, 7593), False, 'import torch\n'), ((8125, 8172), 'torch.clamp', 'torch.clamp', (['(novel_scores * 3)'], {'min': '(0.0)', 'max': '(1.0)'}), '(novel_scores * 3, min=0.0, max=1.0)\n', (8136, 8172), False, 'import torch\n'), ((8240, 8305), 'torch.cat', 'torch.cat', (['[scores[:, :-1], novel_scores, scores[:, -1:]]'], {'dim': '(-1)'}), '([scores[:, :-1], novel_scores, scores[:, -1:]], dim=-1)\n', (8249, 8305), False, 'import torch\n'), ((827, 841), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (838, 841), False, 'import pickle\n'), ((2819, 2833), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2830, 2833), False, 'import pickle\n'), ((2957, 2994), 'pickle.dump', 'pickle.dump', (['precomputed_embs_dict', 'f'], {}), '(precomputed_embs_dict, f)\n', (2968, 2994), False, 'import pickle\n'), ((3097, 3128), 'pickle.dump', 'pickle.dump', (['class_embs_dict', 'f'], {}), '(class_embs_dict, f)\n', (3108, 3128), False, 'import pickle\n'), ((6114, 6144), 'torch.max', 'torch.max', (['known_scores'], {'dim': '(1)'}), '(known_scores, dim=1)\n', (6123, 6144), False, 'import torch\n'), ((4654, 4689), 'torch.as_tensor', 'torch.as_tensor', (['class_embs_dict[x]'], {}), '(class_embs_dict[x])\n', (4669, 4689), False, 'import torch\n'), ((4850, 4885), 'torch.as_tensor', 'torch.as_tensor', (['class_embs_dict[x]'], {}), '(class_embs_dict[x])\n', (4865, 4885), False, 'import torch\n'), ((6165, 6260), 'torch.nonzero', 'torch.nonzero', (['((max_known_scores < self.pre_inference_thresh) & (max_known_scores > 0.001))'], {}), '((max_known_scores < self.pre_inference_thresh) & (\n max_known_scores > 0.001))\n', (6178, 6260), False, 'import torch\n'), ((2538, 2559), 'numpy.stack', 'np.stack', (['emb'], {'axis': '(0)'}), '(emb, axis=0)\n', (2546, 2559), True, 'import numpy as np\n'), ((8440, 8506), 'torch.zeros', 'torch.zeros', (['(num_instances, num_novel_classes * 4)'], {'device': 'device'}), 
'((num_instances, num_novel_classes * 4), device=device)\n', (8451, 8506), False, 'import torch\n'), ((2029, 2047), 'termcolor.colored', 'colored', (['x', '"""blue"""'], {}), "(x, 'blue')\n", (2036, 2047), False, 'from termcolor import colored\n')]
|
# Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import gym
import os
import sys
import inspect
import numpy as np
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
pparentdir = os.path.dirname(parentdir)
sys.path.insert(0,pparentdir)
from src.gym.simulate_network.link import Link
from src.gym.simulate_network.network import Network
from src.gym.simulate_network.sender import Sender
from src.gym.worker.aurora_worker import AuroraWorker
from src.gym.worker.ogd_worker import OGDWorker
from src.gym.worker.two_point_ogd_worker import TwoPointOGDWorker
from src.gym.worker.combining_worker import CombiningWorker
from src.gym.worker.worker_runner import WorkerRunner
from src.gym.simulate_network.simulated_network_env import SimulatedNetworkEnv
from src.gym.aurora_policy.aurora_policy import AuroraPolicy
from src.gym.no_regret_policy.gradient_calculating_agent import GradientCalculatingAgent
from src.gym.no_regret_policy.no_regret_combining_connected_policy import NoRegretCombiningConnectPolicy
import src.gym.simulate_network.single_sender_network
from src.common.simple_arg_parse import arg_or_default
from src.gym.no_regret_policy.no_regret_policy import NoRegretAgent
history_len = 10
features = "sent latency inflation," + "latency ratio," + "send ratio"
bws = [240, 240] # [200, 300, 200, 300]
index = 0
def get_network():
global index
while True:
# link1 = Link.generate_link(bws[index], 0.2, 6, 0)
link1 = Link.generate_random_link()
link1.bw = bws[index]
links = [link1]
yield links
        # Alternate between the configured bandwidths on each yield
        index = 1 - index
senders = [
Sender(
random.uniform(0.3, 1.5) * bws[0],
None, 0, features.split(","),
history_len=history_len
),
Sender(
random.uniform(0.3, 1.5) * bws[0],
None, 0, features.split(","),
history_len=history_len
)
]
import matplotlib.pyplot as plt
env = SimulatedNetworkEnv(senders, get_network(), history_len=history_len, features=features)
model = CombiningWorker(
(40, 300),
env,
[
AuroraWorker("./rand_model_12", env, (40, 300)),
TwoPointOGDWorker(env, (40, 300), C=11 * 300, L=20)
# OGDWorker(env, (40, 300), C=11 * 300, L=2)
]
)
model2 = CombiningWorker(
(40, 300),
env,
[
AuroraWorker("./rand_model_12", env, (40, 300)),
TwoPointOGDWorker(env, (40, 300), C=11 * 300, L=20)
# OGDWorker(env, (40, 300), C=11 * 300, L=2)
]
)
model.workers[1].set_action(200)
model2.workers[1].set_action(50)
#time_data = [float(event["Time"]) for event in data["Events"][1:]]
#rew_data = [float(event["Reward"]) for event in data["Events"][1:]]
#optimal_data = [float(event["Optimal"]) for event in data["Optimal"][1:]]
#send_data = [float(event["Send Rate"]) for event in data["Events"][1:]]
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 12))
senders_axis = axes[0][0]
sender_ewma_axis = axes[1][0]
sender1_sig_axis = axes[0][1]
sender2_sig_axis = axes[1][1]
senders_axis.title.set_text("Sending Rate")
sender_ewma_axis.title.set_text("Reward")
sender1_sig_axis.title.set_text("Sender 1 Sig")
sender2_sig_axis.title.set_text("Sender 2 Sig")
def plot_axis(axis, events_arr):
colors = [('r', 'g'), ('b', 'm'), ('k', 'y')]
times = []
optim = []
for i in range(len(events_arr)):
events = events_arr[i]
times = [event["Time"] for event in events[-501:]]
optim = [8*event["Optimal"] for event in events[-501:]]
send = [event["Send Rate"] for event in events[-500:]]
throu = [event["Throughput"] for event in events[-500:]]
axis.plot(times[:500], send, colors[i][0] + "-", label="[%d] Sent" % (i+1))
# axis.plot(times[:500], throu, colors[i][1] + "x", label="[%d] Throughput" % (i+1))
axis.plot(times, optim, "b--", label="Optimal")
axis.plot(times, np.array(optim)/2, "r--", label="Optimal/2")
def plot_ewma(axis, event_arr):
colors = ["r", "b", "g", "p"]
i = 0
for events in event_arr:
times = [event["Time"] for event in events[-500:]]
ewma = [event["EWMA"] for event in events[-500:]]
axis.plot(times, ewma, colors[i] + "-", label="Sender" + str(i))
i += 1
legend_drawn = [False, False]
def plot_sender_sig(axis, i, event_arr, sig_arr):
times = [event["Time"] for event in event_arr[i][-500:]]
axis.plot(times, list(map(lambda x: x[0], sig_arr[-500:])), "b-", label="Aurora Sig")
axis.plot(times, list(map(lambda x: x[1], sig_arr[-500:])), "g-", label="OGD Sig")
if not legend_drawn[i]:
axis.legend()
legend_drawn[i] = True
sender1_sig = []
sender2_sig = []
obs = env.reset()
reward = 0
wr = WorkerRunner([model, model2], obs, [reward, reward])
for i in range(1600 * 410):
actions = wr.start_step()
# print("[Step %d] actions are" % i, action, action2)
env.senders[0].set_rate(actions[0])
env.senders[1].set_rate(actions[1])
# env.senders[2].set_rate(action3)
obs, rewards, dones, info = env.step([0, 0])
wr.finish_step(obs, rewards)
sender1_sig.append(model.get_proba()[:])
sender2_sig.append(model2.get_proba()[:])
# sender1_sig.append([0.5, 0.25])
# sender2_sig.append([0.25, 0.5])
# print("[Step %d] rewards are" % i, rewards)
if i > 0 and i % 400 == 0:
obs = env.reset()
event_arr = [x["Events"] for x in info]
plot_axis(senders_axis, event_arr)
plot_ewma(sender_ewma_axis, event_arr)
plot_sender_sig(sender1_sig_axis, 0, event_arr, sender1_sig)
plot_sender_sig(sender2_sig_axis, 1, event_arr, sender2_sig)
if i == 400:
senders_axis.legend()
plt.draw()
plt.pause(0.1)
if i > 0 and i % 10000 == 0:
obs = env.reset(True)
env.render()
|
[
"src.gym.worker.worker_runner.WorkerRunner",
"src.gym.worker.aurora_worker.AuroraWorker",
"random.uniform",
"os.path.dirname",
"src.gym.worker.two_point_ogd_worker.TwoPointOGDWorker",
"sys.path.insert",
"matplotlib.pyplot.draw",
"src.gym.simulate_network.link.Link.generate_random_link",
"numpy.array",
"matplotlib.pyplot.pause",
"inspect.currentframe",
"matplotlib.pyplot.subplots"
] |
[((763, 790), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (778, 790), False, 'import os\n'), ((804, 830), 'os.path.dirname', 'os.path.dirname', (['parentdir'], {}), '(parentdir)\n', (819, 830), False, 'import os\n'), ((831, 861), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pparentdir'], {}), '(0, pparentdir)\n', (846, 861), False, 'import sys\n'), ((3451, 3499), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(10, 12)'}), '(nrows=2, ncols=2, figsize=(10, 12))\n', (3463, 3499), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5373), 'src.gym.worker.worker_runner.WorkerRunner', 'WorkerRunner', (['[model, model2]', 'obs', '[reward, reward]'], {}), '([model, model2], obs, [reward, reward])\n', (5333, 5373), False, 'from src.gym.worker.worker_runner import WorkerRunner\n'), ((2080, 2107), 'src.gym.simulate_network.link.Link.generate_random_link', 'Link.generate_random_link', ([], {}), '()\n', (2105, 2107), False, 'from src.gym.simulate_network.link import Link\n'), ((2680, 2727), 'src.gym.worker.aurora_worker.AuroraWorker', 'AuroraWorker', (['"""./rand_model_12"""', 'env', '(40, 300)'], {}), "('./rand_model_12', env, (40, 300))\n", (2692, 2727), False, 'from src.gym.worker.aurora_worker import AuroraWorker\n'), ((2737, 2788), 'src.gym.worker.two_point_ogd_worker.TwoPointOGDWorker', 'TwoPointOGDWorker', (['env', '(40, 300)'], {'C': '(11 * 300)', 'L': '(20)'}), '(env, (40, 300), C=11 * 300, L=20)\n', (2754, 2788), False, 'from src.gym.worker.two_point_ogd_worker import TwoPointOGDWorker\n'), ((2915, 2962), 'src.gym.worker.aurora_worker.AuroraWorker', 'AuroraWorker', (['"""./rand_model_12"""', 'env', '(40, 300)'], {}), "('./rand_model_12', env, (40, 300))\n", (2927, 2962), False, 'from src.gym.worker.aurora_worker import AuroraWorker\n'), ((2972, 3023), 'src.gym.worker.two_point_ogd_worker.TwoPointOGDWorker', 'TwoPointOGDWorker', (['env', '(40, 300)'], {'C': '(11 * 300)', 'L': '(20)'}), '(env, (40, 300), C=11 * 300, L=20)\n', (2989, 3023), False, 'from src.gym.worker.two_point_ogd_worker import TwoPointOGDWorker\n'), ((6314, 6324), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6322, 6324), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6347), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (6342, 6347), True, 'import matplotlib.pyplot as plt\n'), ((725, 747), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (745, 747), False, 'import inspect\n'), ((2242, 2266), 'random.uniform', 'random.uniform', (['(0.3)', '(1.5)'], {}), '(0.3, 1.5)\n', (2256, 2266), False, 'import random\n'), ((2374, 2398), 'random.uniform', 'random.uniform', (['(0.3)', '(1.5)'], {}), '(0.3, 1.5)\n', (2388, 2398), False, 'import random\n'), ((4488, 4503), 'numpy.array', 'np.array', (['optim'], {}), '(optim)\n', (4496, 4503), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Dark matter spatial and spectral models
#
# ## Introduction
#
# Gammapy has some convenience methods for dark matter analyses in `~gammapy.astro.darkmatter`. These include J-Factor computation and the calculation of the expected gamma flux for a number of annihilation channels. They are presented in this notebook.
#
# The basic concepts of indirect dark matter searches, however, are not explained. So this is aimed at people who already know what they want to do. A good introduction to indirect dark matter searches is given, for example, in https://arxiv.org/pdf/1012.4515.pdf (Chapters 1 and 5)
# ## Setup
#
# As always, we start with some setup for the notebook, and with imports.
# In[1]:
from gammapy.astro.darkmatter import (
profiles,
JFactory,
PrimaryFlux,
DarkMatterAnnihilationSpectralModel,
)
from gammapy.maps import WcsGeom, WcsNDMap
from astropy.coordinates import SkyCoord
from matplotlib.colors import LogNorm
from regions import CircleSkyRegion
import astropy.units as u
import numpy as np
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# ## Profiles
#
# The following dark matter profiles are currently implemented. Each model can be scaled to a given density at a certain distance. These parameters are controlled by ``profiles.DMProfile.LOCAL_DENSITY`` and ``profiles.DMProfile.DISTANCE_GC``.
# In[3]:
profiles.DMProfile.__subclasses__()
# In[4]:
for profile in profiles.DMProfile.__subclasses__():
p = profile()
p.scale_to_local_density()
radii = np.logspace(-3, 2, 100) * u.kpc
plt.plot(radii, p(radii), label=p.__class__.__name__)
plt.loglog()
plt.axvline(8.5, linestyle="dashed", color="black", label="local density")
plt.legend()
print("LOCAL_DENSITY:", profiles.DMProfile.LOCAL_DENSITY)
print("DISTANCE_GC:", profiles.DMProfile.DISTANCE_GC)
# ## J Factors
#
# There are utilities to compute J-Factor maps that can serve as a basis to compute J-Factors for certain regions. In the following we compute a J-Factor map for the Galactic Centre region
# In[5]:
profile = profiles.NFWProfile()
# Adopt standard values used in HESS
profiles.DMProfile.DISTANCE_GC = 8.5 * u.kpc
profiles.DMProfile.LOCAL_DENSITY = 0.39 * u.Unit("GeV / cm3")
profile.scale_to_local_density()
position = SkyCoord(0.0, 0.0, frame="galactic", unit="deg")
geom = WcsGeom.create(binsz=0.05, skydir=position, width=3.0, frame="galactic")
# In[6]:
jfactory = JFactory(
geom=geom, profile=profile, distance=profiles.DMProfile.DISTANCE_GC
)
jfact = jfactory.compute_jfactor()
# In[7]:
jfact_map = WcsNDMap(geom=geom, data=jfact.value, unit=jfact.unit)
fig, ax, im = jfact_map.plot(cmap="viridis", norm=LogNorm(), add_cbar=True)
plt.title(f"J-Factor [{jfact_map.unit}]")
# 1 deg circle usually used in H.E.S.S. analyses
sky_reg = CircleSkyRegion(center=position, radius=1 * u.deg)
pix_reg = sky_reg.to_pixel(wcs=geom.wcs)
pix_reg.plot(ax=ax, facecolor="none", edgecolor="red", label="1 deg circle")
plt.legend()
# In[8]:
# NOTE: https://arxiv.org/abs/1607.08142 quotes 2.67e21 without the +/- 0.3 deg band around the plane
total_jfact = pix_reg.to_mask().multiply(jfact).sum()
print(
"J-factor in 1 deg circle around GC assuming a "
f"{profile.__class__.__name__} is {total_jfact:.3g}"
)
# ## Gamma-ray spectra at production
#
# The gamma-ray spectrum per annihilation is a further ingredient for a dark matter analysis. The following annihilation channels are supported. For more info see https://arxiv.org/pdf/1012.4515.pdf
# In[9]:
fluxes = PrimaryFlux(mDM="1 TeV", channel="eL")
print(fluxes.allowed_channels)
# In[10]:
fig, axes = plt.subplots(4, 1, figsize=(6, 16))
mDMs = [0.01, 0.1, 1, 10] * u.TeV
for mDM, ax in zip(mDMs, axes):
fluxes.mDM = mDM
ax.set_title(rf"m$_{{\mathrm{{DM}}}}$ = {mDM}")
ax.set_yscale("log")
ax.set_ylabel("dN/dE")
for channel in ["tau", "mu", "b", "Z"]:
fluxes.channel = channel
fluxes.table_model.plot(
energy_range=[mDM / 100, mDM],
ax=ax,
label=channel,
flux_unit="1/GeV",
)
axes[0].legend()
plt.subplots_adjust(hspace=0.5)
# ## Flux maps
#
# Finally flux maps can be produced like this:
# In[11]:
channel = "Z"
massDM = 10 * u.TeV
diff_flux = DarkMatterAnnihilationSpectralModel(mass=massDM, channel=channel)
int_flux = (
jfact * diff_flux.integral(energy_min=0.1 * u.TeV, energy_max=10 * u.TeV)
).to("cm-2 s-1")
# In[12]:
flux_map = WcsNDMap(geom=geom, data=int_flux.value, unit="cm-2 s-1")
fig, ax, im = flux_map.plot(cmap="viridis", norm=LogNorm(), add_cbar=True)
plt.title(
f"Flux [{int_flux.unit}]\n m$_{{DM}}$={fluxes.mDM.to('TeV')}, channel={fluxes.channel}"
);
# In[ ]:
|
[
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.title",
"numpy.logspace",
"gammapy.astro.darkmatter.DarkMatterAnnihilationSpectralModel",
"gammapy.astro.darkmatter.PrimaryFlux",
"gammapy.astro.darkmatter.profiles.NFWProfile",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.axvline",
"gammapy.maps.WcsNDMap",
"gammapy.astro.darkmatter.profiles.DMProfile.__subclasses__",
"gammapy.astro.darkmatter.JFactory",
"matplotlib.pyplot.subplots",
"regions.CircleSkyRegion",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots_adjust",
"astropy.units.Unit",
"warnings.filterwarnings",
"gammapy.maps.WcsGeom.create",
"astropy.coordinates.SkyCoord"
] |
[((1185, 1218), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1208, 1218), False, 'import warnings\n'), ((1492, 1527), 'gammapy.astro.darkmatter.profiles.DMProfile.__subclasses__', 'profiles.DMProfile.__subclasses__', ([], {}), '()\n', (1525, 1527), False, 'from gammapy.astro.darkmatter import profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel\n'), ((1556, 1591), 'gammapy.astro.darkmatter.profiles.DMProfile.__subclasses__', 'profiles.DMProfile.__subclasses__', ([], {}), '()\n', (1589, 1591), False, 'from gammapy.astro.darkmatter import profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel\n'), ((1745, 1757), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (1755, 1757), True, 'import matplotlib.pyplot as plt\n'), ((1758, 1832), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(8.5)'], {'linestyle': '"""dashed"""', 'color': '"""black"""', 'label': '"""local density"""'}), "(8.5, linestyle='dashed', color='black', label='local density')\n", (1769, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1833, 1845), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1843, 1845), True, 'import matplotlib.pyplot as plt\n'), ((2189, 2210), 'gammapy.astro.darkmatter.profiles.NFWProfile', 'profiles.NFWProfile', ([], {}), '()\n', (2208, 2210), False, 'from gammapy.astro.darkmatter import profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel\n'), ((2402, 2450), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0.0)', '(0.0)'], {'frame': '"""galactic"""', 'unit': '"""deg"""'}), "(0.0, 0.0, frame='galactic', unit='deg')\n", (2410, 2450), False, 'from astropy.coordinates import SkyCoord\n'), ((2458, 2530), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'binsz': '(0.05)', 'skydir': 'position', 'width': '(3.0)', 'frame': '"""galactic"""'}), "(binsz=0.05, skydir=position, width=3.0, frame='galactic')\n", (2472, 2530), False, 'from gammapy.maps import WcsGeom, WcsNDMap\n'), ((2555, 2632), 'gammapy.astro.darkmatter.JFactory', 'JFactory', ([], {'geom': 'geom', 'profile': 'profile', 'distance': 'profiles.DMProfile.DISTANCE_GC'}), '(geom=geom, profile=profile, distance=profiles.DMProfile.DISTANCE_GC)\n', (2563, 2632), False, 'from gammapy.astro.darkmatter import profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel\n'), ((2699, 2753), 'gammapy.maps.WcsNDMap', 'WcsNDMap', ([], {'geom': 'geom', 'data': 'jfact.value', 'unit': 'jfact.unit'}), '(geom=geom, data=jfact.value, unit=jfact.unit)\n', (2707, 2753), False, 'from gammapy.maps import WcsGeom, WcsNDMap\n'), ((2830, 2871), 'matplotlib.pyplot.title', 'plt.title', (['f"""J-Factor [{jfact_map.unit}]"""'], {}), "(f'J-Factor [{jfact_map.unit}]')\n", (2839, 2871), True, 'import matplotlib.pyplot as plt\n'), ((2932, 2982), 'regions.CircleSkyRegion', 'CircleSkyRegion', ([], {'center': 'position', 'radius': '(1 * u.deg)'}), '(center=position, radius=1 * u.deg)\n', (2947, 2982), False, 'from regions import CircleSkyRegion\n'), ((3101, 3113), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3111, 3113), True, 'import matplotlib.pyplot as plt\n'), ((3663, 3701), 'gammapy.astro.darkmatter.PrimaryFlux', 'PrimaryFlux', ([], {'mDM': '"""1 TeV"""', 'channel': '"""eL"""'}), "(mDM='1 TeV', channel='eL')\n", (3674, 3701), False, 'from gammapy.astro.darkmatter import profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel\n'), ((3759, 3794), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(6, 
16)'}), '(4, 1, figsize=(6, 16))\n', (3771, 3794), True, 'import matplotlib.pyplot as plt\n'), ((4246, 4277), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (4265, 4277), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4469), 'gammapy.astro.darkmatter.DarkMatterAnnihilationSpectralModel', 'DarkMatterAnnihilationSpectralModel', ([], {'mass': 'massDM', 'channel': 'channel'}), '(mass=massDM, channel=channel)\n', (4439, 4469), False, 'from gammapy.astro.darkmatter import profiles, JFactory, PrimaryFlux, DarkMatterAnnihilationSpectralModel\n'), ((4603, 4660), 'gammapy.maps.WcsNDMap', 'WcsNDMap', ([], {'geom': 'geom', 'data': 'int_flux.value', 'unit': '"""cm-2 s-1"""'}), "(geom=geom, data=int_flux.value, unit='cm-2 s-1')\n", (4611, 4660), False, 'from gammapy.maps import WcsGeom, WcsNDMap\n'), ((2336, 2355), 'astropy.units.Unit', 'u.Unit', (['"""GeV / cm3"""'], {}), "('GeV / cm3')\n", (2342, 2355), True, 'import astropy.units as u\n'), ((1654, 1677), 'numpy.logspace', 'np.logspace', (['(-3)', '(2)', '(100)'], {}), '(-3, 2, 100)\n', (1665, 1677), True, 'import numpy as np\n'), ((2804, 2813), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (2811, 2813), False, 'from matplotlib.colors import LogNorm\n'), ((4711, 4720), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (4718, 4720), False, 'from matplotlib.colors import LogNorm\n')]
|
# coding=utf-8
import itertools
import numpy as np
import matplotlib.pyplot as plt
from ..helper_functions.helpers import is_this_saved_iteration, convert_global_to_particle_iter, colors, directions
class Plot:
"""
    A plot for visualization. Mainly an abstract base class, meant to be overloaded with interesting kinds of diagnostics.
Parameters
----------
S : Simulation
A `Simulation` object to pull data from.
ax : matplotlib axis
An axis to draw on
"""
def __init__(self, S, ax):
self.S = S
if isinstance(ax, str):
fig, self.ax = plt.subplots()
else:
self.ax = ax
self.plots = []
L = S.grid.L
self.ax.set_xlim(0, S.grid.L)
self.ax.set_xlabel(rf"Position $x$ (L={L:.3e} m)")
self.ax.grid()
xticks = np.linspace(0, L, 7)
self.ax.set_xticks(xticks)
self.ax.xaxis.set_ticklabels([f"{x/L:.1f}L" for x in xticks])
self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0), useMathText=True,
useOffset=False) # TODO axis=both?
# self.ax.yaxis.set_label_position("right")
def animation_init(self):
"""
Zeroes out all data in all lines of the plot. Useful for Animation.
"""
for plot in self.plots:
plot.set_data([], [])
def update(self, i):
"""
Updates the plot with information from a particular iteration of the simulation.
Parameters
----------
i : int
Iteration of the simulation
"""
pass
def return_animated(self):
"""
Returns an iterable of all items that have changed. Useful for Animation
"""
return self.plots
class FrequencyPlot(Plot):
"""
Plots the spatial Fourier transform of field energy versus wave number.
""" # REFACTOR move the fourier analysis to PostProcessedGrid; describe the math here as well as there
def __init__(self, S, ax):
super().__init__(S, ax)
self.plots.append(self.ax.plot([], [], "o-", label="energy per mode")[0])
self.ax.set_xlabel(r"Wavevector mode $k$")
self.ax.set_ylabel(r"Energy $E$")
# max_interesting = S.grid.k_plot[...].max() * 0.3
# self.indices = S.grid.k_plot < max_interesting
self.indices = np.ones_like(S.grid.k_plot, dtype=bool)
interesting_x = S.grid.k_plot[self.indices]
self.ax.set_xticks(interesting_x)
self.ax.xaxis.set_ticklabels(np.arange(len(interesting_x)))
self.ax.set_xlim(interesting_x.min(), interesting_x.max())
self.ax.set_ylim(0, S.grid.longitudinal_energy_per_mode_history[...].max())
def update(self, i):
# import ipdb; ipdb.set_trace()
self.plots[0].set_data(self.S.grid.k_plot[self.indices],
self.S.grid.longitudinal_energy_per_mode_history[i][self.indices])
def phaseplot_values(species):
"""
A convenience function to get a dictionary of values, to allow generalization of the PhasePlot class.
The keys you can pull for phase plots are `x`, `v_x`, `v_y` and `v_z`.
Parameters
----------
species : Species
A species to draw data from
Returns
-------
A dictionary of phase plot values.
"""
return {"x": species.position_history,
"v_x": species.velocity_history[:, :, 0],
"v_y": species.velocity_history[:, :, 1],
"v_z": species.velocity_history[:, :, 2],
}
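# Example lookup (illustrative, not from the original module):
# phaseplot_values(species)["v_x"] returns the full (iteration, particle)
# history of x-velocities for that species.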
class PhasePlot(Plot):
"""
Draws a phase plot.
The keys you can pull for phase plots are `x`, `v_x`, `v_y` and `v_z`.
Parameters
----------
v1, v2 : str
keys for the phase plot.
alpha : float
An opacity value between 0 and 1. Useful for neat phase plots displaying density.
"""
def __init__(self, S, ax, v1, v2, alpha):
super().__init__(S, ax)
self.x = [phaseplot_values(species)[v1] for species in S.list_species]
self.y = [phaseplot_values(species)[v2] for species in S.list_species]
if len(self.y):
maxys = max([np.max(np.abs(y)) for y in self.y])
self.ax.set_ylim(-maxys, maxys)
for i, species in enumerate(S.list_species):
self.plots.append(self.ax.plot([], [], colors[i] + ".", alpha=alpha)[0])
# self.ax.yaxis.set_label_position("right")
self.ax.set_xlabel(rf"${v1}$")
self.ax.set_ylabel(rf"${v2}$")
def update(self, i):
for plot, species, x, y in zip(self.plots, self.S.list_species, self.x, self.y):
if is_this_saved_iteration(i, species.save_every_n_iterations):
index = convert_global_to_particle_iter(i, species.save_every_n_iterations)
                alive = species.N_alive_history[index] + 1
# print(y[index, species.alive_history[index]]) #TODO: get alive history to work here!
plot.set_data(x[index, :alive], # , species.alive_history[index]],
y[index, :alive]) # , species.alive_history[index]])
class SpatialDistributionPlot(Plot):
"""
Draws particle density on the grid.
"""
def __init__(self, S, ax):
super().__init__(S, ax)
        ax.set_ylabel("Particle density $n$")
for species in S.list_species:
self.plots.append(self.ax.plot([], [], "-", label=species.name)[0])
if len(S.list_species):
self.ax.set_ylim(0, 1.2*max([species.density_history[...].max() for species in S.list_species]))
self.ax.legend(loc='best')
def update(self, i):
for species, plot in zip(self.S.list_species, self.plots):
plot.set_data(self.S.grid.x, species.density_history[i])
class SpatialPerturbationDistributionPlot(SpatialDistributionPlot):
def __init__(self, S, ax):
super().__init__(S, ax)
self.ax.set_ylabel(r"$\Delta n = n - n(t=0)$")
self.y = [species.density_history - species.density_history[0] for species in S.list_species]
if len(S.list_species):
self.ax.set_ylim(min([1.2 * y.min() for y in self.y]),max([1.2 * y.max() for y in self.y]))
self.ax.legend(loc='best')
def update(self, i):
for species, plot, y in zip(self.S.list_species, self.plots, self.y):
plot.set_data(self.S.grid.x, y[i])
class ChargeDistributionPlot(Plot):
"""
Draws charge density from the grid.
"""
def __init__(self, S, ax, check_poisson=False):
super().__init__(S, ax)
self.plots.append(self.ax.plot([], [], "-", alpha=0.8, label="charge")[0])
        self.ax.set_ylabel("Charge density $\\rho$")
mincharge = np.min(S.grid.charge_density_history)
maxcharge = np.max(S.grid.charge_density_history)
self.ax.set_ylim(mincharge, maxcharge)
self.check_poisson = check_poisson
if check_poisson:
self.plots.append(self.ax.plot([], [], "-", alpha=0.8, label=r"$\varepsilon_0 \partial E/ \partial x$")[0])
self.ax.legend(loc='lower left')
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.charge_density_history[i, :])
if self.check_poisson:
self.plots[1].set_data(self.S.grid.x, self.S.grid.check_on_charge[i])
class Histogram(Plot):
"""
Draws a histogram of a given value from the phase plot dataset.
The keys you can pull for phase plots are `x`, `v_x`, `v_y` and `v_z`.
Parameters
----------
v1 : str
A key to phase plot values.
n_bins: int
Number of bins to draw.
"""
def __init__(self, S, ax, v1: str, n_bins: int = 50):
super().__init__(S, ax)
self.bin_arrays = []
self.values = [phaseplot_values(species)[v1] for species in S.list_species]
if len(self.values):
maxxs = max([np.max(np.abs(v)) for v in self.values])
self.ax.set_xlim(-maxxs, maxxs)
for i, s, v in zip(range(len(S.list_species)), S.list_species, self.values):
bin_array = np.linspace(v.min(), v.max(), n_bins)
self.bin_arrays.append(bin_array)
self.plots.append(
self.ax.plot(*calculate_histogram_data(v[0], bin_array), colors[i])[0])
self.ax.set_xlabel(rf"${v1}$")
self.ax.set_ylabel(r"Number of particles")
if len(self.bin_arrays):
self.ax.set_xlim(min([bin_array.min() for bin_array in self.bin_arrays]),
max([bin_array.max() for bin_array in self.bin_arrays]))
def update(self, i):
for species, histogram, bin_array, v in zip(self.S.list_species, self.plots, self.bin_arrays, self.values):
index = convert_global_to_particle_iter(i, species.save_every_n_iterations)
            alive = species.N_alive_history[index] + 1
histogram.set_data(*calculate_histogram_data(v[index, :alive], bin_array))
def calculate_histogram_data(arr, bins):
"""
    Calculates histogram data: bin centers and per-bin particle counts.
Parameters
----------
arr : ndarray
Values of a particle property, for example, velocity
bins : ndarray
Bin edges for the histogram.
Returns
-------
bin_center : ndarray
Centers of histogram bars (the x array for plotting)
bin_height : ndarray
Heights of histogram bars (the y array for plotting)
"""
bin_height, bin_edge = np.histogram(arr, bins=bins) # OPTIMIZE
bin_center = (bin_edge[:-1] + bin_edge[1:]) * 0.5
return bin_center, bin_height
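# Illustrative usage of calculate_histogram_data (a sketch, not part of the
# original module): bin a synthetic velocity sample.
def _histogram_demo(n_particles=1000, n_bins=50):
    velocities = np.random.normal(size=n_particles)  # hypothetical data
    bins = np.linspace(velocities.min(), velocities.max(), n_bins)
    centers, heights = calculate_histogram_data(velocities, bins)
    # n_bins edges produce n_bins - 1 bars, and centers align with heights
    assert len(centers) == len(heights) == n_bins - 1
    return centers, heights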
class IterationCounter:
"""
A little widget inserted on an axis, displaying the iteration number and current simulation time.
"""
def __init__(self, S, ax):
self.S = S
self.ax = ax
self.counter = ax.text(0.1, 0.9, 'i=x', horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
def animation_init(self):
self.counter.set_text("Iteration: \nTime: ")
def update(self, i):
self.counter.set_text(f"Iteration: {i}/{self.S.NT}\nTime: {i*self.S.dt:.3g}/{self.S.NT*self.S.dt:.3g}")
def return_animated(self):
return [self.counter]
class FieldPlot(Plot):
"""
Draws electric and magnetic fields from the grid in a given direction
Parameters
----------
j : int
Direction as Cartesian index number. 0: x, 1: y, 2: z
"""
def __init__(self, S, ax, j):
super().__init__(S, ax)
self.j = j
self.plots.append(self.ax.plot([], [], "-", label=f"$E_{directions[j]}$")[0])
self.ax.set_ylabel(r"Fields $E$, $B$")
max_e = np.max(np.abs(S.grid.electric_field_history[:, :, j]))
if j != 0:
self.plots.append(self.ax.plot([], [], "-", label=f"$B_{directions[j]}$")[0])
max_b = np.max(np.abs(S.grid.magnetic_field_history[:, :, j]))
maxfield = max([max_e, max_b])
else:
maxfield = max_e
print(f"For direction {directions[j]}, maxfield is {maxfield:.2e}")
self.ax.set_ylim(-maxfield, maxfield)
self.ax.legend(loc='upper right')
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.electric_field_history[i, :, self.j])
if self.j != 0:
self.plots[1].set_data(self.S.grid.x, self.S.grid.magnetic_field_history[i, :, self.j])
class PoyntingFieldPlot(Plot):
"""
Draws electric and magnetic field energy flux (Poynting flux) from the grid
"""
def __init__(self, S, ax):
super().__init__(S, ax)
        self.plots.append(self.ax.plot([], [], "-", label="Poynting flux")[0])
self.ax.set_ylabel(r"Poynting flux")
max_P = np.max(np.abs(S.grid.poynting_history[...]))
self.ax.set_ylim(-max_P, max_P)
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.poynting_history[i, :])
class CurrentPlot(Plot):
"""
Draws currents from the grid in a given direction.
Parameters
----------
j : int
Direction as Cartesian index number. 0: x, 1: y, 2: z
"""
def __init__(self, S, ax, j):
super().__init__(S, ax)
self.j = j
x = S.grid.x_current if j == 0 else S.grid.x
self.plots.append(self.ax.plot(x, S.grid.current_density_history[0, :, j], "-",
alpha=0.9,
label=fr"$j_{directions[j]}$")[0])
self.ax.set_ylabel(f"Current density $j_{directions[j]}$")
self.ax.tick_params('y')
self.ax.legend(loc='lower left')
current = S.grid.current_density_history[:, :, j]
# mean = current.mean()
# std = 3*current.std()
#
# mincurrent = mean - std
# maxcurrent = mean + std
mincurrent = current.min()
maxcurrent = current.max()
try:
ax.set_ylim(mincurrent, maxcurrent)
except ValueError as E:
print(f"Error on setting current limits in {j}: {E}")
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.current_density_history[i, :, self.j])
class PlotSet:
"""
A single object representing a few different plots on different axes.
Useful for plotting sets of directional values (fields, currents).
Parameters
----------
axes : list
List of axes to use.
list_plots :
List of `Plot`s to update and return.
"""
def __init__(self, axes, list_plots):
self.axes = axes
self.list_plots = list_plots
def update(self, i):
for plot in self.list_plots:
plot.update(i)
def animation_init(self):
for plot in self.list_plots:
plot.animation_init()
def return_animated(self):
return list(itertools.chain.from_iterable(plot.return_animated() for plot in self.list_plots))
class TripleFieldPlot(PlotSet):
"""
Draws electric and magnetic field plots on the grid on a given list of axes.
Parameters
----------
S : Simulation
Simulation to pull data from.
axes : list
List of matplotlib axes.
"""
def __init__(self, S, axes: list):
assert len(axes) <= 3, "Too many axes, we ran out of directions!"
plots = [FieldPlot(S, ax, j) for j, ax in enumerate(axes)]
super().__init__(axes, plots)
class TripleCurrentPlot(PlotSet):
"""
Draws currents on the grid on a given list of axes.
Parameters
----------
S : Simulation
Simulation to pull data from.
axes : list
List of matplotlib axes.
"""
def __init__(self, S, axes: list):
assert len(axes) <= 3, "Too many axes, we ran out of directions!"
plots = [CurrentPlot(S, ax, j) for j, ax in enumerate(axes)]
super().__init__(axes, plots)
|
[
"numpy.ones_like",
"numpy.abs",
"numpy.histogram",
"numpy.max",
"numpy.min",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((9547, 9575), 'numpy.histogram', 'np.histogram', (['arr'], {'bins': 'bins'}), '(arr, bins=bins)\n', (9559, 9575), True, 'import numpy as np\n'), ((839, 859), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(7)'], {}), '(0, L, 7)\n', (850, 859), True, 'import numpy as np\n'), ((2385, 2424), 'numpy.ones_like', 'np.ones_like', (['S.grid.k_plot'], {'dtype': 'bool'}), '(S.grid.k_plot, dtype=bool)\n', (2397, 2424), True, 'import numpy as np\n'), ((6789, 6826), 'numpy.min', 'np.min', (['S.grid.charge_density_history'], {}), '(S.grid.charge_density_history)\n', (6795, 6826), True, 'import numpy as np\n'), ((6847, 6884), 'numpy.max', 'np.max', (['S.grid.charge_density_history'], {}), '(S.grid.charge_density_history)\n', (6853, 6884), True, 'import numpy as np\n'), ((602, 616), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (614, 616), True, 'import matplotlib.pyplot as plt\n'), ((10797, 10843), 'numpy.abs', 'np.abs', (['S.grid.electric_field_history[:, :, j]'], {}), '(S.grid.electric_field_history[:, :, j])\n', (10803, 10843), True, 'import numpy as np\n'), ((11865, 11901), 'numpy.abs', 'np.abs', (['S.grid.poynting_history[...]'], {}), '(S.grid.poynting_history[...])\n', (11871, 11901), True, 'import numpy as np\n'), ((10981, 11027), 'numpy.abs', 'np.abs', (['S.grid.magnetic_field_history[:, :, j]'], {}), '(S.grid.magnetic_field_history[:, :, j])\n', (10987, 11027), True, 'import numpy as np\n'), ((4219, 4228), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (4225, 4228), True, 'import numpy as np\n'), ((7970, 7979), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (7976, 7979), True, 'import numpy as np\n')]
|
from PIL import Image
import numpy as np
import math
def floating_point(number):
    # Ceiling helper: round up to the next integer.
    temp = int(number)
    if number > temp:
        return temp + 1
    else:
        return number
def mat_Multi(angle, x, y, k):
    # One shear step of a three-shear rotation: k == 0 or k == 2 applies the
    # x-shear by tan(angle/2), k == 1 applies the y-shear by sin(angle).
    tangent = math.tan(angle / 2)
    if k == 0 or k == 2:
        X = x - y * tangent
        Y = y
    else:
        X = x
        Y = x * math.sin(angle) + y
    return round(X), round(Y)
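# A sketch (not in the original script) chaining mat_Multi into a full
# three-shear rotation of a single lattice point; rot() below instead
# rotates the whole image by direct inverse mapping.
def rotate_point_by_shears(angle, x, y):
    for k in range(3):
        x, y = mat_Multi(angle, x, y, k)
    return x, y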
def rot(image,angle):
    # Define the most frequently used variables
angle=math.radians(angle)
cosine=math.cos(angle)
sine=math.sin(angle)
image1=np.zeros_like(image)
# Find the centre of the image about which we have to rotate the image
centre_row = round(((image.shape[0]+1)/2)-1)
centre_column= round(((image.shape[1]+1)/2)-1)
for i in range(image.shape[0]):
for j in range(image.shape[1]):
y=image.shape[0]-1-i-centre_row
x=image.shape[1]-1-j-centre_column
X=round(x*cosine+y*sine)
Y=round(-x*sine+y*cosine)
X=centre_column-X
Y=centre_row-Y
if X<image1.shape[1] and Y<image1.shape[0] and X>=0 and Y>=0:
image1[Y,X,:]=image[i,j,:]
for i in range(image1.shape[0]):
prev = [image1[i][0][0], image1[i][0][1], image1[i][0][2], image1[i][0][3]]
for j in range(image1.shape[1]-1):
if (not any(image1[i][j][:])):
if (any(image1[i][j+1][:])):
image1[i][j][:] = prev
else:
prev = image1[i][j][:]
return image1
file_name=input("Enter the name of the file:- ")
im = np.array(Image.open(file_name))
rotation_angle=float(input("Enter the angle :- "))
im_copy=rot(im,rotation_angle)
pil_img=Image.fromarray((im_copy).astype(np.uint8))
pil_img.save("rotated_without_bound.png")
|
[
"numpy.zeros_like",
"math.radians",
"math.tan",
"math.sin",
"PIL.Image.open",
"math.cos"
] |
[((219, 238), 'math.tan', 'math.tan', (['(angle / 2)'], {}), '(angle / 2)\n', (227, 238), False, 'import math\n'), ((448, 467), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (460, 467), False, 'import math\n'), ((479, 494), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (487, 494), False, 'import math\n'), ((504, 519), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (512, 519), False, 'import math\n'), ((531, 551), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (544, 551), True, 'import numpy as np\n'), ((1644, 1665), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (1654, 1665), False, 'from PIL import Image\n'), ((326, 341), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (334, 341), False, 'import math\n')]
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore import Tensor
import mindspore.nn as nn
import numpy as np
import mindspore.context as context
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class Net_Pool(nn.Cell):
def __init__(self):
super(Net_Pool, self).__init__()
self.maxpool_fun = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="VALID")
def construct(self, x):
return self.maxpool_fun(x)
class Net_Pool2(nn.Cell):
def __init__(self):
super(Net_Pool2, self).__init__()
self.maxpool_fun = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")
def construct(self, x):
return self.maxpool_fun(x)
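# Output-size arithmetic for the 6x6 input used below (a note, not from the
# original test): VALID, kernel 2, stride 2 -> floor((6 - 2) / 2) + 1 = 3;
# SAME, kernel 3, stride 2 -> ceil(6 / 2) = 3, so both nets emit 3x3 maps.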
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maxpool2d():
x = Tensor(np.array([[[
[0, 1, 2, 3, -4, -5],
[6, 7, 8, 9, -10, -11],
[12, 13, 14, -15, -16, -17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]
]]]).astype(np.float32))
maxpool2d = Net_Pool()
maxpool2d2 = Net_Pool2()
output2 = maxpool2d2(x)
output = maxpool2d(x)
expect_result = (np.array([[[
[7, 9, -4],
[19, 21, 23],
[31, 33, 35]
]]]))
expect_result2 = (np.array([[[
[14, 14, -4],
[26, 28, 29],
[32, 34, 35]
]]]))
print(output.asnumpy())
assert (output.asnumpy() == expect_result).all()
print(output2.asnumpy())
assert (output2.asnumpy() == expect_result2).all()
|
[
"mindspore.context.set_context",
"numpy.array",
"mindspore.nn.MaxPool2d"
] |
[((793, 858), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""CPU"""'}), "(mode=context.GRAPH_MODE, device_target='CPU')\n", (812, 858), True, 'import mindspore.context as context\n'), ((1820, 1874), 'numpy.array', 'np.array', (['[[[[7, 9, -4], [19, 21, 23], [31, 33, 35]]]]'], {}), '([[[[7, 9, -4], [19, 21, 23], [31, 33, 35]]]])\n', (1828, 1874), True, 'import numpy as np\n'), ((1928, 1984), 'numpy.array', 'np.array', (['[[[[14, 14, -4], [26, 28, 29], [32, 34, 35]]]]'], {}), '([[[[14, 14, -4], [26, 28, 29], [32, 34, 35]]]])\n', (1936, 1984), True, 'import numpy as np\n'), ((977, 1032), 'mindspore.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'pad_mode': '"""VALID"""'}), "(kernel_size=2, stride=2, pad_mode='VALID')\n", (989, 1032), True, 'import mindspore.nn as nn\n'), ((1216, 1270), 'mindspore.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'pad_mode': '"""SAME"""'}), "(kernel_size=3, stride=2, pad_mode='SAME')\n", (1228, 1270), True, 'import mindspore.nn as nn\n'), ((1447, 1622), 'numpy.array', 'np.array', (['[[[[0, 1, 2, 3, -4, -5], [6, 7, 8, 9, -10, -11], [12, 13, 14, -15, -16, -17\n ], [18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31, 32, 33,\n 34, 35]]]]'], {}), '([[[[0, 1, 2, 3, -4, -5], [6, 7, 8, 9, -10, -11], [12, 13, 14, -15,\n -16, -17], [18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31,\n 32, 33, 34, 35]]]])\n', (1455, 1622), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import numpy as np
import pandas as pd
import pickle
from timeit import default_timer as timer
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
import nltk as nltk
import math
import os
import cProfile, pstats, io
#import memory_profiler
import psutil
import gc
# # Enabling eager execution
tf.enable_eager_execution()
process = psutil.Process(os.getpid())
print('Memory initial : ',process.memory_info().rss / (1024*1024), 'MB') # to get memory used by this process in MB
dirname = os.getcwd()
datasetpath = os.path.join(dirname, 'datasets/')
# # Load Google vectors
UNK = '</s>'
outfile = datasetpath +'google_word_corpus.pic'
with open(outfile, 'rb') as pickle_file:
googleCorpus, google_corpus_word_to_int, google_corpus_int_to_word = pickle.load(pickle_file)
googleSet = pd.read_csv(datasetpath+'GoogleNews-vectors-negative10.txt', sep=' ', header=None)
print(googleSet.shape)
print(googleSet.head())
googleWords = googleSet.iloc[:,0:1]
googleVectors = googleSet.iloc[:,1:]
outfile = os.path.join(datasetpath, 'parameters.pic')
with open(outfile, 'rb') as pickle_file:
wVal, bVal, wscoreVal, bscoreVal = pickle.load(pickle_file)
print('Parameter values : ', wVal, bVal, wscoreVal, bscoreVal)
treeDataframe = pd.read_csv(datasetpath+'constituency-parsing-data-all-UNK-less-40-words.csv', sep=' ', header=None )
treeDataframe.columns =['sentence', 'tree']
treeDataframe['tree'] = treeDataframe['tree'].apply(nltk.Tree.fromstring)
def convert_imdb_corpus_into_int(sentence):
words = sentence.split()
words_to_num = [google_corpus_word_to_int[word] for word in words]
return words_to_num
treeDataframe_num = treeDataframe.copy()
treeDataframe_num['sentence'] = treeDataframe_num['sentence'].apply(convert_imdb_corpus_into_int)
#treeDataframe_num.head()
# # Model and the Parameters
STATE_SIZE = 10
embeddings = tfe.Variable(name='embeddings', validate_shape= googleVectors.shape,
initial_value=googleVectors.values,
dtype=tf.float32, trainable=False)
w = tfe.Variable(name='w', validate_shape=(2*googleVectors.shape[1], STATE_SIZE),
initial_value=wVal.numpy(),
dtype=tf.float32)
b = tfe.Variable(name='b', validate_shape=(1, STATE_SIZE),
initial_value=bVal.numpy(),
dtype=tf.float32)
w_score = tfe.Variable(name='w_score', validate_shape=(STATE_SIZE, 1),
initial_value=wscoreVal.numpy(),
dtype=tf.float32)
b_score = tfe.Variable(name='b_score', validate_shape=(1, 1),
initial_value=bscoreVal.numpy(),
dtype=tf.float32)
#print(w)
#print(b)
#print(w_score)
#print(b_score)
def embedding_lookup(input_words):
words = tf.nn.embedding_lookup(embeddings, input_words)
return words
def predict(data):
total_loss_list = []
total_train_accuracy = 0.0
total_train_count = 0.0
predicted_tree_list = []
for j in range(data.shape[0]):
# get the word vectors based on the word ids (word id for each word)
print(j)
words = embedding_lookup(data.iat[j,0])
end = timer()
#print('Time taken to lookup embeddings (seconds): ', end-start)
#words matrix - unstack
words_unstack = tf.unstack(words)
words_len = len(words_unstack)
pred_score_list = []
predicted_tree = [nltk.Tree(UNK,[google_corpus_int_to_word[index]]) for index in data.iat[j,0]]
state_vec_list = []
score_list = []
start_k = 0
stop_k = words_len - 1
#loop until all the words are merged together
while(words_len > 1):
#compute scores for the list of word combinations
# for each word combination compute the score of it
scores = np.zeros(shape=(words_len-1, 1))
for k in range(start_k, stop_k):
words_concat = tf.concat([words_unstack[k], words_unstack[k+1]], axis=0)
#reshape the tensor to be a matrix with 1 row rather than vector
words_concat = tf.reshape(words_concat, shape=(1, words_concat.shape[0]))
# matrix computation and activation
z = tf.matmul(words_concat, w) + b
state_vec = tf.tanh(z)
state_vec_list.append(state_vec)
score = tf.matmul(state_vec, w_score) + b_score
score_list.append(score)
scores[k] = score
end = timer()
#print('Time taken to calculate all subsequent word combinations (seconds): ', end-start)
#compare the scores and pick the maximum one.
max_score_index = np.argmax(scores)
pred_score_list.append(scores[max_score_index])
# remove the words which is used to combine and replace with combined state vector
words_unstack.pop(max_score_index+1)
words_unstack.pop(max_score_index)
# statevector needs to be reshaped as matrix to update
state_vec_vector = tf.reshape(state_vec, shape = [state_vec.shape[1]])
words_unstack.insert(max_score_index, state_vec_vector)
words_len = len(words_unstack)
right_tree = predicted_tree.pop(max_score_index+1)
left_tree = predicted_tree.pop(max_score_index)
predicted_tree.insert(max_score_index, nltk.Tree(UNK, [left_tree, right_tree]))
start_k = max(0, max_score_index - 1)
stop_k = min(max_score_index+2, words_len-1)
#print([max_score_index, start_k, stop_k, words_len])
end = timer()
#print('Time taken to make one decision (seconds): ', end-start)
predicted_tree_list.append(str(predicted_tree[0]))
#print(str(predicted_tree))
print(str(predicted_tree[0]))
#print(str(predicted_tree[0][0]))
return predicted_tree_list
predicted_tree_list = predict(treeDataframe_num.iloc[39000:40000])
print(predicted_tree_list[0])
# In[51]:
with open(datasetpath+'predict-output.txt', 'w') as f:
for predicted_tree in predicted_tree_list:
f.write("%s\n" % predicted_tree)
print('Memory consumed : ',process.memory_info().rss / (1024*1024), 'MB') # to get memory used by this process in MB
predicted_tree_list = None
gc.collect()
|
[
"nltk.Tree",
"os.getpid",
"numpy.argmax",
"os.getcwd",
"pandas.read_csv",
"tensorflow.nn.embedding_lookup",
"timeit.default_timer",
"numpy.zeros",
"tensorflow.contrib.eager.Variable",
"tensorflow.reshape",
"tensorflow.concat",
"gc.collect",
"tensorflow.matmul",
"pickle.load",
"tensorflow.enable_eager_execution",
"tensorflow.tanh",
"os.path.join",
"tensorflow.unstack"
] |
[((462, 489), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (487, 489), True, 'import tensorflow as tf\n'), ((656, 667), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (665, 667), False, 'import os\n'), ((682, 716), 'os.path.join', 'os.path.join', (['dirname', '"""datasets/"""'], {}), "(dirname, 'datasets/')\n", (694, 716), False, 'import os\n'), ((962, 1050), 'pandas.read_csv', 'pd.read_csv', (["(datasetpath + 'GoogleNews-vectors-negative10.txt')"], {'sep': '""" """', 'header': 'None'}), "(datasetpath + 'GoogleNews-vectors-negative10.txt', sep=' ',\n header=None)\n", (973, 1050), True, 'import pandas as pd\n'), ((1177, 1220), 'os.path.join', 'os.path.join', (['datasetpath', '"""parameters.pic"""'], {}), "(datasetpath, 'parameters.pic')\n", (1189, 1220), False, 'import os\n'), ((1412, 1523), 'pandas.read_csv', 'pd.read_csv', (["(datasetpath + 'constituency-parsing-data-all-UNK-less-40-words.csv')"], {'sep': '""" """', 'header': 'None'}), "(datasetpath +\n 'constituency-parsing-data-all-UNK-less-40-words.csv', sep=' ', header=None\n )\n", (1423, 1523), True, 'import pandas as pd\n'), ((2029, 2171), 'tensorflow.contrib.eager.Variable', 'tfe.Variable', ([], {'name': '"""embeddings"""', 'validate_shape': 'googleVectors.shape', 'initial_value': 'googleVectors.values', 'dtype': 'tf.float32', 'trainable': '(False)'}), "(name='embeddings', validate_shape=googleVectors.shape,\n initial_value=googleVectors.values, dtype=tf.float32, trainable=False)\n", (2041, 2171), True, 'import tensorflow.contrib.eager as tfe\n'), ((6584, 6596), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6594, 6596), False, 'import gc\n'), ((515, 526), 'os.getpid', 'os.getpid', ([], {}), '()\n', (524, 526), False, 'import os\n'), ((925, 949), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (936, 949), False, 'import pickle\n'), ((1301, 1325), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (1312, 1325), False, 'import pickle\n'), ((2932, 2979), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'input_words'], {}), '(embeddings, input_words)\n', (2954, 2979), True, 'import tensorflow as tf\n'), ((3328, 3335), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3333, 3335), True, 'from timeit import default_timer as timer\n'), ((3465, 3482), 'tensorflow.unstack', 'tf.unstack', (['words'], {}), '(words)\n', (3475, 3482), True, 'import tensorflow as tf\n'), ((3578, 3628), 'nltk.Tree', 'nltk.Tree', (['UNK', '[google_corpus_int_to_word[index]]'], {}), '(UNK, [google_corpus_int_to_word[index]])\n', (3587, 3628), True, 'import nltk as nltk\n'), ((3994, 4028), 'numpy.zeros', 'np.zeros', ([], {'shape': '(words_len - 1, 1)'}), '(shape=(words_len - 1, 1))\n', (4002, 4028), True, 'import numpy as np\n'), ((4728, 4735), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4733, 4735), True, 'from timeit import default_timer as timer\n'), ((4928, 4945), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (4937, 4945), True, 'import numpy as np\n'), ((5297, 5346), 'tensorflow.reshape', 'tf.reshape', (['state_vec'], {'shape': '[state_vec.shape[1]]'}), '(state_vec, shape=[state_vec.shape[1]])\n', (5307, 5346), True, 'import tensorflow as tf\n'), ((5870, 5877), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5875, 5877), True, 'from timeit import default_timer as timer\n'), ((4116, 4175), 'tensorflow.concat', 'tf.concat', (['[words_unstack[k], words_unstack[k + 1]]'], {'axis': '(0)'}), '([words_unstack[k], words_unstack[k + 
1]], axis=0)\n', (4125, 4175), True, 'import tensorflow as tf\n'), ((4286, 4344), 'tensorflow.reshape', 'tf.reshape', (['words_concat'], {'shape': '(1, words_concat.shape[0])'}), '(words_concat, shape=(1, words_concat.shape[0]))\n', (4296, 4344), True, 'import tensorflow as tf\n'), ((4476, 4486), 'tensorflow.tanh', 'tf.tanh', (['z'], {}), '(z)\n', (4483, 4486), True, 'import tensorflow as tf\n'), ((5635, 5674), 'nltk.Tree', 'nltk.Tree', (['UNK', '[left_tree, right_tree]'], {}), '(UNK, [left_tree, right_tree])\n', (5644, 5674), True, 'import nltk as nltk\n'), ((4417, 4443), 'tensorflow.matmul', 'tf.matmul', (['words_concat', 'w'], {}), '(words_concat, w)\n', (4426, 4443), True, 'import tensorflow as tf\n'), ((4577, 4606), 'tensorflow.matmul', 'tf.matmul', (['state_vec', 'w_score'], {}), '(state_vec, w_score)\n', (4586, 4606), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
import time
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.optim as optim
COLAB = False
CUDA = True
if not COLAB:
from lib import wrappers
from lib import dqn_model
import argparse
from tensorboardX import SummaryWriter
ENV_NAME = "PongNoFrameskip-v4"
MEAN_REWARD_BOUND = 19.5
GAMMA = 0.99
BATCH_SIZE = 32
REPLAY_SIZE = 10 ** 4 * 2
LEARNING_RATE = 1e-4
TARGET_UPDATE_FREQ = 1000
LEARNING_STARTS = 10000
EPSILON_DECAY = 10**5
EPSILON_START = 1.0
EPSILON_FINAL = 0.02
MODEL = "PretrainedModels/PongNoFrameskip-v4-407.dat"
LOAD_MODEL = True
Experience = collections.namedtuple('Experience', field_names=['state', 'action', 'reward', 'done', 'new_state'])
class ExperienceReplay:
def __init__(self, capacity):
self.buffer = collections.deque(maxlen=capacity)
def __len__(self):
return len(self.buffer)
def append(self, experience):
self.buffer.append(experience)
def sample(self, batch_size):
indices = np.random.choice(len(self.buffer), batch_size, replace=False)
states, actions, rewards, dones, next_states = zip(*[self.buffer[idx] for idx in indices])
return np.array(states), np.array(actions), np.array(rewards, dtype=np.float32), \
np.array(dones, dtype=np.uint8), np.array(next_states)
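# A quick sanity check of the replay-buffer API (a sketch with toy shapes,
# not part of the original training script):
def _replay_demo():
    buf = ExperienceReplay(capacity=8)
    for t in range(8):
        frame = np.zeros((4, 84, 84), dtype=np.float32)
        buf.append(Experience(state=frame, action=t % 6, reward=float(t),
                              done=False, new_state=frame))
    states, actions, rewards, dones, next_states = buf.sample(4)
    assert states.shape == (4, 4, 84, 84)
    return actions, rewards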
class Agent:
def __init__(self, env, replay_memory):
self.env = env
self.replay_memory = replay_memory
self._reset()
self.last_action = 0
def _reset(self):
        self.state = self.env.reset()  # use the agent's own env, not the global
self.total_reward = 0.0
def play_step(self, net, epsilon=0.0, device="cpu"):
"""
Select action
Execute action and step environment
Add state/action/reward to experience replay
"""
done_reward = None
if np.random.random() < epsilon:
            action = self.env.action_space.sample()
else:
state_a = np.array([self.state], copy=False)
state_v = torch.tensor(state_a).to(device)
q_vals_v = net(state_v)
_, act_v = torch.max(q_vals_v, dim=1)
action = int(act_v.item())
# do step in the environment
new_state, reward, is_done, _ = self.env.step(action)
self.total_reward += reward
exp = Experience(self.state, action, reward, is_done, new_state)
self.replay_memory.append(exp)
self.state = new_state
if is_done:
done_reward = self.total_reward
self._reset()
return done_reward
def calculate_loss(batch, net, target_net, device="cpu"):
"""
Calculate MSE between actual state action values,
and expected state action values from DQN
"""
states, actions, rewards, dones, next_states = batch
states_v = torch.tensor(states).to(device)
next_states_v = torch.tensor(next_states).to(device)
actions_v = torch.tensor(actions).to(device)
rewards_v = torch.tensor(rewards).to(device)
done = torch.ByteTensor(dones).to(device)
    # Q(s, a) of the actions actually taken, from the online network
    state_action_values = net(states_v).gather(1, actions_v.long().unsqueeze(-1)).squeeze(-1)
    # max_a' Q_target(s', a') from the target network; zero for terminal states
    next_state_values = target_net(next_states_v).max(1)[0]
    next_state_values[done] = 0.0
    # detach so no gradients flow into the target network
    next_state_values = next_state_values.detach()
    # Bellman target: r + gamma * max_a' Q_target(s', a')
    expected_state_action_values = next_state_values * GAMMA + rewards_v
return nn.MSELoss()(state_action_values, expected_state_action_values)
print("ReplayMemory will require {}gb of GPU RAM".format(round(REPLAY_SIZE * 32 * 84 * 84 / 1e+9, 2)))
if __name__ == "__main__":
if COLAB:
"""Default argparse does not work on colab"""
class ColabArgParse():
def __init__(self, cuda, env, reward, model):
self.cuda = cuda
self.env = env
self.reward = reward
self.model = model
args = ColabArgParse(CUDA, ENV_NAME, MEAN_REWARD_BOUND, MODEL)
else:
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=True, action="store_true", help="Enable cuda")
parser.add_argument("--env", default=ENV_NAME,
help="Name of the environment, default=" + ENV_NAME)
parser.add_argument("--reward", type=float, default=MEAN_REWARD_BOUND,
help="Mean reward to stop training, default={}".format(round(MEAN_REWARD_BOUND, 2)))
parser.add_argument("-m", "--model", help="Model file to load")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
# Make Gym environement and DQNs
if COLAB:
env = make_env(args.env)
net = DQN(env.observation_space.shape, env.action_space.n).to(device)
target_net = DQN(env.observation_space.shape, env.action_space.n).to(device)
else:
env = wrappers.make_env(args.env)
net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
target_net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
writer = SummaryWriter(comment="-" + args.env)
print(net)
replay_memory = ExperienceReplay(REPLAY_SIZE)
agent = Agent(env, replay_memory)
epsilon = EPSILON_START
if LOAD_MODEL:
net.load_state_dict(torch.load(args.model, map_location=lambda storage, loc: storage))
target_net.load_state_dict(net.state_dict())
print("Models loaded from disk!")
# Lower exploration rate
EPSILON_START = EPSILON_FINAL
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
total_rewards = []
best_mean_reward = None
frame_idx = 0
timestep_frame = 0
timestep = time.time()
while True:
frame_idx += 1
epsilon = max(EPSILON_FINAL, EPSILON_START - frame_idx / EPSILON_DECAY)
reward = agent.play_step(net, epsilon, device=device)
if reward is not None:
total_rewards.append(reward)
speed = (frame_idx - timestep_frame) / (time.time() - timestep)
timestep_frame = frame_idx
timestep = time.time()
mean_reward = np.mean(total_rewards[-100:])
print("{} frames: done {} games, mean reward {}, eps {}, speed {} f/s".format(
frame_idx, len(total_rewards), round(mean_reward, 3), round(epsilon,2), round(speed, 2)))
if not COLAB:
writer.add_scalar("epsilon", epsilon, frame_idx)
writer.add_scalar("speed", speed, frame_idx)
writer.add_scalar("reward_100", mean_reward, frame_idx)
writer.add_scalar("reward", reward, frame_idx)
if best_mean_reward is None or best_mean_reward < mean_reward:
torch.save(net.state_dict(), args.env + "-" + str(len(total_rewards)) + ".dat")
if COLAB:
gsync.update_file_to_folder(args.env + "-" + str(len(total_rewards)) + ".dat")
if best_mean_reward is not None:
print("New best mean reward {} -> {}, model saved".format(round(best_mean_reward, 3), round(mean_reward, 3)))
best_mean_reward = mean_reward
if mean_reward > args.reward and len(total_rewards) > 10:
print("Game solved in {} frames! Average score of {}".format(frame_idx, mean_reward))
break
if len(replay_memory) < LEARNING_STARTS:
continue
if frame_idx % TARGET_UPDATE_FREQ == 0:
target_net.load_state_dict(net.state_dict())
optimizer.zero_grad()
batch = replay_memory.sample(BATCH_SIZE)
loss_t = calculate_loss(batch, net, target_net, device=device)
loss_t.backward()
optimizer.step()
env.close()
if not COLAB:
writer.close()
|
[
"torch.nn.MSELoss",
"lib.wrappers.make_env",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"torch.ByteTensor",
"torch.load",
"time.time",
"numpy.random.random",
"numpy.array",
"collections.namedtuple",
"torch.max",
"torch.device",
"numpy.mean",
"torch.tensor",
"collections.deque",
"lib.dqn_model.DQN"
] |
[((639, 743), 'collections.namedtuple', 'collections.namedtuple', (['"""Experience"""'], {'field_names': "['state', 'action', 'reward', 'done', 'new_state']"}), "('Experience', field_names=['state', 'action',\n 'reward', 'done', 'new_state'])\n", (661, 743), False, 'import collections\n'), ((4582, 4626), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (4594, 4626), False, 'import torch\n'), ((5750, 5761), 'time.time', 'time.time', ([], {}), '()\n', (5759, 5761), False, 'import time\n'), ((822, 856), 'collections.deque', 'collections.deque', ([], {'maxlen': 'capacity'}), '(maxlen=capacity)\n', (839, 856), False, 'import collections\n'), ((3426, 3438), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3436, 3438), True, 'import torch.nn as nn\n'), ((4014, 4039), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4037, 4039), False, 'import argparse\n'), ((4899, 4926), 'lib.wrappers.make_env', 'wrappers.make_env', (['args.env'], {}), '(args.env)\n', (4916, 4926), False, 'from lib import wrappers\n'), ((5127, 5164), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': "('-' + args.env)"}), "(comment='-' + args.env)\n", (5140, 5164), False, 'from tensorboardX import SummaryWriter\n'), ((1216, 1232), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (1224, 1232), True, 'import numpy as np\n'), ((1234, 1251), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1242, 1251), True, 'import numpy as np\n'), ((1253, 1288), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (1261, 1288), True, 'import numpy as np\n'), ((1307, 1338), 'numpy.array', 'np.array', (['dones'], {'dtype': 'np.uint8'}), '(dones, dtype=np.uint8)\n', (1315, 1338), True, 'import numpy as np\n'), ((1340, 1361), 'numpy.array', 'np.array', (['next_states'], {}), '(next_states)\n', (1348, 1361), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1881, 1883), True, 'import numpy as np\n'), ((1978, 2012), 'numpy.array', 'np.array', (['[self.state]'], {'copy': '(False)'}), '([self.state], copy=False)\n', (1986, 2012), True, 'import numpy as np\n'), ((2127, 2153), 'torch.max', 'torch.max', (['q_vals_v'], {'dim': '(1)'}), '(q_vals_v, dim=1)\n', (2136, 2153), False, 'import torch\n'), ((2868, 2888), 'torch.tensor', 'torch.tensor', (['states'], {}), '(states)\n', (2880, 2888), False, 'import torch\n'), ((2920, 2945), 'torch.tensor', 'torch.tensor', (['next_states'], {}), '(next_states)\n', (2932, 2945), False, 'import torch\n'), ((2973, 2994), 'torch.tensor', 'torch.tensor', (['actions'], {}), '(actions)\n', (2985, 2994), False, 'import torch\n'), ((3022, 3043), 'torch.tensor', 'torch.tensor', (['rewards'], {}), '(rewards)\n', (3034, 3043), False, 'import torch\n'), ((3066, 3089), 'torch.ByteTensor', 'torch.ByteTensor', (['dones'], {}), '(dones)\n', (3082, 3089), False, 'import torch\n'), ((5346, 5411), 'torch.load', 'torch.load', (['args.model'], {'map_location': '(lambda storage, loc: storage)'}), '(args.model, map_location=lambda storage, loc: storage)\n', (5356, 5411), False, 'import torch\n'), ((6155, 6166), 'time.time', 'time.time', ([], {}), '()\n', (6164, 6166), False, 'import time\n'), ((6193, 6222), 'numpy.mean', 'np.mean', (['total_rewards[-100:]'], {}), '(total_rewards[-100:])\n', (6200, 6222), True, 'import numpy as np\n'), ((4941, 5003), 'lib.dqn_model.DQN', 'dqn_model.DQN', 
(['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (4954, 5003), False, 'from lib import dqn_model\n'), ((5036, 5098), 'lib.dqn_model.DQN', 'dqn_model.DQN', (['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (5049, 5098), False, 'from lib import dqn_model\n'), ((2035, 2056), 'torch.tensor', 'torch.tensor', (['state_a'], {}), '(state_a)\n', (2047, 2056), False, 'import torch\n'), ((6069, 6080), 'time.time', 'time.time', ([], {}), '()\n', (6078, 6080), False, 'import time\n')]
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for memory_len."""
from typing import Sequence
from bsuite.experiments.memory_len import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
LEARNING_THRESH = 0.75
def memory_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
"""Preprocess data for memory environments = regret relative to random."""
df = df_in.copy()
df['perfection_regret'] = df.episode - df.total_perfect
# a random agent always has 50% chance on each episode
# independently from memory length and number of bits.
df['base_rate'] = 0.5
df['regret_ratio'] = df.perfection_regret / df.base_rate
return df
def score(df: pd.DataFrame, group_col: str = 'memory_length') -> float:
"""Output a single score for memory_len."""
df = memory_preprocess(df_in=df)
regret_list = [] # Loop to handle partially-finished runs.
for _, sub_df in df.groupby(group_col):
max_eps = np.minimum(sub_df.episode.max(), sweep.NUM_EPISODES)
ave_perfection = (
sub_df.loc[sub_df.episode == max_eps, 'regret_ratio'].mean() / max_eps)
regret_list.append(ave_perfection)
return np.mean(np.array(regret_list) < LEARNING_THRESH)
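# A sketch (not in the original module) exercising score() on a synthetic
# dataframe with the columns it expects; the numbers are hypothetical.
def _demo_score():
  df = pd.DataFrame({
      'episode': [NUM_EPISODES] * 2,
      'total_perfect': [0.9 * NUM_EPISODES, 0.5 * NUM_EPISODES],
      'memory_length': [1, 30],
  })
  return score(df)  # 0.5: only the short-memory run clears LEARNING_THRESH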
def plot_learning(df: pd.DataFrame,
sweep_vars: Sequence[str] = None,
group_col: str = 'memory_length') -> gg.ggplot:
"""Plots the average return through time by memory_length."""
df = memory_preprocess(df_in=df)
p = plotting.plot_regret_group_nosmooth(
df_in=df,
group_col=group_col,
sweep_vars=sweep_vars,
regret_col='regret_ratio',
max_episode=sweep.NUM_EPISODES,
)
return p + gg.ylab('average % of correct episodes compared to random.')
def plot_scale(df: pd.DataFrame,
sweep_vars: Sequence[str] = None,
group_col: str = 'memory_length') -> gg.ggplot:
"""Plots the regret_ratio through time by memory_length."""
df = memory_preprocess(df_in=df)
p = plotting.plot_regret_ave_scaling(
df_in=df,
group_col=group_col,
episode=sweep.NUM_EPISODES,
regret_thresh=LEARNING_THRESH,
sweep_vars=sweep_vars,
regret_col='regret_ratio'
)
return p + gg.ylab('% correct episodes after\n{} episodes compared to random'
.format(sweep.NUM_EPISODES))
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Sequence[str] = None,
colour_var: str = 'memory_length') -> gg.ggplot:
"""Plot the returns through time individually by run."""
df = df_in.copy()
df['average_return'] = df.total_return.diff() / df.episode.diff()
p = plotting.plot_individual_returns(
df_in=df[df.episode > 10],
max_episode=NUM_EPISODES,
return_column='average_return',
colour_var=colour_var,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average episodic return')
|
[
"plotnine.ylab",
"bsuite.utils.plotting.plot_regret_group_nosmooth",
"bsuite.utils.plotting.plot_regret_ave_scaling",
"numpy.array",
"bsuite.utils.plotting.plot_individual_returns"
] |
[((2233, 2390), 'bsuite.utils.plotting.plot_regret_group_nosmooth', 'plotting.plot_regret_group_nosmooth', ([], {'df_in': 'df', 'group_col': 'group_col', 'sweep_vars': 'sweep_vars', 'regret_col': '"""regret_ratio"""', 'max_episode': 'sweep.NUM_EPISODES'}), "(df_in=df, group_col=group_col,\n sweep_vars=sweep_vars, regret_col='regret_ratio', max_episode=sweep.\n NUM_EPISODES)\n", (2268, 2390), False, 'from bsuite.utils import plotting\n'), ((2741, 2923), 'bsuite.utils.plotting.plot_regret_ave_scaling', 'plotting.plot_regret_ave_scaling', ([], {'df_in': 'df', 'group_col': 'group_col', 'episode': 'sweep.NUM_EPISODES', 'regret_thresh': 'LEARNING_THRESH', 'sweep_vars': 'sweep_vars', 'regret_col': '"""regret_ratio"""'}), "(df_in=df, group_col=group_col, episode=\n sweep.NUM_EPISODES, regret_thresh=LEARNING_THRESH, sweep_vars=\n sweep_vars, regret_col='regret_ratio')\n", (2773, 2923), False, 'from bsuite.utils import plotting\n'), ((3388, 3560), 'bsuite.utils.plotting.plot_individual_returns', 'plotting.plot_individual_returns', ([], {'df_in': 'df[df.episode > 10]', 'max_episode': 'NUM_EPISODES', 'return_column': '"""average_return"""', 'colour_var': 'colour_var', 'sweep_vars': 'sweep_vars'}), "(df_in=df[df.episode > 10], max_episode=\n NUM_EPISODES, return_column='average_return', colour_var=colour_var,\n sweep_vars=sweep_vars)\n", (3420, 3560), False, 'from bsuite.utils import plotting\n'), ((2430, 2490), 'plotnine.ylab', 'gg.ylab', (['"""average % of correct episodes compared to random."""'], {}), "('average % of correct episodes compared to random.')\n", (2437, 2490), True, 'import plotnine as gg\n'), ((3600, 3634), 'plotnine.ylab', 'gg.ylab', (['"""average episodic return"""'], {}), "('average episodic return')\n", (3607, 3634), True, 'import plotnine as gg\n'), ((1931, 1952), 'numpy.array', 'np.array', (['regret_list'], {}), '(regret_list)\n', (1939, 1952), True, 'import numpy as np\n')]
|
#%%
from graph_data import graph_data
import numpy as np
from scipy.linalg import block_diag
from typing import Final
from networkx.generators.random_graphs import watts_strogatz_graph, barabasi_albert_graph
from networkx.linalg.graphmatrix import adjacency_matrix
class Gnp:
def __init__(self, n0, p0, n1, p1):
self.n = [n0,n1]
self.p = [p0,p1]
name: Final = "Gnp"
def get_gnp(self, label):
n = self.n[label]
p = self.p[label]
a = np.random.uniform(size = [n, n])
a = (a + a.T)/2
adj = (a < p).astype(int)
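        # (added note) each off-diagonal entry of `a` is the mean of two iid
        # Uniform(0,1) draws and so is triangularly distributed:
        # P(a < p) = 2*p**2 for p <= 0.5, meaning the realised edge density
        # is 2*p**2 rather than p.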
features = np.ones([n,1])
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
sample = []
y = np.random.choice([0,1], size=(count))
X = [self.get_gnp(y_) for y_ in y]
return(X, y)
class Gnp2:
def __init__(self, max_size, p):
self.max_size = max_size
self.p = p
name: Final = "Gnp2"
def get_gnp(self, size):
n = size
p = self.p
a = np.random.uniform(size = [n, n])
a = (a + a.T)/2
adj = (a < p).astype(int)
return(adj)
def get_2gnp(self, size1, size2):
adj1 = self.get_gnp(size1)
adj2 = self.get_gnp(size2)
return(block_diag(adj1, adj2))
def get_graph(self, size, label):
if (size < 2):
raise ValueError("The size of the graph cannot be smaller than 2")
size1 = size2 = 0
        while (size1 == 0 or size2 == 0):  # `or`, not bitwise `|`, which binds tighter than `==`
size1 = np.random.binomial(size, 0.5)
size2 = size - size1
adj = self.get_2gnp(size1, size2)
features = np.ones((size,2))
features[0:size1, 1] = 0
if (label <= 0):
features[:, 1] = np.random.permutation(features[:, 1])
return (graph_data(adj,features,label))
def get_sample(self, count):
sample = []
y = np.random.choice([0,1], size=(count))
sizes = np.random.binomial(size=(count), p = 0.5, n = self.max_size)
X = [self.get_graph(sizes[i], y[i]) for i in range(0, count)]
return(X, y)
class GnpMax:
def __init__(self):
pass
name: Final = "GnpMax"
def get_gnp(self,n, p):
a = np.random.uniform(size = [n, n])
a = (a + a.T)/2
adj = (a < p).astype(int)
features = np.ones([n,1])
label = (np.max(np.sum(adj,1))>= 0.75*n).astype(int)
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
sizes = np.random.binomial(size=(count), p = 0.5, n = 30)
X = [self.get_gnp(sizes[i], 0.5) for i in range(0, count)]
y = [g.label for g in X]
return(X,y)
class BA_vs_Watts_Strogatz:
name: Final = "BA vs Watts Strogatz"
def get_WS(self, n, k, beta):
g = watts_strogatz_graph(n, k, beta)
adj = adjacency_matrix(g).todense()
features = np.ones([n,1])
g = graph_data(adj, features, 1)
return(g)
def get_BA(self, n, m):
g = barabasi_albert_graph(n, m)
adj = adjacency_matrix(g).todense()
features = np.ones([n,1])
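        # (added note) barabasi_albert_graph(n, m=2) yields about 2n edges,
        # i.e. sum(adj) ~ 4n counting both directions; the loop below adds
        # random edges until the total degree matches the WS graphs (k=4), so
        # the two classes differ in structure rather than edge density.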
while(np.sum(adj) < 4 * n):
i = np.random.randint(0, n)
j = np.random.randint(0, n)
if (i == j):
continue
if (adj[i,j]> 0):
continue
adj[i,j] = 1
adj[j,i] = 1
g = graph_data(adj, features, 0)
return(g)
def get_graph(self, size, label):
if (label > 0):
return(self.get_WS(size, 4, 0.1))
else:
return(self.get_BA(size, 2))
def get_sample(self, count):
y = np.random.choice([0,1], size=(count))
sizes = np.random.binomial(size=(count), p = 0.5, n = 30)
X = [self.get_graph(sizes[i], y[i]) for i in range(0, count)]
return(X,y)
class BAmax:
name: Final = "BA max"
def get_BA(self, n, m, label):
g = barabasi_albert_graph(n, m)
adj = adjacency_matrix(g).todense()
features = np.ones([n,2])
if (label > 0):
d = np.array(np.sum(adj, 1)).flatten()
ind = np.argpartition(d, -4)[-4:]
features[ind, 1] = 0
else:
ind = np.random.choice(range(0,n), size=(4), replace = False)
features[ind, 1] = 0
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
y = np.random.choice([0,1], size=(count))
sizes = np.random.binomial(size=(count), p = 0.5, n = 30)
X = [self.get_BA(sizes[i], 2, y[i]) for i in range(0, count)]
return(X,y)
class BAone:
name: Final = "BA one"
def get_BA(self, n, m):
g = barabasi_albert_graph(n, m)
adj = adjacency_matrix(g).todense()
features = np.ones([n,2])
features[:,1] = np.random.random(size=(n))
if (features[0,1] > 0.5):
label = 1
else:
label = 0
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
sizes = np.random.binomial(size=(count), p = 0.5, n = 30)
X = [self.get_BA(sizes[i], 2) for i in range(0, count)]
y = [g.label for g in X]
return(X,y)
class GnpMaxFeature:
def __init__(self):
pass
name: Final = "GnpMaxFeature"
def get_gnp(self,n, p):
a = np.random.uniform(size = [n, n])
a = (a + a.T)/2
adj = (a < p).astype(int)
features = np.ones([n,2])
features[:,1] = np.random.random(size=(n))
d = np.array(np.sum(adj, 1)).flatten()
k = int(n/2)
ind = np.argpartition(d, -k)[-k:]
m = np.mean(features[ind,1])
label = (m>= 0.5).astype(int)
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
sizes = np.random.binomial(size=(count), p = 0.5, n = 50)
X = [self.get_gnp(sizes[i], 0.5) for i in range(0, count)]
y = [g.label for g in X]
return(X,y)
class Gnp1Q:
def __init__(self):
self.name = self.__class__.__name__
def get_gnp(self,n, p):
a = np.random.uniform(size = [n, n])
a = (a + a.T)/2
adj = (a < p).astype(int)
features = np.ones([n,3])
features[:,1] = np.random.choice([-1,1], size=(n))
features[:,2] = np.random.choice([-1,1], size=(n))
n = np.matmul(adj, features[:,0])
d1 = np.matmul(adj, features[:,1])
same_color = np.multiply(d1, features[:,2])
label = np.any(same_color == n).astype(int)
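        # (added note) since features[:,0] is all ones, `n` holds each node's
        # degree; same_color[i] equals the degree exactly when every
        # neighbour's colour (feature 1) matches node i's own colour
        # (feature 2), so the label flags whether such a node exists.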
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
sizes = np.random.binomial(size=(count), p = 0.5, n = 100)
X = [self.get_gnp(sizes[i], 0.3) for i in range(0, count)]
y = [g.label for g in X]
return(X,y)
#%%
class Gnp2Q:
def __init__(self):
self.name = self.__class__.__name__
def get_gnp(self,n, p):
a = np.random.uniform(size = [n, n])
a = (a + a.T)/2
adj = (a < p).astype(int)
features = np.ones([n,3])
features[:,1] = np.random.choice([-1,1], size=(n))
features[:,2] = np.random.choice([-1,1], size=(n))
n = np.matmul(adj, features[:,0])
d1 = np.matmul(adj, features[:,1])
        d2 = np.matmul(adj, features[:,2])  # second colour channel (was features[:,1], a copy-paste slip that made d2 == d1)
same_color = np.multiply(d1, d2)
n_square = np.multiply(n, n)
d1_n = (np.sum(d1 == n) > 3)
d2_n = (np.sum(d2 == n) == 0)
label = (d1_n | d2_n).astype(int)
g = graph_data(adj, features, label)
return(g)
def get_sample(self, count):
X = [self.get_gnp(50, 0.25) for _ in range(0, count)]
y = [g.label for g in X]
return(X,y)
#%%
#g =Gnp2Q()
#_,Y = g.get_sample(1000)
#print(np.sum(Y))
# %%
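#%%
# (added smoke test, not in the original) mirrors the commented check above:
#g = GnpMaxFeature()
#X, Y = g.get_sample(8)
#assert len(X) == len(Y) == 8
# %%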
|
[
"numpy.random.uniform",
"numpy.random.binomial",
"graph_data.graph_data",
"numpy.multiply",
"networkx.generators.random_graphs.barabasi_albert_graph",
"numpy.sum",
"scipy.linalg.block_diag",
"numpy.ones",
"numpy.any",
"networkx.generators.random_graphs.watts_strogatz_graph",
"numpy.argpartition",
"numpy.random.random",
"numpy.mean",
"numpy.random.randint",
"numpy.matmul",
"numpy.random.choice",
"numpy.random.permutation",
"networkx.linalg.graphmatrix.adjacency_matrix"
] |
[((488, 518), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n, n]'}), '(size=[n, n])\n', (505, 518), True, 'import numpy as np\n'), ((598, 613), 'numpy.ones', 'np.ones', (['[n, 1]'], {}), '([n, 1])\n', (605, 613), True, 'import numpy as np\n'), ((625, 657), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (635, 657), False, 'from graph_data import graph_data\n'), ((744, 780), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'count'}), '([0, 1], size=count)\n', (760, 780), True, 'import numpy as np\n'), ((1055, 1085), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n, n]'}), '(size=[n, n])\n', (1072, 1085), True, 'import numpy as np\n'), ((1290, 1312), 'scipy.linalg.block_diag', 'block_diag', (['adj1', 'adj2'], {}), '(adj1, adj2)\n', (1300, 1312), False, 'from scipy.linalg import block_diag\n'), ((1670, 1688), 'numpy.ones', 'np.ones', (['(size, 2)'], {}), '((size, 2))\n', (1677, 1688), True, 'import numpy as np\n'), ((1829, 1861), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (1839, 1861), False, 'from graph_data import graph_data\n'), ((1945, 1981), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'count'}), '([0, 1], size=count)\n', (1961, 1981), True, 'import numpy as np\n'), ((1999, 2053), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': '(0.5)', 'n': 'self.max_size'}), '(size=count, p=0.5, n=self.max_size)\n', (2017, 2053), True, 'import numpy as np\n'), ((2273, 2303), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n, n]'}), '(size=[n, n])\n', (2290, 2303), True, 'import numpy as np\n'), ((2383, 2398), 'numpy.ones', 'np.ones', (['[n, 1]'], {}), '([n, 1])\n', (2390, 2398), True, 'import numpy as np\n'), ((2471, 2503), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (2481, 2503), False, 'from graph_data import graph_data\n'), ((2572, 2615), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': '(0.5)', 'n': '(30)'}), '(size=count, p=0.5, n=30)\n', (2590, 2615), True, 'import numpy as np\n'), ((2860, 2892), 'networkx.generators.random_graphs.watts_strogatz_graph', 'watts_strogatz_graph', (['n', 'k', 'beta'], {}), '(n, k, beta)\n', (2880, 2892), False, 'from networkx.generators.random_graphs import watts_strogatz_graph, barabasi_albert_graph\n'), ((2956, 2971), 'numpy.ones', 'np.ones', (['[n, 1]'], {}), '([n, 1])\n', (2963, 2971), True, 'import numpy as np\n'), ((2983, 3011), 'graph_data.graph_data', 'graph_data', (['adj', 'features', '(1)'], {}), '(adj, features, 1)\n', (2993, 3011), False, 'from graph_data import graph_data\n'), ((3071, 3098), 'networkx.generators.random_graphs.barabasi_albert_graph', 'barabasi_albert_graph', (['n', 'm'], {}), '(n, m)\n', (3092, 3098), False, 'from networkx.generators.random_graphs import watts_strogatz_graph, barabasi_albert_graph\n'), ((3162, 3177), 'numpy.ones', 'np.ones', (['[n, 1]'], {}), '([n, 1])\n', (3169, 3177), True, 'import numpy as np\n'), ((3462, 3490), 'graph_data.graph_data', 'graph_data', (['adj', 'features', '(0)'], {}), '(adj, features, 0)\n', (3472, 3490), False, 'from graph_data import graph_data\n'), ((3720, 3756), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'count'}), '([0, 1], size=count)\n', (3736, 3756), True, 'import numpy as np\n'), ((3774, 3817), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': 
'(0.5)', 'n': '(30)'}), '(size=count, p=0.5, n=30)\n', (3792, 3817), True, 'import numpy as np\n'), ((4004, 4031), 'networkx.generators.random_graphs.barabasi_albert_graph', 'barabasi_albert_graph', (['n', 'm'], {}), '(n, m)\n', (4025, 4031), False, 'from networkx.generators.random_graphs import watts_strogatz_graph, barabasi_albert_graph\n'), ((4095, 4110), 'numpy.ones', 'np.ones', (['[n, 2]'], {}), '([n, 2])\n', (4102, 4110), True, 'import numpy as np\n'), ((4397, 4429), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (4407, 4429), False, 'from graph_data import graph_data\n'), ((4495, 4531), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'count'}), '([0, 1], size=count)\n', (4511, 4531), True, 'import numpy as np\n'), ((4549, 4592), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': '(0.5)', 'n': '(30)'}), '(size=count, p=0.5, n=30)\n', (4567, 4592), True, 'import numpy as np\n'), ((4779, 4806), 'networkx.generators.random_graphs.barabasi_albert_graph', 'barabasi_albert_graph', (['n', 'm'], {}), '(n, m)\n', (4800, 4806), False, 'from networkx.generators.random_graphs import watts_strogatz_graph, barabasi_albert_graph\n'), ((4870, 4885), 'numpy.ones', 'np.ones', (['[n, 2]'], {}), '([n, 2])\n', (4877, 4885), True, 'import numpy as np\n'), ((4909, 4933), 'numpy.random.random', 'np.random.random', ([], {'size': 'n'}), '(size=n)\n', (4925, 4933), True, 'import numpy as np\n'), ((5040, 5072), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (5050, 5072), False, 'from graph_data import graph_data\n'), ((5142, 5185), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': '(0.5)', 'n': '(30)'}), '(size=count, p=0.5, n=30)\n', (5160, 5185), True, 'import numpy as np\n'), ((5444, 5474), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n, n]'}), '(size=[n, n])\n', (5461, 5474), True, 'import numpy as np\n'), ((5554, 5569), 'numpy.ones', 'np.ones', (['[n, 2]'], {}), '([n, 2])\n', (5561, 5569), True, 'import numpy as np\n'), ((5593, 5617), 'numpy.random.random', 'np.random.random', ([], {'size': 'n'}), '(size=n)\n', (5609, 5617), True, 'import numpy as np\n'), ((5742, 5767), 'numpy.mean', 'np.mean', (['features[ind, 1]'], {}), '(features[ind, 1])\n', (5749, 5767), True, 'import numpy as np\n'), ((5818, 5850), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (5828, 5850), False, 'from graph_data import graph_data\n'), ((5919, 5962), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': '(0.5)', 'n': '(50)'}), '(size=count, p=0.5, n=50)\n', (5937, 5962), True, 'import numpy as np\n'), ((6218, 6248), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n, n]'}), '(size=[n, n])\n', (6235, 6248), True, 'import numpy as np\n'), ((6328, 6343), 'numpy.ones', 'np.ones', (['[n, 3]'], {}), '([n, 3])\n', (6335, 6343), True, 'import numpy as np\n'), ((6367, 6400), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': 'n'}), '([-1, 1], size=n)\n', (6383, 6400), True, 'import numpy as np\n'), ((6426, 6459), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': 'n'}), '([-1, 1], size=n)\n', (6442, 6459), True, 'import numpy as np\n'), ((6473, 6503), 'numpy.matmul', 'np.matmul', (['adj', 'features[:, 0]'], {}), '(adj, features[:, 0])\n', (6482, 6503), True, 'import numpy as np\n'), ((6516, 6546), 'numpy.matmul', 'np.matmul', 
(['adj', 'features[:, 1]'], {}), '(adj, features[:, 1])\n', (6525, 6546), True, 'import numpy as np\n'), ((6567, 6598), 'numpy.multiply', 'np.multiply', (['d1', 'features[:, 2]'], {}), '(d1, features[:, 2])\n', (6578, 6598), True, 'import numpy as np\n'), ((6663, 6695), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (6673, 6695), False, 'from graph_data import graph_data\n'), ((6764, 6808), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'count', 'p': '(0.5)', 'n': '(100)'}), '(size=count, p=0.5, n=100)\n', (6782, 6808), True, 'import numpy as np\n'), ((7068, 7098), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[n, n]'}), '(size=[n, n])\n', (7085, 7098), True, 'import numpy as np\n'), ((7178, 7193), 'numpy.ones', 'np.ones', (['[n, 3]'], {}), '([n, 3])\n', (7185, 7193), True, 'import numpy as np\n'), ((7217, 7250), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': 'n'}), '([-1, 1], size=n)\n', (7233, 7250), True, 'import numpy as np\n'), ((7276, 7309), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': 'n'}), '([-1, 1], size=n)\n', (7292, 7309), True, 'import numpy as np\n'), ((7323, 7353), 'numpy.matmul', 'np.matmul', (['adj', 'features[:, 0]'], {}), '(adj, features[:, 0])\n', (7332, 7353), True, 'import numpy as np\n'), ((7366, 7396), 'numpy.matmul', 'np.matmul', (['adj', 'features[:, 1]'], {}), '(adj, features[:, 1])\n', (7375, 7396), True, 'import numpy as np\n'), ((7409, 7439), 'numpy.matmul', 'np.matmul', (['adj', 'features[:, 1]'], {}), '(adj, features[:, 1])\n', (7418, 7439), True, 'import numpy as np\n'), ((7460, 7479), 'numpy.multiply', 'np.multiply', (['d1', 'd2'], {}), '(d1, d2)\n', (7471, 7479), True, 'import numpy as np\n'), ((7499, 7516), 'numpy.multiply', 'np.multiply', (['n', 'n'], {}), '(n, n)\n', (7510, 7516), True, 'import numpy as np\n'), ((7647, 7679), 'graph_data.graph_data', 'graph_data', (['adj', 'features', 'label'], {}), '(adj, features, label)\n', (7657, 7679), False, 'from graph_data import graph_data\n'), ((1546, 1575), 'numpy.random.binomial', 'np.random.binomial', (['size', '(0.5)'], {}), '(size, 0.5)\n', (1564, 1575), True, 'import numpy as np\n'), ((1775, 1812), 'numpy.random.permutation', 'np.random.permutation', (['features[:, 1]'], {}), '(features[:, 1])\n', (1796, 1812), True, 'import numpy as np\n'), ((3191, 3202), 'numpy.sum', 'np.sum', (['adj'], {}), '(adj)\n', (3197, 3202), True, 'import numpy as np\n'), ((3229, 3252), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n'], {}), '(0, n)\n', (3246, 3252), True, 'import numpy as np\n'), ((3269, 3292), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n'], {}), '(0, n)\n', (3286, 3292), True, 'import numpy as np\n'), ((5702, 5724), 'numpy.argpartition', 'np.argpartition', (['d', '(-k)'], {}), '(d, -k)\n', (5717, 5724), True, 'import numpy as np\n'), ((7533, 7548), 'numpy.sum', 'np.sum', (['(d1 == n)'], {}), '(d1 == n)\n', (7539, 7548), True, 'import numpy as np\n'), ((7570, 7585), 'numpy.sum', 'np.sum', (['(d2 == n)'], {}), '(d2 == n)\n', (7576, 7585), True, 'import numpy as np\n'), ((2907, 2926), 'networkx.linalg.graphmatrix.adjacency_matrix', 'adjacency_matrix', (['g'], {}), '(g)\n', (2923, 2926), False, 'from networkx.linalg.graphmatrix import adjacency_matrix\n'), ((3113, 3132), 'networkx.linalg.graphmatrix.adjacency_matrix', 'adjacency_matrix', (['g'], {}), '(g)\n', (3129, 3132), False, 'from networkx.linalg.graphmatrix import adjacency_matrix\n'), ((4046, 4065), 
'networkx.linalg.graphmatrix.adjacency_matrix', 'adjacency_matrix', (['g'], {}), '(g)\n', (4062, 4065), False, 'from networkx.linalg.graphmatrix import adjacency_matrix\n'), ((4203, 4225), 'numpy.argpartition', 'np.argpartition', (['d', '(-4)'], {}), '(d, -4)\n', (4218, 4225), True, 'import numpy as np\n'), ((4821, 4840), 'networkx.linalg.graphmatrix.adjacency_matrix', 'adjacency_matrix', (['g'], {}), '(g)\n', (4837, 4840), False, 'from networkx.linalg.graphmatrix import adjacency_matrix\n'), ((6614, 6637), 'numpy.any', 'np.any', (['(same_color == n)'], {}), '(same_color == n)\n', (6620, 6637), True, 'import numpy as np\n'), ((5641, 5655), 'numpy.sum', 'np.sum', (['adj', '(1)'], {}), '(adj, 1)\n', (5647, 5655), True, 'import numpy as np\n'), ((2422, 2436), 'numpy.sum', 'np.sum', (['adj', '(1)'], {}), '(adj, 1)\n', (2428, 2436), True, 'import numpy as np\n'), ((4159, 4173), 'numpy.sum', 'np.sum', (['adj', '(1)'], {}), '(adj, 1)\n', (4165, 4173), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## Author: <NAME>
import numpy as np
from vispy.scene.visuals import Line
CV = np.arange(0, 2.05, 0.05, dtype=np.float32) * 3.14159
ZCV = np.zeros(CV.size, dtype=np.float32)
C_xy = np.array([np.cos(CV), np.sin(CV), ZCV]).T
C_xz = np.array([np.cos(CV), ZCV, np.sin(CV)]).T
C_yz = np.array([ZCV, np.cos(CV), np.sin(CV)]).T
sphere_pt = np.concatenate([C_xy, C_xz, C_yz])
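# (added note) sphere_pt stacks three unit great circles (in the xy, xz and yz
# planes), giving a cheap wireframe-sphere point set that a single Line visual
# can draw.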
class Balloon:
"""Balloon Class. It uses vispy to visualization."""
def __init__(self, position, velocity, boundaries = None, color = (1.0, 1.0, 1.0, 1.0)):
self.pos = position
self.vel = velocity
self.color = color
self.rad = 0.5
self.bound = None
self.sizexyz = [None] * 3
if boundaries is not None:
self.set_bound(boundaries)
self.visual = None
def set_bound(self, boundaries):
"""Updates the boundaries."""
self.bound = boundaries
self.sizexyz = np.abs(boundaries[:,1] - boundaries[:,0])
def step(self, time_step):
"""Does nothing."""
pass
def init_visual(self, view):
"""Initialize the object visual."""
self.visual = Line(pos = sphere_pt * self.rad + self.pos, color=self.color)
view.add(self.visual)
def update_visual(self):
"""Updates the object visual."""
self.visual.set_data(pos = sphere_pt * self.rad + self.pos)
def shake(self):
"""Changes to a random color."""
self.color = np.random.rand(4) / 2 + 0.5
self.visual.set_data(color=self.color)
if __name__ == '__main__':
    print(Balloon.__doc__)
exit()
|
[
"numpy.abs",
"numpy.zeros",
"numpy.sin",
"numpy.arange",
"vispy.scene.visuals.Line",
"numpy.cos",
"numpy.random.rand",
"numpy.concatenate"
] |
[((182, 217), 'numpy.zeros', 'np.zeros', (['CV.size'], {'dtype': 'np.float32'}), '(CV.size, dtype=np.float32)\n', (190, 217), True, 'import numpy as np\n'), ((377, 411), 'numpy.concatenate', 'np.concatenate', (['[C_xy, C_xz, C_yz]'], {}), '([C_xy, C_xz, C_yz])\n', (391, 411), True, 'import numpy as np\n'), ((123, 165), 'numpy.arange', 'np.arange', (['(0)', '(2.05)', '(0.05)'], {'dtype': 'np.float32'}), '(0, 2.05, 0.05, dtype=np.float32)\n', (132, 165), True, 'import numpy as np\n'), ((976, 1019), 'numpy.abs', 'np.abs', (['(boundaries[:, 1] - boundaries[:, 0])'], {}), '(boundaries[:, 1] - boundaries[:, 0])\n', (982, 1019), True, 'import numpy as np\n'), ((1191, 1250), 'vispy.scene.visuals.Line', 'Line', ([], {'pos': '(sphere_pt * self.rad + self.pos)', 'color': 'self.color'}), '(pos=sphere_pt * self.rad + self.pos, color=self.color)\n', (1195, 1250), False, 'from vispy.scene.visuals import Line\n'), ((235, 245), 'numpy.cos', 'np.cos', (['CV'], {}), '(CV)\n', (241, 245), True, 'import numpy as np\n'), ((247, 257), 'numpy.sin', 'np.sin', (['CV'], {}), '(CV)\n', (253, 257), True, 'import numpy as np\n'), ((284, 294), 'numpy.cos', 'np.cos', (['CV'], {}), '(CV)\n', (290, 294), True, 'import numpy as np\n'), ((301, 311), 'numpy.sin', 'np.sin', (['CV'], {}), '(CV)\n', (307, 311), True, 'import numpy as np\n'), ((338, 348), 'numpy.cos', 'np.cos', (['CV'], {}), '(CV)\n', (344, 348), True, 'import numpy as np\n'), ((350, 360), 'numpy.sin', 'np.sin', (['CV'], {}), '(CV)\n', (356, 360), True, 'import numpy as np\n'), ((1506, 1523), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (1520, 1523), True, 'import numpy as np\n')]
|
'''
The detection code is partially derived and modified from object_detection_tutorial.ipynb.
The original authors should be credited:
"Speed/accuracy trade-offs for modern convolutional object detectors."
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, CVPR 2017
'''
from flask import Flask, request, render_template, redirect
import cv2
import numpy as np
import tensorflow as tf
from utils import label_map_util
from utils import visualization_utils as vis_util
app = Flask(__name__, template_folder='')
from datetime import timedelta
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1) # avoid caching, which prevents showing the detection/splash result
import os
import sys
import random
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR) # To find local version of the library
# Directory to save logs and trained model
CKPT_DIR = os.path.join(ROOT_DIR, "research/object_detection/data/faster_RCNN_banana_and_pear/frozen_inference_graph.pb")
LABEL_DIR = os.path.join(ROOT_DIR, "research/object_detection/data/faster_RCNN_banana_and_pear/fruit_labelmap.pbtxt")
IMAGE_DIR = os.path.join(ROOT_DIR, "research/object_detection/static/images/")
UPLOAD_FOLDER = os.path.join(ROOT_DIR, "research/object_detection/upload_images")
ALLOWED_EXTENSIONS = set(['jpg'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# avoid caching, which prevents showing the detection/splash result
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
class TOD(object):
def __init__(self):
self.PATH_TO_CKPT = CKPT_DIR
self.PATH_TO_LABELS = LABEL_DIR
self.NUM_CLASSES = 2
self.detection_graph = self._load_model()
self.category_index = self._load_label_map()
# load the pre-trained model via the frozen inference graph
def _load_model(self):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
# load the label map so that we know what object has been detected
def _load_label_map(self):
label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=self.NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def detect(self, image):
with self.detection_graph.as_default():
with tf.Session(graph=self.detection_graph) as sess:
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=8)
print("___________________detection complete___________________")
# cv2.namedWindow("detection", cv2.WINDOW_NORMAL)
cv2.imwrite(os.path.join(IMAGE_DIR , 'detection_result.jpg'), image)
cv2.waitKey(0)
################################################################
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def run_detection():
user_file_names = next(os.walk(UPLOAD_FOLDER))[2]
names_chosen = random.choice(user_file_names)
image = cv2.imread(os.path.join(UPLOAD_FOLDER, names_chosen))
print('\n-----------------', len([image]), '---------------\n')
    detector = TOD()
    detector.detect(image)
@app.route('/')
def home():
if request.method == 'GET':
return render_template('index.html')
return render_template('index.html')
@app.route('/UploadDetect', methods=['GET', 'POST'])
def upload_file_detect():
if request.method == 'GET':
return render_template('upload_detect.html')
if request.method == 'POST':
f = request.files['file']
print(request.files)
if f and allowed_file(f.filename):
f.save(os.path.join(app.config['UPLOAD_FOLDER'], 'uploaded_image.jpg'))
return redirect('/detect')
else:
print('file type is not correct')
return render_template('upload_detect.html')
@app.route('/detect')
def detect():
run_detection()
return render_template('result_detect.html')
'''
Main function to run Flask server
'''
if __name__ == '__main__':
app.run()
|
[
"os.walk",
"os.path.join",
"sys.path.append",
"os.path.abspath",
"flask.redirect",
"utils.label_map_util.convert_label_map_to_categories",
"datetime.timedelta",
"flask.render_template",
"tensorflow.GraphDef",
"cv2.waitKey",
"utils.label_map_util.load_labelmap",
"tensorflow.Session",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"numpy.squeeze",
"tensorflow.import_graph_def",
"utils.label_map_util.create_category_index",
"flask.Flask",
"random.choice",
"numpy.expand_dims"
] |
[((520, 555), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '""""""'}), "(__name__, template_folder='')\n", (525, 555), False, 'from flask import Flask, request, render_template, redirect\n'), ((629, 649), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (638, 649), False, 'from datetime import timedelta\n'), ((799, 824), 'os.path.abspath', 'os.path.abspath', (['"""../../"""'], {}), "('../../')\n", (814, 824), False, 'import os\n'), ((825, 850), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (840, 850), False, 'import sys\n'), ((946, 1065), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""research/object_detection/data/faster_RCNN_banana_and_pear/frozen_inference_graph.pb"""'], {}), "(ROOT_DIR,\n 'research/object_detection/data/faster_RCNN_banana_and_pear/frozen_inference_graph.pb'\n )\n", (958, 1065), False, 'import os\n'), ((1069, 1183), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""research/object_detection/data/faster_RCNN_banana_and_pear/fruit_labelmap.pbtxt"""'], {}), "(ROOT_DIR,\n 'research/object_detection/data/faster_RCNN_banana_and_pear/fruit_labelmap.pbtxt'\n )\n", (1081, 1183), False, 'import os\n'), ((1188, 1254), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""research/object_detection/static/images/"""'], {}), "(ROOT_DIR, 'research/object_detection/static/images/')\n", (1200, 1254), False, 'import os\n'), ((1272, 1337), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""research/object_detection/upload_images"""'], {}), "(ROOT_DIR, 'research/object_detection/upload_images')\n", (1284, 1337), False, 'import os\n'), ((1525, 1545), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (1534, 1545), False, 'from datetime import timedelta\n'), ((4749, 4779), 'random.choice', 'random.choice', (['user_file_names'], {}), '(user_file_names)\n', (4762, 4779), False, 'import random\n'), ((5081, 5110), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (5096, 5110), False, 'from flask import Flask, request, render_template, redirect\n'), ((5724, 5761), 'flask.render_template', 'render_template', (['"""result_detect.html"""'], {}), "('result_detect.html')\n", (5739, 5761), False, 'from flask import Flask, request, render_template, redirect\n'), ((1916, 1926), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1924, 1926), True, 'import tensorflow as tf\n'), ((2398, 2447), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['self.PATH_TO_LABELS'], {}), '(self.PATH_TO_LABELS)\n', (2426, 2447), False, 'from utils import label_map_util\n'), ((2469, 2588), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'self.NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n self.NUM_CLASSES, use_display_name=True)\n', (2515, 2588), False, 'from utils import label_map_util\n'), ((2745, 2793), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (2781, 2793), False, 'from utils import label_map_util\n'), ((4453, 4467), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4464, 4467), False, 'import cv2\n'), ((4803, 4844), 'os.path.join', 'os.path.join', (['UPLOAD_FOLDER', 'names_chosen'], {}), '(UPLOAD_FOLDER, names_chosen)\n', (4815, 4844), False, 'import os\n'), ((5039, 5068), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), 
"('index.html')\n", (5054, 5068), False, 'from flask import Flask, request, render_template, redirect\n'), ((5238, 5275), 'flask.render_template', 'render_template', (['"""upload_detect.html"""'], {}), "('upload_detect.html')\n", (5253, 5275), False, 'from flask import Flask, request, render_template, redirect\n'), ((1997, 2010), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2008, 2010), True, 'import tensorflow as tf\n'), ((4388, 4435), 'os.path.join', 'os.path.join', (['IMAGE_DIR', '"""detection_result.jpg"""'], {}), "(IMAGE_DIR, 'detection_result.jpg')\n", (4400, 4435), False, 'import os\n'), ((4703, 4725), 'os.walk', 'os.walk', (['UPLOAD_FOLDER'], {}), '(UPLOAD_FOLDER)\n', (4710, 4725), False, 'import os\n'), ((5519, 5538), 'flask.redirect', 'redirect', (['"""/detect"""'], {}), "('/detect')\n", (5527, 5538), False, 'from flask import Flask, request, render_template, redirect\n'), ((5618, 5655), 'flask.render_template', 'render_template', (['"""upload_detect.html"""'], {}), "('upload_detect.html')\n", (5633, 5655), False, 'from flask import Flask, request, render_template, redirect\n'), ((2028, 2067), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.PATH_TO_CKPT', '"""rb"""'], {}), "(self.PATH_TO_CKPT, 'rb')\n", (2042, 2067), True, 'import tensorflow as tf\n'), ((2201, 2243), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2220, 2243), True, 'import tensorflow as tf\n'), ((2919, 2957), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (2929, 2957), True, 'import tensorflow as tf\n'), ((3104, 3133), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3118, 3133), True, 'import numpy as np\n'), ((5435, 5498), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", '"""uploaded_image.jpg"""'], {}), "(app.config['UPLOAD_FOLDER'], 'uploaded_image.jpg')\n", (5447, 5498), False, 'import os\n'), ((3987, 4004), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3997, 4004), True, 'import numpy as np\n'), ((4084, 4102), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (4094, 4102), True, 'import numpy as np\n'), ((4026, 4045), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (4036, 4045), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import timeit
import datetime
import os
import time
import math
from line import get_points
#for SERVER SIDE
import json
import requests
#convert img to JSON object
import base64
import pickle
#API endpoint
#api = 'https://tdispeeddetection.free.beeceptor.com/success'
api = 'https://tdinightday.free.beeceptor.com/success'
speed_limit = int(input('Enter The Speed Limit: '))
distance =int(input('Enter distance between 2 lines in Meters(for better results use 10 Meters): '))
global start_time,start_time1,later,later1,starttime,endtime
def show_angle(speed_limit):
if speed_limit !=0:
show_direction = cv2.imread("PromptAngleinfo.JPG")
cv2.imshow("Angle Help",show_direction)
        cv2.waitKey(50)  # give the help window time to render
Angle = int(input("Enter apporximate Angle with road :"))
return Angle
#Prompts user with demo image for choosing right angle.
Angle = show_angle(speed_limit) #get Angle input
# Send detection images to the server for remote processing
def send(img):
retval, buffer = cv2.imencode(".jpg", img)
img = base64.b64encode(buffer).decode('utf-8')
data = json.dumps({"image1": img, "id" : "2345AB"})
response = requests.post(api, data=data, timeout=5, headers = {'Content-type': 'application/json', 'Accept': 'text/plain'})
try:
data = response.json()
print(data)
except requests.exceptions.RequestException:
print(response.text)
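# (added sketch, not in the original) the endpoint above is a Beeceptor mock;
# a real receiving server could recover the image from the JSON payload with:
#   payload = json.loads(data)
#   img_bytes = base64.b64decode(payload["image1"])
#   img = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)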
# Initialize the video & get FPS
cap = cv2.VideoCapture('night1.mp4')
fps = cap.get(cv2.CAP_PROP_FPS)
# Collects ROI cropped images from lanes
lane_1_1 = []
lane_1_2 = []
#collect mask
road_cropped = "regions.p"
with open(road_cropped,'rb')as f:
mask_list =pickle.load(f)
print(mask_list[0])
print(mask_list[1])
#getting mask
mask1 = cv2.imread('m1.jpeg')
mask1 = cv2.cvtColor(mask1, cv2.COLOR_BGR2GRAY)
ret1, thresh_MASK_1 = cv2.threshold(mask1, 127, 255, cv2.THRESH_BINARY_INV)
mask2 = cv2.imread('m2.jpeg')
mask2 = cv2.cvtColor(mask2, cv2.COLOR_BGR2GRAY)
ret2, thresh_MASK_2 = cv2.threshold(mask2, 127, 255, cv2.THRESH_BINARY_INV)
# Create the background subtraction object
method = 1
if method == 0:
bgSubtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
elif method == 1:
bgSubtractor = cv2.createBackgroundSubtractorMOG2()
else:
bgSubtractor = cv2.bgsegm.createBackgroundSubtractorGMG()
# Create the kernel that will be used to remove the noise in the foreground mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel_di = np.ones((5, 1), np.uint8)
# define variables
cnt = 0
cnt1 = 0
flag = True
flag1 = True
cy = 0
cy1 = 0
#distance = 0.003
#distance = 3
#Prompt user to draw 2 lines
_, img = cap.read()
line1, line2 = get_points(img)
#for line 1
l1_x1,l1_x2,l1_y1,l1_y2 = line1[0][0],line1[1][0],line1[0][1],line1[1][1]
#for line2
l2_x1,l2_x2,l2_y1,l2_y2 = line2[0][0],line2[1][0],line2[0][1],line2[1][1]
#last check point for reference of centroid tracking
'''Find the distance between the first 2 lines.'''
starttrack = l1_y1
midtrack = l2_y1
lasttrack = int((midtrack-starttrack))
if lasttrack < 100 :
lasttrack = (int(midtrack-starttrack)*3)+l2_y1
else:
lasttrack = (int(midtrack-starttrack)*2)+l2_y1
print("start",starttrack)
print("last",lasttrack)
print("mid",midtrack)
## Function to Auto Calculate the detection range
'''Takes the user inputs (speed_limit, and the distance in meters marked physically
on the ground) together with the video FPS and the two line positions from line.py,
and auto-calibrates a last reference line on the frame so that at least 2 images of
each vehicle are captured. This works for any speed range: if the marked distance is
short (say 2 meters) and the target speed is high (say 120 km/h), the code computes
a new, farther reference line, provided it does not fall outside the frame height.'''
def max_images(speed_limit,fps,distance,midtrack,starttrack): #midtrack is last line Y pt and starttrack is first line Y pt. First line from TOP.
time2coverdistance = (distance/(speed_limit*0.277))
max_img = (time2coverdistance*fps)
if max_img <2.0:
#cal distance which will ensure we get atleast 2 images of vehicle d = s*t
max_dstnc = (speed_limit*0.277)*1/fps*2
delta = (max_dstnc-distance)
pxl_mtr = ((midtrack-starttrack)/distance)
pt3 = delta*pxl_mtr
pt3_pxl = round(midtrack+pt3)
print("max_dstnc",max_dstnc)
print("distance",distance)
print("delta",delta)
print("pxl_mtr",pxl_mtr)
print("pt3",pt3)
print("pt3_pxl",pt3_pxl)
else:
pt3_pxl = midtrack+100
print("pt3",pt3_pxl)
return pt3_pxl
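# (added worked example; numbers are illustrative, not from the source)
# With speed_limit=60 km/h (~16.6 m/s), distance=10 m and fps=30:
#   time2coverdistance = 10 / (60*0.277) ~ 0.60 s  ->  max_img ~ 18 frames,
# so the default pt3_pxl = midtrack + 100 is kept. Only when
# time2coverdistance*fps < 2 is the third line pushed farther down the frame.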
pt3_pxl = max_images(speed_limit,fps,distance,midtrack,starttrack)
locationX =[]
locationY=[]
area_s=[]
# Defining time variables. WARNING: do not change these.
start_time= datetime.datetime.now()
start_time1= datetime.datetime.now()
later= datetime.datetime.now()
later1= datetime.datetime.now()
starttime = datetime.datetime.now()
endtime= datetime.datetime.now()
# Play until the user decides to stop
while True:
flag2 = True
start = timeit.default_timer()
ret, frame = cap.read()
if ret:
score = np.average(np.linalg.norm(frame, axis=2)) / np.sqrt(3)
else:
continue
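    # (added note) `score` is the mean per-pixel channel norm divided by
    # sqrt(3), i.e. roughly the average brightness on a 0-255 scale; frames
    # scoring above 60 are treated as daytime, the rest as nighttime.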
if score > 60: # DAY TIME
print('Day frame?!')
time.sleep(100000)
framespersecond= int(cap.get(cv2.CAP_PROP_FPS))
print(framespersecond)
#frame = cv2.detailEnhance(frame, sigma_s=10, sigma_r=0.15)
#frame =cv2.edgePreservingFilter(frame, flags=1, sigma_s=64, sigma_r=0.2) #suitable for high speed GPU -Nighttime. reduces FPS drastically
frame_og = frame
        lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)  # split LAB so CLAHE runs on the lightness channel, as the names suggest
        clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(1, 1)) #for improving the brightness and illumination
        frame = clahe.apply(l)
cv2.line(frame_og, (l1_x1, l1_y1), (l1_x2, l1_y2), (0, 255, 0), 1)
cv2.line(frame_og, (l2_x1, l2_y1), (l2_x2, l2_y2), (0, 0, 255), 1)
cv2.line(frame_og, (l1_x1, int((lasttrack))), (l1_x2, int((lasttrack))),(0, 0, 0), 1)
cv2.line(frame_og,(l1_x1,pt3_pxl),(l1_x2,pt3_pxl),(200,0,127),3)
if ret == True:
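            # (added note) morphology chain: OPEN removes speckle noise, ERODE
            # shrinks small blobs, CLOSE fills holes, and DILATE with the tall
            # (5, 1) kernel reconnects vertically split vehicle blobs before
            # the binary threshold.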
foregroundMask = bgSubtractor.apply(frame)
foregroundMask = cv2.morphologyEx(foregroundMask, cv2.MORPH_OPEN, kernel)
foregroundMask = cv2.erode(foregroundMask, kernel, iterations=3)
foregroundMask = cv2.morphologyEx(foregroundMask, cv2.MORPH_CLOSE, kernel,iterations=6)
foregroundMask = cv2.dilate(foregroundMask, kernel_di, iterations=7)
foregroundMask = cv2.medianBlur(foregroundMask,5)
thresh = cv2.threshold(foregroundMask, 25, 255, cv2.THRESH_BINARY)[1]
thresh1 = np.bitwise_and(thresh, thresh_MASK_1)
thresh2 = np.bitwise_and(thresh, thresh_MASK_2)
contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
hierarchy = hierarchy[0]
except:
hierarchy = []
for contour, hier in zip(contours, hierarchy):
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
(x, y, w, h) = cv2.boundingRect(cnt)
area_=(w*h)
area_s.append(area_)
cx = int((w / 2) + x)
cy = int((h / 2) + y)
if w > 10 and h > 10:
cv2.rectangle(frame_og, (x - 10, y - 10), (x + w, y + h), (0, 255, 0), 2)
cv2.circle(frame_og, (cx, cy), 10, (0, 0, 255), -1)
distA =None
if cy > starttrack and w > 10 and h > 10:
if flag is True and cy <midtrack:
print("cy",cy)
start_time = datetime.datetime.now()
flag = False
if flag is False and cy > midtrack and cy < pt3_pxl:
later = datetime.datetime.now()
seconds = (later - start_time).total_seconds()
frame_crossed1 = seconds*framespersecond
speed_insta = (distance/frame_crossed1)*framespersecond*3.6
                    cos_angle = math.cos(math.radians(Angle))  # keep Angle in degrees for later frames
                    speed = speed_insta*cos_angle
print("SPEED",speed)
print("frame_crossed1",frame_crossed1)
print("Time taken",seconds)
if seconds <= 0.2:
print("diff 0")
else:
#print("seconds : " + str(seconds))
if flag is False:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame_og, str(int(speed)), (x, y), font, 2, (255, 255, 255), 4, cv2.LINE_AA)
cv2.putText(frame, str(int(speed)), (x, y), font, 2, (255, 255, 255), 4, cv2.LINE_AA)
# if not os.path.exists(path):
# os.makedirs(path)
if int(speed) > speed_limit and cy <= lasttrack and w > 70 and h > 100:
roi = frame[y-50:y + h, x:x + w]
cv2.imshow("Lane_1", roi)
lane_1_1.append(roi)
# write_name = 'corners_found' + str(cnt1) + '.jpg'
# cv2.imwrite(write_name, roi)
# cv2.imwrite(os.path.join(path, 'carimage_l2_' + str(cnt1)) + '.jpg', roi)
cnt += 1
# flag = True
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, str(int(speed)), (x, y), font, 2, (255, 255, 255), 8, cv2.LINE_AA)
flag = True
contours1, hierarchy1= cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
hierarchy1 = hierarchy1[0]
except:
hierarchy1 = []
for contour1, hier1 in zip(contours1, hierarchy1):
areas1 = [cv2.contourArea(c) for c in contours1]
max_index1 = np.argmax(areas1)
cnt1 = contours1[max_index1]
(x1, y1, w1, h1) = cv2.boundingRect(cnt1)
cx1 = int((w1 / 2) + x1)
cy1 = int((h1 / 2) + y1)
if w1 > 10 and h1 > 10:
cv2.rectangle(frame_og, (x1 - 10, y1 - 10), (x1 + w1, y1 + h1), (255, 255, 0), 2)
cv2.circle(frame_og, (cx1, cy1), 5, (0, 255, 0), -1)
if cy1 > starttrack and w1 > 10 and h1 > 10:
if flag1 is True and cy1 < midtrack:
start_time1 = datetime.datetime.now()
flag1 = False
if flag1 is False and cy1> midtrack and cy1 < pt3_pxl:
later1 = datetime.datetime.now()
seconds1 = (later1 - start_time1).total_seconds()
frame_crossed2 = seconds1*framespersecond
speed1 = (distance/frame_crossed2)*framespersecond*3.6
                    cos_angle = math.cos(math.radians(Angle))  # keep Angle in degrees for later frames
                    speed1 = speed1*cos_angle #COSINE CORRECTION
print("SPEED1",speed1)
if seconds1 <= 0.2:
print("diff1 0")
else:
#print("seconds1 : " + str(seconds1))
if flag1 is False:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame_og, str(int(speed1)), (x1, y1), font, 2, (255, 255, 255), 8, cv2.LINE_AA)
cv2.putText(frame, str(int(speed1)), (x1, y1), font, 2, (255, 255, 255), 8, cv2.LINE_AA)
# if not os.path.exists(path):
# os.makedirs(path)
if int(speed1) > speed_limit and cy1 <= pt3_pxl and w1 > 70 and h1 > 100:
roi = frame[y1-50:y1 + h1, x1:x1 + w1]
cv2.imshow("Lane_2", roi)
lane_1_2.append(roi)
#cv2.imwrite(os.path.join('Offenders/', 'carimage_l2_' + str(cnt1)) + '.jpg', roi)
cnt1 += 1
flag1 = True
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame_og, str(int(speed1)), (x1, y1), font, 2, (255, 255, 255), 8, cv2.LINE_AA)
flag1 = True
#cv2.imshow('background subtraction', foregroundMask)
#cv2.imshow('Sub',thresh)
#cv2.imshow('Sub', thresh1)
#cv2.imshow('Sub', frame)
cv2.imshow('Robust', frame_og)
stop = timeit.default_timer()
time = stop-start
print('One_frame = ',time)
# k = cv2.waitKey(1) & 0xff
# if k == ord('q'):
# break
# else:
# break
else: #NIGHT TIME
print('Night frame')
flag2 = True
list_speed=[]
framespersecond= int(cap.get(cv2.CAP_PROP_FPS))
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (41, 41), 0)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
h,w,_=(frame.shape)
#to find speed
cx2,cy2 = maxLoc
print("cx",cx2)
print("cy2",cy2)
if cy2 > starttrack:
if flag2 is True and cy2 < midtrack:
starttime= datetime.datetime.now()
flag2 = False
if cy2> midtrack and cy2< lasttrack:
endtime = datetime.datetime.now()
timedelta = (endtime - starttime).total_seconds()
frame_crossed3 = timedelta*framespersecond
speed1 = (distance/frame_crossed3)*framespersecond*3.6
                cos_angle = math.cos(math.radians(Angle))  # keep Angle in degrees for later frames
                speed_night = speed1*cos_angle
list_speed.append(speed_night)
#cal avg speed
avg_speed = sum(list_speed)/len(list_speed)
if cy2> lasttrack:
print("frame_night",frame_crossed3) #COSINE CORRECTION
print("SPEED_NIGHT",avg_speed)
print("timedelta",timedelta)
speed = (distance/timedelta)
print("speed_night_without_adjustments",speed)
if int(avg_speed) > speed_limit and cy2 > lasttrack:
#roi = frame[y-50:y + h, x:x + w]
roi = frame
cv2.imshow("Lane_1", roi)
lane_1_1.append(roi)
send(roi)
cv2.circle(frame, maxLoc, 10, (255, 0, 255), -1)
cv2.line(frame, (l1_x1, l1_y1), (l1_x2, l1_y2), (0, 255, 0), 1)
cv2.line(frame, (l2_x1, l2_y1), (l2_x2, l2_y2), (0, 0, 255), 1)
cv2.line(frame, (l1_x1, int((lasttrack))), (l1_x2, int((lasttrack))),(0, 0, 0), 1)
cv2.imshow("Robust", frame)
k = cv2.waitKey(1) & 0xff
if k == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.GaussianBlur",
"cv2.medianBlur",
"numpy.argmax",
"numpy.ones",
"json.dumps",
"cv2.bgsegm.createBackgroundSubtractorGMG",
"pickle.load",
"numpy.linalg.norm",
"cv2.imencode",
"cv2.rectangle",
"requests.post",
"cv2.minMaxLoc",
"cv2.imshow",
"cv2.erode",
"cv2.line",
"cv2.contourArea",
"cv2.dilate",
"cv2.cvtColor",
"math.radians",
"cv2.split",
"math.cos",
"cv2.destroyAllWindows",
"datetime.datetime.now",
"cv2.boundingRect",
"cv2.circle",
"cv2.bgsegm.createBackgroundSubtractorMOG",
"cv2.waitKey",
"cv2.morphologyEx",
"time.sleep",
"line.get_points",
"cv2.createCLAHE",
"cv2.createBackgroundSubtractorMOG2",
"cv2.getStructuringElement",
"cv2.threshold",
"timeit.default_timer",
"cv2.VideoCapture",
"cv2.imread",
"base64.b64encode",
"numpy.bitwise_and",
"cv2.findContours",
"numpy.sqrt"
] |
[((1650, 1680), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""night1.mp4"""'], {}), "('night1.mp4')\n", (1666, 1680), False, 'import cv2\n'), ((1964, 1985), 'cv2.imread', 'cv2.imread', (['"""m1.jpeg"""'], {}), "('m1.jpeg')\n", (1974, 1985), False, 'import cv2\n'), ((1994, 2033), 'cv2.cvtColor', 'cv2.cvtColor', (['mask1', 'cv2.COLOR_BGR2GRAY'], {}), '(mask1, cv2.COLOR_BGR2GRAY)\n', (2006, 2033), False, 'import cv2\n'), ((2056, 2109), 'cv2.threshold', 'cv2.threshold', (['mask1', '(127)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(mask1, 127, 255, cv2.THRESH_BINARY_INV)\n', (2069, 2109), False, 'import cv2\n'), ((2118, 2139), 'cv2.imread', 'cv2.imread', (['"""m2.jpeg"""'], {}), "('m2.jpeg')\n", (2128, 2139), False, 'import cv2\n'), ((2148, 2187), 'cv2.cvtColor', 'cv2.cvtColor', (['mask2', 'cv2.COLOR_BGR2GRAY'], {}), '(mask2, cv2.COLOR_BGR2GRAY)\n', (2160, 2187), False, 'import cv2\n'), ((2210, 2263), 'cv2.threshold', 'cv2.threshold', (['mask2', '(127)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(mask2, 127, 255, cv2.THRESH_BINARY_INV)\n', (2223, 2263), False, 'import cv2\n'), ((2633, 2685), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv2.MORPH_ELLIPSE, (3, 3))\n', (2658, 2685), False, 'import cv2\n'), ((2698, 2723), 'numpy.ones', 'np.ones', (['(5, 1)', 'np.uint8'], {}), '((5, 1), np.uint8)\n', (2705, 2723), True, 'import numpy as np\n'), ((2902, 2917), 'line.get_points', 'get_points', (['img'], {}), '(img)\n', (2912, 2917), False, 'from line import get_points\n'), ((4934, 4957), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4955, 4957), False, 'import datetime\n'), ((4971, 4994), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4992, 4994), False, 'import datetime\n'), ((5002, 5025), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5023, 5025), False, 'import datetime\n'), ((5034, 5057), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5055, 5057), False, 'import datetime\n'), ((5070, 5093), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5091, 5093), False, 'import datetime\n'), ((5103, 5126), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5124, 5126), False, 'import datetime\n'), ((15585, 15608), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15606, 15608), False, 'import cv2\n'), ((1192, 1217), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg', img)\n", (1204, 1217), False, 'import cv2\n'), ((1280, 1323), 'json.dumps', 'json.dumps', (["{'image1': img, 'id': '2345AB'}"], {}), "({'image1': img, 'id': '2345AB'})\n", (1290, 1323), False, 'import json\n'), ((1340, 1454), 'requests.post', 'requests.post', (['api'], {'data': 'data', 'timeout': '(5)', 'headers': "{'Content-type': 'application/json', 'Accept': 'text/plain'}"}), "(api, data=data, timeout=5, headers={'Content-type':\n 'application/json', 'Accept': 'text/plain'})\n", (1353, 1454), False, 'import requests\n'), ((1878, 1892), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1889, 1892), False, 'import pickle\n'), ((2357, 2399), 'cv2.bgsegm.createBackgroundSubtractorMOG', 'cv2.bgsegm.createBackgroundSubtractorMOG', ([], {}), '()\n', (2397, 2399), False, 'import cv2\n'), ((5207, 5229), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5227, 5229), False, 'import timeit\n'), ((678, 711), 'cv2.imread', 'cv2.imread', (['"""PromptAngleinfo.JPG"""'], {}), "('PromptAngleinfo.JPG')\n", (688, 711), False, 
'import cv2\n'), ((720, 760), 'cv2.imshow', 'cv2.imshow', (['"""Angle Help"""', 'show_direction'], {}), "('Angle Help', show_direction)\n", (730, 760), False, 'import cv2\n'), ((802, 817), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (813, 817), False, 'import cv2\n'), ((2437, 2473), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (2471, 2473), False, 'import cv2\n'), ((2499, 2541), 'cv2.bgsegm.createBackgroundSubtractorGMG', 'cv2.bgsegm.createBackgroundSubtractorGMG', ([], {}), '()\n', (2539, 2541), False, 'import cv2\n'), ((5435, 5453), 'time.sleep', 'time.sleep', (['(100000)'], {}), '(100000)\n', (5445, 5453), False, 'import time\n'), ((5800, 5816), 'cv2.split', 'cv2.split', (['frame'], {}), '(frame)\n', (5809, 5816), False, 'import cv2\n'), ((5833, 5882), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2)', 'tileGridSize': '(1, 1)'}), '(clipLimit=2, tileGridSize=(1, 1))\n', (5848, 5882), False, 'import cv2\n'), ((5969, 6035), 'cv2.line', 'cv2.line', (['frame_og', '(l1_x1, l1_y1)', '(l1_x2, l1_y2)', '(0, 255, 0)', '(1)'], {}), '(frame_og, (l1_x1, l1_y1), (l1_x2, l1_y2), (0, 255, 0), 1)\n', (5977, 6035), False, 'import cv2\n'), ((6044, 6110), 'cv2.line', 'cv2.line', (['frame_og', '(l2_x1, l2_y1)', '(l2_x2, l2_y2)', '(0, 0, 255)', '(1)'], {}), '(frame_og, (l2_x1, l2_y1), (l2_x2, l2_y2), (0, 0, 255), 1)\n', (6052, 6110), False, 'import cv2\n'), ((6213, 6285), 'cv2.line', 'cv2.line', (['frame_og', '(l1_x1, pt3_pxl)', '(l1_x2, pt3_pxl)', '(200, 0, 127)', '(3)'], {}), '(frame_og, (l1_x1, pt3_pxl), (l1_x2, pt3_pxl), (200, 0, 127), 3)\n', (6221, 6285), False, 'import cv2\n'), ((13580, 13619), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (13592, 13619), False, 'import cv2\n'), ((13636, 13671), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(41, 41)', '(0)'], {}), '(gray, (41, 41), 0)\n', (13652, 13671), False, 'import cv2\n'), ((13716, 13735), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['gray'], {}), '(gray)\n', (13729, 13735), False, 'import cv2\n'), ((15177, 15225), 'cv2.circle', 'cv2.circle', (['frame', 'maxLoc', '(10)', '(255, 0, 255)', '(-1)'], {}), '(frame, maxLoc, 10, (255, 0, 255), -1)\n', (15187, 15225), False, 'import cv2\n'), ((15234, 15297), 'cv2.line', 'cv2.line', (['frame', '(l1_x1, l1_y1)', '(l1_x2, l1_y2)', '(0, 255, 0)', '(1)'], {}), '(frame, (l1_x1, l1_y1), (l1_x2, l1_y2), (0, 255, 0), 1)\n', (15242, 15297), False, 'import cv2\n'), ((15306, 15369), 'cv2.line', 'cv2.line', (['frame', '(l2_x1, l2_y1)', '(l2_x2, l2_y2)', '(0, 0, 255)', '(1)'], {}), '(frame, (l2_x1, l2_y1), (l2_x2, l2_y2), (0, 0, 255), 1)\n', (15314, 15369), False, 'import cv2\n'), ((15469, 15496), 'cv2.imshow', 'cv2.imshow', (['"""Robust"""', 'frame'], {}), "('Robust', frame)\n", (15479, 15496), False, 'import cv2\n'), ((15512, 15526), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (15523, 15526), False, 'import cv2\n'), ((772, 786), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (783, 786), False, 'import cv2\n'), ((1228, 1252), 'base64.b64encode', 'base64.b64encode', (['buffer'], {}), '(buffer)\n', (1244, 1252), False, 'import base64\n'), ((5330, 5340), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5337, 5340), True, 'import numpy as np\n'), ((6387, 6443), 'cv2.morphologyEx', 'cv2.morphologyEx', (['foregroundMask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(foregroundMask, cv2.MORPH_OPEN, kernel)\n', (6403, 6443), False, 'import cv2\n'), ((6473, 6520), 'cv2.erode', 'cv2.erode', 
(['foregroundMask', 'kernel'], {'iterations': '(3)'}), '(foregroundMask, kernel, iterations=3)\n', (6482, 6520), False, 'import cv2\n'), ((6550, 6621), 'cv2.morphologyEx', 'cv2.morphologyEx', (['foregroundMask', 'cv2.MORPH_CLOSE', 'kernel'], {'iterations': '(6)'}), '(foregroundMask, cv2.MORPH_CLOSE, kernel, iterations=6)\n', (6566, 6621), False, 'import cv2\n'), ((6650, 6701), 'cv2.dilate', 'cv2.dilate', (['foregroundMask', 'kernel_di'], {'iterations': '(7)'}), '(foregroundMask, kernel_di, iterations=7)\n', (6660, 6701), False, 'import cv2\n'), ((6731, 6764), 'cv2.medianBlur', 'cv2.medianBlur', (['foregroundMask', '(5)'], {}), '(foregroundMask, 5)\n', (6745, 6764), False, 'import cv2\n'), ((6868, 6905), 'numpy.bitwise_and', 'np.bitwise_and', (['thresh', 'thresh_MASK_1'], {}), '(thresh, thresh_MASK_1)\n', (6882, 6905), True, 'import numpy as np\n'), ((6928, 6965), 'numpy.bitwise_and', 'np.bitwise_and', (['thresh', 'thresh_MASK_2'], {}), '(thresh, thresh_MASK_2)\n', (6942, 6965), True, 'import numpy as np\n'), ((7000, 7065), 'cv2.findContours', 'cv2.findContours', (['thresh1', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (7016, 7065), False, 'import cv2\n'), ((10113, 10178), 'cv2.findContours', 'cv2.findContours', (['thresh2', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (10129, 10178), False, 'import cv2\n'), ((13110, 13140), 'cv2.imshow', 'cv2.imshow', (['"""Robust"""', 'frame_og'], {}), "('Robust', frame_og)\n", (13120, 13140), False, 'import cv2\n'), ((13160, 13182), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (13180, 13182), False, 'import timeit\n'), ((5297, 5326), 'numpy.linalg.norm', 'np.linalg.norm', (['frame'], {'axis': '(2)'}), '(frame, axis=2)\n', (5311, 5326), True, 'import numpy as np\n'), ((6785, 6842), 'cv2.threshold', 'cv2.threshold', (['foregroundMask', '(25)', '(255)', 'cv2.THRESH_BINARY'], {}), '(foregroundMask, 25, 255, cv2.THRESH_BINARY)\n', (6798, 6842), False, 'import cv2\n'), ((7327, 7343), 'numpy.argmax', 'np.argmax', (['areas'], {}), '(areas)\n', (7336, 7343), True, 'import numpy as np\n'), ((7418, 7439), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (7434, 7439), False, 'import cv2\n'), ((10450, 10467), 'numpy.argmax', 'np.argmax', (['areas1'], {}), '(areas1)\n', (10459, 10467), True, 'import numpy as np\n'), ((10548, 10570), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt1'], {}), '(cnt1)\n', (10564, 10570), False, 'import cv2\n'), ((13966, 13989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13987, 13989), False, 'import datetime\n'), ((14095, 14118), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14116, 14118), False, 'import datetime\n'), ((14339, 14358), 'math.radians', 'math.radians', (['Angle'], {}), '(Angle)\n', (14351, 14358), False, 'import math\n'), ((14383, 14398), 'math.cos', 'math.cos', (['Angle'], {}), '(Angle)\n', (14391, 14398), False, 'import math\n'), ((7261, 7279), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (7276, 7279), False, 'import cv2\n'), ((7640, 7713), 'cv2.rectangle', 'cv2.rectangle', (['frame_og', '(x - 10, y - 10)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame_og, (x - 10, y - 10), (x + w, y + h), (0, 255, 0), 2)\n', (7653, 7713), False, 'import cv2\n'), ((7736, 7787), 'cv2.circle', 'cv2.circle', (['frame_og', '(cx, cy)', '(10)', '(0, 0, 255)', '(-1)'], {}), '(frame_og, (cx, cy), 10, (0, 0, 255), -1)\n', 
(7746, 7787), False, 'import cv2\n'), ((7985, 8008), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8006, 8008), False, 'import datetime\n'), ((8139, 8162), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8160, 8162), False, 'import datetime\n'), ((8399, 8418), 'math.radians', 'math.radians', (['Angle'], {}), '(Angle)\n', (8411, 8418), False, 'import math\n'), ((8447, 8462), 'math.cos', 'math.cos', (['Angle'], {}), '(Angle)\n', (8455, 8462), False, 'import math\n'), ((10382, 10400), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (10397, 10400), False, 'import cv2\n'), ((10713, 10799), 'cv2.rectangle', 'cv2.rectangle', (['frame_og', '(x1 - 10, y1 - 10)', '(x1 + w1, y1 + h1)', '(255, 255, 0)', '(2)'], {}), '(frame_og, (x1 - 10, y1 - 10), (x1 + w1, y1 + h1), (255, 255, \n 0), 2)\n', (10726, 10799), False, 'import cv2\n'), ((10815, 10867), 'cv2.circle', 'cv2.circle', (['frame_og', '(cx1, cy1)', '(5)', '(0, 255, 0)', '(-1)'], {}), '(frame_og, (cx1, cy1), 5, (0, 255, 0), -1)\n', (10825, 10867), False, 'import cv2\n'), ((11013, 11036), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11034, 11036), False, 'import datetime\n'), ((11171, 11194), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11192, 11194), False, 'import datetime\n'), ((11430, 11449), 'math.radians', 'math.radians', (['Angle'], {}), '(Angle)\n', (11442, 11449), False, 'import math\n'), ((11478, 11493), 'math.cos', 'math.cos', (['Angle'], {}), '(Angle)\n', (11486, 11493), False, 'import math\n'), ((15072, 15097), 'cv2.imshow', 'cv2.imshow', (['"""Lane_1"""', 'roi'], {}), "('Lane_1', roi)\n", (15082, 15097), False, 'import cv2\n'), ((9466, 9491), 'cv2.imshow', 'cv2.imshow', (['"""Lane_1"""', 'roi'], {}), "('Lane_1', roi)\n", (9476, 9491), False, 'import cv2\n'), ((12438, 12463), 'cv2.imshow', 'cv2.imshow', (['"""Lane_2"""', 'roi'], {}), "('Lane_2', roi)\n", (12448, 12463), False, 'import cv2\n')]
|
from __future__ import division
import numpy as np
from .chemistry import *
#==============================================================================#
class Species():
"""An abstract class representing a molecular species,
composed of some monomer types, atoms, and bonded functionality.
All concrete species derive from this parent class.
A species implements methods for adding atoms and bonded units
to a Topology graph.
Parameters
----------
id : integer
Unique ID for this species
monomers : array-like of MonomerType
All MonomerType objects present in species
natoms : integer
Number of atoms in a given unit of this species
initializer : function
A function of the form `func(box)`
that returns an initial position for placing the species
"""
def __init__(self, id, monomers, natoms, **kwargs):
self.id = id
self.natoms = natoms
self.monomers = monomers
self.initializer_ = kwargs.get("initializer", None)
    def generate(self, nmol, mid0, topology, box):
        """
        Add a given number of molecules of the species to a system topology.
        Concrete species subclasses override this method.
        """
        pass
def initial_position(self, box):
if self.initializer_ is not None:
return self.initializer_(box)
else:
return box.random_position()
def charge(self):
"""
Return the total charge for a molecule of this species.
"""
return 0.0
def volume(self):
"""
Return the total volume occupied by atoms in a molecule of this species.
"""
return 1.0
class Point(Species):
"""
Species representing a single coarse-grained point-like object.
"""
def __init__(self, id, mon, **kwargs):
super().__init__(id, [mon], 1, **kwargs)
def charge(self):
return self.monomers[0].charge
def volume(self):
return self.monomers[0].size**3
def generate(self, nmol, mid0, topology, box):
mon = self.monomers[0] # Only one monomer type for a point
for mid in range(1, nmol+1):
# Atom ID is set when adding to the topology
ai = Atom(mon, mid = mid0 + mid, sid = self.id)
ai.set_position(self.initial_position(box))
topology.add_atom(ai)
class Multiblock(Species):
"""
    Species representing a linear multiblock copolymer composed of blocks of distinct monomer types.
"""
def __init__(self, id, block_mons, block_lens, **kwargs):
if len(block_mons) != len(block_lens):
raise ValueError("Number of monomers and blocks not equal.")
mons = list(np.unique(block_mons))
natoms = np.sum(block_lens)
super().__init__(id, mons, natoms, **kwargs)
self.block_mons = block_mons
self.block_lens = np.array(block_lens)
self.block_ids = np.array([mon.id for mon in block_mons])
self.block_ends = np.cumsum(block_lens)
self.block_starts = np.append([0], self.block_ends[:-1])
self.nblocks = len(block_mons)
self.bond_scale = kwargs.get("bond_scale", 1.25)
self.bond_type = kwargs.get("bond_type", 1)
self.wrap = kwargs.get("wrap", True)
def charge(self):
charge = 0.0
for blk in range(self.nblocks):
mon = self.blk2mon(blk)
charge += self.block_lens[blk] * mon.charge
return charge
def volume(self):
vol = 0.0
for blk in range(self.nblocks):
mon = self.blk2mon(blk)
vol += self.block_lens[blk] * mon.size**3
return vol
def idx2mon(self, idx):
        assert 0 <= idx < self.natoms, "Monomer index out of range for chain."
for blk in range(self.nblocks):
if idx < self.block_ends[blk]:
return self.block_mons[blk]
def blk2mon(self, blk):
        assert 0 <= blk < self.nblocks, "Block index out of range for chain."
return self.block_mons[blk]
def generate(self, nmol, mid0, topology, box):
mon0 = self.blk2mon(0)
for mid in range(1, nmol+1):
a0 = Atom(mon0, mid = mid0 + mid, sid = self.id)
a0.set_position(self.initial_position(box))
topology.add_atom(a0)
aprev = a0
for ni in range(1, self.natoms):
# Loop over and add all the connected atoms
mon = self.idx2mon(ni)
rbond = self.bond_scale*mon.size
ai = Atom(mon, mid = mid0 + mid, sid = self.id)
ai.set_image(aprev.img)
# Add a random Gaussian displacement from previous
delta = np.random.randn(3)
delta *= rbond / np.linalg.norm(delta)
ai.set_position(aprev.pos + delta)
if self.wrap:
box.wrap(ai.pos, ai.img)
topology.add_atom_bonded_to(aprev.id, ai)
aprev = ai
# Rebuild the topology angle and dihedral lists
topology.rebuild()
class Homopolymer(Multiblock):
"""
Species representing a linear homopolymer containing a chain of identical beads.
"""
def __init__(self, id, mon, N, **kwargs):
super().__init__(id, [mon], [N], **kwargs)
class Diblock(Multiblock):
"""
Species representing a diblock copolymer containing two blocks with different monomer types.
"""
def __init__(self, id, amon, bmon, na, nb, **kwargs):
super().__init__(id, [amon, bmon], [na, nb], **kwargs)
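# A minimal usage sketch (added for illustration; `StubMonomer` is a
# hypothetical stand-in for the MonomerType defined in .chemistry, exposing
# only the id/charge/size attributes read by the classes above):
#
#   from collections import namedtuple
#   StubMonomer = namedtuple("StubMonomer", ["id", "charge", "size"])
#   mon_a = StubMonomer(id=0, charge=-1.0, size=1.0)
#   mon_b = StubMonomer(id=1, charge=0.0, size=1.2)
#   chain = Multiblock(0, [mon_a, mon_b], [10, 5])
#   chain.natoms          # 15 atoms in total
#   chain.charge()        # 10*(-1.0) + 5*0.0 = -10.0
#   chain.idx2mon(12).id  # atom 12 falls in the second block -> 1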
|
[
"numpy.sum",
"numpy.random.randn",
"numpy.cumsum",
"numpy.append",
"numpy.array",
"numpy.linalg.norm",
"numpy.unique"
] |
[((2738, 2756), 'numpy.sum', 'np.sum', (['block_lens'], {}), '(block_lens)\n', (2744, 2756), True, 'import numpy as np\n'), ((2874, 2894), 'numpy.array', 'np.array', (['block_lens'], {}), '(block_lens)\n', (2882, 2894), True, 'import numpy as np\n'), ((2920, 2960), 'numpy.array', 'np.array', (['[mon.id for mon in block_mons]'], {}), '([mon.id for mon in block_mons])\n', (2928, 2960), True, 'import numpy as np\n'), ((2987, 3008), 'numpy.cumsum', 'np.cumsum', (['block_lens'], {}), '(block_lens)\n', (2996, 3008), True, 'import numpy as np\n'), ((3037, 3073), 'numpy.append', 'np.append', (['[0]', 'self.block_ends[:-1]'], {}), '([0], self.block_ends[:-1])\n', (3046, 3073), True, 'import numpy as np\n'), ((2698, 2719), 'numpy.unique', 'np.unique', (['block_mons'], {}), '(block_mons)\n', (2707, 2719), True, 'import numpy as np\n'), ((4748, 4766), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (4763, 4766), True, 'import numpy as np\n'), ((4800, 4821), 'numpy.linalg.norm', 'np.linalg.norm', (['delta'], {}), '(delta)\n', (4814, 4821), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Compute an optimal storage control policy
for a PV plant
supposing a perfect knowledge of the future inputs.
<NAME> — November 2013
"""
from __future__ import division, print_function, unicode_literals
import sys
from datetime import datetime
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Tweak how images are plotted with imshow
mpl.rcParams['image.interpolation'] = 'none' # no interpolation
mpl.rcParams['image.origin'] = 'lower' # origin at lower left corner
mpl.rcParams['image.aspect'] = 'auto'
try:
from stodynprog import SysDescription, DPSolver
except ImportError:
sys.path.append('../..')
from stodynprog import SysDescription, DPSolver
# Read the PV production data:
t,P_prod_data = np.loadtxt('pv_prod.csv', skiprows=1, delimiter=',').T
## Storage dynamics description
dt = t[1]-t[0]
# Storage rated energy and power:
E_rated = 2 # [MWh]
P_rated = 1 # [MW]
print('Storage ratings: {:.2f} MW / {:.2f} MWh'.format(P_rated, E_rated))
# storage loss factor
a = 0.0
print(' storage loss factor: {:.1%}'.format(a))
T_horiz = len(P_prod_data)
def dyn_sto(k, E_sto, P_sto):
'''state transition of the "deterministic storage" system
State variables:
* E_sto
Control:
* P_sto
'''
# Stored energy:
E_sto_n = E_sto + (P_sto - a*abs(P_sto))*dt
return (E_sto_n, )
def admissible_controls(k, E_sto):
    '''set of admissible controls U(x_k) of an energy storage
    The control is the stored power P_sto.
    Constraints of the energy storage are:
1) Energy stock boundaries : 0 ≤ E(k + 1) ≤ E_rated
2) Power limitation : -P_rated ≤ P_sto ≤ P_rated
'''
# 1) Constraints on P_sto:
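    #    Derivation (from dyn_sto above): for P_sto < 0,
    #      E(k+1) = E_sto + P_sto*(1+a)*dt, so E(k+1) >= 0
    #      requires P_sto >= -E_sto/((1+a)*dt).
    #    For P_sto > 0,
    #      E(k+1) = E_sto + P_sto*(1-a)*dt, so E(k+1) <= E_rated
    #      requires P_sto <= (E_rated-E_sto)/((1-a)*dt).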
P_neg = np.max(( -E_sto/(1+a)/dt, -P_rated))
P_pos = np.min(( (E_rated - E_sto)/(1-a)/dt, P_rated))
U1 = (P_neg, P_pos)
return (U1, )
def cost_model(k, E_sto, P_sto):
'''penalty on the power injected to the grid
P_grid = P_prod - P_sto
'''
P_prod = P_prod_data[k]
P_grid = P_prod - P_sto
P_grid_over = np.where(P_grid > 0.4, P_grid-0.4, 0)
P_grid_neg = np.where(P_grid < 0, P_grid, 0)
penal = P_grid_over**2 + P_grid_neg**2 + 0*P_sto**2
return penal
### Create the system description:
sto_sys = SysDescription((1,1,0), name='Deterministic Storage for PV', stationnary=False)
sto_sys.dyn = dyn_sto
sto_sys.control_box = admissible_controls
sto_sys.cost = cost_model
sto_sys.print_summary()
### Create the DP solver:
dpsolv = DPSolver(sto_sys)
# discretize the state space
N_E = 50
E_grid = dpsolv.discretize_state(0, E_rated, N_E)[0]
dpsolv.control_steps=(.001,)
dpsolv.print_summary()
J_fin = np.zeros(N_E)
J, pol = dpsolv.bellman_recursion(T_horiz, J_fin)
pol_sto = pol[..., 0]
### Plot the policy
plt.figure('policy')
plt.subplot(111, title='$P_{grid}(t,E)$ policy', xlabel='time', ylabel='$E_{sto}$')
plt.imshow(P_prod_data - pol_sto.T, extent=(0, (T_horiz-1)/24, 0, 2))
#plt.plot(t/24., E[:-1], 'k-', alpha=0.5, label='$E_{sto}$')
#plt.plot(t/24., P_prod_data+E_rated, label='$P_{prod}$')
plt.colorbar()
#### Simulation: ####
N_sim = T_horiz
# Time vector
k_range = np.arange(N_sim)
k_range_x= np.arange(N_sim+1)
# State variables
E = np.zeros(N_sim+1)
E[0] = E_rated/2
# Control variables
P_sto = np.zeros(N_sim)
# Simulation loop:
for k in k_range:
# Control computation:
P_sto_law = dpsolv.interp_on_state(pol_sto[k])
P_sto[k] = P_sto_law(E[k])
# State evolution:
E[k+1], = sto_sys.dyn(k, E[k], P_sto[k])
# Compute state variables derivatives:
E_full = np.ma.array(E, mask = (E<E_rated*0.9999))
E_empty = np.ma.array(E, mask = (E>E_rated*0.0001))
# Deviation from commitment:
P_grid = P_prod_data - P_sto
P_grid_l2 = np.sqrt(np.mean(P_grid**2))
print('RMS deviation: {:.4f}'.format(P_grid_l2))
### Plot:
fig, ax = plt.subplots(2,1, sharex=True)
ax[0].set_title('Power flows of the PV-storage system')
ax[0].plot(k_range, P_prod_data, 'b-', label='$P_{prod}$')
ax[0].plot(k_range, P_sto, 'c-', label='$P_{sto}$')
ax[0].plot(k_range, P_grid, 'r-', label='$P_{grid}$')
ax[0].legend()
ax[1].set_title('Stored energy')
ax[1].plot(k_range_x, E, 'b-', label='$E_{sto}$')
ax[1].plot(k_range_x, E_full, 'D-', color='red', label='full')
ax[1].plot(k_range_x, E_empty, 'D-', color='orange', label='empty')
plt.show()
|
[
"sys.path.append",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"stodynprog.SysDescription",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.ma.array",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.arange",
"numpy.loadtxt",
"numpy.min",
"numpy.where",
"numpy.mean",
"stodynprog.DPSolver"
] |
[((2285, 2371), 'stodynprog.SysDescription', 'SysDescription', (['(1, 1, 0)'], {'name': '"""Deterministic Storage for PV"""', 'stationnary': '(False)'}), "((1, 1, 0), name='Deterministic Storage for PV', stationnary=\n False)\n", (2299, 2371), False, 'from stodynprog import SysDescription, DPSolver\n'), ((2516, 2533), 'stodynprog.DPSolver', 'DPSolver', (['sto_sys'], {}), '(sto_sys)\n', (2524, 2533), False, 'from stodynprog import SysDescription, DPSolver\n'), ((2688, 2701), 'numpy.zeros', 'np.zeros', (['N_E'], {}), '(N_E)\n', (2696, 2701), True, 'import numpy as np\n'), ((2797, 2817), 'matplotlib.pyplot.figure', 'plt.figure', (['"""policy"""'], {}), "('policy')\n", (2807, 2817), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2906), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'title': '"""$P_{grid}(t,E)$ policy"""', 'xlabel': '"""time"""', 'ylabel': '"""$E_{sto}$"""'}), "(111, title='$P_{grid}(t,E)$ policy', xlabel='time', ylabel=\n '$E_{sto}$')\n", (2829, 2906), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2975), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(P_prod_data - pol_sto.T)'], {'extent': '(0, (T_horiz - 1) / 24, 0, 2)'}), '(P_prod_data - pol_sto.T, extent=(0, (T_horiz - 1) / 24, 0, 2))\n', (2912, 2975), True, 'import matplotlib.pyplot as plt\n'), ((3092, 3106), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3104, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3189), 'numpy.arange', 'np.arange', (['N_sim'], {}), '(N_sim)\n', (3182, 3189), True, 'import numpy as np\n'), ((3201, 3221), 'numpy.arange', 'np.arange', (['(N_sim + 1)'], {}), '(N_sim + 1)\n', (3210, 3221), True, 'import numpy as np\n'), ((3242, 3261), 'numpy.zeros', 'np.zeros', (['(N_sim + 1)'], {}), '(N_sim + 1)\n', (3250, 3261), True, 'import numpy as np\n'), ((3306, 3321), 'numpy.zeros', 'np.zeros', (['N_sim'], {}), '(N_sim)\n', (3314, 3321), True, 'import numpy as np\n'), ((3587, 3628), 'numpy.ma.array', 'np.ma.array', (['E'], {'mask': '(E < E_rated * 0.9999)'}), '(E, mask=E < E_rated * 0.9999)\n', (3598, 3628), True, 'import numpy as np\n'), ((3640, 3681), 'numpy.ma.array', 'np.ma.array', (['E'], {'mask': '(E > E_rated * 0.0001)'}), '(E, mask=E > E_rated * 0.0001)\n', (3651, 3681), True, 'import numpy as np\n'), ((3852, 3883), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (3864, 3883), True, 'import matplotlib.pyplot as plt\n'), ((4340, 4350), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4348, 4350), True, 'import matplotlib.pyplot as plt\n'), ((793, 845), 'numpy.loadtxt', 'np.loadtxt', (['"""pv_prod.csv"""'], {'skiprows': '(1)', 'delimiter': '""","""'}), "('pv_prod.csv', skiprows=1, delimiter=',')\n", (803, 845), True, 'import numpy as np\n'), ((1748, 1789), 'numpy.max', 'np.max', (['(-E_sto / (1 + a) / dt, -P_rated)'], {}), '((-E_sto / (1 + a) / dt, -P_rated))\n', (1754, 1789), True, 'import numpy as np\n'), ((1797, 1848), 'numpy.min', 'np.min', (['((E_rated - E_sto) / (1 - a) / dt, P_rated)'], {}), '(((E_rated - E_sto) / (1 - a) / dt, P_rated))\n', (1803, 1848), True, 'import numpy as np\n'), ((2079, 2118), 'numpy.where', 'np.where', (['(P_grid > 0.4)', '(P_grid - 0.4)', '(0)'], {}), '(P_grid > 0.4, P_grid - 0.4, 0)\n', (2087, 2118), True, 'import numpy as np\n'), ((2134, 2165), 'numpy.where', 'np.where', (['(P_grid < 0)', 'P_grid', '(0)'], {}), '(P_grid < 0, P_grid, 0)\n', (2142, 2165), True, 'import numpy as np\n'), ((3762, 3782), 'numpy.mean', 'np.mean', (['(P_grid ** 2)'], {}), 
'(P_grid ** 2)\n', (3769, 3782), True, 'import numpy as np\n'), ((666, 690), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (681, 690), False, 'import sys\n')]
|
import dgl
import mxnet as mx
import numpy as np
import logging, time
from operator import attrgetter, itemgetter
from mxnet import nd, gluon
from mxnet.gluon import nn
from dgl.utils import toindex
from dgl.nn.mxnet import GraphConv
from gluoncv.model_zoo import get_model
from gluoncv.data.batchify import Pad
def iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA) * max(0, yB - yA)
    if interArea < 1e-7:
return 0
boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
if boxAArea + boxBArea - interArea < 1e-7:
return 0
iou_val = interArea / float(boxAArea + boxBArea - interArea)
return iou_val
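# Quick sanity check for iou() (illustrative addition): two 2x2 boxes
# overlapping on a 1x2 strip.
#   iou([0, 0, 2, 2], [1, 0, 3, 2])  ->  2 / (4 + 4 - 2)  ~  0.333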
def object_iou_thresh(gt_object, pred_object, iou_thresh=0.5):
obj_iou = iou(gt_object[1:5], pred_object[1:5])
if obj_iou >= iou_thresh:
return True
return False
def triplet_iou_thresh(pred_triplet, gt_triplet, iou_thresh=0.5):
sub_iou = iou(gt_triplet[5:9], pred_triplet[5:9])
if sub_iou >= iou_thresh:
ob_iou = iou(gt_triplet[9:13], pred_triplet[9:13])
if ob_iou >= iou_thresh:
return True
return False
@mx.metric.register
@mx.metric.alias('auc')
class AUCMetric(mx.metric.EvalMetric):
def __init__(self, name='auc', eps=1e-12):
super(AUCMetric, self).__init__(name)
self.eps = eps
def update(self, labels, preds):
mx.metric.check_label_shapes(labels, preds)
label_weight = labels[0].asnumpy()
preds = preds[0].asnumpy()
tmp = []
for i in range(preds.shape[0]):
tmp.append((label_weight[i], preds[i][1]))
tmp = sorted(tmp, key=itemgetter(1), reverse=True)
label_sum = label_weight.sum()
if label_sum == 0 or label_sum == label_weight.size:
return
label_one_num = np.count_nonzero(label_weight)
label_zero_num = len(label_weight) - label_one_num
total_area = label_zero_num * label_one_num
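        # Walk the predictions from highest to lowest score: each positive
        # label raises the current height, each negative label advances one
        # step right while accumulating the height. The accumulated area,
        # in units of (positives x negatives), divided by total_area is the
        # ROC AUC.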
height = 0
width = 0
area = 0
for a, _ in tmp:
if a == 1.0:
height += 1.0
else:
width += 1.0
area += height
self.sum_metric += area / total_area
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('predcls')
class PredCls(mx.metric.EvalMetric):
'''Metric with ground truth object location and label'''
def __init__(self, topk=20, iou_thresh=0.99):
super(PredCls, self).__init__('predcls@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
if labels is None or preds is None:
self.num_inst += 1
return
preds = preds[preds[:,0].argsort()[::-1]]
m = min(self.topk, preds.shape[0])
count = 0
gt_edge_num = labels.shape[0]
label_matched = [False for label in labels]
for i in range(m):
pred = preds[i]
for j in range(gt_edge_num):
if label_matched[j]:
continue
label = labels[j]
if int(label[2]) == int(pred[2]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
label_matched[j] = True
total = labels.shape[0]
self.sum_metric += count / total
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('phrcls')
class PhrCls(mx.metric.EvalMetric):
'''Metric with ground truth object location and predicted object label from detector'''
def __init__(self, topk=20, iou_thresh=0.99):
super(PhrCls, self).__init__('phrcls@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
if labels is None or preds is None:
self.num_inst += 1
return
preds = preds[preds[:,1].argsort()[::-1]]
m = min(self.topk, preds.shape[0])
count = 0
gt_edge_num = labels.shape[0]
label_matched = [False for label in labels]
for i in range(m):
pred = preds[i]
for j in range(gt_edge_num):
if label_matched[j]:
continue
label = labels[j]
if int(label[2]) == int(pred[2]) and \
int(label[3]) == int(pred[3]) and \
int(label[4]) == int(pred[4]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
label_matched[j] = True
total = labels.shape[0]
self.sum_metric += count / total
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('sgdet')
class SGDet(mx.metric.EvalMetric):
'''Metric with predicted object information by the detector'''
def __init__(self, topk=20, iou_thresh=0.5):
super(SGDet, self).__init__('sgdet@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
if labels is None or preds is None:
self.num_inst += 1
return
preds = preds[preds[:,1].argsort()[::-1]]
m = min(self.topk, len(preds))
count = 0
gt_edge_num = labels.shape[0]
label_matched = [False for label in labels]
for i in range(m):
pred = preds[i]
for j in range(gt_edge_num):
if label_matched[j]:
continue
label = labels[j]
if int(label[2]) == int(pred[2]) and \
int(label[3]) == int(pred[3]) and \
int(label[4]) == int(pred[4]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
                    label_matched[j] = True
total = labels.shape[0]
self.sum_metric += count / total
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('sgdet+')
class SGDetPlus(mx.metric.EvalMetric):
'''Metric proposed by `Graph R-CNN for Scene Graph Generation`'''
def __init__(self, topk=20, iou_thresh=0.5):
super(SGDetPlus, self).__init__('sgdet+@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
label_objects, label_triplets = labels
pred_objects, pred_triplets = preds
if label_objects is None or pred_objects is None:
self.num_inst += 1
return
count = 0
# count objects
object_matched = [False for obj in label_objects]
m = len(pred_objects)
gt_obj_num = label_objects.shape[0]
for i in range(m):
pred = pred_objects[i]
for j in range(gt_obj_num):
if object_matched[j]:
continue
label = label_objects[j]
if int(label[0]) == int(pred[0]) and \
object_iou_thresh(pred, label, self.iou_thresh):
count += 1
object_matched[j] = True
# count predicate and triplet
pred_triplets = pred_triplets[pred_triplets[:,1].argsort()[::-1]]
m = min(self.topk, len(pred_triplets))
gt_triplet_num = label_triplets.shape[0]
triplet_matched = [False for label in label_triplets]
predicate_matched = [False for label in label_triplets]
for i in range(m):
pred = pred_triplets[i]
for j in range(gt_triplet_num):
label = label_triplets[j]
                if not predicate_matched[j]:
if int(label[2]) == int(pred[2]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += label[3]
predicate_matched[j] = True
if not triplet_matched[j]:
if int(label[2]) == int(pred[2]) and \
int(label[3]) == int(pred[3]) and \
int(label[4]) == int(pred[4]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
triplet_matched[j] = True
# compute sum
        total = label_triplets.shape[0]
N = gt_obj_num + 2 * total
self.sum_metric += count / N
self.num_inst += 1
def extract_gt(g, img_size):
'''extract prediction from ground truth graph'''
if g is None or g.number_of_nodes() == 0:
return None, None
gt_eids = np.where(g.edata['rel_class'].asnumpy() > 0)[0]
if len(gt_eids) == 0:
return None, None
gt_class = g.ndata['node_class'][:,0].asnumpy()
gt_bbox = g.ndata['bbox'].asnumpy()
gt_bbox[:, 0] /= img_size[1]
gt_bbox[:, 1] /= img_size[0]
gt_bbox[:, 2] /= img_size[1]
gt_bbox[:, 3] /= img_size[0]
gt_objects = np.vstack([gt_class, gt_bbox.transpose(1, 0)]).transpose(1, 0)
gt_node_ids = g.find_edges(gt_eids)
gt_node_sub = gt_node_ids[0].asnumpy()
gt_node_ob = gt_node_ids[1].asnumpy()
gt_rel_class = g.edata['rel_class'][gt_eids,0].asnumpy() - 1
gt_sub_class = gt_class[gt_node_sub]
gt_ob_class = gt_class[gt_node_ob]
gt_sub_bbox = gt_bbox[gt_node_sub]
gt_ob_bbox = gt_bbox[gt_node_ob]
n = len(gt_eids)
gt_triplets = np.vstack([np.ones(n), np.ones(n),
gt_rel_class, gt_sub_class, gt_ob_class,
gt_sub_bbox.transpose(1, 0),
gt_ob_bbox.transpose(1, 0)]).transpose(1, 0)
return gt_objects, gt_triplets
def extract_pred(g, topk=100, joint_preds=False):
'''extract prediction from prediction graph for validation and visualization'''
if g is None or g.number_of_nodes() == 0:
return None, None
pred_class = g.ndata['node_class_pred'].asnumpy()
pred_class_prob = g.ndata['node_class_logit'].asnumpy()
pred_bbox = g.ndata['pred_bbox'][:,0:4].asnumpy()
pred_objects = np.vstack([pred_class, pred_bbox.transpose(1, 0)]).transpose(1, 0)
score_pred = g.edata['score_pred'].asnumpy()
score_phr = g.edata['score_phr'].asnumpy()
score_pred_topk_eids = (-score_pred).argsort()[0:topk].tolist()
score_phr_topk_eids = (-score_phr).argsort()[0:topk].tolist()
topk_eids = sorted(list(set(score_pred_topk_eids + score_phr_topk_eids)))
pred_rel_prob = g.edata['preds'][topk_eids].asnumpy()
if joint_preds:
pred_rel_class = pred_rel_prob[:,1:].argmax(axis=1)
else:
pred_rel_class = pred_rel_prob.argmax(axis=1)
pred_node_ids = g.find_edges(topk_eids)
pred_node_sub = pred_node_ids[0].asnumpy()
pred_node_ob = pred_node_ids[1].asnumpy()
pred_sub_class = pred_class[pred_node_sub]
pred_sub_class_prob = pred_class_prob[pred_node_sub]
pred_sub_bbox = pred_bbox[pred_node_sub]
pred_ob_class = pred_class[pred_node_ob]
pred_ob_class_prob = pred_class_prob[pred_node_ob]
pred_ob_bbox = pred_bbox[pred_node_ob]
pred_triplets = np.vstack([score_pred[topk_eids], score_phr[topk_eids],
pred_rel_class, pred_sub_class, pred_ob_class,
pred_sub_bbox.transpose(1, 0),
pred_ob_bbox.transpose(1, 0)]).transpose(1, 0)
return pred_objects, pred_triplets
|
[
"numpy.count_nonzero",
"mxnet.metric.check_label_shapes",
"mxnet.metric.alias",
"numpy.ones",
"operator.itemgetter"
] |
[((1375, 1397), 'mxnet.metric.alias', 'mx.metric.alias', (['"""auc"""'], {}), "('auc')\n", (1390, 1397), True, 'import mxnet as mx\n'), ((2485, 2511), 'mxnet.metric.alias', 'mx.metric.alias', (['"""predcls"""'], {}), "('predcls')\n", (2500, 2511), True, 'import mxnet as mx\n'), ((3632, 3657), 'mxnet.metric.alias', 'mx.metric.alias', (['"""phrcls"""'], {}), "('phrcls')\n", (3647, 3657), True, 'import mxnet as mx\n'), ((4915, 4939), 'mxnet.metric.alias', 'mx.metric.alias', (['"""sgdet"""'], {}), "('sgdet')\n", (4930, 4939), True, 'import mxnet as mx\n'), ((6163, 6188), 'mxnet.metric.alias', 'mx.metric.alias', (['"""sgdet+"""'], {}), "('sgdet+')\n", (6178, 6188), True, 'import mxnet as mx\n'), ((1599, 1642), 'mxnet.metric.check_label_shapes', 'mx.metric.check_label_shapes', (['labels', 'preds'], {}), '(labels, preds)\n', (1627, 1642), True, 'import mxnet as mx\n'), ((2036, 2066), 'numpy.count_nonzero', 'np.count_nonzero', (['label_weight'], {}), '(label_weight)\n', (2052, 2066), True, 'import numpy as np\n'), ((1863, 1876), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1873, 1876), False, 'from operator import attrgetter, itemgetter\n'), ((9558, 9568), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9565, 9568), True, 'import numpy as np\n'), ((9570, 9580), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9577, 9580), True, 'import numpy as np\n')]
|
import numpy as np
pts = np.load('point_cloud.npy')
np.savetxt('point_cloud_manual_map.txt', pts)
|
[
"numpy.load",
"numpy.savetxt"
] |
[((27, 53), 'numpy.load', 'np.load', (['"""point_cloud.npy"""'], {}), "('point_cloud.npy')\n", (34, 53), True, 'import numpy as np\n'), ((54, 99), 'numpy.savetxt', 'np.savetxt', (['"""point_cloud_manual_map.txt"""', 'pts'], {}), "('point_cloud_manual_map.txt', pts)\n", (64, 99), True, 'import numpy as np\n')]
|
# Question 1
import pandas as pd
def read_url_and_create_csv(url_to_read):
data = pd.read_csv(url_to_read)
return data
# Question 2
import numpy as np
def same_type(data_list):
is_same_type = True
first_item = data_list[0]
for item in data_list[1:]:
        if type(first_item) == type(item) or (isinstance(item, float) and np.isnan(item)):
pass
else:
is_same_type = False
return is_same_type
def test_create_dataframe(pd_df, col_list):
result = True
df_col_list = pd_df.columns.tolist()
if pd_df[df_col_list[0]].count() + 1 >= 10:
for df_col in df_col_list:
if df_col in col_list:
if same_type(pd_df[df_col].tolist()):
pass
else:
result = False
else:
result = False
for col in col_list:
if col in df_col_list:
pass
else:
result = False
else:
result = False
return result
|
[
"pandas.read_csv",
"numpy.isnan"
] |
[((87, 111), 'pandas.read_csv', 'pd.read_csv', (['url_to_read'], {}), '(url_to_read)\n', (98, 111), True, 'import pandas as pd\n'), ((318, 332), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (326, 332), True, 'import numpy as np\n')]
|
"""
Library to produce specific plots using the nerscPlot library.
"""
from commonLib.nerscPlot import (paintHistogramMulti, paintBoxPlotGeneral,
paintBarsHistogram)
import getopt
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sys
from array import array
from numpy import ndarray, arange, asarray
from stats.trace import ResultTrace
from orchestration.definition import ExperimentDefinition
from stats import NumericStats
from matplotlib.cbook import unique
def get_args(default_trace_id=1, lim=False):
try:
opts, args = getopt.getopt(sys.argv[1:],"i:ln",
["id=", "lim", "nolim"])
except getopt.GetoptError:
        print('test.py [-i <trace_id>] [-l|-n]')
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--id"):
default_trace_id=int(arg)
elif opt in ("-l", "--lim"):
lim=True
elif opt in ("-n", "--nolim"):
lim=False
return default_trace_id, lim
def profile(data, name, file_name, x_axis_label, x_log_scale=False):
"""
Produces a png with a histogram and CDF of the numeric values in data.
Args:
- data: list of values to analyze.
- name: string with the title to write on the figure.
- file_name: string with the file system route where to place the out file.
No need to include an extension, png will be appended.
    - x_axis_label: label for the x-axis of the histogram.
- x_log_scale: if True, x axis uses log scale.
"""
data_dic = {name:data}
paintHistogramMulti(name, data_dic, bins=100,
graphFileName=file_name,
labelX=x_axis_label,
xLogScale=x_log_scale,
labelY="Number Of Jobs")
def profile_compare(log_data, trace_data, name, file_name, x_axis_label,
x_log_scale=False, filterCut=0):
"""
Produces a histogram and boxplot comparing two series of values that
represent a random variable of the original workload data and the same
random variable of the derived synthetic workload.
Args:
- log_data: list of values of a random variable of a real workload.
- trace_data: list of values of a random variable of a synthetic workload.
- name: string with the title to write on the figure.
- file_name: string with the file system route where to place the out file.
No need to include an extension, png will be appended. It will be
        used for the histogram file; the boxplot file will have the same name
with "-boxplot" added.
    - x_axis_label: label for the x-axis of the histogram.
- x_log_scale: if True, x axis uses log scale.
    - filterCut: if set to 0, it has no effect. Otherwise, filterCut will be
        the upper bound of values shown in the x-axis of the histogram.
"""
data_dic = {"original jobs":log_data,
"synthetic jobs":trace_data}
if x_log_scale:
paintHistogramMulti(name, data_dic, bins=100,
graphFileName=file_name,
labelX=x_axis_label,
xLogScale=x_log_scale,
labelY="Number Of Jobs",
xLim=filterCut)
else:
paintHistogramMulti(name, data_dic, bins=100,
graphFileName=file_name,
labelX=x_axis_label,
xLogScale=x_log_scale,
labelY="Number Of Jobs",
filterCut=filterCut)
paintBoxPlotGeneral(name, data_dic, labelY=x_axis_label,
yLogScale=True,
graphFileName=file_name+"-boxplot")
def histogram_cdf(edges, hist, name, file_name, x_axis_label,
y_axis_label, target_folder="",
hists_order=None,
do_cdf=False,
x_log_scale=False,
y_log_scale=False,
cdf_y_log_scale=False,
min_max=None,
cdf_min_max=None):
"""Plots histograms using their edges and bins content and stores the
    resulting bitmap in a png file. All histograms must have the same edges.
It is also capable of plotting the CDF of the histograms.
Args:
- edges: list of numbers representing the edges of all the histograms.
- hist: list of bin values or dictionary of lists. Each element in the
dictionary represents a histogram. Its key is its name, its value
is a list of ordered numbers, representing the number of elements in
each bin (or share).
- name: string with the name of the plot. Will be used for the title of the
plot.
- file_name: file system route pointing to the file to save the graph to.
".png" will be appended to the name.
- x_axis_label: string with the label for x-axis.
- y_axis_label: string with the label for y-axis.
- target_folder: string with a file-system folder route where output file
should be stored.
    - hists_order: list of strings with the names of the series present in
        hist. Series will be plotted in the order they appear in this list.
- do_cdf: if True, the cumulative distribution function will be plotted for
each histogram.
- x_log_scale: if True, x-axis will use log scale.
- y_log_scale: if True, y-axis for the histograms will use log scale.
- cdf_y_log_scale: if True, the y-axis for the CDF lines will use log scale.
- min_max: If set to a two element tuple of the shape (None, None),
(x1,None), (None, x2), (x1, x2). If first Element is not None it will
be used as the minimum value in the y axis for the histograms. If
second is not None, it will be used as the maximum value.
- cdf_min_max: If set to a two element tuple of the shape (None, None),
(x1,None), (None, x2), (x1, x2). If first Element is not None it will
be used as the minimum value in the y axis for the CDFs. If
second is not None, it will be used as the maximum value.
"""
if type(hist) is not dict:
hist = {"":hist}
if hists_order is not None:
if set(hists_order)!=set(hist.keys()):
raise ValueError("hists_order list of keys must have the same keys"
" as hist dictionary")
else:
hists_order=sorted(hist.keys())
paintBarsHistogram(name, hists_order, edges, hist,
target_folder=target_folder,
file_name=file_name,
labelX=x_axis_label, labelY=y_axis_label,
cdf=do_cdf,
x_log_scale=x_log_scale,
y_log_scale=y_log_scale,
cdf_y_log_scale=cdf_y_log_scale,
min_max=min_max,
cdf_min_max=cdf_min_max)
def create_legend(ax,legend):
"""Creates a legend for a plot. It is placed over the top of the axis, in
a wide distribution.
Args:
- ax: matplotlib axis on which the legend is placed.
    - legend: list of pairs ("series name", "color name") to be used to
        construct the legend. List order matches the order of the on-screen
        legend.
"""
handles=[]
for key in legend:
hatch=None
if len(key)>3:
hatch=key[3]
handles.append(mpatches.Patch(facecolor=key[1], label=key[0],
edgecolor="black",
hatch=hatch))
bbox = ax.get_window_extent()
correct=0
if bbox.height<100:
correct=0.1
ax.legend(handles=handles, fontsize=10,
bbox_to_anchor=(0.0, 1.00-correct, 1., 0.00), loc=3,
ncol=len(legend), mode="expand", borderaxespad=0.0,
frameon=False)
def do_list_like(the_list, ref_list, force=False):
""" if the_list is not a list of lists, it returns a list with n copies
of the_list, where n is len(ref_list)."""
if force or (the_list is not None and not type(the_list[0]) is list):
return [the_list for x in range(len(ref_list))]
else:
return the_list
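# Illustrative examples (added):
#   do_list_like([1, 2], ["r1", "r2", "r3"])  ->  [[1, 2], [1, 2], [1, 2]]
#   do_list_like([[1], [2]], ["r1", "r2"])    ->  [[1], [2]]  (already nested)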
def join_rows(row_list_1, row_list_2):
"""Returns a list of list, in which element is the concatenation of the
elements in the same position of row_list1 and row_list."""
if not row_list_1:
return row_list_2
if not row_list_2:
return row_list_1
row_list = []
for (row1, row2) in zip(row_list_1, row_list_2):
row_list.append(row1+row2)
return row_list
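# Illustrative example (added): element-wise concatenation of two row lists.
#   join_rows([[1, 2], [3]], [[4], [5, 6]])  ->  [[1, 2, 4], [3, 5, 6]]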
def calculate_diffs(result_list, base_index=0, group_count=3, percent=True,
groups=None, speedup=False, field="median"):
""" Calculate the absolute or relative arithmetic distance between groups
    of values. Each list in result_list is sliced into ordered groups of
    results of size group_count. In each group the difference is calculated
    between the element in position base_index and the rest.
Args:
- result_list: list of lists of NumericStats objects.
    - base_index: position of the reference result in each result group.
- group_count: number of elements in each group.
- percent: if True the distance calculated is relative,
if False is absolute.
- groups: list of numbers. It overrides group_count. It contains a list
of the sizes of the groups in the result_list. e.g. [2, 3], means that
the first group has two elements, and the second three elements.
- speedup: if True, the relative distance is calculated using the non
base_index element as the base of the comparison.
"""
diffs =[]
if groups:
for row in result_list:
index=0
diffs_row = []
diffs.append(diffs_row)
for group in groups:
base_res=row[index+base_index]
base_median=base_res._get(field)
for j in range(group):
res_median=row[index+j]._get(field)
if speedup:
if base_median==0:
diff_value=0
else:
diff_value=res_median/base_median
else:
diff_value=res_median-base_median
if percent and base_res!=0:
if base_median==0:
base_median=1
diff_value=float(diff_value)/float(base_median)
if j!=base_index:
diffs_row.append(diff_value)
index+=group
else:
for row in result_list:
diffs_row = []
diffs.append(diffs_row)
for i in range(0, len(row), group_count):
base_res=row[i+base_index]
base_median=base_res._get(field)
for j in range(group_count):
res_median=row[i+j]._get(field)
diff_value=res_median-base_median
if speedup:
if base_median==0:
diff_value=0
else:
diff_value=res_median/base_median
else:
if percent and base_res!=0:
if diff_value!=0:
if base_median==0:
base_median=1
diff_value=float(diff_value)/float(base_median)
if j!=base_index:
diffs_row.append(diff_value)
return diffs
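# A minimal sketch (added) of the group slicing above; `_Stub` is a
# hypothetical stand-in exposing the _get(field) interface of NumericStats:
#   class _Stub(object):
#       def __init__(self, v): self.v = v
#       def _get(self, field): return self.v
#   calculate_diffs([[_Stub(10), _Stub(12), _Stub(8)]], base_index=0,
#                   group_count=3, percent=True)  ->  [[0.2, -0.2]]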
def adjust_number_ticks(ax, tick_count, log_scale=False, extra=None):
""" Adjusts the y-axis of ax to show only tick_count labels."""
my_t=ax.get_yticks()
y_lim = (float(str(my_t[0])), float(str(my_t[-1])))
print("INTERNAL", y_lim)
step = float(max(y_lim)-min(y_lim))/(tick_count-1)
step=float(str(step))
upper_limit=float(str(max(y_lim)+step))
lower_limit=float(str(min(y_lim)))
ticks = arange(lower_limit, upper_limit,step)
if extra is not None:
ticks=sorted(list(ticks)+[float(extra)])
if log_scale:
ticks=_log_down(ticks)
ax.set_yticks(ticks)
def _log_down(num):
from numpy import log10, power, floor
power_l = log10(num)
return power(10, sorted(list(set(floor(power_l)))))
def remove_ids(list_of_rows, group_size=3, list_of_pos_to_remove=[0]):
new_list_of_rows=[]
for row in list_of_rows:
new_row=[]
new_list_of_rows.append(new_row)
index=0
for (index, elem) in zip(list(range(len(row))), row):
if not index%group_size in list_of_pos_to_remove:
new_row.append(elem)
return new_list_of_rows
def replace(trace_id_rows, original, replacement):
new_trace_id_rows=[]
for row in trace_id_rows:
new_row=[]
new_trace_id_rows.append(new_row)
for item in row:
if item in original:
index=original.index(item)
new_row.append(replacement[index])
else:
new_row.append(item)
return new_trace_id_rows
def gen_trace_ids_exps(base_id, base_exp=None, group_size=3, group_count=5,
block_count=6, group_jump=18,inverse=False,
base_exp_group=None,skip=0):
""" Generates the list of trace_ids to load and plot. Returns a list of
lists of trace_id.
Args:
- base_id: first trace_id in the first list of lists.
- base_exp: if set, base_exp is added at the beginning. of each list.
- group_size: size of the group of experiments.
- group_count: number of groups per block of experiments.
- block_count: number of lists in the returned list of lists.
- group_jump: number trace_ids to jump from one group to the next.
- inverse: if True, the first group is at the end of each row list.
    - base_exp_group: if set, base_exp_group is added at the beginning of each
        group.
    - skip: number of trace_ids to jump between blocks.
Returns:
a list of block_count lists. Each list may be started by base_exp if
        set. Each list is composed of group_count groups of size group_size.
        If base_exp_group is set, it is added to each group.
"""
trace_id_rows_colors = []
for block_i in range(block_count):
trace_id_row = []
trace_id_rows_colors.append(trace_id_row)
if base_exp is not None:
trace_id_row.append(base_exp)
group_index_list=list(range(group_count))
if inverse:
group_index_list=reversed(group_index_list)
for group_i in group_index_list:
if base_exp_group is not None:
trace_id_row.append(base_exp_group)
for exp_i in range(group_size):
trace_id=(base_id
+ (group_size+skip)*block_i
+ (group_jump)*group_i
+ exp_i)
trace_id_row.append(trace_id)
return trace_id_rows_colors
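# Illustrative example (added): two blocks, each with two groups of two
# experiments, groups spaced group_jump=10 trace_ids apart:
#   gen_trace_ids_exps(1, group_size=2, group_count=2, block_count=2,
#                      group_jump=10)  ->  [[1, 2, 11, 12], [3, 4, 13, 14]]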
def plot_multi_exp_boxplot(name, file_name, title,
exp_rows,
y_axis_labels,
x_axis_labels,
y_axis_general_label=None,
grouping=None,
colors=None,
hatches=None,
aspect_ratio=None,
y_limits=None,
y_log_scale=False,
legend=None,
percent_diff=False,
base_diff=0,
group_count=3,
grouping_alt=None,
precalc_diffs=None,
y_tick_count=None,
y_tick_count_alt=None,
y_axis_label_alt=None):
"""Plots a matrix of comboboxes os series that are defined by two variables
with multiple values. Series in the same row share the same value for the
first variable. Series in the same column share the saame value for the
second.
Args:
    - name: matplotlib figure name string.
- file_name: string containing file system route pointing to a file where
the plot will be stored.
- exp_rows: input data to be used to produce the plot expressed as a list of
        lists of NumericStats objects. The results are plotted as rows,
        each row corresponding to one inner list. All rows
must contain the same number of NumericStats objects.
    - x_axis_labels: list of strings to label each "column" of box plots.
        Must have the same dimension as the number of columns in exp_rows.
        Printed at the bottom of the plot.
    - y_axis_labels: list of strings to label each "row" of box plots in
the plot.
    - grouping: list of numbers that controls the layout within rows. Each
        number in the list indicates how many boxes go back to back; a 0
        indicates an extra space. SUM(grouping) must be equal to the second
        dimension of exp_rows. E.g. if a row has 10 elements, [1, 5, 0, 4]
        will present: one box, a space, five back-to-back boxes, two
        spaces and four boxes back to back.
    - colors: list of text colors corresponding to the background filling of
        each column of boxes. If a list of lists, each element corresponds
        to the box in the same position.
    - hatches: list of matplotlib hatches (e.g. '-', '/', '\') corresponding
        to each column of boxes. If a list of lists, each element corresponds
        to the box in the same position.
- aspect_ratio: if set to a number, it represents the width/height of the
final plot.
- y_limits: if set to a list of integer pairs (min, max), it will apply
each list element as a limit for the series in a row of results.
    - y_log_scale: if True, the y-axis in all box plots will use logarithmic
        scale.
    - legend: if set, the plot will contain a legend based on the list of pairs
        ("series name", "color name"). List order matches the order of the
        on-screen legend. Color names will be mapped to matplotlib colors.
- percent_diff: If True, the figure includes barcharts showing the
difference between one of the values in each group and the rest.
- base_diff: position of the difference reference in each group.
- group_count: Size of the comparison groups.
- grouping_alt: List of comparisons present per group.
    - precalc_diffs: list of lists of floats. If set, the differences are not
        generated, and these values are used as differences.
- y_tick_count: If set to a number, it sets the number of labels used in
the left y_axis.
- y_tick_count_alt: If set to a number, it sets the number of labels used in
        the right y-axis.
- y_axis_label_alt: String value used as legend for the right y-axis.
"""
num_rows=len(exp_rows)
fig, axes = plt.subplots(nrows=num_rows, ncols=1)
if not (type(axes) is ndarray):
axes=[axes]
colors=do_list_like(colors, exp_rows)
hatches=do_list_like(hatches, exp_rows)
if percent_diff:
if precalc_diffs is None:
diffs_results = calculate_diffs(exp_rows, base_index=base_diff,
group_count=group_count,
groups=grouping_alt)
else:
diffs_results = precalc_diffs
else:
diffs_results = do_list_like([0], exp_rows)
extra_spacing=0
if percent_diff:
extra_spacing=group_count-1
    label_ax=axes[len(axes)//2]
for (ax,results_row,y_axis_label, color_item, hatches_item, diffs) in zip(
axes,
exp_rows,
y_axis_labels,
colors,
hatches,
diffs_results):
median, p25, p75, min_val, max_val = _get_boxplot_data(results_row)
if ax==axes[0]:
ax.set_title(title)
the_labels=None
if ax==axes[-1]:
the_labels=x_axis_labels
if legend:
create_legend(ax,legend)
else:
the_labels=["" for x in results_row]
if y_axis_general_label:
if ax==label_ax:
y_axis_label="{0}\n{1}".format(y_axis_general_label,
y_axis_label)
else:
y_axis_label="{0}".format(y_axis_label)
ax.get_yaxis().set_label_coords(-0.06,0.5)
positions, widths, alt_positions, alt_width = (
_add_precalc_boxplot(ax,median, p25, p75, min_val, max_val,
grouping=grouping,
colors=color_item,
hatches=hatches_item,
labels=the_labels,
y_axis_label=y_axis_label,
y_limits=y_limits,
y_log_scale=y_log_scale,
extra_spacing=extra_spacing))
if grouping and percent_diff:
the_y_label_alt=None
if ax==label_ax:
the_y_label_alt=y_axis_label_alt
""" OOJOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO """
if grouping[0]!=group_count:
color_item=color_item[1:]
hatches_item=hatches_item[1:]
_add_diffs(ax, diffs, alt_positions, alt_width,
colors=_extract_pos(color_item, base_diff,
extra_spacing+1),
hatches=_extract_pos(hatches_item, base_diff,
extra_spacing+1),
y_tick_count=y_tick_count_alt,
y_label=the_y_label_alt)
if y_tick_count:
adjust_number_ticks(ax, y_tick_count, y_log_scale)
if aspect_ratio:
plt.axes().set_aspect(aspect_ratio)
if title:
axes[0].set_title(title)
fig.savefig(file_name, bbox_inches='tight')
def flatten_list(the_list):
""" Takes a lists of lists and puts all their elements in a list."""
return [item for sublist in the_list for item in sublist]
def extract_type(data_list, type_list, select_type):
"""Returns a sublist of data_list. An element es added to the returning list
if the element of the same position in type_list is "select_type".
Args:
- data_list: List of element
- type_list: List of string types. Same size as data_list
    - select_type: type of the elements to be returned.
"""
new_data=[]
type_list=flatten_list(type_list)
for (the_data, the_type) in zip(data_list, type_list):
if the_type==select_type:
new_data.append(the_data)
return new_data
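# Illustrative example (added): type_list is flattened before pairing.
#   extract_type([10, 20, 30], [["box", "bar", "box"]], "box")  ->  [10, 30]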
def plot_multi_boxplot_bars(name, file_name, title,
exp_rows,
type_rows,
y_axis_labels,
x_axis_labels,
y_axis_general_label=None,
colors=None,
hatches=None,
aspect_ratio=None,
y_limits=None,
y_log_scale=False,
legend=None,
y_tick_count=None,
y_tick_count_alt=None,
y_axis_label_alt=None):
"""
Similar to plot_multi_boxplot, but the it can paint experiments as
boxplot or barchart. The main arguments:
- exp_rows: list of lists of numericStats
- type_rows: list of lists of strings. Each sub list is a group of
results that will be plotted without spaces between. Sublists are
lists of strings signaling which method to plot the result. If
string is "bar", it is a barchart showing the result median, if "box",
it is a boxplot of the result.
"""
num_rows=len(exp_rows)
fig, axes = plt.subplots(nrows=num_rows, ncols=1)
if not (type(axes) is ndarray):
axes=[axes]
colors=do_list_like(colors, exp_rows)
hatches=do_list_like(hatches, exp_rows)
type_rows=do_list_like(type_rows, exp_rows, force=True)
    label_ax=axes[len(axes)//2]
for (ax,results_row, type_grouping, y_axis_label, color_item,
hatches_item) in zip(axes,
exp_rows,
type_rows,
y_axis_labels,
colors,
hatches):
if ax==axes[0]:
ax.set_title(title)
if y_axis_general_label:
if ax==label_ax:
y_axis_label="{0}\n{1}".format(y_axis_general_label,
y_axis_label)
else:
y_axis_label="{0}".format(y_axis_label)
ax.get_yaxis().set_label_coords(-0.06,0.5)
boxplot_results=extract_type(results_row, type_grouping, "box")
bar_results=extract_type(results_row, type_grouping, "bar")
positions_dic, widths = _cal_positions_hybrid(type_grouping)
if boxplot_results:
the_labels=None
if ax==axes[-1]:
the_labels=extract_type(x_axis_labels, type_grouping,"box")
if legend:
create_legend(ax,legend)
else:
the_labels=["" for x in boxplot_results]
median, p25, p75, min_val, max_val = _get_boxplot_data(
boxplot_results)
_add_precalc_boxplot(ax,median, p25, p75, min_val, max_val,
x_position=positions_dic["box"],
x_widths=widths,
colors=extract_type(color_item, type_grouping,"box"),
hatches=extract_type(hatches_item, type_grouping,"box"),
labels=the_labels,
y_axis_label=y_axis_label,
y_limits=y_limits,
y_log_scale=y_log_scale)
if bar_results:
the_y_label_alt=None
if ax==label_ax:
the_y_label_alt=y_axis_label_alt
if ax==axes[-1]:
the_labels=extract_type(x_axis_labels, type_grouping,"bar")
else:
the_labels=["" for x in bar_results]
_add_diffs(ax, bar_results, positions_dic["bar"], widths,
colors=extract_type(color_item, type_grouping,"bar"),
hatches=extract_type(hatches_item, type_grouping,"bar"),
y_tick_count=y_tick_count_alt,
y_label=the_y_label_alt,
x_labels=the_labels)
if y_tick_count:
adjust_number_ticks(ax, y_tick_count, y_log_scale)
if aspect_ratio:
plt.axes().set_aspect(aspect_ratio)
if title:
axes[0].set_title(title)
fig.savefig(file_name, bbox_inches='tight')
def plot_multi_bars(name, file_name, title,
exp_rows,
type_rows,
y_axis_labels,
x_axis_labels,
y_axis_general_label=None,
colors=None,
hatches=None,
aspect_ratio=None,
y_limits=None,
y_log_scale=False,
legend=None,
y_tick_count=None,
y_tick_count_alt=None,
y_axis_label_alt=None,
ncols=1,
subtitle=None,
ref_line=None,
do_auto_label=True):
""" Plots the medians of a list of lists of results. It is similar to
    plot_multi_exp_boxplot, but it allows grouping the blocks in rows and
    columns. Important arguments:
    - exp_rows: list of lists of results whose median is plotted. Each inner
        list will be plotted in an individual subfigure.
    - type_rows: list of lists of strings to show the grouping. It shows how
        the bars are grouped (with or without spaces between). It must contain
        the word "bar" for each individual result.
"""
num_rows=len(exp_rows)
    fig, axes = plt.subplots(nrows=num_rows//ncols, ncols=ncols)
print(axes)
if ncols>1:
axes=asarray(flatten_list(axes))
print(axes)
if not (type(axes) is ndarray):
axes=[axes]
colors=do_list_like(colors, exp_rows)
hatches=do_list_like(hatches, exp_rows)
type_rows=do_list_like(type_rows, exp_rows, force=True)
    label_ax=axes[len(axes)//2-(ncols-1)]
if aspect_ratio:
plt.axes().set_aspect(aspect_ratio)
if ncols>1:
plt.tight_layout(pad=0)
for (ax,results_row, type_grouping, y_axis_label, color_item,
hatches_item) in zip(axes,
exp_rows,
type_rows,
y_axis_labels,
colors,
hatches):
if ax==axes[0]:
ax.set_title(title)
if len(axes)==1 or ax==axes[1] and ncols>1 and subtitle:
ax.set_title(subtitle)
if ref_line:
ax.axhline(ref_line, linestyle="--")
if y_axis_general_label:
if ax==label_ax:
y_axis_label="{0}\n{1}".format(y_axis_general_label,
y_axis_label)
else:
y_axis_label="{0}".format(y_axis_label)
#ax.get_yaxis().set_label_coords(-0.07*ncols,0.5)
bar_results=results_row
positions_dic, widths = _cal_positions_hybrid(type_grouping)
if ax==axes[-1] or ax==axes[-ncols]:
the_labels=x_axis_labels
if legend and ax==axes[-1]:
create_legend(ax,legend)
else:
the_labels=["" for x in bar_results]
_add_diffs(ax, bar_results, positions_dic["bar"], widths,
colors=extract_type(color_item, type_grouping,"bar"),
hatches=extract_type(hatches_item, type_grouping,"bar"),
y_tick_count=None,
y_label=y_axis_label,
x_labels=the_labels,
main_axis=True,
bigger_numbers=True,
do_auto_label=do_auto_label,
y_log_scale=y_log_scale,
y_limits=y_limits)
if y_limits:
ax.set_ylim(y_limits[0],y_limits[1])
if y_tick_count:
adjust_number_ticks(ax, y_tick_count, y_log_scale, extra=ref_line)
fig.savefig(file_name, bbox_inches='tight')
def _extract_pos(items, pos, size):
new_items=[]
for i in range(len(items)):
if i%size!=pos:
new_items.append(items[i])
return new_items
def _get_boxplot_data(numeric_results_list):
values=[[],[],[],[],[]]
for result in numeric_results_list:
a_value = result.get_values_boxplot()
for (target, src) in zip(values, a_value):
target.append(src)
return values[0], values[1], values[2], values[3], values[4]
def _autolabel(ax, rects, values,bigger_numbers=False, background=True,
y_limits=None):
extra_cad=""
max_value=max(values)
min_value=min(values)
y_lims=ax.get_ylim()
max_value=min(max_value, y_lims[1])
min_value=max(min_value, y_lims[0])
if y_limits is not None:
if y_limits[0] is not None:
min_value=max(min_value, y_limits[0])
if y_limits[1] is not None:
max_value=min(max_value, y_limits[1])
distance = max_value-min_value
mid_point=min_value+distance/2.0
print("values", min_value, max_value, distance, mid_point)
va="bottom"
margin=0.05
h_margin=0.0
for (rect, value) in zip(rects, values):
if value<0.3 and value>-0.3:
if mid_point>=0:
height=distance*margin
else:
height=-distance*margin
va="top"
elif value>0:
if abs(value)>distance/2:
height=distance*margin
else:
height = value+distance*margin
elif value<0:
if abs(value)>distance/2:
height=-distance*margin
va="top"
else:
height=value-distance*margin
horiz_position=rect.get_x() + (rect.get_width()/2)*(1+h_margin)
font_size="smaller"
if bigger_numbers:
font_size="large"
bbox=None
extraText=""
if y_limits is not None:
if y_limits[0] is not None:
height=max(height, y_limits[0])
if y_limits[1] is not None:
height=min(height, y_limits[1])
if background:
bbox=dict(facecolor='lightgrey', pad=0,
edgecolor="lightgrey", alpha=0.5)
extraText=" "
myt=ax.text(horiz_position, 1.01*height,
extraText+"{0:.2f}{1}".format(float(value), extra_cad),
ha='center', va=va, rotation="vertical",
fontsize=font_size,
bbox=bbox)
def precalc_boxplot(name,file_name, median, p25, p75, min_val, max_val,
grouping=None,
aspect_ratio=None,
colors=None,
hatches=None,
labels=None,
y_axis_label=None,
title=None):
""" Plots boxplots from their basic values, instead of the original list
of values. If median, p25,p75,min_val, max_val are lists of the same
dimension, it plots multiple ones."""
fig = plt.figure(name)
ax = fig.add_subplot(111)
_add_precalc_boxplot(ax,median, p25, p75, min_val, max_val,
grouping=grouping,
colors=colors,
hatches=hatches,
labels=labels,
y_axis_label=y_axis_label)
if aspect_ratio:
plt.axes().set_aspect(aspect_ratio)
if title:
ax.set_title(title)
fig.savefig(file_name, bbox_inches='tight')
def _add_diffs(ax, diff_values, positions, width,
colors=None, hatches=None,
y_tick_count=None, y_label=None,
x_labels=None,
main_axis=False,
bigger_numbers=False,
do_auto_label=True,
y_log_scale=False,
y_limits=None):
if main_axis:
ax_alt=ax
else:
ax_alt=ax.twinx()
bplot = ax_alt.bar(positions, diff_values, width=width,
tick_label=x_labels,
log=y_log_scale)
if y_label:
ax_alt.set_ylabel(y_label)
if colors:
for patch, color in zip(bplot, colors):
if color:
patch.set_facecolor(color)
if hatches:
for patch, hatch in zip(bplot, hatches):
if hatch:
patch.set_hatch(hatch)
if y_tick_count:
adjust_number_ticks(ax_alt, y_tick_count)
if do_auto_label:
_autolabel(ax_alt, bplot, diff_values,bigger_numbers=bigger_numbers,
y_limits=y_limits)
def _adjust_y_limits_margin(ax, values, margin=0.3):
max_value=float(max(values))
min_value=float(min(values))
if max_value>0:
max_value+=(max_value-min_value)*margin
if min_value<0:
min_value-=(max_value-min_value)*margin
ax.set_ylim((min_value, max_value))
def _add_precalc_boxplot(ax, median, p25, p75, min_val, max_val,
x_position=None,
x_widths=None,
grouping=None,
colors=None,
hatches=None,
labels=None,
y_axis_label=None,
y_limits=None,
y_log_scale=False,
extra_spacing=0,
alt_grouping=None):
"""Adds boxplots to a matplotlib axis."""
positions=None
alt_positions=None
widths=0.5
alt_width=0.25
if x_position and widths:
positions=x_position
widths = x_widths
elif grouping:
positions, widths, alt_positions, alt_width=_cal_positions_widths(
grouping,
extra_spacing=extra_spacing,
alt_grouping=alt_grouping)
fake_data=_create_fake_data(median, p25,p75, min_val, max_val)
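    # The five synthetic points [min, p25, median, p75, max] combined with the
    # huge `whis` below make matplotlib draw the whiskers exactly at min/max
    # and the box at p25/p75, reproducing the precomputed statistics.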
bplot = ax.boxplot(fake_data, positions=positions,
widths=widths,patch_artist=True,
labels=labels,
whis=9999999999999)
if colors:
for patch, color in zip(bplot['boxes'], colors):
if color:
patch.set_facecolor(color)
if hatches:
for patch, hatch in zip(bplot['boxes'], hatches):
if hatch:
patch.set_hatch(hatch)
if y_axis_label:
ax.set_ylabel(y_axis_label)
if y_limits:
ax.set_ylim(y_limits)
if y_log_scale:
ax.set_yscale("log")
return positions, widths, alt_positions, alt_width
def _create_fake_data(median, p25, p75, min_val, max_val):
if (type(p25) is list):
fake_data=[]
for (median_i, p25_i, p75_i,min_val_i, max_val_i) in zip(
median, p25, p75, min_val, max_val):
fake_data.append(
_create_fake_data(median_i, p25_i, p75_i,min_val_i, max_val_i))
return fake_data
else:
return [min_val, p25,median,p75,max_val]
def _cal_positions_widths(grouping,extra_spacing=0, alt_grouping=None):
if grouping is None:
return None, 0.5
if alt_grouping is None:
alt_grouping=grouping
total_bp=sum(grouping)
total_blocks=total_bp+len(grouping)+1
if extra_spacing:
total_blocks+=len(grouping)*extra_spacing
widths=total_blocks/float(total_blocks)
space_width=float(widths)/2.0
current_pos=1.0
positions = []
alt_positions=[]
for (bp_group, alt_group) in zip(grouping, alt_grouping):
for bp in range(bp_group):
positions.append(current_pos)
current_pos+=widths
for i in range(min(extra_spacing, alt_group-1)):
alt_positions.append(current_pos-space_width)
current_pos+=space_width
current_pos+=space_width
return positions, widths, alt_positions, space_width
def _cal_positions_hybrid(grouping):
    if grouping is None:
        return None, 0.5
    flat_grouping = flatten_list(grouping)
uniq_types=list(set(flat_grouping))
positions_dic = {}
for ut in uniq_types:
positions_dic[ut] = []
# total_bp=len(flat_grouping)
# total_blocks=float(total_bp)+(float(len(grouping))-1)*0.5
#
# widths=total_blocks/float(total_blocks)
# space_width=float(widths)/2
#
# if float(len(grouping))%2==0:
# widths*=2
# space_width*=2
# current_pos=widths
# else:
# current_pos=0.0
widths=1.0
space_width=widths
current_pos=0
for (bp_group) in grouping:
for bp in bp_group:
positions_dic[bp].append(current_pos)
current_pos+=widths
current_pos+=space_width
return positions_dic, widths
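# Illustrative example (added): positions per plot type for a hybrid layout.
#   _cal_positions_hybrid([["box", "bar"], ["box"]])
#     ->  ({'box': [0, 3.0], 'bar': [1.0]}, 1.0)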
def extract_grouped_results(db_obj, trace_id_rows_colors, edges, result_type):
"""Takes a list of lists of trace_is and produces a list of lists of results
corresponding to them.
Args:
    - db_obj: DBManager object connected to a db where the results will be pulled
from.
- trace_id_rows_colors: list of lists of integers as trace_ids of experiments.
- edges: if set to [""], it does no effect, the function extracts results
of the type result_type. If set to a list of items, results will be
pulled for each element as: "g"+str(edge)+_str(result_type)
    - result_type: string identifying which type of result we are pulling. It
        corresponds to the type of the NumericStats stored in db_obj.
Returns: a dictionary indexed by edges. Each element is a list of lists of
    the same dimension as trace_id_rows_colors, each element a NumericStats object
corresponding to the result of that component.
"""
exp_rows={}
for edge in edges:
exp_rows[edge]=extract_results(db_obj, trace_id_rows_colors,
ResultTrace.get_result_type_edge(edge,
result_type))
return exp_rows
    # NOTE: everything below this point is dead code, unreachable because of
    # the return statement above.
    exp_rows={}
for edge in edges:
exp_rows[edge]=[]
for row in trace_id_rows_colors:
these_rows={}
for edge in edges:
these_rows[edge]=[]
exp_rows[edge].append(these_rows[edge])
for trace_id in row:
exp=ExperimentDefinition()
exp.load(db_obj, trace_id)
for edge in edges:
result=None
if exp.is_it_ready_to_process():
if edge=="":
key = ResultTrace.get_result_type_edge(edge,
result_type)
else:
key=result_type
key+="_stats"
result = NumericStats()
result.load(db_obj, trace_id, key)
else:
result = NumericStats()
result.calculate([0, 0, 0])
these_rows[edge].append(result)
return exp_rows
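# Usage sketch (assumes a connected DBManager `db`, valid trace ids, and a
# hypothetical result_type "jobs_runtime"):
#   rows = extract_grouped_results(db, [[1, 2], [3, 4]], [""], "jobs_runtime")
#   stats = rows[""][0][0]   # NumericStats for the first trace of the first row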
def get_list_rows(rows, field_list):
    new_rows = []
    for row in rows:
        new_row = []
        new_rows.append(new_row)
        for index, res in enumerate(row):
            field = field_list[index % len(field_list)]
            new_row.append(res._get(field))
    return new_rows
def extract_usage(db_obj, trace_id_rows, fill_none=True, factor=1.0,
mean=False):
"""Takes a list of lists of trace_is and produces a list of lists of results
corresponding to them.
Args:
- db_obj: DBManager object connted to a db where the results will be pulled
from.
"""
exp_rows=[]
my=ResultTrace()
res_type="usage"
if mean:
res_type="usage_mean"
for row in trace_id_rows:
new_row=[]
exp_rows.append(new_row)
for trace_id in row:
exp=ExperimentDefinition()
exp.load(db_obj, trace_id)
result = my._get_utilization_result()
if exp.is_analysis_done():
result.load(db_obj, trace_id,res_type)
else:
result._set("utilization", 0)
result._set("waste", 0)
result._set("corrected_utilization", 0)
result.apply_factor(factor)
new_row.append(result)
return exp_rows
def extract_results(db_obj, trace_id_rows_colors, result_type, factor=None,
fill_none=True, second_pass=False):
"""Takes a list of lists of trace_is and produces a list of lists of results
corresponding to them.
Args:
- db_obj: DBManager object connted to a db where the results will be pulled
from.
- trace_id_rows_colors: list of lists of integers as trace_ids of experiments.
- result_type: string indentifying which type of result are we pulling. It
correspond to the type of the NumericStats stored in db_obj.
Returns: a list of lists of
same dimension of trace_id_rows_colors, each element a NumericStats object
corresponding to the result of that component.
"""
exp_rows=[]
for row in trace_id_rows_colors:
new_row=[]
exp_rows.append(new_row)
for trace_id in row:
exp=ExperimentDefinition()
exp.load(db_obj, trace_id)
if exp.is_analysis_done(second_pass=second_pass):
key=result_type+"_stats"
result = NumericStats()
result.load(db_obj, trace_id, key)
if factor:
result.apply_factor(factor)
else:
result = NumericStats()
result.calculate([0, 0, 0])
if fill_none and result._get("median") is None:
result = NumericStats()
result.calculate([0, 0, 0])
new_row.append(result)
return exp_rows
def get_dic_val(dic, val):
if val in list(dic.keys()):
return dic[val]
return dic[""]
def produce_plot_config(db_obj, trace_id_rows_colors):
""" Produces the coloring and hatches matrixes for a matrix style plot.
For that it conencts to a dabase, and depending on the scheduling algorithm
used in the experiment, it chooses a cooresponding coloring and hatches.
Args:
- db_obj: DBManager object connted to a db where the results will be pulled
from.
- trace_id_rows_colors: list of lists of integers as trace_ids of experiments.
returns:
- color_rows: list of list of matplotlib colors corresponding to each
experiment subplot.
- hatches_rows: list of lists of the hatches to be used in each experiment
subplot.
- legend: legend list of the format ("series names", "color"), listing the
scheduling algorithms present in the experiments.
"""
colors_dic = {"no":"white", "manifest":"lightgreen", "single":"lightblue",
"multi":"pink", "":"white"}
hatches_dic = {"no":None, "manifest": "-", "single":"\\",
"multi":"/", "":None}
detected_handling={}
color_rows = []
hatches_rows = []
for row in trace_id_rows_colors:
this_color_row=[]
color_rows.append(this_color_row)
this_hatches_row=[]
hatches_rows.append(this_hatches_row)
for trace_id in row:
exp = ExperimentDefinition()
exp.load(db_obj, trace_id)
handling=exp.get_true_workflow_handling()
detected_handling[handling]=1
this_color_row.append(get_dic_val(colors_dic,
handling))
this_hatches_row.append(get_dic_val(hatches_dic,
handling))
legend=[("n/a","white", "no", None),
("aware","lightgreen", "manifest","-"),
("waste","lightblue", "single", "\\"),
("wait","pink", "multi", "/")]
new_legend=[]
for item in legend:
if item[2] in list(detected_handling.keys()):
new_legend.append(item)
return color_rows, hatches_rows, new_legend
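# Usage sketch (assumes a connected DBManager `db`):
#   colors, hatches, legend = produce_plot_config(db, trace_id_rows)
# The color and hatch matrices line up element-wise with trace_id_rows.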
|
[
"matplotlib.pyplot.tight_layout",
"stats.trace.ResultTrace.get_result_type_edge",
"getopt.getopt",
"stats.trace.ResultTrace",
"stats.NumericStats",
"matplotlib.pyplot.axes",
"numpy.floor",
"matplotlib.patches.Patch",
"matplotlib.pyplot.subplots",
"commonLib.nerscPlot.paintHistogramMulti",
"orchestration.definition.ExperimentDefinition",
"matplotlib.pyplot.figure",
"numpy.arange",
"sys.exit",
"numpy.log10",
"commonLib.nerscPlot.paintBarsHistogram",
"commonLib.nerscPlot.paintBoxPlotGeneral"
] |
[((1609, 1752), 'commonLib.nerscPlot.paintHistogramMulti', 'paintHistogramMulti', (['name', 'data_dic'], {'bins': '(100)', 'graphFileName': 'file_name', 'labelX': 'x_axis_label', 'xLogScale': 'x_log_scale', 'labelY': '"""Number Of Jobs"""'}), "(name, data_dic, bins=100, graphFileName=file_name,\n labelX=x_axis_label, xLogScale=x_log_scale, labelY='Number Of Jobs')\n", (1628, 1752), False, 'from commonLib.nerscPlot import paintHistogramMulti, paintBoxPlotGeneral, paintBarsHistogram\n'), ((3649, 3763), 'commonLib.nerscPlot.paintBoxPlotGeneral', 'paintBoxPlotGeneral', (['name', 'data_dic'], {'labelY': 'x_axis_label', 'yLogScale': '(True)', 'graphFileName': "(file_name + '-boxplot')"}), "(name, data_dic, labelY=x_axis_label, yLogScale=True,\n graphFileName=file_name + '-boxplot')\n", (3668, 3763), False, 'from commonLib.nerscPlot import paintHistogramMulti, paintBoxPlotGeneral, paintBarsHistogram\n'), ((6551, 6849), 'commonLib.nerscPlot.paintBarsHistogram', 'paintBarsHistogram', (['name', 'hists_order', 'edges', 'hist'], {'target_folder': 'target_folder', 'file_name': 'file_name', 'labelX': 'x_axis_label', 'labelY': 'y_axis_label', 'cdf': 'do_cdf', 'x_log_scale': 'x_log_scale', 'y_log_scale': 'y_log_scale', 'cdf_y_log_scale': 'cdf_y_log_scale', 'min_max': 'min_max', 'cdf_min_max': 'cdf_min_max'}), '(name, hists_order, edges, hist, target_folder=\n target_folder, file_name=file_name, labelX=x_axis_label, labelY=\n y_axis_label, cdf=do_cdf, x_log_scale=x_log_scale, y_log_scale=\n y_log_scale, cdf_y_log_scale=cdf_y_log_scale, min_max=min_max,\n cdf_min_max=cdf_min_max)\n', (6569, 6849), False, 'from commonLib.nerscPlot import paintHistogramMulti, paintBoxPlotGeneral, paintBarsHistogram\n'), ((12304, 12342), 'numpy.arange', 'arange', (['lower_limit', 'upper_limit', 'step'], {}), '(lower_limit, upper_limit, step)\n', (12310, 12342), False, 'from numpy import ndarray, arange, asarray\n'), ((12577, 12587), 'numpy.log10', 'log10', (['num'], {}), '(num)\n', (12582, 12587), False, 'from numpy import log10, power, floor\n'), ((19318, 19355), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_rows', 'ncols': '(1)'}), '(nrows=num_rows, ncols=1)\n', (19330, 19355), True, 'import matplotlib.pyplot as plt\n'), ((24530, 24567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_rows', 'ncols': '(1)'}), '(nrows=num_rows, ncols=1)\n', (24542, 24567), True, 'import matplotlib.pyplot as plt\n'), ((28923, 28972), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(num_rows / ncols)', 'ncols': 'ncols'}), '(nrows=num_rows / ncols, ncols=ncols)\n', (28935, 28972), True, 'import matplotlib.pyplot as plt\n'), ((34462, 34478), 'matplotlib.pyplot.figure', 'plt.figure', (['name'], {}), '(name)\n', (34472, 34478), True, 'import matplotlib.pyplot as plt\n'), ((43227, 43240), 'stats.trace.ResultTrace', 'ResultTrace', ([], {}), '()\n', (43238, 43240), False, 'from stats.trace import ResultTrace\n'), ((601, 661), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""i:ln"""', "['id=', 'lim', 'nolim']"], {}), "(sys.argv[1:], 'i:ln', ['id=', 'lim', 'nolim'])\n", (614, 661), False, 'import getopt\n'), ((3065, 3228), 'commonLib.nerscPlot.paintHistogramMulti', 'paintHistogramMulti', (['name', 'data_dic'], {'bins': '(100)', 'graphFileName': 'file_name', 'labelX': 'x_axis_label', 'xLogScale': 'x_log_scale', 'labelY': '"""Number Of Jobs"""', 'xLim': 'filterCut'}), "(name, data_dic, bins=100, graphFileName=file_name,\n labelX=x_axis_label, xLogScale=x_log_scale, labelY='Number Of Jobs',\n 
xLim=filterCut)\n", (3084, 3228), False, 'from commonLib.nerscPlot import paintHistogramMulti, paintBoxPlotGeneral, paintBarsHistogram\n'), ((3359, 3527), 'commonLib.nerscPlot.paintHistogramMulti', 'paintHistogramMulti', (['name', 'data_dic'], {'bins': '(100)', 'graphFileName': 'file_name', 'labelX': 'x_axis_label', 'xLogScale': 'x_log_scale', 'labelY': '"""Number Of Jobs"""', 'filterCut': 'filterCut'}), "(name, data_dic, bins=100, graphFileName=file_name,\n labelX=x_axis_label, xLogScale=x_log_scale, labelY='Number Of Jobs',\n filterCut=filterCut)\n", (3378, 3527), False, 'from commonLib.nerscPlot import paintHistogramMulti, paintBoxPlotGeneral, paintBarsHistogram\n'), ((29398, 29421), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)'}), '(pad=0)\n', (29414, 29421), True, 'import matplotlib.pyplot as plt\n'), ((781, 792), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (789, 792), False, 'import sys\n'), ((7525, 7603), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'facecolor': 'key[1]', 'label': 'key[0]', 'edgecolor': '"""black"""', 'hatch': 'hatch'}), "(facecolor=key[1], label=key[0], edgecolor='black', hatch=hatch)\n", (7539, 7603), True, 'import matplotlib.patches as mpatches\n'), ((41411, 41462), 'stats.trace.ResultTrace.get_result_type_edge', 'ResultTrace.get_result_type_edge', (['edge', 'result_type'], {}), '(edge, result_type)\n', (41443, 41462), False, 'from stats.trace import ResultTrace\n'), ((41837, 41859), 'orchestration.definition.ExperimentDefinition', 'ExperimentDefinition', ([], {}), '()\n', (41857, 41859), False, 'from orchestration.definition import ExperimentDefinition\n'), ((43432, 43454), 'orchestration.definition.ExperimentDefinition', 'ExperimentDefinition', ([], {}), '()\n', (43452, 43454), False, 'from orchestration.definition import ExperimentDefinition\n'), ((44814, 44836), 'orchestration.definition.ExperimentDefinition', 'ExperimentDefinition', ([], {}), '()\n', (44834, 44836), False, 'from orchestration.definition import ExperimentDefinition\n'), ((46929, 46951), 'orchestration.definition.ExperimentDefinition', 'ExperimentDefinition', ([], {}), '()\n', (46949, 46951), False, 'from orchestration.definition import ExperimentDefinition\n'), ((22521, 22531), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (22529, 22531), True, 'import matplotlib.pyplot as plt\n'), ((27521, 27531), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (27529, 27531), True, 'import matplotlib.pyplot as plt\n'), ((29338, 29348), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (29346, 29348), True, 'import matplotlib.pyplot as plt\n'), ((34815, 34825), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (34823, 34825), True, 'import matplotlib.pyplot as plt\n'), ((45017, 45031), 'stats.NumericStats', 'NumericStats', ([], {}), '()\n', (45029, 45031), False, 'from stats import NumericStats\n'), ((45201, 45215), 'stats.NumericStats', 'NumericStats', ([], {}), '()\n', (45213, 45215), False, 'from stats import NumericStats\n'), ((45345, 45359), 'stats.NumericStats', 'NumericStats', ([], {}), '()\n', (45357, 45359), False, 'from stats import NumericStats\n'), ((12625, 12639), 'numpy.floor', 'floor', (['power_l'], {}), '(power_l)\n', (12630, 12639), False, 'from numpy import log10, power, floor\n'), ((42314, 42328), 'stats.NumericStats', 'NumericStats', ([], {}), '()\n', (42326, 42328), False, 'from stats import NumericStats\n'), ((42435, 42449), 'stats.NumericStats', 'NumericStats', ([], {}), '()\n', (42447, 42449), False, 'from stats 
import NumericStats\n'), ((42070, 42121), 'stats.trace.ResultTrace.get_result_type_edge', 'ResultTrace.get_result_type_edge', (['edge', 'result_type'], {}), '(edge, result_type)\n', (42102, 42121), False, 'from stats.trace import ResultTrace\n')]
|
import collections
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
import constants
import helpers
from models.recognition.recognizer import Recognizer
from visualization.visualize import plot_3x3_images_RW
def analyze(checkpoint_dir_path: str, dataset_name: str = constants.GTSR, model_img_res: int = 50, plot: bool = False,
n_plots: int = 10, verbose: int = 0):
"""
Method to analyze the performance of a model in recognition. Calculates the test accuracy, which classes
are misclassified and as what and plots the error rate per class, showing only the label of those that
have a worse error rate than the average.
In the directory there must to be two files:
1. Weights of the model. Named: 'weights.ckpt'
2. Dictionary in a json to translate from neuron to class. Named: 'neuron_to_class_dict.json'
In the dataset folder, it expects a csv file with a Path and a ClassId column.
Tha path column should have the path of each image and the ClassId column the class of each image,
this classes have to be the same as the folder names in the training set.
:param checkpoint_dir_path: Path to the directory.
:param dataset_name: Name of the dataset used.
:param model_img_res: Image resolution to use.
:param plot: Whether to plot results in 3x3 images or not. The label will be green when correctly classified,
red otherwise.
:param n_plots: Number of plots to show.
:param verbose: If > 0 it will print for every image if it was correctly or not.
"""
with open(checkpoint_dir_path + 'neuron_to_class_dict.json', "rb") as f:
neuron_to_class_dict = json.load(f)
model = Recognizer(dataset_name, model_img_res, False)
model.inference_model.load_weights(checkpoint_dir_path + 'weights.ckpt')
images_path = constants.DATASET_PATH.format(dataset_name) + 'test/'
test = pd.read_csv(constants.DATASET_PATH.format(dataset_name) + 'Test.csv')
test['Path'] = test['Path'].apply(lambda x: x.split('/')[-1])
test = test.set_index('Path')
classified = {}
n_wrong, total = 0, 0
for i in test['ClassId'].unique():
classified[str(i)] = collections.defaultdict(int)
    image_paths, titles, n_plotted = [], [], 0
images_paths = os.listdir(images_path)
for i, image_id in enumerate(tqdm(images_paths)):
if 'png' not in image_id:
continue
total += 1
image_path = images_path + image_id
img = tf.image.decode_image(open(image_path, 'rb').read(), channels=3)
img = tf.expand_dims(helpers.transform_images(img, model.img_res), 0)
real_class_id = str(test.loc[image_id]['ClassId'])
img_classes_probs = model.inference_model(img)
img_class_id = np.argmax(img_classes_probs)
img_class_id = neuron_to_class_dict[str(img_class_id)]
classified[real_class_id][img_class_id] += 1
if real_class_id != img_class_id:
n_wrong += 1
if verbose > 0:
img_class_prob = np.max(img_classes_probs.numpy())
if real_class_id == img_class_id:
print(constants.C_OKBLUE, image_id, ". Correctly labeled as",
model.labels_map_dict[str(img_class_id)].upper(),
'with probability {:.2f} %'.format(100 * img_class_prob), constants.C_ENDC)
else:
print(constants.C_FAIL, image_id, ". Wrongly labeled as",
model.labels_map_dict[str(img_class_id)].upper(),
', id =',
img_class_id, 'with probability {:.2f} %'.format(100 * img_class_prob), '. Should have been',
model.labels_map_dict[real_class_id],
', id =', real_class_id, constants.C_ENDC)
        if plot:
            if i % 9 == 0 and i > 0 and n_plotted < n_plots:
                plot_3x3_images_RW(image_paths, titles)
                image_paths, titles = [], []
                n_plotted += 1
image_paths.append(image_path)
pred_class = ("R" if real_class_id == img_class_id else "W") + model.labels_map_dict[str(img_class_id)]
titles.append(pred_class.upper())
error_rate = 100 * n_wrong / total
for real_class in sorted(classified.keys(), key=helpers.natural_keys):
value = classified[real_class]
print('Class', real_class, "({}):".format(model.labels_map_dict[real_class]))
for pred_class, times in value.items():
if pred_class != real_class:
print(" - Misslabeled for class", pred_class, '->', times, 'times',
"({})".format(model.labels_map_dict[pred_class]))
else:
print(" - Correctly labeled", times, 'times')
print()
print(
constants.C_FAIL + "Error rate (% of missclassified) = {:.2f} % in test.".format(error_rate) + constants.C_ENDC)
print(constants.C_OKBLUE + "Test accuracy of {:.2f} % in test.".format(100 - error_rate) + constants.C_ENDC)
miss_class_perc = {}
for real_class, predictions in classified.items():
total = 0
wrong = 0
for pred_class, times in predictions.items():
total += times
if real_class != pred_class:
wrong += times
miss_class_perc[real_class] = 100 * wrong / total
ordered_dict = {}
for key in sorted(miss_class_perc.keys(), key=helpers.natural_keys):
ordered_dict[key] = miss_class_perc[key]
miss_class_perc = ordered_dict
def plot_helper(show_labels):
fig, ax = plt.subplots(1, 1)
ticks = []
for real_class, class_error_rate in miss_class_perc.items():
if show_labels:
ticks.append(model.labels_map_dict[str(real_class)] if class_error_rate > error_rate else '')
else:
ticks.append(str(real_class) if class_error_rate > error_rate else '')
ax.bar(range(len(ticks)), miss_class_perc.values())
ax.plot([-1, len(ticks)], [error_rate, error_rate], 'k--')
ax.set_xticks([i for i in range(len(miss_class_perc))])
if show_labels:
ax.set_xticklabels(ticks, rotation='vertical')
else:
ax.set_xticklabels(ticks, rotation='horizontal')
        plt.xlabel('Classes')
        plt.ylabel('Percentage of misclassifications')
        plt.title('Misclassification percentages per label')
plt.xlim([-1, len(ticks)])
plt.tight_layout()
plt.show()
plot_helper(True)
plot_helper(False)
return classified, error_rate
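# Usage sketch (hypothetical checkpoint directory; note the trailing slash,
# since the file names are concatenated directly):
#   classified, error_rate = analyze("checkpoints/gtsr/", plot=False, verbose=1)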
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"tqdm.tqdm",
"json.load",
"constants.DATASET_PATH.format",
"matplotlib.pyplot.show",
"numpy.argmax",
"helpers.transform_images",
"collections.defaultdict",
"visualization.visualize.plot_3x3_images_RW",
"models.recognition.recognizer.Recognizer",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"os.listdir"
] |
[((1775, 1821), 'models.recognition.recognizer.Recognizer', 'Recognizer', (['dataset_name', 'model_img_res', '(False)'], {}), '(dataset_name, model_img_res, False)\n', (1785, 1821), False, 'from models.recognition.recognizer import Recognizer\n'), ((2362, 2385), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (2372, 2385), False, 'import os\n'), ((1749, 1761), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1758, 1761), False, 'import json\n'), ((1917, 1960), 'constants.DATASET_PATH.format', 'constants.DATASET_PATH.format', (['dataset_name'], {}), '(dataset_name)\n', (1946, 1960), False, 'import constants\n'), ((2267, 2295), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (2290, 2295), False, 'import collections\n'), ((2419, 2437), 'tqdm.tqdm', 'tqdm', (['images_paths'], {}), '(images_paths)\n', (2423, 2437), False, 'from tqdm import tqdm\n'), ((2853, 2881), 'numpy.argmax', 'np.argmax', (['img_classes_probs'], {}), '(img_classes_probs)\n', (2862, 2881), True, 'import numpy as np\n'), ((5690, 5708), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (5702, 5708), True, 'import matplotlib.pyplot as plt\n'), ((6433, 6481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of miss-classifications"""'], {}), "('Percentage of miss-classifications')\n", (6443, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6490, 6544), 'matplotlib.pyplot.title', 'plt.title', (['"""Miss-classification percentages per label"""'], {}), "('Miss-classification percentages per label')\n", (6499, 6544), True, 'import matplotlib.pyplot as plt\n'), ((6588, 6606), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6604, 6606), True, 'import matplotlib.pyplot as plt\n'), ((6615, 6625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6623, 6625), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2038), 'constants.DATASET_PATH.format', 'constants.DATASET_PATH.format', (['dataset_name'], {}), '(dataset_name)\n', (2024, 2038), False, 'import constants\n'), ((2666, 2710), 'helpers.transform_images', 'helpers.transform_images', (['img', 'model.img_res'], {}), '(img, model.img_res)\n', (2690, 2710), False, 'import helpers\n'), ((6402, 6423), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Classes"""'], {}), "('Classes')\n", (6412, 6423), True, 'import matplotlib.pyplot as plt\n'), ((3977, 4016), 'visualization.visualize.plot_3x3_images_RW', 'plot_3x3_images_RW', (['image_paths', 'titles'], {}), '(image_paths, titles)\n', (3995, 4016), False, 'from visualization.visualize import plot_3x3_images_RW\n')]
|
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from processor import data_filling, data_processing
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
pd.set_option('display.max_columns', None)
def produce(m):
x_train, y_train, x_test = data_processing()
predictor = m.fit(x_train, y_train)
y_predict = predictor.predict(x_test)
np.savetxt('result.txt', y_predict, fmt='%d')
def test(m):
x_train, y_train, x_test = data_processing()
print(x_train.shape)
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2)
predictor = m.fit(x_train, y_train)
y_predict = predictor.predict(x_test)
rmse = mean_squared_error(y_test, y_predict) ** 0.5
print(rmse)
# model = SVR(kernel='rbf')
# model = RandomForestRegressor(n_estimators=1000)
model = XGBRegressor(n_estimators=600, max_depth=5)
test(model)
# produce(model)
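# test() reports the hold-out RMSE on an 80/20 split; switch to produce() to
# write predictions for the full test set to result.txt.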
|
[
"sklearn.model_selection.train_test_split",
"numpy.savetxt",
"processor.data_processing",
"xgboost.XGBRegressor",
"pandas.set_option",
"sklearn.metrics.mean_squared_error"
] |
[((276, 318), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (289, 318), True, 'import pandas as pd\n'), ((940, 983), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'n_estimators': '(600)', 'max_depth': '(5)'}), '(n_estimators=600, max_depth=5)\n', (952, 983), False, 'from xgboost import XGBRegressor\n'), ((368, 385), 'processor.data_processing', 'data_processing', ([], {}), '()\n', (383, 385), False, 'from processor import data_filling, data_processing\n'), ((472, 517), 'numpy.savetxt', 'np.savetxt', (['"""result.txt"""', 'y_predict'], {'fmt': '"""%d"""'}), "('result.txt', y_predict, fmt='%d')\n", (482, 517), True, 'import numpy as np\n'), ((564, 581), 'processor.data_processing', 'data_processing', ([], {}), '()\n', (579, 581), False, 'from processor import data_filling, data_processing\n'), ((646, 695), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train', 'y_train'], {'test_size': '(0.2)'}), '(x_train, y_train, test_size=0.2)\n', (662, 695), False, 'from sklearn.model_selection import train_test_split\n'), ((790, 827), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_predict'], {}), '(y_test, y_predict)\n', (808, 827), False, 'from sklearn.metrics import mean_squared_error\n')]
|
import time
import numpy as np
import torch
from torch import nn
from rl.learners.ppo import PPO
class PPOTestTime(PPO):
def learn_test_time(self, ne, bs, mbs):
os, acts, rs, op, logpbs, _, dones = self.buf.get(self.pol.dist_stack)
os, acts, rs, op, logpbs, dones = os[:bs], acts[:bs], rs[:bs], op[:bs], logpbs[:bs], dones[:bs]
with torch.no_grad():
pre_vals = self.vf.value(torch.cat((os, op)))
adv_calc_start_time = time.time()
v_rets, advs = self.get_rets_advs(rs, dones, pre_vals.t()[0])
adv_calc_time = time.time() - adv_calc_start_time
inds = np.arange(os.shape[0])
update_start_time = time.time()
for itr in range(ne):
np.random.shuffle(inds)
for start in range(0, len(os), mbs):
ind = inds[start:start + mbs]
# Policy update preparation
logpts, dist = self.pol.logp_dist(os[ind], acts[ind])
grad_sub = (logpts - logpbs[ind]).exp()
p_loss0 = - (grad_sub * advs[ind])
ext_loss = - (torch.clamp(grad_sub, 1 - self.clip_eps, 1 + self.clip_eps) * advs[ind])
p_loss = torch.max(p_loss0, ext_loss)
p_loss = p_loss.mean()
# value update preparation
vals = self.vf.value(os[ind])
v_loss = ((v_rets[ind] - vals).pow(2)).mean()
# Policy update
if self.u_joint_opt:
p_loss += v_loss
self.opt_pol.zero_grad()
p_loss.backward()
if self.max_grad_norm > 0:
nn.utils.clip_grad_norm_(list(self.pol.parameters()) + list(self.vf.parameters()), self.max_grad_norm)
self.opt_pol.step()
# Value update
if not self.u_joint_opt:
self.opt_vf.zero_grad()
v_loss.backward()
if self.max_grad_norm > 0:
nn.utils.clip_grad_norm_(self.vf.parameters(), self.max_grad_norm)
self.opt_vf.step()
update_time = time.time() - update_start_time
return adv_calc_time, update_time
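# Usage sketch (assumes a fully configured PPOTestTime instance `agent` whose
# rollout buffer already holds at least `bs` transitions):
#   adv_time, upd_time = agent.learn_test_time(ne=10, bs=2048, mbs=64)
#   print("advantage calc: %.3fs, updates: %.3fs" % (adv_time, upd_time))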
|
[
"torch.cat",
"time.time",
"torch.clamp",
"numpy.arange",
"torch.max",
"torch.no_grad",
"numpy.random.shuffle"
] |
[((471, 482), 'time.time', 'time.time', ([], {}), '()\n', (480, 482), False, 'import time\n'), ((627, 649), 'numpy.arange', 'np.arange', (['os.shape[0]'], {}), '(os.shape[0])\n', (636, 649), True, 'import numpy as np\n'), ((678, 689), 'time.time', 'time.time', ([], {}), '()\n', (687, 689), False, 'import time\n'), ((365, 380), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (378, 380), False, 'import torch\n'), ((577, 588), 'time.time', 'time.time', ([], {}), '()\n', (586, 588), False, 'import time\n'), ((732, 755), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (749, 755), True, 'import numpy as np\n'), ((2158, 2169), 'time.time', 'time.time', ([], {}), '()\n', (2167, 2169), False, 'import time\n'), ((419, 438), 'torch.cat', 'torch.cat', (['(os, op)'], {}), '((os, op))\n', (428, 438), False, 'import torch\n'), ((1200, 1228), 'torch.max', 'torch.max', (['p_loss0', 'ext_loss'], {}), '(p_loss0, ext_loss)\n', (1209, 1228), False, 'import torch\n'), ((1102, 1161), 'torch.clamp', 'torch.clamp', (['grad_sub', '(1 - self.clip_eps)', '(1 + self.clip_eps)'], {}), '(grad_sub, 1 - self.clip_eps, 1 + self.clip_eps)\n', (1113, 1161), False, 'import torch\n')]
|
import numpy
from ._helpers import HexahedronScheme
def product(scheme1d):
schemes = scheme1d if isinstance(scheme1d, list) else 3 * [scheme1d]
wy, wz, wx = numpy.meshgrid(
schemes[0].weights, schemes[1].weights, schemes[2].weights
)
weights = numpy.vstack([wx.flatten(), wy.flatten(), wz.flatten()]).T
weights = numpy.prod(weights, axis=1)
# the order, yeah...
y, z, x = numpy.meshgrid(schemes[0].points, schemes[1].points, schemes[2].points)
points = numpy.vstack([x.flatten(), y.flatten(), z.flatten()]).T
degree = min([s.degree for s in schemes])
    return HexahedronScheme(
        # use schemes[0] so a list argument works too (scheme1d.name would fail)
        "Product scheme ({})".format(schemes[0].name), weights, points, degree
    )
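# Usage sketch (assumes `line_scheme` is any 1-D scheme object exposing
# .weights, .points, .degree and .name, e.g. a Gauss-Legendre rule):
#   hex_scheme = product(line_scheme)        # same rule on all three axes
#   hex_scheme = product([s_x, s_y, s_z])    # or one rule per axis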
|
[
"numpy.meshgrid",
"numpy.prod"
] |
[((169, 243), 'numpy.meshgrid', 'numpy.meshgrid', (['schemes[0].weights', 'schemes[1].weights', 'schemes[2].weights'], {}), '(schemes[0].weights, schemes[1].weights, schemes[2].weights)\n', (183, 243), False, 'import numpy\n'), ((345, 372), 'numpy.prod', 'numpy.prod', (['weights'], {'axis': '(1)'}), '(weights, axis=1)\n', (355, 372), False, 'import numpy\n'), ((412, 483), 'numpy.meshgrid', 'numpy.meshgrid', (['schemes[0].points', 'schemes[1].points', 'schemes[2].points'], {}), '(schemes[0].points, schemes[1].points, schemes[2].points)\n', (426, 483), False, 'import numpy\n')]
|
#!/bin/python
"""This is a simple text editor"""
# Import the libraries we are using. It is good practice to import all necessary
# libraries in the first lines of a file.
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Create a function to read the data file
def read_data(filename,delimiter=',',starting_row=0):
"""This function reads data from a specified filename.
The specified filename should point to a .csv file."""
# Create an array (a multi-dimensional table) out of our data file, full of text
all_data = np.genfromtxt(filename, delimiter=delimiter,skip_header=5)
# Select the data range we are interested in, convert it into a new array, full of numbers
temperature_data = np.array(all_data[starting_row:,:], dtype=float)
return temperature_data
def process_data(temperature_data):
"""Given some input temperature data this function converts the second column
from degrees F to K and appends a new column with that data. """
    # Compute a new column by converting column 1 from degrees Fahrenheit to Kelvin
    temperature_kelvin = (temperature_data[:,1,None] - 32) * 5/9 + 273
# Append this new column to the existing temperature_data array
processed_temperature_data = np.append(temperature_data, temperature_kelvin,1)
return processed_temperature_data
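# Sanity check of the conversion: 32 °F gives (32 - 32) * 5/9 + 273 = 273 K,
# the freezing point under the script's rounded 273 offset.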
def plot_data(processed_temperature_data, plot_filename):
""" Given some input temperature data this function converts the second column
from degrees F to K and appends a new column with that data. """
# Create a figure of the processed data
temperature_figure = plt.figure()
plt.bar (processed_temperature_data[:,0],
processed_temperature_data[:,2],
width=50, color='blue')
plt.show(block=True)
temperature_figure.savefig(plot_filename)
def convert_data(filename, output_filename):
""" Read data from a CSV falled filename, and write this
data into a Pandas dataframe, and write this dataframe
into a json file called output_filename. """
all_data = pd.read_csv(filename, index_col='Date', header=4)
all_data.info()
all_data.to_json(output_filename)
def plot():
"""Maine program that reads a dataset and processes it, plots it, and write the
converted data into a json file"""
input_file = "110-tavg-12-12-1950-2020.csv"
plot_file = "temperature-over-time.pdf"
json_output_file = "data_output.json"
data_directory = os.path.realpath(os.path.join(os.path.dirname(__file__),"..","data"))
results_directory = os.path.realpath(os.path.join(os.path.dirname(__file__),"..","results"))
input_filename = os.path.join(data_directory,input_file)
plot_filename = os.path.join(results_directory,plot_file)
json_filename = os.path.join(results_directory,json_output_file)
temperature_data = read_data(input_filename, starting_row=5)
processed_temperature_data = process_data(temperature_data)
plot_data(processed_temperature_data, plot_filename)
convert_data(input_filename, json_filename)
if __name__ == "__main__":
print(sys.argv)
plot()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"os.path.dirname",
"numpy.genfromtxt",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.array",
"os.path.join"
] |
[((582, 641), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': 'delimiter', 'skip_header': '(5)'}), '(filename, delimiter=delimiter, skip_header=5)\n', (595, 641), True, 'import numpy as np\n'), ((760, 809), 'numpy.array', 'np.array', (['all_data[starting_row:, :]'], {'dtype': 'float'}), '(all_data[starting_row:, :], dtype=float)\n', (768, 809), True, 'import numpy as np\n'), ((1266, 1316), 'numpy.append', 'np.append', (['temperature_data', 'temperature_kelvin', '(1)'], {}), '(temperature_data, temperature_kelvin, 1)\n', (1275, 1316), True, 'import numpy as np\n'), ((1634, 1646), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1644, 1646), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1754), 'matplotlib.pyplot.bar', 'plt.bar', (['processed_temperature_data[:, 0]', 'processed_temperature_data[:, 2]'], {'width': '(50)', 'color': '"""blue"""'}), "(processed_temperature_data[:, 0], processed_temperature_data[:, 2],\n width=50, color='blue')\n", (1658, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1820, 1840), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1828, 1840), True, 'import matplotlib.pyplot as plt\n'), ((2120, 2169), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '"""Date"""', 'header': '(4)'}), "(filename, index_col='Date', header=4)\n", (2131, 2169), True, 'import pandas as pd\n'), ((2710, 2750), 'os.path.join', 'os.path.join', (['data_directory', 'input_file'], {}), '(data_directory, input_file)\n', (2722, 2750), False, 'import os\n'), ((2770, 2812), 'os.path.join', 'os.path.join', (['results_directory', 'plot_file'], {}), '(results_directory, plot_file)\n', (2782, 2812), False, 'import os\n'), ((2832, 2881), 'os.path.join', 'os.path.join', (['results_directory', 'json_output_file'], {}), '(results_directory, json_output_file)\n', (2844, 2881), False, 'import os\n'), ((2551, 2576), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2566, 2576), False, 'import os\n'), ((2645, 2670), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2660, 2670), False, 'import os\n')]
|
import numpy as np
import logbook
import pandas as pd
from zipline.lib.adjusted_array import AdjustedArray
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.utils.calendars import get_calendar
from zipline.errors import NoFurtherDataError
from pipeline_live.data.sources import alpaca
log = logbook.Logger(__name__)
class USEquityPricingLoader(PipelineLoader):
"""
PipelineLoader for US Equity Pricing data
"""
def __init__(self):
cal = get_calendar('NYSE')
self._all_sessions = cal.all_sessions
def load_adjusted_array(self, columns, dates, symbols, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the start of each date. We assume that the latest data
# known on day N is the data from day (N - 1), so we shift all query
# dates back by a day.
start_date, end_date = _shift_dates(
self._all_sessions, dates[0], dates[-1], shift=1,
)
sessions = self._all_sessions
sessions = sessions[(sessions >= start_date) & (sessions <= end_date)]
timedelta = pd.Timestamp.utcnow() - start_date
chart_range = timedelta.days + 1
log.info('chart_range={}'.format(chart_range))
prices = alpaca.get_stockprices(chart_range)
dfs = []
for symbol in symbols:
if symbol not in prices:
df = pd.DataFrame(
{c.name: c.missing_value for c in columns},
index=sessions
)
else:
df = prices[symbol]
df = df.reindex(sessions, method='ffill')
dfs.append(df)
raw_arrays = {}
for c in columns:
colname = c.name
parsed_values = []
for df in dfs:
if not df.empty:
value = df[colname].values
else:
value = np.empty(shape=(len(sessions)))
value.fill(np.nan)
parsed_values.append(value)
raw_arrays[colname] = np.stack(
parsed_values,
axis=-1
)
out = {}
for c in columns:
c_raw = raw_arrays[c.name]
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
{},
c.missing_value
)
return out
def _shift_dates(dates, start_date, end_date, shift):
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
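# Usage sketch (illustrative): given a trading-calendar DatetimeIndex `sessions`,
#   start, end = _shift_dates(sessions, dates[0], dates[-1], shift=1)
# yields the sessions one day before each query endpoint, so the pipeline only
# sees data that was already known at the start of each date.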
|
[
"numpy.stack",
"pandas.DataFrame",
"logbook.Logger",
"pandas.Timestamp.utcnow",
"zipline.utils.calendars.get_calendar",
"pipeline_live.data.sources.alpaca.get_stockprices"
] |
[((315, 339), 'logbook.Logger', 'logbook.Logger', (['__name__'], {}), '(__name__)\n', (329, 339), False, 'import logbook\n'), ((488, 508), 'zipline.utils.calendars.get_calendar', 'get_calendar', (['"""NYSE"""'], {}), "('NYSE')\n", (500, 508), False, 'from zipline.utils.calendars import get_calendar\n'), ((1369, 1404), 'pipeline_live.data.sources.alpaca.get_stockprices', 'alpaca.get_stockprices', (['chart_range'], {}), '(chart_range)\n', (1391, 1404), False, 'from pipeline_live.data.sources import alpaca\n'), ((1221, 1242), 'pandas.Timestamp.utcnow', 'pd.Timestamp.utcnow', ([], {}), '()\n', (1240, 1242), True, 'import pandas as pd\n'), ((2201, 2233), 'numpy.stack', 'np.stack', (['parsed_values'], {'axis': '(-1)'}), '(parsed_values, axis=-1)\n', (2209, 2233), True, 'import numpy as np\n'), ((1512, 1584), 'pandas.DataFrame', 'pd.DataFrame', (['{c.name: c.missing_value for c in columns}'], {'index': 'sessions'}), '({c.name: c.missing_value for c in columns}, index=sessions)\n', (1524, 1584), True, 'import pandas as pd\n')]
|
from nose.tools import *
import scipy.stats
import torch
import numpy as np
from stable_nalu.dataset import SimpleFunctionStaticDataset
def test_solveable_by_linear_algebra():
dataset = SimpleFunctionStaticDataset(
operation='add', seed=0
)
dataset_test = iter(dataset.fork(input_range=1).dataloader(batch_size=100))
x_batch, t_batch = next(dataset_test)
x_batch_np = np.stack(x_batch)
t_batch_np = np.stack(t_batch)
w_merged_np = np.linalg.solve(x_batch_np, t_batch_np.ravel())
w_merged_np_int = np.round(w_merged_np, 0).astype('int8')
# W is whole numbers
np.testing.assert_almost_equal(
w_merged_np - w_merged_np_int,
np.zeros(100),
decimal=4
)
# W is either 0, 1, 2
# NOTE: a different seed might not result in an overlap, thus {2} might
# not be present.
assert_equal(
set(w_merged_np_int.tolist()),
{0, 1, 2}
)
# Compute a, b range parameters
    # For seed=0, the b subset is a subset of the a subset, which is assumed
    # by the following algorithm.
a_start = None
a_end = None
b_start = None
b_end = None
    previous_w_value = 0
    for w_index, w_value in enumerate(w_merged_np_int.tolist()):
        if w_value == 1 and previous_w_value == 0:
            a_start = w_index
        elif w_value == 0 and previous_w_value == 1:
            a_end = w_index
        elif w_value == 2 and previous_w_value == 1:
            b_start = w_index
        elif w_value == 1 and previous_w_value == 2:
            b_end = w_index
        previous_w_value = w_value
# Compare a and b range parameters
assert_equal(a_start, dataset.a_start)
assert_equal(a_end, dataset.a_end)
assert_equal(b_start, dataset.b_start)
assert_equal(b_end, dataset.b_end)
def test_input_range():
dataset = SimpleFunctionStaticDataset(
operation='add',
vector_size=10000,
seed=0
)
x, t = dataset.fork(input_range=5)[0]
_, p = scipy.stats.kstest(
x,
scipy.stats.uniform(loc=0, scale=5).cdf
)
assert p > 0.5
def test_output_shape():
dataset = SimpleFunctionStaticDataset(
operation='add',
seed=0
)
x, t = dataset.fork(input_range=5)[0]
assert_equal(x.shape, (100, ))
    # Note, t.shape should be a 1-long vector, not a scalar. Otherwise
    # the loss function gets confused about what the observation dimension
    # is.
assert_equal(t.shape, (1, ))
|
[
"numpy.stack",
"numpy.round",
"numpy.zeros",
"stable_nalu.dataset.SimpleFunctionStaticDataset"
] |
[((194, 246), 'stable_nalu.dataset.SimpleFunctionStaticDataset', 'SimpleFunctionStaticDataset', ([], {'operation': '"""add"""', 'seed': '(0)'}), "(operation='add', seed=0)\n", (221, 246), False, 'from stable_nalu.dataset import SimpleFunctionStaticDataset\n'), ((400, 417), 'numpy.stack', 'np.stack', (['x_batch'], {}), '(x_batch)\n', (408, 417), True, 'import numpy as np\n'), ((435, 452), 'numpy.stack', 'np.stack', (['t_batch'], {}), '(t_batch)\n', (443, 452), True, 'import numpy as np\n'), ((1852, 1923), 'stable_nalu.dataset.SimpleFunctionStaticDataset', 'SimpleFunctionStaticDataset', ([], {'operation': '"""add"""', 'vector_size': '(10000)', 'seed': '(0)'}), "(operation='add', vector_size=10000, seed=0)\n", (1879, 1923), False, 'from stable_nalu.dataset import SimpleFunctionStaticDataset\n'), ((2151, 2203), 'stable_nalu.dataset.SimpleFunctionStaticDataset', 'SimpleFunctionStaticDataset', ([], {'operation': '"""add"""', 'seed': '(0)'}), "(operation='add', seed=0)\n", (2178, 2203), False, 'from stable_nalu.dataset import SimpleFunctionStaticDataset\n'), ((691, 704), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (699, 704), True, 'import numpy as np\n'), ((542, 566), 'numpy.round', 'np.round', (['w_merged_np', '(0)'], {}), '(w_merged_np, 0)\n', (550, 566), True, 'import numpy as np\n')]
|
import re
import json
import os
import requests
import numpy as np
import copy
from sklearn.metrics.pairwise import cosine_similarity
import spacy
from collections import defaultdict
from networkx import algorithms
from fourlang.stanford_wrapper import StanfordParser
from fourlang.fourlang import FourLang
from .parse_data import load_vec
from .utils import get_distance
class Similarity(object):
def __init__(self, lang="en", with_embedding=True):
self.lang = lang
self.language_models = {"en": "en_core_web_sm", "it": "it_core_news_sm", "de": "de_core_news_sm"}
self.cross_lingual_path = "/home/adaamko/data/DMR/"
self.nlp = spacy.load(self.language_models[self.lang])
self.stanford_parser = StanfordParser()
self.fourlang_expressions = ["has", "at", "npmod"]
if with_embedding:
fourlang_embeddings = self.call_elmo_service(self.fourlang_expressions)
self.fourlang_expression_embeddings = {expr: emb[0] for (expr, emb) in
zip(self.fourlang_expressions, fourlang_embeddings)}
def clear_node(self, node):
"""
Clears the node from the 4lang id parts
:param node: the text to clear
:return: the cleared text
"""
return re.sub(r'_[0-9][0-9]*', '', node)
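    # Usage sketch: clear_node("dog_17") -> "dog" (the numeric 4lang id suffix is removed).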
def init_cross_lingual_embeddings(self, src_lang, tgt_lang):
"""
Initialize cross-lingual embeddings
:param src_lang: the language of the premise
:param tgt_lang: the language of the hypothesis
:return: None
"""
src_path = '/home/adaamko/data/DMR/wiki.multi.' + src_lang + '.vec'
tgt_path = '/home/adaamko/data/DMR/wiki.multi.' + tgt_lang + '.vec'
nmax = 250000 # maximum number of word embeddings to load
self.src_embeddings, self.src_id2word, self.src_word2id = load_vec(src_path, nmax)
self.tgt_embeddings, self.tgt_id2word, self.tgt_word2id = load_vec(tgt_path, nmax)
self.src_word2id = {v: k for k, v in self.src_id2word.items()}
self.tgt_word2id = {v: k for k, v in self.tgt_id2word.items()}
self.nlp_src_lang = spacy.load(self.language_models[src_lang])
self.nlp_tgt_lang = spacy.load(self.language_models[tgt_lang])
def init_dictionaries(self, src_lang, tgt_lang):
"""
Initialize dictionaries
:param src_lang: the language of the premise
:param tgt_lang: the language of the hypothesis
:return: None
"""
path = "../dictionaries/" + src_lang + "_dictionary"
dictionary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
dictionary = defaultdict(list)
with open(dictionary_path, "r+") as f:
for line in f:
line = line.strip().split("\t")
if line[0] == src_lang and line[2] == tgt_lang:
dictionary[line[1].lower()].append(line[3].lower())
self.nlp_src_lang = spacy.load(self.language_models[src_lang])
self.nlp_tgt_lang = spacy.load(self.language_models[tgt_lang])
self.dictionary = dictionary
def call_elmo_service(self, sentences, port=1666):
"""
Calls the already running elmo service
:param sentences: the sentences we want to get the embeddings for
:param port: the port of the service
:return: list of embeddings
"""
data = json.dumps({"sentences": sentences})
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post("http://127.0.0.1:{}/{}".format(port, self.lang), data=data, headers=headers)
return [np.asarray(e) for e in r.json()["embeddings"]]
def get_elmo_embeddings(self, premise, hypothesis, port=1666):
"""
Calls the call_elmo_service with the parameters
:param premise: the premise sentence
:param hypothesis: the hypothesis sentence
:param port: the port of the service
:return: list of embeddings
"""
return self.call_elmo_service([premise, hypothesis], port=port)
def get_embedding_dictionary(self, token_lemmas, embeddings, first_word):
"""
        Gets the dictionary of the lemmas and the corresponding embeddings based on existing lemmas and embeddings
:param token_lemmas: the lemmas in the sentence
:param embeddings: the embedding of the sentence
:param first_word: the first (not lemmatized) word of the sentence
:return: the dictionary of the lemma-to-token relations
"""
word_dict = self.fourlang_expression_embeddings.copy()
word_dict[first_word] = embeddings[0]
for (words, embedding) in zip(token_lemmas, embeddings):
for w in words:
word_dict[w] = embedding
return word_dict
def get_elmo_nodes(self, premise, def_premise, hypothesis, def_hypothesis):
"""
        Gets the dictionary of the lemmas and the corresponding embeddings for the premise and hypothesis
:param premise: the premise word
:param def_premise: the definition of the premise
:param hypothesis: the hypothesis word
:param def_hypothesis: the definition of the hypothesis
:return: the embedding dictionary of the premise and hypothesis
"""
premise_token_lemmas, premise_token_words = self.stanford_parser.lemmatize_text(": ".join([premise, def_premise]))
hypothesis_token_lemmas, hypothesis_token_words = self.stanford_parser.lemmatize_text(": ".join([hypothesis, def_hypothesis]))
premise_full_def = " ".join(premise_token_words)
hypothesis_full_def = " ".join(hypothesis_token_words)
embeddings = self.get_elmo_embeddings(premise_full_def, hypothesis_full_def)
premise_words = self.get_embedding_dictionary(premise_token_lemmas, embeddings[0], premise)
hypothesis_words = self.get_embedding_dictionary(hypothesis_token_lemmas, embeddings[1], hypothesis)
return premise_words, hypothesis_words
def get_elmo_edges(self, graph, words):
"""
Create the list of edges containing the triplet of the two node embedding and the edge type
:param graph: the graph of the definition
:param words: the dictionary of pre-generated embeddings
:return: the list of edges
"""
edges = []
for (source, receiver, edge) in graph.G.edges(data=True):
cleared_source = self.clear_node(source)
cleared_receiver = self.clear_node(receiver)
if cleared_source not in words:
print([k for k in words.keys()])
print([self.clear_node(k) for k in graph.G.nodes])
s = self.call_elmo_service([cleared_source])[0][0]
else:
s = words[cleared_source]
if cleared_receiver not in words:
print([k for k in words.keys()])
print([self.clear_node(k) for k in graph.G.nodes])
r = self.call_elmo_service([cleared_receiver])[0][0]
else:
r = words[cleared_receiver]
edges.append((s, r, edge['color']))
return edges
def cross_lingual_dictionary_bag(self, def_premise, def_hypothesis, premise_src=True):
"""
Cross-lingual bag of words approach
:param def_premise: the definition of the premise
:param def_hypothesis: the definition of the hypothesis
:param premise_src: whether or not to keep the ordering of the premise and hypothesis
:return: the best score
"""
if premise_src:
prem = self.nlp_src_lang(def_premise)
hyp = self.nlp_tgt_lang(def_hypothesis)
else:
hyp = self.nlp_src_lang(def_premise)
prem = self.nlp_tgt_lang(def_hypothesis)
filtered_prem = []
for token in prem:
if not token.is_stop and not token.is_punct:
filtered_prem.append(token.lemma_)
filtered_hyp = []
for token in hyp:
if not token.is_stop and not token.is_punct:
filtered_hyp.append(token.lemma_)
dic_elements = []
for word in filtered_prem:
if not self.dictionary[word]:
dic_elements.append(word)
for el in self.dictionary[word]:
dic_elements.append(el)
filtered_prem = set(dic_elements)
filtered_hyp = set(filtered_hyp)
sim = filtered_hyp & filtered_prem
if not sim or len(filtered_hyp) == 0:
return 0
else:
return float(len(sim)) / len(filtered_hyp)
def cross_lingual_dictionary_4lang(self, graph_premise, graph_hypothesis, premise_src=True):
"""
Asymmetric Jaccard similarity between the lowercase nodes of the definition graphs
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
:param premise_src: whether or not to keep the ordering of the premise and hypothesis
:return: the score
"""
if premise_src:
prem = set([self.clear_node(node).lower() for node in graph_premise.G.nodes])
hyp = set([self.clear_node(node).lower() for node in graph_hypothesis.G.nodes])
else:
hyp = set([self.clear_node(node).lower() for node in graph_premise.G.nodes])
prem = set([self.clear_node(node).lower() for node in graph_hypothesis.G.nodes])
dic_elements = []
for word in prem:
if not self.dictionary[word]:
dic_elements.append(word)
for el in self.dictionary[word]:
dic_elements.append(el)
filtered_prem = set(dic_elements)
filtered_hyp = set(hyp)
sim = filtered_hyp & filtered_prem
if not sim or len(filtered_hyp) == 0:
return 0
else:
return float(len(sim)) / len(filtered_hyp)
def muse_min_distance_4lang(self, graph_premise, graph_hypothesis, premise_src=True):
"""
Asymmetric cross-lingual Jaccard similarity between the nodes of the definition graphs
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
:param premise_src: whether or not to keep the ordering of the premise and hypothesis
:return: the score
"""
if premise_src:
prem = set([self.clear_node(node).lower() for node in graph_premise.G.nodes])
hyp = set([self.clear_node(node).lower() for node in graph_hypothesis.G.nodes])
else:
hyp = set([self.clear_node(node).lower() for node in graph_premise.G.nodes])
prem = set([self.clear_node(node).lower() for node in graph_hypothesis.G.nodes])
max_score = 0
for prem_word in prem:
for hyp_word in hyp:
try:
distance = get_distance(prem_word, hyp_word, self.src_embeddings, self.tgt_embeddings, self.src_word2id, self.tgt_word2id)
except KeyError:
distance = 0
if distance > max_score:
max_score = distance
return max_score
def compute_min_distance_scores(self, def_premise, def_hypothesis, premise_src=True):
"""
Compute the cross-lingual minimum distance between words of the definition sentences
:param def_premise: the definition of the premise
:param def_hypothesis: the definition of the hypothesis
:param premise_src: whether or not to keep the ordering of the premise and hypothesis
:return: the best achievable score
"""
if premise_src:
prem = self.nlp_src_lang(def_premise)
hyp = self.nlp_tgt_lang(def_hypothesis)
else:
hyp = self.nlp_src_lang(def_premise)
prem = self.nlp_tgt_lang(def_hypothesis)
filtered_prem = []
for token in prem:
if not token.is_stop and not token.is_punct:
filtered_prem.append(token.lemma_)
filtered_hyp = []
for token in hyp:
if not token.is_stop and not token.is_punct:
filtered_hyp.append(token.lemma_)
filtered_prem = set(filtered_prem)
filtered_hyp = set(filtered_hyp)
max_score = 0
for prem_word in filtered_prem:
for hyp_word in filtered_hyp:
try:
distance = get_distance(prem_word, hyp_word, self.src_embeddings, self.tgt_embeddings, self.src_word2id, self.tgt_word2id)
except KeyError:
distance = 0
if distance > max_score:
max_score = distance
return max_score
def asim_jac_words(self, def_premise, def_hypothesis):
"""
Asymmetric Jaccard similarity between the words of the definitions
:param def_premise: the definition of the premise
:param def_hypothesis: the definition of the hypothesis
:return: the ratio of overlap per the length of the hypothesis definition
"""
prem = self.nlp(def_premise)
hyp = self.nlp(def_hypothesis)
filtered_prem = []
for token in prem:
if not token.is_stop and not token.is_punct:
filtered_prem.append(token.lemma_)
filtered_hyp = []
for token in hyp:
if not token.is_stop and not token.is_punct:
filtered_hyp.append(token.lemma_)
filtered_prem = set(filtered_prem)
filtered_hyp = set(filtered_hyp)
sim = filtered_hyp & filtered_prem
if not sim or len(filtered_hyp) == 0:
return 0
else:
return float(len(sim)) / len(filtered_hyp)
def asim_jac_edges(self, graph_premise, graph_hypothesis):
"""
Asymmetric Jaccard similarity between the edges of the definition graphs
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
:return: the ratio of overlapping edges per the length of the hypothesis definition
"""
prem = set([(self.clear_node(s), self.clear_node(r), e['color']) for (s, r, e) in graph_premise.G.edges(data=True)])
hyp = set([(self.clear_node(s), self.clear_node(r), e['color']) for (s, r, e) in graph_hypothesis.G.edges(data=True)])
sim = hyp & prem
if not sim or len(hyp) == 0:
return 0
else:
return float(len(sim)) / len(hyp)
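    # All asim_jac_* scores implement the same asymmetric Jaccard idea:
    # |hyp & prem| / |hyp|, i.e. how much of the hypothesis the premise covers,
    # rather than a symmetric overlap ratio.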
def multi_def_best_match(self, graph_premises, graph_hypothesises, similarity_function, return_graphs=False):
"""
Find the best matching premise and hypothesis
:param graph_premises: the definition graph of the premise
:param graph_hypothesises: the definition graph of the hypothesis
:param similarity_function: the similarity function to use
:param return_graphs: whether to return the graphs
:return: the score and the best graph pair
"""
if len(graph_premises) > 0 and len(graph_hypothesises) > 0:
best_pair = (graph_premises[0], graph_hypothesises[0])
best_match = 0
for graph_premise in graph_premises:
for graph_hypothesis in graph_hypothesises:
match = similarity_function(graph_premise, graph_hypothesis)
if match > best_match:
best_match = match
best_pair = (graph_premise, graph_hypothesis)
if best_match == 1.0:
break
if best_match == 1.0:
break
if return_graphs:
return best_match, best_pair
return best_match
elif return_graphs:
if len(graph_premises) > 0:
return 0, (graph_premises[0], FourLang())
elif len(graph_hypothesises) > 0:
return 0, (FourLang(), graph_hypothesises[0])
else:
return 0, (FourLang(), FourLang())
return 0
def asim_jac_nodes(self, graph_premise, graph_hypothesis):
"""
Asymmetric Jaccard similarity between the nodes of the definition graphs
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
:return: the ratio of overlapping nodes per the length of the hypothesis definition
"""
prem = set([self.clear_node(node) for node in graph_premise.G.nodes])
hyp = set([self.clear_node(node) for node in graph_hypothesis.G.nodes])
sim = hyp & prem
if not sim or len(hyp) == 0:
return 0
else:
return float(len(sim)) / len(hyp)
    def asim_jac_nodes_graded(self, graph_premise, graph_hypothesis):
        """
        Graded asymmetric Jaccard similarity between the nodes of the definition graphs,
        weighting each overlapping node by its distance from the root in both graphs
        :param graph_premise: the definition graph of the premise
        :param graph_hypothesis: the definition graph of the hypothesis
        :return: the graded node overlap per the length of the hypothesis definition
        """
        prem = [(node, self.clear_node(node)) for node in graph_premise.G.nodes]
        hyp = [(node, self.clear_node(node)) for node in graph_hypothesis.G.nodes]
        sim = 0
        for hyp_node in hyp:
            if hyp_node[1] in [p[1] for p in prem]:
                try:
                    premise_path = algorithms.shortest_path(
                        graph_premise.G, graph_premise.root,
                        prem[[p[1] for p in prem].index(hyp_node[1])][0])
                    hypothesis_path = algorithms.shortest_path(
                        graph_hypothesis.G, graph_hypothesis.root, hyp_node[0])
                    # shortest_path returns the node list, so grade on hop counts
                    premise_depth = len(premise_path) - 1
                    hypothesis_depth = len(hypothesis_path) - 1
                    if premise_depth in [0, 1]:
                        sim += max(hypothesis_depth, 2)
                    elif hypothesis_depth == 0:
                        sim += 1
                    else:
                        sim += hypothesis_depth / (premise_depth - 1)
                except Exception:
                    sim += 0.5
        if sim == 0 or len(hyp) == 0:
            return 0
        else:
            return float(sim) / float(len(hyp))
def asim_jac_nodes_with_backup(self, graph_premise, graph_hypothesis):
"""
Asymmetric Jaccard similarity between the nodes of the definition graphs, if the score is not 1 it calculates
the asymmetric Jaccard similarity between the edges without the hypothesis root node
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
        :return: the node overlap score, or the edge-based backup score when the node score is strictly between 0 and 1
"""
node_score = self.asim_jac_nodes(graph_premise, graph_hypothesis)
edge_score = 0
if 0.0 < node_score < 1.0:
root = graph_hypothesis.d_clean(graph_hypothesis.root).split("_")[0]
if root in graph_premise.get_nodes():
root_id = [node for node in graph_premise.G.nodes() if self.clear_node(node) == root][0]
graph_premise_only_zero = copy.deepcopy(graph_premise)
delete_list = []
for edge in graph_premise_only_zero.G.adj.items():
for output_node in edge[1].items():
inner_delete_list = []
for edge_type in output_node[1].items():
if edge_type[1]["color"]:
inner_delete_list.append(edge_type[0])
for inner_del in inner_delete_list:
del output_node[1]._atlas[inner_del]
if len(output_node[1]) < 1:
delete_list.append(output_node[0])
for to_del in delete_list:
del edge[1]._atlas[to_del]
try:
if algorithms.has_path(graph_premise_only_zero.G, graph_premise.root, root_id):
return 1.0
            except Exception as e:
                print("Error occurred:", e)
graph_hypothesis_wo_root = copy.deepcopy(graph_hypothesis)
graph_hypothesis_wo_root.G.remove_node(graph_hypothesis_wo_root.root)
#edge_score = self.asim_jac_edges(graph_premise, graph_hypothesis_wo_root)
return self.asim_jac_edges(graph_premise, graph_hypothesis_wo_root)
#return max([node_score, edge_score])
return node_score
def asim_jac_nodes_elmo(self, premise, hypothesis, graph_premise, graph_hypothesis, def_premise, def_hypothesis):
"""
Asymmetric Jaccard similarity between the node embeddings of the definition graphs
:param premise: the premise word
:param hypothesis: the hypothesis word
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
:param def_premise: the definition of the premise
:param def_hypothesis: the definition of the hypothesis
    :return: the average best-match similarity over the nodes of the hypothesis definition
"""
premise_words, hypothesis_words = self.get_elmo_nodes(premise, def_premise, hypothesis, def_hypothesis)
prem = []
for node in graph_premise.G.nodes:
cleared_node = self.clear_node(node)
if cleared_node not in premise_words:
prem.append(self.call_elmo_service([cleared_node])[0][0])
else:
prem.append(premise_words[cleared_node])
hyp = []
for node in graph_hypothesis.G.nodes:
cleared_node = self.clear_node(node)
if cleared_node not in hypothesis_words:
hyp.append(self.call_elmo_service([cleared_node])[0][0])
else:
hyp.append(hypothesis_words[cleared_node])
similarities = cosine_similarity(prem, hyp)
best_sim = [max(word_sim) for word_sim in np.transpose(similarities)]
return sum(best_sim) / len(best_sim)
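# Note on the similarity matrix above: cosine_similarity(prem, hyp) has shape
# (len(prem), len(hyp)), so transposing it and taking the max per row selects, for
# each hypothesis node, its best-matching premise node before averaging.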
def asim_jac_edges_elmo(self, premise, hypothesis, graph_premise, graph_hypothesis, def_premise, def_hypothesis):
"""
Asymmetric Jaccard similarity between the edges based on the node embeddings of the definition graphs
:param premise: the premise word
:param hypothesis: the hypothesis word
:param graph_premise: the definition graph of the premise
:param graph_hypothesis: the definition graph of the hypothesis
:param def_premise: the definition of the premise
:param def_hypothesis: the definition of the hypothesis
    :return: the average best-match similarity over the edges of the hypothesis definition
"""
premise_words, hypothesis_words = self.get_elmo_nodes(premise, def_premise, hypothesis, def_hypothesis)
prem = self.get_elmo_edges(graph_premise, premise_words)
hyp = self.get_elmo_edges(graph_hypothesis, hypothesis_words)
if len(hyp) == 0 or len(prem) == 0:
return 0
sim = 0
for hyp_edge in hyp:
scores = []
for prem_edge in prem:
if hyp_edge[2] == prem_edge[2]:
                # cosine_similarity returns a 2D array, so [0][0] extracts the scalar score
                scores.append((cosine_similarity([np.asarray(hyp_edge[0])], [np.asarray(prem_edge[0])])[0][0] +
                               cosine_similarity([np.asarray(hyp_edge[1])], [np.asarray(prem_edge[1])])[0][0]) / 2)
        sim += max(scores + [0])
    return sim / len(hyp)
def asim_jac_bow_elmo(self, def_premise, def_hypothesis):
"""
Asymmetric Jaccard similarity between the word embeddings of the definitions
:param def_premise: the definition of the premise
:param def_hypothesis: the definition of the hypothesis
    :return: the average best-match similarity over the words of the hypothesis definition
"""
embeddings = self.get_elmo_embeddings(def_premise, def_hypothesis)
similarities = cosine_similarity(embeddings[0], embeddings[1])
best_sim = [max(word_sim) for word_sim in np.transpose(similarities)]
return sum(best_sim) / len(best_sim)
def word_elmo(self, premise, hypothesis):
"""
The cosine similarity of the words
:param premise: the premise word
:param hypothesis: the hypothesis word
:return: the cosine similarity score
"""
embeddings = self.get_elmo_embeddings(premise, hypothesis)
similarity = cosine_similarity(embeddings[0], embeddings[1])[0][0]
return similarity
|
[
"copy.deepcopy",
"sklearn.metrics.pairwise.cosine_similarity",
"os.path.abspath",
"numpy.asarray",
"numpy.transpose",
"json.dumps",
"collections.defaultdict",
"spacy.load",
"networkx.algorithms.has_path",
"fourlang.stanford_wrapper.StanfordParser",
"networkx.algorithms.shortest_path",
"re.sub",
"fourlang.fourlang.FourLang"
] |
[((666, 709), 'spacy.load', 'spacy.load', (['self.language_models[self.lang]'], {}), '(self.language_models[self.lang])\n', (676, 709), False, 'import spacy\n'), ((741, 757), 'fourlang.stanford_wrapper.StanfordParser', 'StanfordParser', ([], {}), '()\n', (755, 757), False, 'from fourlang.stanford_wrapper import StanfordParser\n'), ((1308, 1340), 're.sub', 're.sub', (['"""_[0-9][0-9]*"""', '""""""', 'node'], {}), "('_[0-9][0-9]*', '', node)\n", (1314, 1340), False, 'import re\n'), ((2180, 2222), 'spacy.load', 'spacy.load', (['self.language_models[src_lang]'], {}), '(self.language_models[src_lang])\n', (2190, 2222), False, 'import spacy\n'), ((2251, 2293), 'spacy.load', 'spacy.load', (['self.language_models[tgt_lang]'], {}), '(self.language_models[tgt_lang])\n', (2261, 2293), False, 'import spacy\n'), ((2707, 2724), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2718, 2724), False, 'from collections import defaultdict\n'), ((3012, 3054), 'spacy.load', 'spacy.load', (['self.language_models[src_lang]'], {}), '(self.language_models[src_lang])\n', (3022, 3054), False, 'import spacy\n'), ((3083, 3125), 'spacy.load', 'spacy.load', (['self.language_models[tgt_lang]'], {}), '(self.language_models[tgt_lang])\n', (3093, 3125), False, 'import spacy\n'), ((3460, 3496), 'json.dumps', 'json.dumps', (["{'sentences': sentences}"], {}), "({'sentences': sentences})\n", (3470, 3496), False, 'import json\n'), ((22360, 22388), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['prem', 'hyp'], {}), '(prem, hyp)\n', (22377, 22388), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((24446, 24493), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['embeddings[0]', 'embeddings[1]'], {}), '(embeddings[0], embeddings[1])\n', (24463, 24493), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3696, 3709), 'numpy.asarray', 'np.asarray', (['e'], {}), '(e)\n', (3706, 3709), True, 'import numpy as np\n'), ((20573, 20604), 'copy.deepcopy', 'copy.deepcopy', (['graph_hypothesis'], {}), '(graph_hypothesis)\n', (20586, 20604), False, 'import copy\n'), ((2651, 2676), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2666, 2676), False, 'import os\n'), ((19530, 19558), 'copy.deepcopy', 'copy.deepcopy', (['graph_premise'], {}), '(graph_premise)\n', (19543, 19558), False, 'import copy\n'), ((22439, 22465), 'numpy.transpose', 'np.transpose', (['similarities'], {}), '(similarities)\n', (22451, 22465), True, 'import numpy as np\n'), ((24544, 24570), 'numpy.transpose', 'np.transpose', (['similarities'], {}), '(similarities)\n', (24556, 24570), True, 'import numpy as np\n'), ((24952, 24999), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['embeddings[0]', 'embeddings[1]'], {}), '(embeddings[0], embeddings[1])\n', (24969, 24999), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((18006, 18091), 'networkx.algorithms.shortest_path', 'algorithms.shortest_path', (['graph_hypothesis.G', 'graph_hypothesis.root', 'hyp_node[0]'], {}), '(graph_hypothesis.G, graph_hypothesis.root, hyp_node[0]\n )\n', (18030, 18091), False, 'from networkx import algorithms\n'), ((20336, 20411), 'networkx.algorithms.has_path', 'algorithms.has_path', (['graph_premise_only_zero.G', 'graph_premise.root', 'root_id'], {}), '(graph_premise_only_zero.G, graph_premise.root, root_id)\n', (20355, 20411), False, 'from networkx import algorithms\n'), ((16194, 16204), 'fourlang.fourlang.FourLang', 'FourLang', ([], {}), 
'()\n', (16202, 16204), False, 'from fourlang.fourlang import FourLang\n'), ((16279, 16289), 'fourlang.fourlang.FourLang', 'FourLang', ([], {}), '()\n', (16287, 16289), False, 'from fourlang.fourlang import FourLang\n'), ((16359, 16369), 'fourlang.fourlang.FourLang', 'FourLang', ([], {}), '()\n', (16367, 16369), False, 'from fourlang.fourlang import FourLang\n'), ((16371, 16381), 'fourlang.fourlang.FourLang', 'FourLang', ([], {}), '()\n', (16379, 16381), False, 'from fourlang.fourlang import FourLang\n'), ((23724, 23747), 'numpy.asarray', 'np.asarray', (['hyp_edge[0]'], {}), '(hyp_edge[0])\n', (23734, 23747), True, 'import numpy as np\n'), ((23751, 23775), 'numpy.asarray', 'np.asarray', (['prem_edge[0]'], {}), '(prem_edge[0])\n', (23761, 23775), True, 'import numpy as np\n'), ((23837, 23860), 'numpy.asarray', 'np.asarray', (['hyp_edge[1]'], {}), '(hyp_edge[1])\n', (23847, 23860), True, 'import numpy as np\n'), ((23864, 23888), 'numpy.asarray', 'np.asarray', (['prem_edge[1]'], {}), '(prem_edge[1])\n', (23874, 23888), True, 'import numpy as np\n')]
|
"""
This module contains classes that learn the synaptic weights.
"""
import numpy as np
class STDP:
"""
Spike Timing Dependent Plasticity
"""
def __init__(self, eta, w_in, w_out, tau, window_size, verbose=False, tau2=None):
"""
:param eta: learning rate
        :param w_in: weight contribution added for each presynaptic spike
        :param w_out: weight contribution added for each postsynaptic spike
        :param tau: The tau parameter for the learning window. If you want an asymmetric window, then also set tau2.
        :param window_size: length of the learning window in time steps (T_l in the script)
        :param verbose: Verbose output of the weight change.
        :param tau2: If the learning window is asymmetric, then tau2 is the tau parameter for x-values GREATER than 0. If not given, it defaults to tau.
"""
self.eta = eta
self.w_in = w_in
self.w_out = w_out
self.tau = tau
self.tau2 = tau2 if tau2 else tau
self.window_size = window_size # T_l
self.verbose = verbose
def learning_window_neuron_pre(self, t1, t2_list):
"""
Return the sum of the learning windows of one neuron.
:param t1: current time
:param t2_list: spiking times of neuron
"""
sum_result = 0
for t2 in t2_list:
sum_result += self.learning_window(t2 - t1)
return sum_result
def learning_window_neuron_post(self, t1, t2_list):
"""
Return the sum of the learning windows of one neuron.
:param t1: current time
:param t2_list: spiking times of neuron
"""
sum_result = 0
for t2 in t2_list:
sum_result += self.learning_window(t1 - t2)
return sum_result
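    # The pre and post variants above differ only in the sign of the argument passed
    # to learning_window: (t2 - t1) for presynaptic sums, (t1 - t2) for postsynaptic
    # sums, which selects the opposite branch of the window.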
def learning_window(self, x):
"""
        Exponential learning window
        :param x: time difference between two spikes
        :return: the window value at x
"""
if x > 0:
return - np.exp(-x / self.tau2)
elif x < 0:
return np.exp(x / self.tau)
else:
return 0
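    # Shape of the window (assuming tau = tau2 = 10): W(-5) = exp(-0.5) ~ +0.61,
    # W(+5) = -exp(-0.5) ~ -0.61, W(0) = 0, i.e. an antisymmetric exponential
    # window in the symmetric case tau == tau2.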
def weight_change(self, spikes, weights, t):
"""
Calculate the weight change at time t. Changes the weights in place.
:param spikes: Spiketrain
:param weights: current weights
:return: Changes in weights
"""
if weights.dtype != 'float':
raise ValueError('The weight matrix has to be a float array. (Try to create it with dtype=float)')
# Trim spiketrain, so that it's 'windowed' (look at variable T_l in the script)
spikes = spikes[:, max(0, t+1-self.window_size):t+1]
if not spikes.any():
if self.verbose:
print("--------------------------")
print("Calculating STDP weight change at time")
print("No spikes found")
return np.zeros(weights.shape)
neurons, current_time = spikes.shape
current_time -= 1 # because index begins with 0
connected_neurons = np.array(weights, dtype=bool)
last_spikes = spikes[:, -1]
last_spikes = last_spikes[:, np.newaxis]
# Calculate the weight change for presynaptic spikes
weight_change_presynaptic = last_spikes * connected_neurons * self.w_in
# Calculate the weight change for postsynaptic spikes
weight_change_postsynaptic = last_spikes.T * connected_neurons * self.w_out
# Calculate the weight changes in regards of the learning window
spikes_time = []
for neuron in range(neurons):
spikes_time.append([])
for time, spike in enumerate(spikes[neuron, :]):
if spike:
spikes_time[neuron].append(time)
neuron_learnwindow_pre = [self.learning_window_neuron_pre(current_time, x) for x in spikes_time]
neuron_learnwindow_pre = np.array(neuron_learnwindow_pre, ndmin=2).T # Make it a column-vector
neuron_learnwindow_post = [self.learning_window_neuron_post(current_time, x) for x in spikes_time]
neuron_learnwindow_post = np.array(neuron_learnwindow_post, ndmin=2).T # Make it a column-vector
learning_window_presynaptic = (last_spikes.T * connected_neurons) * neuron_learnwindow_pre
learning_window_postsynaptic = (last_spikes * connected_neurons) * neuron_learnwindow_post.T
# Total weight change
weight_change = self.eta * (weight_change_presynaptic + weight_change_postsynaptic + learning_window_presynaptic
+ learning_window_postsynaptic)
# Change the weight in place
weights = weights.__iadd__(weight_change)
if self.verbose:
print("--------------------------")
print("Calculating STDP weight change at time")
print("Last spikes", last_spikes)
print("Weight change in:", weight_change_presynaptic)
print("Weight change out:", weight_change_postsynaptic)
print("Outgoing spikes time", spikes_time)
print("Neuron learnwindow pre", neuron_learnwindow_pre)
print("Neuron learnwindow post", neuron_learnwindow_post)
print("Presyncpit:", learning_window_presynaptic)
print("Postsynapitc:", learning_window_postsynaptic)
print("Summe (pres): ", neuron_learnwindow_pre, neuron_learnwindow_pre.shape)
print("Summe (post): ", neuron_learnwindow_post, neuron_learnwindow_post.shape)
print("presynaptic learning window", learning_window_presynaptic)
print("postsynaptic learning window", learning_window_postsynaptic)
print("type of weight change:", type(weight_change))
print("updated weights (function):", weights)
print("")
return weight_change
if __name__ == "__main__":
s = np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 1]], dtype=bool)
w = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 0]], dtype=float)
print("Spike Train", s)
print("Weights", w)
learning_model = STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=4, verbose=True)
print("Weight change: ", learning_model.weight_change(s, w, 2))
print("updated weights", w)
import matplotlib.pyplot as plt
x = np.linspace(-15, 15, 1000)
y = np.array([learning_model.learning_window(xv) for xv in x])
plt.plot(x,y)
plt.show()
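    # A minimal extra sketch (assumed parameters): tau2 shapes the x > 0 branch of the
    # window, so passing a tau2 different from tau yields an asymmetric window.
    asym_model = STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=4, tau2=5.0)
    y_asym = np.array([asym_model.learning_window(xv) for xv in x])
    plt.plot(x, y_asym)
    plt.show()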
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.linspace"
] |
[((5713, 5786), 'numpy.array', 'np.array', (['[[0, 0, 1, 1, 1], [0, 0, 1, 0, 0], [0, 0, 1, 0, 1]]'], {'dtype': 'bool'}), '([[0, 0, 1, 1, 1], [0, 0, 1, 0, 0], [0, 0, 1, 0, 1]], dtype=bool)\n', (5721, 5786), True, 'import numpy as np\n'), ((5832, 5888), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {'dtype': 'float'}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]], dtype=float)\n', (5840, 5888), True, 'import numpy as np\n'), ((6220, 6246), 'numpy.linspace', 'np.linspace', (['(-15)', '(15)', '(1000)'], {}), '(-15, 15, 1000)\n', (6231, 6246), True, 'import numpy as np\n'), ((6318, 6332), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (6326, 6332), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6344, 6346), True, 'import matplotlib.pyplot as plt\n'), ((2882, 2911), 'numpy.array', 'np.array', (['weights'], {'dtype': 'bool'}), '(weights, dtype=bool)\n', (2890, 2911), True, 'import numpy as np\n'), ((2725, 2748), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (2733, 2748), True, 'import numpy as np\n'), ((3738, 3779), 'numpy.array', 'np.array', (['neuron_learnwindow_pre'], {'ndmin': '(2)'}), '(neuron_learnwindow_pre, ndmin=2)\n', (3746, 3779), True, 'import numpy as np\n'), ((3951, 3993), 'numpy.array', 'np.array', (['neuron_learnwindow_post'], {'ndmin': '(2)'}), '(neuron_learnwindow_post, ndmin=2)\n', (3959, 3993), True, 'import numpy as np\n'), ((1811, 1833), 'numpy.exp', 'np.exp', (['(-x / self.tau2)'], {}), '(-x / self.tau2)\n', (1817, 1833), True, 'import numpy as np\n'), ((1873, 1893), 'numpy.exp', 'np.exp', (['(x / self.tau)'], {}), '(x / self.tau)\n', (1879, 1893), True, 'import numpy as np\n')]
|
######################################################
# Synaptic weight distribution along a branch from proximal to distal, for multiple
# branches, stimulation sites (proximal/distal), and stimulation frequencies
######################################################
from __future__ import division
from brian2 import *
import numpy as np
import matplotlib.pyplot as plt
import json
import copy as cp
import os, sys
mod_path = os.path.abspath(os.path.join('..','Model'))
sys.path.append(mod_path)
from oo_Parameters import *
from oo_equations_AMPAplast import *
from oo_initScripts import set_init_nrn, set_init_syn
from MakeNeuron_AMPAplast import *
from MorphologyData import *
start_scope()
######################################################
## Load Morpho
######################################################
#morph = '../Model/Branco2010_Morpho.swc'
#morph_data = BrancoData
#
morph = '../Model/Acker2008.swc'
morph_data = AckerData
synmodel = 'Chen' # synmodel = 'Chen' , synmodel = 'Clopath', synmodel = 'nonPlast'
print('Synaptic plasticity model:', synmodel)
expNr = 0 # experiment number, i.e. which set of initial random connections is used for the two tasks
loc1 = 'basal' #'tuft','apical','basal'
print('Dendritic region of the neuron:', loc1)
titlestr = 'DataFBComps/'+loc1+'_'+str(expNr)+'_'
data1 = open(titlestr+'compsF'+'.txt','r')
data2 = open(titlestr+'compsB'+'.txt','r')
compFWR = json.load(data1)
compBWR = json.load(data2)
data1.close()
data2.close()
nrFWR = len(compFWR) # nr of compartments for forward running
nrBWR = len(compBWR) # nr of compartments for backward running
#print('nrFWR=',nrFWR,', ','nrBWR=',nrBWR)
#print('compFWR=',compFWR)
#print('compBWR=',compBWR)
allcomps = compFWR + compBWR
allcompsArr = np.array(allcomps)
if loc1 == 'tuft':
distComps = distal_Acker_tuft
proxComps = proximal_Acker_tuft
elif loc1 == 'apical':
distComps = distal_Acker_apical
proxComps = proximal_Acker_apical
elif loc1 == 'basal':
distComps = distal_Acker_basal
proxComps = proximal_Acker_basal
else:
    print('Error: unknown region', loc1)
sys.exit(1)
branchNr = len(proxComps)
print('Number of branches in the region:', branchNr)
#homoloc = 'proximal' #'proximal','distal'
homolocs = ['proximal', 'distal'] #'proximal','distal'
#abranch = 0
abranchs = range(branchNr)
nr_clst = 2 # nr of synapses per compartment
print('Number of synapses per compartment:', nr_clst)
#signal_rate = 10*Hz # activation rate of the pools
signal_rates = np.array([100])*Hz #signal_rates = np.array([0.5,1,5,10,20,30,40,50,100])*Hz
t_stim = 100*ms # length of activation 50*ms
buffertime = 300*ms # rest time between two activations 150*ms
init_weight = 0.5 # initial weight
Theta_low = morph_data['thetalow']*mV
#####################################################
# Input Neuron
#####################################################
V_rest = 0.*mV
V_thresh = 0.5*mV
# Equations input neuron
eqs_in = '''
dv/dt = (V_rest-v)/ms: volt
v2 = rand()<(1.0*rate_v*dt) :1 (constant over dt)
rate_v :Hz
ds_trace/dt = -s_trace/taux :1
'''
#####################################################
# Create neuron
#####################################################
N_input = NeuronGroup(nr_clst*(nrFWR+nrBWR), eqs_in, threshold='v+v2*2*V_thresh>V_thresh',
reset='v=V_rest;s_trace+=x_reset*(taux/ms)',method='linear')#
test_model = BRIANModel(morph)
neuron = test_model.makeNeuron_Ca(morph_data)
neuron.run_regularly('Mgblock = 1./(1.+ exp(-0.062*vu2)/3.57)',dt=defaultclock.dt)
print('Neurons created...')
#####################################################
# create Synapses
#####################################################
if synmodel == 'Clopath':
Syn_1 = Synapses(N_input,neuron,
model= eq_1_plastAMPA,
on_pre = eq_2_plastAMPA,
method='heun'
)
elif synmodel == 'Chen':
Syn_1 = Synapses(N_input,neuron,
model= chen_1_plastAMPA,
on_pre = chen_2_plastAMPA,
method='heun'
)
else:
Syn_1 = Synapses(N_input,neuron,
model= eq_1_nonPlast,
on_pre = eq_2_nonPlast,
method='heun'
)
for rr in range(nrFWR):
Syn_1.connect(i=range(rr*nr_clst,(rr+1)*nr_clst),j=neuron[compFWR[rr]:compFWR[rr]+1])
for rr in range(nrBWR):
Syn_1.connect(i=range((rr+nrFWR)*nr_clst,(rr+nrFWR+1)*nr_clst),j=neuron[compBWR[rr]:compBWR[rr]+1])
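# Input-to-synapse index mapping used above: input neurons are allocated in blocks of
# nr_clst, so block rr (indices rr*nr_clst ... (rr+1)*nr_clst - 1) all project onto a
# single target compartment.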
print('Synapses created...')
print('------------------')
print('Simulating...')
for abranch in abranchs:
    print('Branch index:', abranch)
    print('Number of compartments in the branch:', distComps[abranch]-proxComps[abranch]+1)
for homoloc in homolocs:
        print('Stimulation site:', homoloc)
for signal_rate in signal_rates:
            print('Stimulation frequency:', signal_rate)
#####################################################
# Initial Values
#####################################################
set_init_syn(Syn_1,init_weight)
nr_syn = len(Syn_1.wampa[:])
set_init_nrn(neuron,Theta_low)
N_input.v = V_rest
N_input.rate_v = 0*Hz
compset = list(range(proxComps[abranch],distComps[abranch]+1))
compsind = []
for acomp in compset:
inputind = allcomps.index(acomp)
compsind.append(inputind)
if homoloc == 'proximal':
homocomps = [compset[0]]
heterocomps = compset[1:]
else:
homocomps = [compset[-1]]
heterocomps = compset[:-1]
homosyns = []
for acomp in homocomps:
inputind = allcomps.index(acomp)
homosyns = homosyns + list(range(inputind*nr_clst,(inputind+1)*nr_clst))
#####################################################
# Run
#####################################################
#run(MEt0)
for iii in range(40):
N_input.rate_v[homosyns] = signal_rate
run(t_stim)
N_input.rate_v[homosyns] = 0*Hz
run(buffertime)
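            # Stimulation protocol above: 40 cycles of t_stim activation followed by
            # buffertime rest (with the defaults, 40 * (100 ms + 300 ms) = 16 s in total).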
#####################################################
# Weight distribution in a branch
#####################################################
if synmodel == 'Chen':
wbranch = np.zeros(len(compset))
Erbranch = np.zeros(len(compset))
Efbranch = np.zeros(len(compset))
PEbranch = np.zeros(len(compset))
MEmaxbranch = np.zeros(len(compset))
MEdampbranch = np.zeros(len(compset))
wbranch_Free = np.zeros(len(compset))
Erbranch_Free = np.zeros(len(compset))
Efbranch_Free = np.zeros(len(compset))
PEbranch_Free = np.zeros(len(compset))
for jj in range(len(compset)):
compind = compsind[jj]
wbranch[jj] = np.mean(Syn_1.wampa[compind*nr_clst:(compind+1)*nr_clst])
Erbranch[jj] = np.mean(Syn_1.Erest[compind*nr_clst:(compind+1)*nr_clst])
Efbranch[jj] = np.mean(Syn_1.Efire[compind*nr_clst:(compind+1)*nr_clst])
PEbranch[jj] = np.mean(Syn_1.PE[compind*nr_clst:(compind+1)*nr_clst])
MEmaxbranch[jj] = np.mean(Syn_1.MEmax[compind*nr_clst:(compind+1)*nr_clst])
MEdampbranch[jj] = np.mean(Syn_1.MEdamp[compind*nr_clst:(compind+1)*nr_clst])
wbranch_Free[jj] = np.mean(Syn_1.wampa_Free[compind*nr_clst:(compind+1)*nr_clst])
Erbranch_Free[jj] = np.mean(Syn_1.Erest_Free[compind*nr_clst:(compind+1)*nr_clst])
Efbranch_Free[jj] = np.mean(Syn_1.Efire_Free[compind*nr_clst:(compind+1)*nr_clst])
PEbranch_Free[jj] = np.mean(Syn_1.PE_Free[compind*nr_clst:(compind+1)*nr_clst])
if homoloc == 'distal':
wbranch = wbranch[::-1]
Erbranch = Erbranch[::-1]
Efbranch = Efbranch[::-1]
PEbranch = PEbranch[::-1]
MEmaxbranch = MEmaxbranch[::-1]
MEdampbranch = MEdampbranch[::-1]
wbranch_Free = wbranch_Free[::-1]
Erbranch_Free = Erbranch_Free[::-1]
Efbranch_Free = Efbranch_Free[::-1]
PEbranch_Free = PEbranch_Free[::-1]
elif synmodel == 'Clopath':
wbranch = np.zeros(len(compset))
for jj in range(len(compset)):
compind = compsind[jj]
# wmean = np.mean(Syn_1.wampa[compind*nr_clst:(compind+1)*nr_clst])
# wbranch[jj] = wmean + 15.*(wmean-init_weight)
wbranch[jj] = np.mean(Syn_1.wampa[compind*nr_clst:(compind+1)*nr_clst])
if homoloc == 'distal':
wbranch = wbranch[::-1]
else:
wbranch = np.zeros(len(compset))
for jj in range(len(compset)):
compind = compsind[jj]
wbranch[jj] = np.mean(Syn_1.wampa[compind*nr_clst:(compind+1)*nr_clst])
if homoloc == 'distal':
wbranch = wbranch[::-1]
#------------------------------
titlestr = 'Data/'+synmodel+'_'+loc1+'_'+str(nr_clst)+'_'+str(init_weight)+'_'+str(abranch)+'_'+homoloc+'_'+str(signal_rate/Hz)
data0 = open(titlestr+'_wbranch.txt','w')
json.dump(wbranch.tolist(),data0)
data0.close()
if synmodel == 'Chen':
data0 = open(titlestr+'_wbranch_Free.txt','w')
json.dump(wbranch_Free.tolist(),data0)
data0.close()
data0 = open(titlestr+'_Erbranch.txt','w')
json.dump(Erbranch.tolist(),data0)
data0.close()
data0 = open(titlestr+'_Efbranch.txt','w')
json.dump(Efbranch.tolist(),data0)
data0.close()
data0 = open(titlestr+'_PEbranch.txt','w')
json.dump(PEbranch.tolist(),data0)
data0.close()
data0 = open(titlestr+'_MEmaxbranch.txt','w')
json.dump(MEmaxbranch.tolist(),data0)
data0.close()
data0 = open(titlestr+'_MEdampbranch.txt','w')
json.dump(MEdampbranch.tolist(),data0)
data0.close()
data0 = open(titlestr+'_Erbranch_Free.txt','w')
json.dump(Erbranch_Free.tolist(),data0)
data0.close()
data0 = open(titlestr+'_Efbranch_Free.txt','w')
json.dump(Efbranch_Free.tolist(),data0)
data0.close()
data0 = open(titlestr+'_PEbranch_Free.txt','w')
json.dump(PEbranch_Free.tolist(),data0)
data0.close()
sys.exit(0)  # finished successfully
|
[
"sys.path.append",
"json.load",
"oo_initScripts.set_init_nrn",
"numpy.mean",
"numpy.array",
"oo_initScripts.set_init_syn",
"os.path.join",
"sys.exit"
] |
[((354, 379), 'sys.path.append', 'sys.path.append', (['mod_path'], {}), '(mod_path)\n', (369, 379), False, 'import os, sys\n'), ((1191, 1207), 'json.load', 'json.load', (['data1'], {}), '(data1)\n', (1200, 1207), False, 'import json\n'), ((1218, 1234), 'json.load', 'json.load', (['data2'], {}), '(data2)\n', (1227, 1234), False, 'import json\n'), ((1532, 1550), 'numpy.array', 'np.array', (['allcomps'], {}), '(allcomps)\n', (1540, 1550), True, 'import numpy as np\n'), ((10899, 10910), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10907, 10910), False, 'import os, sys\n'), ((326, 353), 'os.path.join', 'os.path.join', (['""".."""', '"""Model"""'], {}), "('..', 'Model')\n", (338, 353), False, 'import os, sys\n'), ((2210, 2225), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (2218, 2225), True, 'import numpy as np\n'), ((1861, 1872), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1869, 1872), False, 'import os, sys\n'), ((4800, 4832), 'oo_initScripts.set_init_syn', 'set_init_syn', (['Syn_1', 'init_weight'], {}), '(Syn_1, init_weight)\n', (4812, 4832), False, 'from oo_initScripts import set_init_nrn, set_init_syn\n'), ((4885, 4916), 'oo_initScripts.set_init_nrn', 'set_init_nrn', (['neuron', 'Theta_low'], {}), '(neuron, Theta_low)\n', (4897, 4916), False, 'from oo_initScripts import set_init_nrn, set_init_syn\n'), ((6878, 6941), 'numpy.mean', 'np.mean', (['Syn_1.wampa[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.wampa[compind * nr_clst:(compind + 1) * nr_clst])\n', (6885, 6941), True, 'import numpy as np\n'), ((6971, 7034), 'numpy.mean', 'np.mean', (['Syn_1.Erest[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.Erest[compind * nr_clst:(compind + 1) * nr_clst])\n', (6978, 7034), True, 'import numpy as np\n'), ((7064, 7127), 'numpy.mean', 'np.mean', (['Syn_1.Efire[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.Efire[compind * nr_clst:(compind + 1) * nr_clst])\n', (7071, 7127), True, 'import numpy as np\n'), ((7157, 7217), 'numpy.mean', 'np.mean', (['Syn_1.PE[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.PE[compind * nr_clst:(compind + 1) * nr_clst])\n', (7164, 7217), True, 'import numpy as np\n'), ((7250, 7313), 'numpy.mean', 'np.mean', (['Syn_1.MEmax[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.MEmax[compind * nr_clst:(compind + 1) * nr_clst])\n', (7257, 7313), True, 'import numpy as np\n'), ((7347, 7411), 'numpy.mean', 'np.mean', (['Syn_1.MEdamp[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.MEdamp[compind * nr_clst:(compind + 1) * nr_clst])\n', (7354, 7411), True, 'import numpy as np\n'), ((7445, 7513), 'numpy.mean', 'np.mean', (['Syn_1.wampa_Free[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.wampa_Free[compind * nr_clst:(compind + 1) * nr_clst])\n', (7452, 7513), True, 'import numpy as np\n'), ((7548, 7616), 'numpy.mean', 'np.mean', (['Syn_1.Erest_Free[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.Erest_Free[compind * nr_clst:(compind + 1) * nr_clst])\n', (7555, 7616), True, 'import numpy as np\n'), ((7651, 7719), 'numpy.mean', 'np.mean', (['Syn_1.Efire_Free[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.Efire_Free[compind * nr_clst:(compind + 1) * nr_clst])\n', (7658, 7719), True, 'import numpy as np\n'), ((7754, 7819), 'numpy.mean', 'np.mean', (['Syn_1.PE_Free[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.PE_Free[compind * nr_clst:(compind + 1) * nr_clst])\n', (7761, 7819), True, 'import numpy as np\n'), ((8731, 8794), 'numpy.mean', 'np.mean', 
(['Syn_1.wampa[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.wampa[compind * nr_clst:(compind + 1) * nr_clst])\n', (8738, 8794), True, 'import numpy as np\n'), ((9064, 9127), 'numpy.mean', 'np.mean', (['Syn_1.wampa[compind * nr_clst:(compind + 1) * nr_clst]'], {}), '(Syn_1.wampa[compind * nr_clst:(compind + 1) * nr_clst])\n', (9071, 9127), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String, Header
from tf2_msgs.msg import TFMessage
from nav_msgs.msg import Odometry
from sensor_msgs.msg import PointCloud2, Image, PointField
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import PoseStamped
from actionlib_msgs.msg import GoalID
from geometry_msgs.msg import Twist
import sensor_msgs.point_cloud2 as pc2
from tf.transformations import euler_from_quaternion, quaternion_from_euler, euler_matrix
import tf2_ros as tf2
import numpy as np
import cv2
# Apparently ROS thinks Python is C :/
import ctypes
import struct
from move import MoveMe
pc_callback_count = 0
pc_callback_rate = 3 #5
goal_coords = None
goal_pose = None
has_set_goal = False
seq = 0
seen_map_msg = None
curr_pose = None
pub_map_seen = None
pub_cancel = None
pub_goal_found = None
def map_callback(msg):
global seen_map_msg
global pub_map_seen
#print(dir(msg))
# Maybe this will create a new topic?
pub_map_seen = rospy.Publisher('map_seen', OccupancyGrid, queue_size=1)
origin_pos = msg.info.origin.position
res = msg.info.resolution
map_width = msg.info.width
map_height = msg.info.height
print("Map Callback")
print(origin_pos)
print(map_height, map_width)
data = np.array(msg.data)
data[np.where((data < 0) | (data > 80))] = 100
data[np.where(data!=100)] = 0
msg.data = tuple(data)
seen_map_msg = msg
while pub_map_seen.get_num_connections() < 1:
rospy.sleep(0.1)
pub_map_seen.publish(msg)
def callback(data):
print("-------------------------\n")
print(data.encoding)
w = data.width
h = data.height
print("Width: ", w, "Height: ", h)
print(len(data.data)/(w*h), data.step)
print(type(data.data[0]))
print(data.data[0], data.data[1], data.data[2])
rospy.sleep(1)
# I.e. decode data
def rgb_float_to_list(rgb_float):
s = struct.pack('>f', rgb_float)
i = struct.unpack('>l',s)[0]
# you can get back the float value by the inverse operations
pack = ctypes.c_uint32(i).value
r = int ((pack & 0x00FF0000)>> 16)
g = int ((pack & 0x0000FF00)>> 8)
b = int (pack & 0x000000FF)
return [r, g, b]
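# How the decode above works: PointCloud2 packs RGB into a float32, so the float's raw
# big-endian bytes are reinterpreted as a uint32 laid out as 0x00RRGGBB, and each
# channel is recovered with a mask and shift.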
def callback_pc(ros_point_cloud):
global pc_callback_count
global pc_callback_rate
# print("Hello")
pc_callback_count = pc_callback_count +1
if pc_callback_count >= pc_callback_rate:
pc_callback_count = 0
pc_proc(ros_point_cloud)
def get_3D_transf_matrix(transf_msg):
#print("get_3D_transf_matrix_z")
x_translate = transf_msg.transform.translation.x
y_translate = transf_msg.transform.translation.y
z_translate = transf_msg.transform.translation.z
rot = transf_msg.transform.rotation
euler_rot = euler_from_quaternion([rot.x, rot.y, rot.z, rot.w])
euler_mat = euler_matrix(euler_rot[0], euler_rot[1], euler_rot[2])
transf_mat = euler_mat.copy()
transf_mat[0][-1] = x_translate
transf_mat[1][-1] = y_translate
transf_mat[2][-1] = z_translate
return transf_mat
def transf_sing(transf_mat, xyz_np):
transf_xyz = np.matmul(transf_mat, np.append(xyz_np, 1))
transf_xyz = np.delete(transf_xyz, -1)
return transf_xyz
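# transf_sing applies the 4x4 homogeneous transform: the point is extended with a
# trailing 1 so the matrix multiply applies rotation and translation in one step,
# and the extra coordinate is dropped again afterwards.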
# xy - np.array
# transform_msg
def apply_transform(xyz_np, transf_msg):
transf_mat = get_3D_transf_matrix(transf_msg)
    if len(xyz_np.shape) == 1:
        return transf_sing(transf_mat, xyz_np)
    # Batches of points are not handled yet; falling through returns None explicitly
    return None
def pc_proc(ros_point_cloud):
global goal_coords
global has_set_goal
global seen_map_msg
print("-------------------------\n")
width = ros_point_cloud.width
height = ros_point_cloud.height
tfBuffer = tf2.Buffer()
listener = tf2.TransformListener(tfBuffer)
tmp = list(pc2.read_points(ros_point_cloud))
all_xyz = list(pc2.read_points(ros_point_cloud, field_names = ("x","y","z")))
xyz_np = np.array(all_xyz)
#print("xyz_np.shape - ", xyz_np.shape)
xyz_np = xyz_np.reshape((height,width,3))
all_rgb_float = list(pc2.read_points(ros_point_cloud, field_names = ("rgb")))
all_rgb = [rgb_float_to_list(rgb_float[0]) for rgb_float in all_rgb_float]
np_rgb = np.array(all_rgb)
np_rgb = np_rgb.reshape((height,width,3))
rgb_lower = [125,40,0]
rgb_upper = [145,60,20]
orange_indices = np.where(np.all((np_rgb > rgb_lower) & (np_rgb < rgb_upper), axis=-1))
tot_pixels = width * height
tol = 0.005 # 0.5%
if orange_indices[0].shape[0]/float(tot_pixels) >= tol:
xyz_obj = xyz_np[orange_indices[0], orange_indices[1]]
obj_coords_mean = np.nanmean(xyz_obj.reshape((-1,3)), axis=0)
transform_msg = None
if not np.isnan(np.sum(obj_coords_mean)):
print("Target found at " + str(obj_coords_mean) + " :)")
# Cancel current path - /move_base/cancel
pub_cancel.publish(GoalID())
rospy.sleep(1)
try:
transform_msg = tfBuffer.lookup_transform("map","base_link", rospy.Time(0), rospy.Duration(0.1))
print("TRANSFORM MESSAGE: \n" + str(transform_msg))
transf_mat = get_3D_transf_matrix(transform_msg)
except (tf2.LookupException, tf2.ConnectivityException, tf2.ExtrapolationException):
print("transform_msg not found")
else:
print("Object found but coords cannot be determined")
if not has_set_goal and not np.isnan(np.sum(obj_coords_mean)) and (transform_msg is not None):
goal_coords_world = apply_transform(obj_coords_mean, transform_msg)
pub_goal_found.publish(Twist())
has_set_goal = True
move_me = MoveMe()
move_me.move_to_xyrot(goal_coords_world[0], goal_coords_world[1], 0)
else:
print("Target not found :(")
    if seen_map_msg is not None and False:  # 'and False' deliberately disables this debug block
print("MAP:")
map_height = seen_map_msg.info.height
map_width = seen_map_msg.info.width
map_res = seen_map_msg.info.resolution
map_origin_pos_obj = seen_map_msg.info.origin.position
map_origin_xy = [map_origin_pos_obj.x, map_origin_pos_obj.y]
print("map_res: ", map_res, str(map_origin_pos_obj))
print("map_origin_xy:")
print(map_origin_xy)
map_data = np.array(seen_map_msg.data).reshape((map_height,map_width))
xy_np = np.delete(xyz_np, 2,2)
xy_transf = xy_np - map_origin_xy
xy_transf = np.nan_to_num(xy_transf) # Replace NANs with zero
xy_transf = (xy_transf/map_res).round().astype(int)
xy_transf = xy_transf.reshape((-1,2))
#print(xy_transf)
print("map_data.shape: " + str(map_data.shape))
print("xy_transf.shape: " + str(xy_transf.shape))
# map_arr[index_arr[:,0], index_arr[:,1]]
map_data[xy_transf[:,0], xy_transf[:,1]] = 100
map_data = map_data.reshape(-1)
seen_map_msg.data = tuple(map_data)
pub_map_seen.publish(seen_map_msg)
def goal_cb(data):
#print(dir(data))
print(dir(data.pose))
pos = data.pose.position
orient = data.pose.orientation
must_print = True
if must_print:
print("---------------------------")
print("Goal data recieved")
print(data)
print("Position: " + str(pos))
print("Orient Quaternion: " + str(orient))
print("Orient Euler: " + str(euler_from_quaternion([orient.x, orient.y, orient.z, orient.w])))
print("---------------------------")
def move_to_xzrot(x,z,rot,pub_goal):
global seq
curr_pose = rospy.wait_for_message("/odom", Odometry)
goal_msg = PoseStamped()
odom_orient = curr_pose.twist.twist.angular
odom_coords = curr_pose.twist.twist.linear
y = odom_coords.y
tmp_orient = quaternion_from_euler(odom_orient.x, odom_orient.y, rot)
goal_msg.pose.orientation.x = tmp_orient[0]
goal_msg.pose.orientation.y = tmp_orient[1]
goal_msg.pose.orientation.z = tmp_orient[2]
goal_msg.pose.orientation.w = tmp_orient[3]
goal_msg.pose.position.x = x
goal_msg.pose.position.y = y
goal_msg.pose.position.z = z
goal_msg.header.frame_id = 'map'
now = rospy.get_rostime()
goal_msg.header.stamp.secs = now.secs
goal_msg.header.stamp.nsecs = now.nsecs
goal_msg.header.seq = seq
seq = seq + 1
pub_goal.publish(goal_msg)
return 0
def main():
global pub_cancel
global pub_goal_found
rospy.init_node('listener', anonymous=True)
pub_cancel = rospy.Publisher('/move_base/cancel', GoalID, queue_size=1)
pub_goal_found = rospy.Publisher('/goal_found', Twist, queue_size=1)
while pub_cancel.get_num_connections() < 1:
rospy.sleep(0.1)
rospy.Subscriber("/map", OccupancyGrid, map_callback)
rospy.Subscriber("/camera/depth/points", PointCloud2, callback_pc)
rospy.spin()
if __name__ == '__main__':
main()
|
[
"rospy.Subscriber",
"numpy.nan_to_num",
"numpy.sum",
"rospy.Time",
"actionlib_msgs.msg.GoalID",
"sensor_msgs.point_cloud2.read_points",
"rospy.Duration",
"geometry_msgs.msg.PoseStamped",
"tf2_ros.TransformListener",
"struct.pack",
"numpy.append",
"rospy.init_node",
"struct.unpack",
"geometry_msgs.msg.Twist",
"move.MoveMe",
"numpy.delete",
"numpy.all",
"rospy.wait_for_message",
"rospy.get_rostime",
"rospy.Publisher",
"rospy.sleep",
"tf2_ros.Buffer",
"numpy.where",
"numpy.array",
"tf.transformations.quaternion_from_euler",
"tf.transformations.euler_from_quaternion",
"rospy.spin",
"ctypes.c_uint32",
"tf.transformations.euler_matrix"
] |
[((989, 1045), 'rospy.Publisher', 'rospy.Publisher', (['"""map_seen"""', 'OccupancyGrid'], {'queue_size': '(1)'}), "('map_seen', OccupancyGrid, queue_size=1)\n", (1004, 1045), False, 'import rospy\n'), ((1258, 1276), 'numpy.array', 'np.array', (['msg.data'], {}), '(msg.data)\n', (1266, 1276), True, 'import numpy as np\n'), ((1767, 1781), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (1778, 1781), False, 'import rospy\n'), ((1841, 1869), 'struct.pack', 'struct.pack', (['""">f"""', 'rgb_float'], {}), "('>f', rgb_float)\n", (1852, 1869), False, 'import struct\n'), ((2626, 2677), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[rot.x, rot.y, rot.z, rot.w]'], {}), '([rot.x, rot.y, rot.z, rot.w])\n', (2647, 2677), False, 'from tf.transformations import euler_from_quaternion, quaternion_from_euler, euler_matrix\n'), ((2691, 2745), 'tf.transformations.euler_matrix', 'euler_matrix', (['euler_rot[0]', 'euler_rot[1]', 'euler_rot[2]'], {}), '(euler_rot[0], euler_rot[1], euler_rot[2])\n', (2703, 2745), False, 'from tf.transformations import euler_from_quaternion, quaternion_from_euler, euler_matrix\n'), ((3014, 3039), 'numpy.delete', 'np.delete', (['transf_xyz', '(-1)'], {}), '(transf_xyz, -1)\n', (3023, 3039), True, 'import numpy as np\n'), ((3469, 3481), 'tf2_ros.Buffer', 'tf2.Buffer', ([], {}), '()\n', (3479, 3481), True, 'import tf2_ros as tf2\n'), ((3494, 3525), 'tf2_ros.TransformListener', 'tf2.TransformListener', (['tfBuffer'], {}), '(tfBuffer)\n', (3515, 3525), True, 'import tf2_ros as tf2\n'), ((3664, 3681), 'numpy.array', 'np.array', (['all_xyz'], {}), '(all_xyz)\n', (3672, 3681), True, 'import numpy as np\n'), ((3933, 3950), 'numpy.array', 'np.array', (['all_rgb'], {}), '(all_rgb)\n', (3941, 3950), True, 'import numpy as np\n'), ((6938, 6979), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/odom"""', 'Odometry'], {}), "('/odom', Odometry)\n", (6960, 6979), False, 'import rospy\n'), ((6993, 7006), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (7004, 7006), False, 'from geometry_msgs.msg import PoseStamped\n'), ((7136, 7192), 'tf.transformations.quaternion_from_euler', 'quaternion_from_euler', (['odom_orient.x', 'odom_orient.y', 'rot'], {}), '(odom_orient.x, odom_orient.y, rot)\n', (7157, 7192), False, 'from tf.transformations import euler_from_quaternion, quaternion_from_euler, euler_matrix\n'), ((7506, 7525), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (7523, 7525), False, 'import rospy\n'), ((7753, 7796), 'rospy.init_node', 'rospy.init_node', (['"""listener"""'], {'anonymous': '(True)'}), "('listener', anonymous=True)\n", (7768, 7796), False, 'import rospy\n'), ((7812, 7870), 'rospy.Publisher', 'rospy.Publisher', (['"""/move_base/cancel"""', 'GoalID'], {'queue_size': '(1)'}), "('/move_base/cancel', GoalID, queue_size=1)\n", (7827, 7870), False, 'import rospy\n'), ((7890, 7941), 'rospy.Publisher', 'rospy.Publisher', (['"""/goal_found"""', 'Twist'], {'queue_size': '(1)'}), "('/goal_found', Twist, queue_size=1)\n", (7905, 7941), False, 'import rospy\n'), ((8010, 8063), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/map"""', 'OccupancyGrid', 'map_callback'], {}), "('/map', OccupancyGrid, map_callback)\n", (8026, 8063), False, 'import rospy\n'), ((8067, 8133), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/camera/depth/points"""', 'PointCloud2', 'callback_pc'], {}), "('/camera/depth/points', PointCloud2, callback_pc)\n", (8083, 8133), False, 'import rospy\n'), ((8137, 8149), 'rospy.spin', 'rospy.spin', ([], {}), 
'()\n', (8147, 8149), False, 'import rospy\n'), ((1283, 1317), 'numpy.where', 'np.where', (['((data < 0) | (data > 80))'], {}), '((data < 0) | (data > 80))\n', (1291, 1317), True, 'import numpy as np\n'), ((1331, 1352), 'numpy.where', 'np.where', (['(data != 100)'], {}), '(data != 100)\n', (1339, 1352), True, 'import numpy as np\n'), ((1453, 1469), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (1464, 1469), False, 'import rospy\n'), ((1875, 1897), 'struct.unpack', 'struct.unpack', (['""">l"""', 's'], {}), "('>l', s)\n", (1888, 1897), False, 'import struct\n'), ((1970, 1988), 'ctypes.c_uint32', 'ctypes.c_uint32', (['i'], {}), '(i)\n', (1985, 1988), False, 'import ctypes\n'), ((2975, 2995), 'numpy.append', 'np.append', (['xyz_np', '(1)'], {}), '(xyz_np, 1)\n', (2984, 2995), True, 'import numpy as np\n'), ((3539, 3571), 'sensor_msgs.point_cloud2.read_points', 'pc2.read_points', (['ros_point_cloud'], {}), '(ros_point_cloud)\n', (3554, 3571), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((3590, 3651), 'sensor_msgs.point_cloud2.read_points', 'pc2.read_points', (['ros_point_cloud'], {'field_names': "('x', 'y', 'z')"}), "(ros_point_cloud, field_names=('x', 'y', 'z'))\n", (3605, 3651), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((3789, 3840), 'sensor_msgs.point_cloud2.read_points', 'pc2.read_points', (['ros_point_cloud'], {'field_names': '"""rgb"""'}), "(ros_point_cloud, field_names='rgb')\n", (3804, 3840), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((4072, 4132), 'numpy.all', 'np.all', (['((np_rgb > rgb_lower) & (np_rgb < rgb_upper))'], {'axis': '(-1)'}), '((np_rgb > rgb_lower) & (np_rgb < rgb_upper), axis=-1)\n', (4078, 4132), True, 'import numpy as np\n'), ((5861, 5884), 'numpy.delete', 'np.delete', (['xyz_np', '(2)', '(2)'], {}), '(xyz_np, 2, 2)\n', (5870, 5884), True, 'import numpy as np\n'), ((5937, 5961), 'numpy.nan_to_num', 'np.nan_to_num', (['xy_transf'], {}), '(xy_transf)\n', (5950, 5961), True, 'import numpy as np\n'), ((7991, 8007), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (8002, 8007), False, 'import rospy\n'), ((4578, 4592), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (4589, 4592), False, 'import rospy\n'), ((5252, 5260), 'move.MoveMe', 'MoveMe', ([], {}), '()\n', (5258, 5260), False, 'from move import MoveMe\n'), ((4411, 4434), 'numpy.sum', 'np.sum', (['obj_coords_mean'], {}), '(obj_coords_mean)\n', (4417, 4434), True, 'import numpy as np\n'), ((4564, 4572), 'actionlib_msgs.msg.GoalID', 'GoalID', ([], {}), '()\n', (4570, 4572), False, 'from actionlib_msgs.msg import GoalID\n'), ((5206, 5213), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (5211, 5213), False, 'from geometry_msgs.msg import Twist\n'), ((5790, 5817), 'numpy.array', 'np.array', (['seen_map_msg.data'], {}), '(seen_map_msg.data)\n', (5798, 5817), True, 'import numpy as np\n'), ((4668, 4681), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (4678, 4681), False, 'import rospy\n'), ((4683, 4702), 'rospy.Duration', 'rospy.Duration', (['(0.1)'], {}), '(0.1)\n', (4697, 4702), False, 'import rospy\n'), ((5046, 5069), 'numpy.sum', 'np.sum', (['obj_coords_mean'], {}), '(obj_coords_mean)\n', (5052, 5069), True, 'import numpy as np\n'), ((6768, 6831), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[orient.x, orient.y, orient.z, orient.w]'], {}), '([orient.x, orient.y, orient.z, orient.w])\n', (6789, 6831), False, 'from tf.transformations import euler_from_quaternion, quaternion_from_euler, euler_matrix\n')]
|
#!/usr/bin/env python
"""
Library module that contains functions common to all our FARM-based programs for evaluation, training, application.
"""
import sys
import os.path
import datetime
from sklearn.metrics import confusion_matrix
import torch
import statistics
import numbers
import logging
import time
from collections import defaultdict
from pathlib import Path
import numpy as np
from orderedattrdict import AttrDict
from argparse import ArgumentParser
import mlflow
import signal
import toml
import json
import farm.utils
import farm.infer
from farm.infer import Inferencer
import farm.modeling.tokenization
import farm.data_handler.processor
import farm.data_handler.data_silo
import farm.modeling.optimization
from farm.data_handler.data_silo import DataSiloForCrossVal, DataSiloForHoldout
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
# from farm.train import Trainer, EarlyStopping
from farm_tools.train_modified import Trainer, EarlyStopping
from farm_tools.farm_eval import OurEvaluator
# from farm.eval import Evaluator
from farm.evaluation.metrics import registered_metrics
from farm_tools.utils import init_logger
from farm.visual.ascii.images import BUSH_SEP
from farm_tools.farm_tasks import *
from farm_tools.farm_optsched import *
from farm_tools.farm_utils import str2bool
logger = init_logger()
def install_signal_handler(mlf_logger):
"""
Install the SIGINT signal handler which will log the "ABORT" parameter to the FARM MLFlowLogger
(not directly to mlflow so we do NOT log if logging is disabled).
:param mlf_logger: FARM MLFlowLogger instance
:return:
"""
def signal_handler(sig, frame):
logger.error("Control-C / SIGINT received, aborting!!!!")
mlf_logger.log_params({"ABORTED": "SIGINT"})
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
DEFAULT_CONFIG_BASIC = AttrDict(dict(
seed=42,
n_gpu=1,
use_cuda=None,
use_amp=False,
do_lower_case=False,
text_column="text",
batch_size=32,
max_seq=64,
deterministic="False"
))
DEFAULT_CONFIG_TRAIN = AttrDict(dict(
label_column="target",
dev_splt=0.1,
dev_stratification=False,
grad_acc=1,
evaluate_every=10,
max_epochs=20,
dropout=0.2,
lrate=0.5e-5,
es_patience=10,
es_metric="f1_micro",
es_mode="max",
es_min_evals=1,
es_hd=0,
losses_alpha=0.5,
fts="FTSingleClassification",
fts_cfg=None, # multiple values of the form key=val
fos="FOSDefault",
fos_cfg=None,
hd_dim=768,
hd0_cfg=None,
hd1_cfg=None,
hd2_cfg=None,
hd3_cfg=None,
hd4_cfg=None,
hd5_cfg=None,
hd6_cfg=None,
hd7_cfg=None,
hd8_cfg=None,
hd9_cfg=None,
))
DEFAULT_CONFIG_APPLY = AttrDict(dict(
text_column="text",
label_column="prediction",
prob_column="prob",
batch_size=32,
max_seq=None,
num_processes=1,
n_gpu=1, # NEEDED???
do_lower_case=False,
))
DEFAULT_CONFIG_ESTIMATE = AttrDict(dict(
eval_method="xval", # possible values: xval, holdout, onfile
xval_folds=10,
holdout_repeats=5,
holdout_train=0.7,
eval_stratification=False,
onfile_file="NEEDED"
))
DEFAULT_CONFIG_HSEARCH = AttrDict(dict(
halg="grid",
beamsize=3,
halg_random_n=20,
est_var="head0_f1_macro_mean",
est_cmp="max",
))
def update_config(toupdate, updatewith):
for k, v in updatewith.items():
if v is not None:
toupdate[k] = v
return toupdate
def load_config(fpath):
"""
Load configuration from fpath and return as AttrDict.
:param fpath: configuration file path, either TOML or JSON file
:return: configuration object
"""
if fpath.endswith(".toml"):
data = toml.load(fpath)
elif fpath.endswith(".json"):
with open(fpath, "rt", encoding="utf-8") as infp:
data = json.load(infp)
else:
raise Exception(f"Cannot load config file {fpath}, must be .toml or json file")
return AttrDict(data)
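# Example (hypothetical config.toml):
#     max_epochs = 5
#     labels = "neg,pos"
# load_config("config.toml") returns an AttrDict, so values are reachable both as
# cfg["max_epochs"] and as cfg.max_epochs.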
def getargs(parser, cfg):
args = parser.parse_args()
if args.cfg:
cfg_add = load_config(args.cfg)
update_config(cfg, cfg_add)
update_config(cfg, vars(args))
if cfg.get("labels") is None:
cfg["label_list"] = ["0", "1"]
else:
cfg["label_list"] = cfg.labels.split(",")
logger.info(f"Effective configuration: {cfg}")
return cfg
def argparser_basic(parser=None, cfg=None, ignore_runname=False):
"""
Creates the initial parser and config data structure for most applications.
We extend the parser and the config with whatever additional bits we need before actually
parsing the arguments.
:return: a parser and a config data structure
"""
if parser is None:
parser = ArgumentParser()
if cfg is None:
cfg = AttrDict()
DF = DEFAULT_CONFIG_BASIC
cfg.update(DF)
if not ignore_runname:
parser.add_argument("--runname", type=str, required=True,
help="Experiment name. Files are stored in directory {runname}-{datetime}")
parser.add_argument("--infile", type=str, required=True,
help="Path to input file")
parser.add_argument("--cfg", type=str,
help="Path to configuration file")
parser.add_argument("--seed", type=int,
help=f"Random seed ({DF.seed})")
parser.add_argument("--n_gpu", type=int,
help=f"Number of GPUs, if GPU is to be used ({DF.n_gpu}")
parser.add_argument("--use_cuda", default=None, type=str2bool,
help="If GPUs should be used, if not specified, determined from setup")
parser.add_argument("--use_amp", type=str2bool,
help=f"Use AMP ({DF.use_amp}")
parser.add_argument("--do_lower_case", type=str2bool,
help=f"Lower case tokens ({DF.do_lower_case})")
parser.add_argument("--text_column", type=str,
help=f"Name of in/out text column ({DF.text_column})")
parser.add_argument("--batch_size", type=int,
help=f"Batch size ({DF.batch_size})")
parser.add_argument("--max_seq", type=int,
help=f"Maximum sequence length (whatever the trainer used)")
parser.add_argument("--deterministic", type=str2bool, default="False",
help=f"Use deterministic (slower) code ({DF.deterministic})")
parser.add_argument("-d", action="store_true", help="Enable debug mode")
return parser, cfg
def argparser_estimate(parser=None, cfg=None):
parser, cfg = argparser_train(parser, cfg)
DF = DEFAULT_CONFIG_ESTIMATE
if parser is None:
parser = ArgumentParser()
if cfg is None:
cfg = AttrDict()
cfg.update(DF)
parser.add_argument("--eval_method", type=str,
help=f"Evaluation method, one of xval, holdout ({DF.eval_method})")
parser.add_argument("--xval_folds", type=int,
help=f"Number of folds for xval ({DF.xval_folds})")
parser.add_argument("--holdout_repeats", type=int,
help=f"Number of repetitions for holdout estimation ({DF.holdout_repeats})")
parser.add_argument("--holdout_train", type=float,
help=f"Portion used for training for holdout estimation ({DF.holdout_train})")
parser.add_argument("--eval_stratification", type=str2bool,
help=f"Use stratified samples for the evaluation splits? ({DF.eval_stratification})")
return parser, cfg
def argparser_hsearch(parser=None, cfg=None):
parser, cfg = argparser_estimate(parser, cfg)
DF = DEFAULT_CONFIG_HSEARCH
if parser is None:
parser = ArgumentParser()
if cfg is None:
cfg = AttrDict()
cfg.update(DF)
parser.add_argument("--hcfg", type=str, required=True,
help="TOML configuration file for the hyperparameter search (required)")
parser.add_argument("--outpref", type=str, required=True,
help=f"Output prefix for the files written for the hsearch run")
parser.add_argument("--halg", type=str, default=DF.halg,
help=f"Search algorithm, one of grid, random, greedy, beam ({DF.halg})")
parser.add_argument("--halg_rand_n", type=int, default=DF.halg_random_n,
help=f"Number of random runs for halg=random ({DF.halg_random_n})")
parser.add_argument("--beamsize", type=str, default=DF.beamsize,
help=f"Size of beam for halg=beam ({DF.beamsize})")
parser.add_argument("--est_var", type=str, default=DF.est_var,
help=f"Estimation variable to use for sorting/searching ({DF.est_var})")
parser.add_argument("--est_cmp", type=str, default=DF.est_cmp,
help=f"Comparison to use for optimizing est_var, min or max ({DF.est_cmp})")
return parser, cfg
def argparser_train(parser=None, cfg=None):
parser, cfg = argparser_basic(parser, cfg)
DF = DEFAULT_CONFIG_TRAIN
if parser is None:
parser = ArgumentParser()
if cfg is None:
cfg = AttrDict()
cfg.update(DF)
parser.add_argument("--label_column", type=str,
help=f"Name of label column ({DF.label_column})")
parser.add_argument("--dev_splt", type=float,
help=f"Development set proportion ({DF.dev_splt})")
parser.add_argument("--grad_acc", type=int,
help=f"Gradient accumulation steps ({DF.grad_acc})")
parser.add_argument("--lm_dir", type=str,
help="Load LM from that directory instead of default")
parser.add_argument("--lm_name", type=str,
help="Load LM from that known named model (will download and cache model!)")
parser.add_argument("--evaluate_every", type=float,
help=f"Evaluate every this many batches ({DF.evaluate_every})")
parser.add_argument("--max_epochs", type=int,
help=f"Maximum number of epochs ({DF.max_epochs})")
parser.add_argument("--dropout", type=float,
help=f"Dropout rate ({DF.dropout})")
parser.add_argument("--lrate", type=float,
help=f"Learning rate ({DF.lrate})")
parser.add_argument("--es_patience", type=int,
help=f"Early stopping patience ({DF.es_patience})")
parser.add_argument("--es_metric", type=str,
help=f"Early stopping metric ({DF.es_metric})")
parser.add_argument("--es_mode", type=str,
help=f"Early stopping mode ({DF.es_mode})")
parser.add_argument("--es_min_evals", type=int,
help=f"Early stopping minimum evaluation steps ({DF.es_min_evals})")
parser.add_argument("--es_hd", type=int,
help=f"Early stopping head number to use ({DF.es_hd})")
parser.add_argument("--labels", type=str, default=None,
help=f"Comma separated list of labels, if missing, assume '0' and '1'")
parser.add_argument("--dev_stratification", type=str2bool,
help=f"Use stratified dev set splits? ({DF.dev_stratification})")
parser.add_argument("--fts", type=str,
help=f"FarmTasks class to use ({DF.fts})")
parser.add_argument("--fts_cfg", nargs='*', default=[],
help=f"FarmTasks configuration settings of the form parm=value")
parser.add_argument("--fos", type=str,
help=f"FarmOptSched class to use ({DF.fos})")
parser.add_argument("--fos_cfg", nargs='*', default=[],
help=f"Farm optimizer/scheduler configuration settings of the form parm=value")
parser.add_argument("--hd_dim", type=int, default=DF.hd_dim,
help=f"Dimension of the LM output, i.e. the head input ({DF.hd_dim})")
parser.add_argument("--hd0_cfg", nargs='*', default=[],
help=f"Head 0 config parameters of the form parm=value")
parser.add_argument("--hd1_cfg", nargs='*', default=[],
help=f"Head 1 config parameters of the form parm=value")
parser.add_argument("--hd2_cfg", nargs='*', default=[],
help=f"Head 2 config parameters of the form parm=value")
parser.add_argument("--hd3_cfg", nargs='*', default=[],
help=f"Head 2 config parameters of the form parm=value")
parser.add_argument("--hd4_cfg", nargs='*', default=[],
help=f"Head 2 config parameters of the form parm=value")
parser.add_argument("--losses_alpha", type=float, default=DF.losses_alpha,
help=f"Alpha for loss aggregation (weight of head 0, weight for head 1 is 1-alpha)")
return parser, cfg
def argparser_apply(parser=None, cfg=None):
parser, cfg = argparser_basic(parser, cfg, ignore_runname=True)
DF = DEFAULT_CONFIG_APPLY
if parser is None:
parser = ArgumentParser()
if cfg is None:
cfg = AttrDict()
cfg.update(DF)
parser.add_argument("--outfile", type=str, required=True,
help="Path to output TSV file")
parser.add_argument("--modeldir", type=str, required=True,
help="Path to directory where the model is stored")
# TODO: these should come from the task name maybe?
parser.add_argument("--label_column", type=str,
help=f"Name of added label column ({DF.label_column})")
parser.add_argument("--prob_column", type=str,
help=f"Name of added probability column ({DF.prob_column})")
parser.add_argument("--num_processes", default=None,
help=f"Number of processes to use ({DF.num_processes})")
return parser, cfg
def init_farm(cfg, logger=logger):
if cfg.get("use_cuda") is None:
use_cuda = torch.cuda.is_available()
else:
use_cuda = cfg.use_cuda
device, n_gpu = farm.utils.initialize_device_settings(use_cuda=use_cuda)
if cfg.get("deterministic", False):
farm.utils.set_all_seeds(seed=cfg.get("seed", 41), deterministic_cudnn=True)
# torch.set_deterministic(True)
torch.use_deterministic_algorithms(True)
else:
farm.utils.set_all_seeds(seed=cfg.get("seed", 41), deterministic_cudnn=False)
# torch.set_deterministic(False)
torch.use_deterministic_algorithms(False)
device = str(device) # this should give cuda or cpu
if use_cuda:
n_gpu = max(n_gpu, cfg.n_gpu)
cfg.n_gpu = n_gpu
cfg.device = device
cfg.cuda_used = use_cuda
logger.info("Device={}, nGPU={}".format(device, n_gpu))
mlflow.log_params({"device": str(device)})
mlflow.log_params({"n_gpu": str(n_gpu)})
import train_modified
import farm_eval
train_modified.logger = logger
farm_eval.logger = logger
# farm.train.logger = logger
# farm.eval.logger = logger
farm.utils.logger = logger
farm.infer.logger = logger
def log_results(results, name, steps=None, logging=True, print=True, num_fold=None):
assert steps is not None or num_fold is not None
use_steps = steps or num_fold
# Print a header
header = "\n\n"
header += BUSH_SEP + "\n"
header += "***************************************************\n"
if num_fold is not None:
header += f"***** EVALUATION {name} | FOLD: {num_fold} *****\n"
else:
header += f"***** EVALUATION {name} *****\n"
header += "***************************************************\n"
header += BUSH_SEP + "\n"
logger.info(header)
for head_num, head in enumerate(results):
taskname = head["task_name"]
logger.info(f"\n _________ head {head_num} of {len(results)}: {taskname} _________")
for metric_name, metric_val in head.items():
# log with ML framework (e.g. Mlflow)
            if use_mlflow:
                if metric_name not in ["preds", "labels"] and not metric_name.startswith("_"):
if isinstance(metric_val, numbers.Number):
mlflow.log_metrics(
metrics={
f"{name}_{metric_name}_{head['task_name']}": metric_val
},
step=use_steps,
)
# print via standard python logger
            if do_print:
if metric_name == "report":
if isinstance(metric_val, str) and len(metric_val) > 8000:
metric_val = metric_val[:7500] + "\n ............................. \n" + metric_val[-500:]
logger.info("{}: \n {}".format(metric_name, metric_val))
else:
                    if metric_name not in ["preds", "labels"] and not metric_name.startswith("_"):
logger.info("{} {}: {}".format(taskname, metric_name, metric_val))
def train_model(silo, save_dir, lang_model_dir, cfg=None):
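    """Train an AdaptiveModel on the given silo and return the trained model.
    Prediction heads and the loss aggregation function come from the farm
    tasks instance in cfg['_fts']; the optimizer/scheduler is created by the
    FarmOptSched class named by cfg['fos']."""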
language_model = LanguageModel.load(lang_model_dir)
# TODO: use our own task classes here to create one or more prediction heads to use
ft = cfg["_fts"] # the actual farm tasks instance (not the name)
prediction_heads = ft.get_heads(silo)
model = AdaptiveModel(
language_model=language_model,
prediction_heads=prediction_heads,
embeds_dropout_prob=cfg.dropout,
lm_output_types=["per_sequence"] * len(prediction_heads),
loss_aggregation_fn=ft.get_loss_aggregation_fn(silo),
device=cfg.device)
logger.info(f"Model used for training:\n{model}")
logger.info(f"Number of named model parameters: {len(list(model.named_parameters()))}")
logger.info(f"Number of all model parameters: {len(list(model.parameters()))}")
if cfg.d:
for name, param in model.named_parameters():
if param.requires_grad:
logger.info(f"PARAMETER name={name}, shape={param.data.shape}")
else:
logger.info(f"NOGRAD: name={name}, shape={param.data.shape}")
# Create an optimizer, this was the original code
# model, optimizer, lr_schedule = initialize_optimizer(
# model=model,
# learning_rate=cfg.lrate,
# device=cfg.device,
# n_batches=len(silo.loaders["train"]),
# n_epochs=cfg.max_epochs,
# use_amp=cfg.use_amp,
# optimizer_opts=None,
# schedule_opts={"name": "CosineWarmupWithRestarts", "warmup_proportion": 0.4},
# grad_acc_steps=cfg.grad_acc,
# )
# use our own optimizer initializer instead:
logger.info("Create optimizer/scheduler")
fosname = cfg["fos"]
clazz = globals().get(fosname)
if clazz is None:
raise Exception(f"FarmOptSched class {fosname} unknown")
fos = clazz(
model=model,
n_batches=len(silo.loaders["train"]),
n_epochs=cfg.max_epochs,
device=cfg.device,
learning_rate=cfg.lrate,
grad_acc_steps=cfg.grad_acc,
cfg=cfg
)
logger.info(f"Using Farm OptSched Instance: {fos} of type {type(fos)}")
model, optimizer, lr_schedule = fos.get_optsched()
if cfg.d:
logger.info(f"Created optimizer: {optimizer}")
logger.info(f"Created scheduler: {lr_schedule}")
earlystopping = EarlyStopping(
head=cfg.es_hd,
metric=cfg.es_metric,
mode=cfg.es_mode,
min_evals=cfg.es_min_evals,
        save_dir=save_dir,  # where to save the best model
patience=cfg.es_patience # number of evaluations to wait for improvement before terminating the training
)
# if evaluate_every is < 0, interpret abs(evaluate_every) as number of epochs
# for this we first need to find the number of batches per epoch
eval_every = cfg.evaluate_every
steps4epoch = len(silo.get_data_loader("train"))
if eval_every < 0:
nepochs = abs(eval_every)
eval_every = int(nepochs * steps4epoch)
else:
eval_every = int(eval_every)
    neval4epoch = steps4epoch / eval_every
    logger.info(f"Evaluating every {eval_every} steps: {steps4epoch} steps per epoch, {neval4epoch} evaluations per epoch")
trainer = Trainer(
model=model,
optimizer=optimizer,
data_silo=silo,
epochs=cfg.max_epochs,
n_gpu=cfg.n_gpu,
lr_schedule=lr_schedule,
evaluate_every=eval_every,
device=cfg.device,
grad_acc_steps=cfg.grad_acc,
early_stopping=earlystopping,
evaluator_test=False,
disable_tqdm=True,
)
# train it
trainer.train()
return trainer.model
def run_xval(silo, save_dir, lang_model_dir, cfg):
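    """Run cross-validation: train and evaluate one model per fold, log the
    per-fold results, and aggregate metrics (mean/min/max/stdev/var) plus a
    confusion matrix per prediction head. Returns one aggregated metric dict
    per head."""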
# Load one silo for each fold in our cross-validation
save_dir = Path(save_dir)
silos = DataSiloForCrossVal.make(silo,
n_splits=cfg.xval_folds,
stratification=cfg.eval_stratification,
sets=["train", "dev"])
for silo in silos:
sz_train = len(silo.data.get("train", 0))
sz_dev = len(silo.data.get("dev", 0))
sz_test = len(silo.data.get("test", 0))
logger.info("XVAL SPLIT SET SIZE: {}+{}+{}={}".format(
sz_train, sz_dev, sz_test, sz_train+sz_dev+sz_test
))
# for each fold, run the whole training, earlystopping to get a model, then evaluate the model
# on the test set of each fold
# Remember all the results for overall metrics over all predictions of all folds and for averaging
allresults = []
all_preds = None # for each head, the list of all predictions over all folds
all_labels = None # for each head the list of all labels over all folds
all_preferred_metrics = []
all_train_times = []
all_eval_times = []
save_dir_root = Path(save_dir)
if not save_dir_root.exists():
raise Exception("Model saving path must exist: {}".format(save_dir_root))
if not save_dir_root.is_dir():
raise Exception("Model saving path must be a directory: {}".format(save_dir_root))
for num_fold, silo in enumerate(silos):
save_to = save_dir_root.joinpath("fold{}".format(num_fold))
if not save_to.exists():
save_to.mkdir()
mlflow.start_run(run_name=f"fold-{num_fold + 1}-of-{len(silos)}", nested=True)
logger.info(f"############ Crossvalidation: Fold {num_fold + 1} of {len(silos)} ############")
tmptime = time.perf_counter()
model = train_model(silo, save_to, lang_model_dir, cfg=cfg)
all_train_times.append(time.perf_counter()-tmptime)
# do eval on test set here (and not in Trainer),
# so that we can easily store the actual preds and labels for a "global" eval across all folds.
evaluator_test = OurEvaluator(
data_loader=silo.get_data_loader("test"),
tasks=silo.processor.tasks,
device=cfg.device,
pass_instids=True,
outdir=save_dir
)
# evaluator_test = Evaluator(
# data_loader=silo.get_data_loader("test"),
# tasks=silo.processor.tasks,
# device=cfg.device,
# )
tmptime = time.perf_counter()
result = evaluator_test.eval(model, return_preds_and_labels=True, foldnr=num_fold)
all_eval_times.append(time.perf_counter() - tmptime)
log_results(result, "Fold", num_fold=num_fold)
# for now we just calculate the average over all preferred metrics for all heads to get
# the value for each fold.
metrics4heads = [h.metric for h in model.prediction_heads]
# NOTE: the metrics we allow here are ONLY registered metrics which refer to metrics classes!
# So we replace the name with the actual class instances
metrics4heads = [registered_metrics[m] for m in metrics4heads]
# now calculate the preferred metrics for each head
metricvals4heads = [metrics4heads[i].preferred(r) for i, r in enumerate(result)]
all_preferred_metrics.append(statistics.mean(metricvals4heads))
allresults.append(result)
if all_preds is None:
all_preds = [[] for _ in result]
all_labels = [[] for _ in result]
for i, r in enumerate(result):
all_preds[i].extend(r.get("preds"))
all_labels[i].extend(r.get("labels"))
if cfg.device == "cuda":
logger.info("CUDA: trying to release memory, current {}".format(
torch.cuda.memory_allocated() / 1024 / 1024
))
logger.info("(before) CUDA memory allocated: {}".format(
torch.cuda.memory_allocated() / 1024 / 1024))
logger.info("(before) CUDA max memory allocated: {}".format(
torch.cuda.max_memory_allocated() / 1024 / 1024))
logger.info("(before) CUDA memory cached: {}".format(
torch.cuda.memory_cached() / 1024 / 1024))
logger.info("(before) CUDA max memory cached: {}".format(
torch.cuda.max_memory_cached() / 1024 / 1024))
model.cpu() # MAYBE NOT NECESSARY BUT NOT SURE
torch.cuda.empty_cache()
logger.info("(after) CUDA memory allocated: {}".format(
torch.cuda.memory_allocated() / 1024 / 1024))
logger.info("(after) CUDA max memory allocated: {}".format(
torch.cuda.max_memory_allocated() / 1024 / 1024))
logger.info("(after) CUDA memory cached: {}".format(
torch.cuda.memory_cached() / 1024 / 1024))
logger.info("(after) CUDA max memory cached: {}".format(
torch.cuda.max_memory_cached() / 1024 / 1024))
with open(str(save_to.joinpath("results.json")), "wt") as fp:
json.dump(result, fp)
mlflow.end_run()
# Save the per-fold results to json for a separate, more detailed analysis
with open(str(save_dir_root.joinpath("results-perfold.json")), "wt") as fp:
json.dump(allresults, fp)
# find the fold with the best average preferred metric value
best_fold_idx = np.argmax(all_preferred_metrics)
logger.info(f"Best fold index: {best_fold_idx}")
mlflow.log_params({"XVAL_BEST_FOLD_IDX": best_fold_idx})
mlflow.log_params({"XVAL_BEST_FOLD_METRIC": all_preferred_metrics[best_fold_idx]})
# the following is a list that contains one defaultdict(list) per head.
# each defaultdict(list) will have all the values for that head and metric from allresults
xval_metric_lists_per_head = [defaultdict(list) for _ in allresults[0]]
for resultsperhead in allresults:
assert len(xval_metric_lists_per_head) == len(resultsperhead)
for i, res in enumerate(resultsperhead):
for name in res.keys():
if name not in ["preds", "labels"] and \
not name.startswith("_") and \
isinstance(res[name], numbers.Number):
xval_metric_lists_per_head[i][name].append(res[name])
# now collapse each of the lists into its mean, and add a stdev and var metric
xval_metric_per_head = [{} for _ in xval_metric_lists_per_head]
for i, res in enumerate(xval_metric_lists_per_head):
newres = xval_metric_per_head[i]
newres["dirname"] = str(save_dir_root)
# newres["report"] = allresults[0][i].get("report", None)
newres["task_name"] = allresults[0][i].get("task_name", "UNKNOWN TASKNAME ???")
newres["time_train_mean"] = statistics.mean(all_train_times)
newres["time_eval_mean"] = statistics.mean(all_eval_times)
newres["time_total"] = sum(all_train_times)+sum(all_eval_times)
for name in res.keys():
values = res[name]
vmean = statistics.mean(values)
newres[name+"_mean"] = vmean
newres[name+"_min"] = min(values)
newres[name+"_max"] = max(values)
if len(values) > 1:
vstdev = statistics.stdev(values)
vvar = statistics.variance(values)
newres[name + "_stdev"] = vstdev
newres[name + "_var"] = vvar
log_results(xval_metric_per_head, "XVAL", steps=0)
# add the confusion matrices per head
for i, d in enumerate(xval_metric_per_head):
# automatically determine the label list for the head
tmplabels = set()
tmplabels.update(all_labels[i])
tmplabels.update(all_preds[i])
tmplabels = list(tmplabels)
tmplabels.sort()
conf_matrix = confusion_matrix(all_labels[i], all_preds[i], labels=tmplabels)
conf_matrix = conf_matrix.tolist()
d["confusion_matrix"] = conf_matrix
d["confusion_labels"] = tmplabels
# log overall confusions matrix
logger.info(f"Confusion matrix for head {i}:")
l = " ".join(tmplabels)
logger.info(f" {l}")
for j, row in enumerate(conf_matrix):
r = " ".join([str(tmp) for tmp in row])
logger.info(f"{tmplabels[j]} {r}")
with open(str(save_dir_root.joinpath("results-all.json")), "wt") as fp:
json.dump(xval_metric_per_head, fp)
return xval_metric_per_head
def run_holdout(silo, save_dir, lang_model_dir, cfg):
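    """Run repeated holdout estimation: train and evaluate one model per split,
    log the per-split results, and aggregate metrics (mean/min/max/stdev/var)
    plus a confusion matrix per prediction head. Returns one aggregated metric
    dict per head."""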
    # Load one silo for each holdout repetition
save_dir = Path(save_dir)
silos = DataSiloForHoldout.make(silo,
n_splits=cfg.holdout_repeats,
stratification=cfg.eval_stratification,
random_state=cfg.seed,
train_split=cfg.holdout_train,
sets=["train", "dev"])
for silo in silos:
sz_train = len(silo.data.get("train", 0))
sz_dev = len(silo.data.get("dev", 0))
sz_test = len(silo.data.get("test", 0))
logger.info("HOLDOUT SPLIT SET SIZE: train={} dev={} test={} all={}".format(
sz_train, sz_dev, sz_test, sz_train+sz_dev+sz_test
))
# tmp_train = silo.data.get("train")
# tmp_test = silo.data.get("test")
# tmp_dev = silo.data.get("dev")
# logger.info(f"!!!!DEBUG first instance from train {list(tmp_train)[0]}")
# logger.info(f"!!!!DEBUG last instance from train {list(tmp_train)[-1]}")
# logger.info(f"!!!!DEBUG first instance from test {list(tmp_test)[0]}")
# logger.info(f"!!!!DEBUG last instance from test {list(tmp_test)[-1]}")
# logger.info(f"!!!!DEBUG first instance from dev {list(tmp_dev)[0]}")
# logger.info(f"!!!!DEBUG last instance from dev {list(tmp_dev)[-1]}")
# for each repetition, run the whole training, earlystopping to get a model, then evaluate the model
# on the test set of each fold
# Remember all the results for overall metrics over all predictions of all folds and for averaging
allresults = []
all_preds = None # for each head, the list of all predictions over all folds
all_labels = None # for each head the list of all labels over all folds
all_preferred_metrics = []
all_train_times = []
all_eval_times = []
save_dir_root = Path(save_dir)
if not save_dir_root.exists():
raise Exception("Model saving path must exist: {}".format(save_dir_root))
if not save_dir_root.is_dir():
raise Exception("Model saving path must be a directory: {}".format(save_dir_root))
for num_fold, silo in enumerate(silos):
save_to = save_dir_root.joinpath("fold{}".format(num_fold))
if not save_to.exists():
save_to.mkdir()
mlflow.start_run(run_name=f"fold-{num_fold + 1}-of-{len(silos)}", nested=True)
logger.info(f"############ Holdout estimation: Split {num_fold + 1} of {len(silos)} ############")
tmptime = time.perf_counter()
model = train_model(silo, save_to, lang_model_dir, cfg=cfg)
all_train_times.append(time.perf_counter()-tmptime)
# do eval on test set here (and not in Trainer),
# so that we can easily store the actual preds and labels for a "global" eval across all folds.
evaluator_test = OurEvaluator(
data_loader=silo.get_data_loader("test"),
tasks=silo.processor.tasks,
device=cfg.device,
pass_instids=True,
outdir=save_dir,
)
# evaluator_test = Evaluator(
# data_loader=silo.get_data_loader("test"),
# tasks=silo.processor.tasks,
# device=cfg.device,
# )
tmptime = time.perf_counter()
result = evaluator_test.eval(model, return_preds_and_labels=True, foldnr=num_fold)
all_eval_times.append(time.perf_counter()-tmptime)
log_results(result, "Split", num_fold=num_fold)
# for now we just calculate the average over all preferred metrics for all heads to get
# the value for each fold.
metrics4heads = [h.metric for h in model.prediction_heads]
# NOTE: the metrics we allow here are ONLY registered metrics which refer to metrics classes!
# So we replace the name with the actual class instances
metrics4heads = [registered_metrics[m] for m in metrics4heads]
# now calculate the preferred metrics for each head
metricvals4heads = [metrics4heads[i].preferred(r) for i, r in enumerate(result)]
all_preferred_metrics.append(statistics.mean(metricvals4heads))
allresults.append(result)
if all_preds is None:
all_preds = [[] for _ in result]
all_labels = [[] for _ in result]
for i, r in enumerate(result):
all_preds[i].extend(r.get("preds"))
all_labels[i].extend(r.get("labels"))
if cfg.device == "cuda":
logger.info("CUDA: trying to release memory, current {}".format(
torch.cuda.memory_allocated() / 1024 / 1024
))
logger.info("(before) CUDA memory allocated: {}".format(
torch.cuda.memory_allocated() / 1024 / 1024))
logger.info("(before) CUDA max memory allocated: {}".format(
torch.cuda.max_memory_allocated() / 1024 / 1024))
logger.info("(before) CUDA memory cached: {}".format(
torch.cuda.memory_cached() / 1024 / 1024))
logger.info("(before) CUDA max memory cached: {}".format(
torch.cuda.max_memory_cached() / 1024 / 1024))
model.cpu() # MAYBE NOT NECESSARY BUT NOT SURE
torch.cuda.empty_cache()
logger.info("(after) CUDA memory allocated: {}".format(
torch.cuda.memory_allocated() / 1024 / 1024))
logger.info("(after) CUDA max memory allocated: {}".format(
torch.cuda.max_memory_allocated() / 1024 / 1024))
logger.info("(after) CUDA memory cached: {}".format(
torch.cuda.memory_cached() / 1024 / 1024))
logger.info("(after) CUDA max memory cached: {}".format(
torch.cuda.max_memory_cached() / 1024 / 1024))
with open(str(save_to.joinpath("results.json")), "wt") as fp:
json.dump(result, fp)
logger.info(f"Fold model and data saved to {save_to}")
mlflow.end_run()
# Save the per-fold results to json for a separate, more detailed analysis
with open(str(save_dir_root.joinpath("results-persplit.json")), "wt") as fp:
json.dump(allresults, fp)
# find the fold with the best average preferred metric value
best_fold_idx = np.argmax(all_preferred_metrics)
logger.info(f"Best split index: {best_fold_idx}")
mlflow.log_params({"HOLDOUT_BEST_SPLIT_IDX": best_fold_idx})
mlflow.log_params({"HOLDOUT_BEST_SPLIT_METRIC": all_preferred_metrics[best_fold_idx]})
# the following is a list that contains one defaultdict(list) per head.
# each defaultdict(list) will have all the values for that head and metric from allresults
eval_metric_lists_per_head = [defaultdict(list) for _ in allresults[0]]
for resultsperhead in allresults:
assert len(eval_metric_lists_per_head) == len(resultsperhead)
for headnr, res in enumerate(resultsperhead):
for name in res.keys():
if name not in ["preds", "labels"] and \
not name.startswith("_") and \
isinstance(res[name], numbers.Number):
eval_metric_lists_per_head[headnr][name].append(res[name])
# now collapse each of the lists into its mean, and add a stdev and var metric
eval_metric_per_head = [{} for _ in eval_metric_lists_per_head]
for i, res in enumerate(eval_metric_lists_per_head):
newres = eval_metric_per_head[i]
newres["dirname"] = str(save_dir_root)
# newres["report"] = allresults[i][0].get("report", None)
newres["task_name"] = allresults[0][i].get("task_name", "UNKNOWN TASKNAME ???")
newres["time_train_mean"] = statistics.mean(all_train_times)
newres["time_eval_mean"] = statistics.mean(all_eval_times)
newres["time_total"] = sum(all_train_times)+sum(all_eval_times)
for name in res.keys():
values = res[name]
vmean = statistics.mean(values)
newres[name+"_mean"] = vmean
newres[name+"_min"] = min(values)
newres[name+"_max"] = max(values)
if len(values) > 1:
vstdev = statistics.stdev(values)
vvar = statistics.variance(values)
newres[name+"_stdev"] = vstdev
newres[name+"_var"] = vvar
log_results(eval_metric_per_head, "HOLDOUT", steps=0)
# add the confusion matrices per head
for i, d in enumerate(eval_metric_per_head):
# automatically determine the label list for the head
tmplabels = set()
tmplabels.update(all_labels[i])
tmplabels.update(all_preds[i])
tmplabels = list(tmplabels)
tmplabels.sort()
conf_matrix = confusion_matrix(all_labels[i], all_preds[i], labels=tmplabels)
conf_matrix = conf_matrix.tolist()
d["confusion_matrix"] = conf_matrix
d["confusion_labels"] = tmplabels
# log overall confusions matrix
logger.info(f"Confusion matrix for head {i}:")
l = " ".join(tmplabels)
logger.info(f" {l}")
for j, row in enumerate(conf_matrix):
r = " ".join([str(tmp) for tmp in row])
logger.info(f"{tmplabels[j]} {r}")
with open(str(save_dir_root.joinpath("results-all.json")), "wt") as fp:
json.dump(eval_metric_per_head, fp)
logger.info(f"Estimation data and folds saved to {save_dir_root}")
return eval_metric_per_head
def run_estimate(cfg, logger=logger):
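    """Estimate model performance: set up tokenizer, processor and data silo
    from cfg, then run cross-validation or holdout estimation depending on
    cfg['eval_method']."""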
if cfg.get("runname") is None:
cfg["runname"] = "estimate"
cfg.runname = cfg.runname + "_" + time.strftime('%Y%m%d_%H%M%S')
savedir = cfg.runname
logger.info(f"Running estimation with configuration: {cfg}")
ml_logger = farm.utils.MLFlowLogger(tracking_uri=str(Path(savedir).joinpath("mlruns")))
run_name = "eval"
ml_logger.init_experiment(experiment_name="{} / {}".format(cfg.runname, str(datetime.datetime.now())[:19]), run_name=run_name)
init_farm(cfg, logger=logger)
ml_logger.log_params(cfg)
logger.info("Experiment init")
lang_model_dir = "/raid/data/models/bert/deepset_bert-base-german-cased"
if cfg.get("lm_name"):
lang_model_dir = cfg.lm_name
if cfg.get("lm_dir"):
lang_model_dir = cfg.lm_dir
logger.info(f"Using language model directory: {lang_model_dir}")
ml_logger.log_params({"use_lm_model": lang_model_dir})
train_file = cfg.infile
ml_logger.log_params({"train_file": train_file})
logger.info(f"Using input file: {train_file}")
#label_column_name = cfg.label_column
#text_column_name = cfg.text_column
max_seq_length = cfg.max_seq
dev_split = cfg.dev_splt
#label_list = cfg.label_list
#ml_logger.log_params({"label_list": ",".join(label_list)})
    # Create tokenizer
    # Here we cannot just specify the model bin file; this requires a directory with all kinds of files.
    # For now, test with the predefined names: bert-base-german-cased, bert-base-german-dbmdz-cased.
    # These names download the following vocab files:
    # bert-base-german-cased: https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt
    # bert-base-german-dbmdz-cased: https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt
    # Directly specifying the vocab file also works.
logger.info(f"Load tokenizer from {lang_model_dir}")
tokenizer = farm.modeling.tokenization.Tokenizer.load(
pretrained_model_name_or_path=lang_model_dir,
do_lower_case=cfg.do_lower_case)
# register_metrics('mymetrics', ClassificationMetrics(label_list=label_list))
logger.info("Create processor")
ftname = cfg["fts"]
clazz = globals().get(ftname)
if clazz is None:
raise Exception(f"FarmTasks class {ftname} unknown")
ft = clazz(cfg)
data_dir = os.path.dirname(train_file)
if data_dir == "":
data_dir = "."
processor = ft.get_processor(
tokenizer=tokenizer,
max_seq_len=max_seq_length,
train_filename=os.path.basename(train_file),
test_filename=None,
dev_split=dev_split,
dev_stratification=cfg.dev_stratification,
data_dir=data_dir,
)
cfg["_fts"] = ft
logger.info("Create data silo")
silo = farm.data_handler.data_silo.DataSilo(
processor=processor,
max_processes=1,
batch_size=cfg.batch_size)
if cfg["eval_method"] == "xval":
ret = run_xval(silo, savedir, lang_model_dir, cfg=cfg)
elif cfg["eval_method"] == "holdout":
ret = run_holdout(silo, savedir, lang_model_dir, cfg=cfg)
else:
raise Exception(f"Not supported: eval_method={cfg['eval_method']}")
ml_logger.end_run()
return ret
def run_apply(cfg, logger=logger):
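    """Apply a trained model to a TSV file: stream it through an Inferencer in
    batches and write the rows back out with label and probability columns
    appended."""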
# logger.setLevel(logging.CRITICAL) ## does not work
if not cfg.d:
logging.disable(logging.CRITICAL+1)
init_farm(cfg, logger=logger)
def process_output_batch(cfg, inferencer, batch, name2idx, outfp):
"""In-place modify batch: add label and prob columns at end"""
dicts = [{"text": row[name2idx[cfg.text_column]]} for row in batch]
ret = inferencer.inference_from_dicts(dicts)
# TODO we assume getting the prediction from head 0 here
result = ret[0]
preds = result["predictions"]
labels = [pred["label"] for pred in preds]
probs = [pred["probability"] for pred in preds]
assert len(batch) == len(labels)
assert len(batch) == len(probs)
for incols, label, prob in zip(batch, labels, probs):
print("\t".join(incols), label, prob, sep="\t", file=outfp)
logger.info("LOADING MODEL")
if cfg.max_seq is None:
inferencer = Inferencer.load(
cfg.modeldir,
batch_size=cfg.batch_size,
gpu=cfg.cuda_used,
return_class_probs=False,
num_processes=cfg.num_processes,
disable_tqdm=True,
)
else:
inferencer = Inferencer.load(
cfg.modeldir,
batch_size=cfg.batch_size,
max_seq_len=cfg.max_seq,
gpu=cfg.cuda_used,
return_class_probs=False,
num_processes=cfg.num_processes,
disable_tqdm=True,
)
used_max_seq_len = inferencer.processor.max_seq_len
logging.info(f"Used max_seq_len is {used_max_seq_len}")
mlflow.log_params({"used_max_seq_len": used_max_seq_len})
inferencer.disable_tqdm = True
# TODO: do we need to disable logging?
with open(cfg.infile, "rt", encoding="utf-8") as infp:
# read the header line which we always assume to exist
cols = infp.readline().rstrip("\n\r").split("\t")
name2idx = {n: i for i, n in enumerate(cols)}
        with open(cfg.outfile, "wt", encoding="utf-8") as outfp:
# write hdr
outcols = cols.copy()
outcols.append(cfg.label_column)
outcols.append(cfg.prob_column)
print("\t".join(outcols), file=outfp)
batch = [] # batchsize rows to process
for line in infp:
fields = line.rstrip("\n\r").split("\t")
batch.append(fields)
if len(batch) >= cfg.batch_size:
process_output_batch(cfg, inferencer, batch, name2idx, outfp)
batch = []
if len(batch) > 0:
process_output_batch(cfg, inferencer, batch, name2idx, outfp)
def run_train(cfg, logger=logger):
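    """Train a model on the full training data: set up tokenizer, processor and
    data silo from cfg, train via train_model() and return the trained model."""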
if cfg.get("runname") is None:
cfg["runname"] = "train"
cfg.runname = cfg.runname + "_" + time.strftime('%Y%m%d_%H%M%S')
savedir = cfg.runname
ml_logger = farm.utils.MLFlowLogger(tracking_uri=str(Path(savedir).joinpath("mlruns")))
run_name = "train"
ml_logger.init_experiment(experiment_name="{} / {}".format(cfg.runname, str(datetime.datetime.now())[:19]), run_name=run_name)
init_farm(cfg, logger=logger)
ml_logger.log_params(cfg)
lang_model_dir = os.environ["HOME"] + "/models/bert/deepset_bert-base-german-cased"
if cfg.get("lm_name"):
lang_model_dir = cfg.lm_name
if cfg.get("lm_dir"):
lang_model_dir = cfg.lm_dir
logger.info(f"Using language model directory: {lang_model_dir}")
ml_logger.log_params({"use_lm_model": lang_model_dir})
train_file = cfg.infile
ml_logger.log_params({"train_file": train_file})
logger.info(f"Using input file: {train_file}")
# TODO: these should go into the experiment class
#label_column_name = cfg.label_column
#text_column_name = cfg.text_column
max_seq_length = cfg.max_seq
dev_split = cfg.dev_splt
# TODO: Should go into the experiment class and get logged for each head, head0 to headk
#label_list = cfg.label_list
#ml_logger.log_params({"label_list": ",".join(label_list)})
    # Create tokenizer
    # Here we cannot just specify the model bin file; this requires a directory with all kinds of files.
    # For now, test with the predefined names: bert-base-german-cased, bert-base-german-dbmdz-cased.
    # These names download the following vocab files:
    # bert-base-german-cased: https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt
    # bert-base-german-dbmdz-cased: https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt
    # Directly specifying the vocab file also works.
logger.info(f"Load tokenizer from {lang_model_dir}")
tokenizer = farm.modeling.tokenization.Tokenizer.load(
pretrained_model_name_or_path=lang_model_dir,
do_lower_case=cfg.do_lower_case)
# TODO: we need a separate metric for each head
#register_metrics('mymetrics', ClassificationMetrics(label_list=label_list))
logger.info("Create processor")
ftname = cfg["fts"]
clazz = globals().get(ftname)
if clazz is None:
raise Exception(f"FarmTasks class {ftname} unknown")
ft = clazz(cfg)
    data_dir = os.path.dirname(train_file)
if data_dir == "":
data_dir = "."
processor = ft.get_processor(
tokenizer=tokenizer,
max_seq_len=max_seq_length,
train_filename=os.path.basename(train_file),
test_filename=None,
dev_split=dev_split,
dev_stratification=cfg.dev_stratification,
data_dir=data_dir,
)
if hasattr(ft, "label_list"):
ml_logger.log_params({"label_list": ",".join(ft.label_list)})
else:
for i in range(ft.nheads):
llist = getattr(ft, f"label_list{i}")
ml_logger.log_params({f"label_list{i}": ",".join(llist)})
cfg["_fts"] = ft # the farm tasks object, stored with underscore-name to avoid logging!
logger.info("Create data silo")
silo = farm.data_handler.data_silo.DataSilo(
processor=processor,
batch_size=cfg.batch_size)
model = train_model(silo, savedir, lang_model_dir, cfg=cfg)
ml_logger.end_run()
return model
|
[
"sklearn.metrics.confusion_matrix",
"argparse.ArgumentParser",
"torch.cuda.max_memory_allocated",
"numpy.argmax",
"time.strftime",
"farm.infer.Inferencer.load",
"collections.defaultdict",
"pathlib.Path",
"statistics.variance",
"orderedattrdict.AttrDict",
"mlflow.end_run",
"torch.cuda.max_memory_cached",
"toml.load",
"farm.data_handler.data_silo.DataSiloForHoldout.make",
"datetime.datetime.now",
"torch.cuda.memory_allocated",
"json.dump",
"farm_tools.train_modified.Trainer",
"statistics.stdev",
"time.perf_counter",
"farm.modeling.language_model.LanguageModel.load",
"logging.disable",
"torch.cuda.is_available",
"statistics.mean",
"torch.use_deterministic_algorithms",
"signal.signal",
"mlflow.log_metrics",
"sys.exit",
"farm.data_handler.data_silo.DataSiloForCrossVal.make",
"json.load",
"logging.info",
"mlflow.log_params",
"torch.cuda.empty_cache",
"farm_tools.utils.init_logger",
"torch.cuda.memory_cached"
] |
[((1369, 1382), 'farm_tools.utils.init_logger', 'init_logger', ([], {}), '()\n', (1380, 1382), False, 'from farm_tools.utils import init_logger\n'), ((1855, 1899), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (1868, 1899), False, 'import signal\n'), ((4047, 4061), 'orderedattrdict.AttrDict', 'AttrDict', (['data'], {}), '(data)\n', (4055, 4061), False, 'from orderedattrdict import AttrDict\n'), ((17197, 17231), 'farm.modeling.language_model.LanguageModel.load', 'LanguageModel.load', (['lang_model_dir'], {}), '(lang_model_dir)\n', (17215, 17231), False, 'from farm.modeling.language_model import LanguageModel\n'), ((20361, 20643), 'farm_tools.train_modified.Trainer', 'Trainer', ([], {'model': 'model', 'optimizer': 'optimizer', 'data_silo': 'silo', 'epochs': 'cfg.max_epochs', 'n_gpu': 'cfg.n_gpu', 'lr_schedule': 'lr_schedule', 'evaluate_every': 'eval_every', 'device': 'cfg.device', 'grad_acc_steps': 'cfg.grad_acc', 'early_stopping': 'earlystopping', 'evaluator_test': '(False)', 'disable_tqdm': '(True)'}), '(model=model, optimizer=optimizer, data_silo=silo, epochs=cfg.\n max_epochs, n_gpu=cfg.n_gpu, lr_schedule=lr_schedule, evaluate_every=\n eval_every, device=cfg.device, grad_acc_steps=cfg.grad_acc,\n early_stopping=earlystopping, evaluator_test=False, disable_tqdm=True)\n', (20368, 20643), False, 'from farm_tools.train_modified import Trainer, EarlyStopping\n'), ((20920, 20934), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (20924, 20934), False, 'from pathlib import Path\n'), ((20947, 21070), 'farm.data_handler.data_silo.DataSiloForCrossVal.make', 'DataSiloForCrossVal.make', (['silo'], {'n_splits': 'cfg.xval_folds', 'stratification': 'cfg.eval_stratification', 'sets': "['train', 'dev']"}), "(silo, n_splits=cfg.xval_folds, stratification=cfg.\n eval_stratification, sets=['train', 'dev'])\n", (20971, 21070), False, 'from farm.data_handler.data_silo import DataSiloForCrossVal, DataSiloForHoldout\n'), ((21999, 22013), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (22003, 22013), False, 'from pathlib import Path\n'), ((26305, 26337), 'numpy.argmax', 'np.argmax', (['all_preferred_metrics'], {}), '(all_preferred_metrics)\n', (26314, 26337), True, 'import numpy as np\n'), ((26395, 26451), 'mlflow.log_params', 'mlflow.log_params', (["{'XVAL_BEST_FOLD_IDX': best_fold_idx}"], {}), "({'XVAL_BEST_FOLD_IDX': best_fold_idx})\n", (26412, 26451), False, 'import mlflow\n'), ((26456, 26543), 'mlflow.log_params', 'mlflow.log_params', (["{'XVAL_BEST_FOLD_METRIC': all_preferred_metrics[best_fold_idx]}"], {}), "({'XVAL_BEST_FOLD_METRIC': all_preferred_metrics[\n best_fold_idx]})\n", (26473, 26543), False, 'import mlflow\n'), ((29516, 29530), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (29520, 29530), False, 'from pathlib import Path\n'), ((29543, 29729), 'farm.data_handler.data_silo.DataSiloForHoldout.make', 'DataSiloForHoldout.make', (['silo'], {'n_splits': 'cfg.holdout_repeats', 'stratification': 'cfg.eval_stratification', 'random_state': 'cfg.seed', 'train_split': 'cfg.holdout_train', 'sets': "['train', 'dev']"}), "(silo, n_splits=cfg.holdout_repeats, stratification=\n cfg.eval_stratification, random_state=cfg.seed, train_split=cfg.\n holdout_train, sets=['train', 'dev'])\n", (29566, 29729), False, 'from farm.data_handler.data_silo import DataSiloForCrossVal, DataSiloForHoldout\n'), ((31373, 31387), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (31377, 31387), False, 'from pathlib 
import Path\n'), ((35747, 35779), 'numpy.argmax', 'np.argmax', (['all_preferred_metrics'], {}), '(all_preferred_metrics)\n', (35756, 35779), True, 'import numpy as np\n'), ((35838, 35898), 'mlflow.log_params', 'mlflow.log_params', (["{'HOLDOUT_BEST_SPLIT_IDX': best_fold_idx}"], {}), "({'HOLDOUT_BEST_SPLIT_IDX': best_fold_idx})\n", (35855, 35898), False, 'import mlflow\n'), ((35903, 35994), 'mlflow.log_params', 'mlflow.log_params', (["{'HOLDOUT_BEST_SPLIT_METRIC': all_preferred_metrics[best_fold_idx]}"], {}), "({'HOLDOUT_BEST_SPLIT_METRIC': all_preferred_metrics[\n best_fold_idx]})\n", (35920, 35994), False, 'import mlflow\n'), ((43829, 43884), 'logging.info', 'logging.info', (['f"""Used max_seq_len is {used_max_seq_len}"""'], {}), "(f'Used max_seq_len is {used_max_seq_len}')\n", (43841, 43884), False, 'import logging\n'), ((43889, 43946), 'mlflow.log_params', 'mlflow.log_params', (["{'used_max_seq_len': used_max_seq_len}"], {}), "({'used_max_seq_len': used_max_seq_len})\n", (43906, 43946), False, 'import mlflow\n'), ((1839, 1850), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1847, 1850), False, 'import sys\n'), ((3794, 3810), 'toml.load', 'toml.load', (['fpath'], {}), '(fpath)\n', (3803, 3810), False, 'import toml\n'), ((4824, 4840), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4838, 4840), False, 'from argparse import ArgumentParser\n'), ((4875, 4885), 'orderedattrdict.AttrDict', 'AttrDict', ([], {}), '()\n', (4883, 4885), False, 'from orderedattrdict import AttrDict\n'), ((6778, 6794), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (6792, 6794), False, 'from argparse import ArgumentParser\n'), ((6829, 6839), 'orderedattrdict.AttrDict', 'AttrDict', ([], {}), '()\n', (6837, 6839), False, 'from orderedattrdict import AttrDict\n'), ((7809, 7825), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (7823, 7825), False, 'from argparse import ArgumentParser\n'), ((7860, 7870), 'orderedattrdict.AttrDict', 'AttrDict', ([], {}), '()\n', (7868, 7870), False, 'from orderedattrdict import AttrDict\n'), ((9187, 9203), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (9201, 9203), False, 'from argparse import ArgumentParser\n'), ((9238, 9248), 'orderedattrdict.AttrDict', 'AttrDict', ([], {}), '()\n', (9246, 9248), False, 'from orderedattrdict import AttrDict\n'), ((13144, 13160), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (13158, 13160), False, 'from argparse import ArgumentParser\n'), ((13195, 13205), 'orderedattrdict.AttrDict', 'AttrDict', ([], {}), '()\n', (13203, 13205), False, 'from orderedattrdict import AttrDict\n'), ((14059, 14084), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14082, 14084), False, 'import torch\n'), ((14377, 14417), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(True)'], {}), '(True)\n', (14411, 14417), False, 'import torch\n'), ((14563, 14604), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(False)'], {}), '(False)\n', (14597, 14604), False, 'import torch\n'), ((22638, 22657), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22655, 22657), False, 'import time\n'), ((23383, 23402), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23400, 23402), False, 'import time\n'), ((26009, 26025), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (26023, 26025), False, 'import mlflow\n'), ((26193, 26218), 'json.dump', 'json.dump', (['allresults', 'fp'], {}), '(allresults, fp)\n', (26202, 
26218), False, 'import json\n'), ((26744, 26761), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (26755, 26761), False, 'from collections import defaultdict\n'), ((27714, 27746), 'statistics.mean', 'statistics.mean', (['all_train_times'], {}), '(all_train_times)\n', (27729, 27746), False, 'import statistics\n'), ((27782, 27813), 'statistics.mean', 'statistics.mean', (['all_eval_times'], {}), '(all_eval_times)\n', (27797, 27813), False, 'import statistics\n'), ((28749, 28812), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_labels[i]', 'all_preds[i]'], {'labels': 'tmplabels'}), '(all_labels[i], all_preds[i], labels=tmplabels)\n', (28765, 28812), False, 'from sklearn.metrics import confusion_matrix\n'), ((29331, 29366), 'json.dump', 'json.dump', (['xval_metric_per_head', 'fp'], {}), '(xval_metric_per_head, fp)\n', (29340, 29366), False, 'import json\n'), ((32016, 32035), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32033, 32035), False, 'import time\n'), ((32762, 32781), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32779, 32781), False, 'import time\n'), ((35450, 35466), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (35464, 35466), False, 'import mlflow\n'), ((35635, 35660), 'json.dump', 'json.dump', (['allresults', 'fp'], {}), '(allresults, fp)\n', (35644, 35660), False, 'import json\n'), ((36195, 36212), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (36206, 36212), False, 'from collections import defaultdict\n'), ((37175, 37207), 'statistics.mean', 'statistics.mean', (['all_train_times'], {}), '(all_train_times)\n', (37190, 37207), False, 'import statistics\n'), ((37243, 37274), 'statistics.mean', 'statistics.mean', (['all_eval_times'], {}), '(all_eval_times)\n', (37258, 37274), False, 'import statistics\n'), ((38209, 38272), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_labels[i]', 'all_preds[i]'], {'labels': 'tmplabels'}), '(all_labels[i], all_preds[i], labels=tmplabels)\n', (38225, 38272), False, 'from sklearn.metrics import confusion_matrix\n'), ((38791, 38826), 'json.dump', 'json.dump', (['eval_metric_per_head', 'fp'], {}), '(eval_metric_per_head, fp)\n', (38800, 38826), False, 'import json\n'), ((39079, 39109), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (39092, 39109), False, 'import time\n'), ((42352, 42389), 'logging.disable', 'logging.disable', (['(logging.CRITICAL + 1)'], {}), '(logging.CRITICAL + 1)\n', (42367, 42389), False, 'import logging\n'), ((43226, 43388), 'farm.infer.Inferencer.load', 'Inferencer.load', (['cfg.modeldir'], {'batch_size': 'cfg.batch_size', 'gpu': 'cfg.cuda_used', 'return_class_probs': '(False)', 'num_processes': 'cfg.num_processes', 'disable_tqdm': '(True)'}), '(cfg.modeldir, batch_size=cfg.batch_size, gpu=cfg.cuda_used,\n return_class_probs=False, num_processes=cfg.num_processes, disable_tqdm\n =True)\n', (43241, 43388), False, 'from farm.infer import Inferencer\n'), ((43494, 43682), 'farm.infer.Inferencer.load', 'Inferencer.load', (['cfg.modeldir'], {'batch_size': 'cfg.batch_size', 'max_seq_len': 'cfg.max_seq', 'gpu': 'cfg.cuda_used', 'return_class_probs': '(False)', 'num_processes': 'cfg.num_processes', 'disable_tqdm': '(True)'}), '(cfg.modeldir, batch_size=cfg.batch_size, max_seq_len=cfg.\n max_seq, gpu=cfg.cuda_used, return_class_probs=False, num_processes=cfg\n .num_processes, disable_tqdm=True)\n', (43509, 43682), False, 'from farm.infer import Inferencer\n'), ((45111, 45141), 
'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (45124, 45141), False, 'import time\n'), ((24233, 24266), 'statistics.mean', 'statistics.mean', (['metricvals4heads'], {}), '(metricvals4heads)\n', (24248, 24266), False, 'import statistics\n'), ((25348, 25372), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (25370, 25372), False, 'import torch\n'), ((25979, 26000), 'json.dump', 'json.dump', (['result', 'fp'], {}), '(result, fp)\n', (25988, 26000), False, 'import json\n'), ((27969, 27992), 'statistics.mean', 'statistics.mean', (['values'], {}), '(values)\n', (27984, 27992), False, 'import statistics\n'), ((33611, 33644), 'statistics.mean', 'statistics.mean', (['metricvals4heads'], {}), '(metricvals4heads)\n', (33626, 33644), False, 'import statistics\n'), ((34726, 34750), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (34748, 34750), False, 'import torch\n'), ((35357, 35378), 'json.dump', 'json.dump', (['result', 'fp'], {}), '(result, fp)\n', (35366, 35378), False, 'import json\n'), ((37430, 37453), 'statistics.mean', 'statistics.mean', (['values'], {}), '(values)\n', (37445, 37453), False, 'import statistics\n'), ((3922, 3937), 'json.load', 'json.load', (['infp'], {}), '(infp)\n', (3931, 3937), False, 'import json\n'), ((22757, 22776), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22774, 22776), False, 'import time\n'), ((23524, 23543), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23541, 23543), False, 'import time\n'), ((28183, 28207), 'statistics.stdev', 'statistics.stdev', (['values'], {}), '(values)\n', (28199, 28207), False, 'import statistics\n'), ((28231, 28258), 'statistics.variance', 'statistics.variance', (['values'], {}), '(values)\n', (28250, 28258), False, 'import statistics\n'), ((32135, 32154), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32152, 32154), False, 'import time\n'), ((32903, 32922), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (32920, 32922), False, 'import time\n'), ((37644, 37668), 'statistics.stdev', 'statistics.stdev', (['values'], {}), '(values)\n', (37660, 37668), False, 'import statistics\n'), ((37692, 37719), 'statistics.variance', 'statistics.variance', (['values'], {}), '(values)\n', (37711, 37719), False, 'import statistics\n'), ((16272, 16377), 'mlflow.log_metrics', 'mlflow.log_metrics', ([], {'metrics': '{f"{name}_{metric_name}_{head[\'task_name\']}": metric_val}', 'step': 'use_steps'}), '(metrics={f"{name}_{metric_name}_{head[\'task_name\']}":\n metric_val}, step=use_steps)\n', (16290, 16377), False, 'import mlflow\n'), ((39260, 39273), 'pathlib.Path', 'Path', (['savedir'], {}), '(savedir)\n', (39264, 39273), False, 'from pathlib import Path\n'), ((39398, 39421), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (39419, 39421), False, 'import datetime\n'), ((45226, 45239), 'pathlib.Path', 'Path', (['savedir'], {}), '(savedir)\n', (45230, 45239), False, 'from pathlib import Path\n'), ((45365, 45388), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (45386, 45388), False, 'import datetime\n'), ((24689, 24718), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (24716, 24718), False, 'import torch\n'), ((24833, 24862), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (24860, 24862), False, 'import torch\n'), ((24968, 25001), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', 
(24999, 25001), False, 'import torch\n'), ((25100, 25126), 'torch.cuda.memory_cached', 'torch.cuda.memory_cached', ([], {}), '()\n', (25124, 25126), False, 'import torch\n'), ((25229, 25259), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (25257, 25259), False, 'import torch\n'), ((25457, 25486), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (25484, 25486), False, 'import torch\n'), ((25591, 25624), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (25622, 25624), False, 'import torch\n'), ((25722, 25748), 'torch.cuda.memory_cached', 'torch.cuda.memory_cached', ([], {}), '()\n', (25746, 25748), False, 'import torch\n'), ((25850, 25880), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (25878, 25880), False, 'import torch\n'), ((34067, 34096), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (34094, 34096), False, 'import torch\n'), ((34211, 34240), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (34238, 34240), False, 'import torch\n'), ((34346, 34379), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (34377, 34379), False, 'import torch\n'), ((34478, 34504), 'torch.cuda.memory_cached', 'torch.cuda.memory_cached', ([], {}), '()\n', (34502, 34504), False, 'import torch\n'), ((34607, 34637), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (34635, 34637), False, 'import torch\n'), ((34835, 34864), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (34862, 34864), False, 'import torch\n'), ((34969, 35002), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (35000, 35002), False, 'import torch\n'), ((35100, 35126), 'torch.cuda.memory_cached', 'torch.cuda.memory_cached', ([], {}), '()\n', (35124, 35126), False, 'import torch\n'), ((35228, 35258), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (35256, 35258), False, 'import torch\n')]
|
#!/usr/bin/python
import pygame, sys, pygame.mixer, os
sys.path.append('/home/pi/GitHub/T-BOTS/Python')
from pygame.locals import *
from time import sleep, time
import bluetooth as bt
from TBotTools import tbt
from collections import deque
import numpy as np
starttime = time()
# setup for plotting
xdatarange = [200,320]
y_origin = 270
yscale = 50
pts = deque(maxlen=xdatarange[1]-xdatarange[0])
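# rolling buffer holding one (x, value) point per pixel column of the plot area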
for ii in range(xdatarange[0],xdatarange[1]):
pts.appendleft((ii,np.random.rand(1)))
iii = 200
aa = np.zeros((len(pts),2))
aa[:,1]=np.array(pts)[:,1]
aa[:,0]=np.array(range(xdatarange[0],xdatarange[1]))
bb=np.copy(aa)
dirpath = os.path.dirname(os.path.realpath(__file__))+'/Images'
timestart = time()
speedfactor = 0.6
speedlimit = 70
turnspeedlimit = 60
################### Setup Bluetooth #############################
oldvals = [0,0,0,0]
sendcount = 0
#bd_addr = '98:D3:91:FD:46:C9' # use: 'hcitool scan' to scan for your T-Bot address
#bd_addr = '98:D3:32:21:3D:77'
bd_addr = '98:D3:91:FD:46:9C'
bd_addr = '98:D3:51:FD:82:95'# George
port = 1
#btcom = tbt.bt_connect(bd_addr,port,'PyBluez')
btcom = tbt.bt_connect(bd_addr,port,'Socket')
#port = 'COM5'
#port = '/dev/tty.George-DevB'
#baudrate = 38400
#btcom = tbt.bt_connect(bd_addr,port,'PySerial',baudrate)
################### Screen Text Class #############################
class TextPrint(object):
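    """Small helper that renders successive lines of text onto the pygame
    screen, keeping track of the current x/y position and indentation."""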
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 15)
def tprint(self, screen, textString):
textBitmap = self.font.render(textString, True, WHITE)
screen.blit(textBitmap, (self.x, self.y))
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
def abspos(self,screen, textString, pos):
textBitmap = self.font.render(textString, True, WHITE)
screen.blit(textBitmap, pos)
################### Instantiate BT Class #############################
# Define some colors.
BLACK = pygame.Color('black')
WHITE = pygame.Color('white')
GRAY = pygame.Color('gray')
pygame.init()
# Set the width and height of the screen (width, height).
screen = pygame.display.set_mode((350, 550))
logo = pygame.image.load(dirpath+'/logo.png')
bg = pygame.image.load(dirpath+'/hexP2.jpg').convert()
bgG = pygame.image.load(dirpath+'/hexG.jpg').convert()
pygame.display.set_caption("Player 2")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates.
clock = pygame.time.Clock()
# Initialize the joysticks.
pygame.joystick.init()
# Get ready to print.
textPrint = TextPrint()
# -------- Main Program Loop -----------
while not done:
#
# EVENT PROCESSING STEP
#
# Possible joystick actions: JOYAXISMOTION, JOYBALLMOTION, JOYBUTTONDOWN,
# JOYBUTTONUP, JOYHATMOTION
for event in pygame.event.get(): # User did something.
if event.type == pygame.QUIT: # If user clicked close.
done = True # Flag that we are done so we exit this loop.
btcom.connect(0)
print('Connection Closed')
        if event.type == KEYDOWN and event.key == K_q:
            btcom.connect(0)
            print('Connection Closed')
            pygame.display.quit()
            sys.exit()
if btcom.connected():
screen.blit(bg, [0, 0])
else:
tries = 0
        while btcom.connected() < 1 and tries < 10:
            print('Connecting ...')
            screen.blit(bgG, [0, 0])
            pygame.display.flip()
            try:
                print('Try '+str(tries+1)+' of 10')
                btcom.connect(0)
                btcom.connect(1)
            except Exception:
                print('Something went wrong')
            tries += 1
if btcom.connected() < 1:
print('Exiting Program')
pygame.display.quit()
sys.exit()
else:
tries = 0
textPrint.reset()
# Get count of joysticks.
joystick_count = pygame.joystick.get_count()
textPrint.tprint(screen, "Number of joysticks: {}".format(joystick_count))
textPrint.indent()
# For each joystick:
for i in [1]: # If you have multiple joysticks connected, set this index for the one you want to use.
joystick = pygame.joystick.Joystick(i)
joystick.init()
textPrint.tprint(screen, "Joystick {}".format(i))
textPrint.indent()
# Get the name from the OS for the controller/joystick.
name = joystick.get_name()
textPrint.tprint(screen, "Joystick name: {}".format(name))
# Usually axis run in pairs, up/down for one, and left/right for
# the other.
axes = joystick.get_numaxes()
textPrint.tprint(screen, "")
textPrint.tprint(screen, "Number of axes: {}".format(axes))
textPrint.indent()
for i in range(axes):
axis = joystick.get_axis(i)
textPrint.tprint(screen, "Axis {} value: {:>6.3f}".format(i, axis))
axis0 = joystick.get_axis(0)
axis1 = joystick.get_axis(1)
axis2 = joystick.get_axis(2)
axis3 = joystick.get_axis(3)
textPrint.unindent()
textPrint.tprint(screen, "")
buttons = joystick.get_numbuttons()
textPrint.tprint(screen, "Number of buttons: {}".format(buttons))
textPrint.indent()
for i in range(buttons):
button = joystick.get_button(i)
textPrint.tprint(screen,
"Button {:>2} value: {}".format(i, button))
textPrint.unindent()
hats = joystick.get_numhats()
textPrint.tprint(screen, "")
textPrint.tprint(screen, "Number of hats: {}".format(hats))
textPrint.indent()
# Hat position. All or nothing for direction, not a float like
# get_axis(). Position is a tuple of int values (x, y).
for i in range(hats):
hat = joystick.get_hat(i)
textPrint.tprint(screen, "Hat {} value: {}".format(i, str(hat)))
if hat[1] == 1:
speedfactor += 0.1
elif hat[1] == -1:
speedfactor -= 0.1
elif hat[0] == -1:
speedlimit -= 5
elif hat[0] == +1:
speedlimit += 5
if speedlimit >= 100:
speedlimit = 100
if speedlimit <= 0:
speedlimit = 0
if speedfactor >= 5:
speedfactor = 5
if speedfactor <= 0:
speedfactor = 0
textPrint.unindent()
textPrint.tprint(screen, "")
textPrint.tprint(screen, "T-Bot Data")
textPrint.indent()
oldvals = btcom.get_data(oldvals)
#g_angle = (oldvals[3]*20/255)-10 # Conversion from scaled output from T-Bot
g_angle = oldvals[3]
pts.appendleft((iii,g_angle))
iii+=1
pygame.draw.lines(screen, (139,5,139), False, ((xdatarange[0],y_origin+0.5*yscale),(xdatarange[1],y_origin+0.5*yscale)),1)
pygame.draw.lines(screen, (139,5,139), False, ((xdatarange[0],y_origin),(xdatarange[0],y_origin+yscale)),1)
if iii > xdatarange[1]:
iii = xdatarange[0]
aa[:,1]=np.array(pts)[:,1]
try:
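            # autoscale: map the buffered values onto [y_origin, y_origin+yscale]
            # relative to their max; fails (caught below) when the trace is flat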
bb[:,1] = (yscale/((aa[:,1]-aa[:,1].max()).min())*(aa[:,1]-aa[:,1].max()))+y_origin
gdata = tuple(map(tuple, tuple(bb)))
pygame.draw.lines(screen, (255,255,255), False, (gdata),1)
        except Exception:
            pass  # scaling/drawing can fail (e.g. a flat trace gives invalid coords); skip this frame
textPrint.abspos(screen, "{:+.2f}".format(aa[:,1].max()),[xdatarange[0],y_origin-20])
textPrint.abspos(screen, "{:+.2f}".format(aa[:,1].min()),[xdatarange[0],y_origin+yscale+5])
textPrint.tprint(screen, "gyrodata: {}".format(str(oldvals[3])))
textPrint.tprint(screen, "kps: {}".format(str(oldvals[0])))
textPrint.tprint(screen, "kp: {}".format(str(oldvals[1])))
textPrint.tprint(screen, "trim: {}".format(str(oldvals[2])))
textPrint.tprint(screen, "Speed Factor: {}".format(str(speedfactor)))
textPrint.tprint(screen, "Speed Limit: {}%".format(str(speedlimit)))
textPrint.unindent()
#
# ############# Send data #################################
#
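    # Drive commands are sent as str(turn)+str(speed)+'Z', where turn and speed
    # are three-digit values centred on 200 (so '200200Z' means stop); the
    # button commands below reuse the idle '200200' prefix with a letter that
    # selects a parameter change (trim/kps) on the T-Bot.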
if abs(axis0)+abs(axis1)+abs(axis2)+abs(axis3) != 0:
slowfactor = 1+joystick.get_button(7)
turn = 200+int(((axis0+(axis2*0.5))*speedfactor*100/slowfactor))
speed = 200-int(((axis1+(axis3*0.5))*speedfactor*100/slowfactor))
if speed > 200+speedlimit:
speed = 200+speedlimit
if speed < 200-speedlimit:
speed = 200-speedlimit
if turn > 200+turnspeedlimit:
turn = 200+turnspeedlimit
if turn < 200-turnspeedlimit:
turn = 200-turnspeedlimit
cmdwrite = 1
sendstring = str(turn)+str(speed)+'Z'
sendcount = btcom.send_data(sendstring,sendcount)
else:
sendstring = '200200Z'
sendcount = btcom.send_data(sendstring,sendcount)
if joystick.get_button(0):
buttonstring = '200200F' # trim +ve
sendcount = btcom.send_data(buttonstring,sendcount)
elif joystick.get_button(2):
buttonstring = '200200E' # trim -ve
sendcount = btcom.send_data(buttonstring,sendcount)
elif joystick.get_button(1):
buttonstring = '200200B' # kps +ve
sendcount = btcom.send_data(buttonstring,sendcount)
elif joystick.get_button(3):
buttonstring = '200200A' # kps -ve
sendcount = btcom.send_data(buttonstring,sendcount)
elif joystick.get_button(9):
        buttonstring = '200200T' # send 'T' command
sendcount = btcom.send_data(buttonstring,sendcount)
# Go ahead and update the screen with what we've drawn.
screen.blit(logo,(230,420))
pygame.display.flip()
# Limit to 20 frames per second.
clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
[
"pygame.joystick.get_count",
"pygame.event.get",
"pygame.draw.lines",
"pygame.font.Font",
"pygame.display.quit",
"collections.deque",
"sys.path.append",
"TBotTools.tbt.bt_connect",
"numpy.copy",
"pygame.display.set_mode",
"pygame.display.set_caption",
"pygame.joystick.Joystick",
"pygame.quit",
"pygame.joystick.init",
"os.path.realpath",
"pygame.init",
"pygame.image.load",
"pygame.time.Clock",
"sys.exit",
"pygame.Color",
"time.time",
"pygame.display.flip",
"numpy.array",
"numpy.random.rand"
] |
[((55, 103), 'sys.path.append', 'sys.path.append', (['"""/home/pi/GitHub/T-BOTS/Python"""'], {}), "('/home/pi/GitHub/T-BOTS/Python')\n", (70, 103), False, 'import pygame, sys, pygame.mixer, os\n'), ((271, 277), 'time.time', 'time', ([], {}), '()\n', (275, 277), False, 'from time import sleep, time\n'), ((356, 399), 'collections.deque', 'deque', ([], {'maxlen': '(xdatarange[1] - xdatarange[0])'}), '(maxlen=xdatarange[1] - xdatarange[0])\n', (361, 399), False, 'from collections import deque\n'), ((608, 619), 'numpy.copy', 'np.copy', (['aa'], {}), '(aa)\n', (615, 619), True, 'import numpy as np\n'), ((702, 708), 'time.time', 'time', ([], {}), '()\n', (706, 708), False, 'from time import sleep, time\n'), ((1121, 1160), 'TBotTools.tbt.bt_connect', 'tbt.bt_connect', (['bd_addr', 'port', '"""Socket"""'], {}), "(bd_addr, port, 'Socket')\n", (1135, 1160), False, 'from TBotTools import tbt\n'), ((2122, 2143), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (2134, 2143), False, 'import pygame, sys, pygame.mixer, os\n'), ((2152, 2173), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (2164, 2173), False, 'import pygame, sys, pygame.mixer, os\n'), ((2181, 2201), 'pygame.Color', 'pygame.Color', (['"""gray"""'], {}), "('gray')\n", (2193, 2201), False, 'import pygame, sys, pygame.mixer, os\n'), ((2204, 2217), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2215, 2217), False, 'import pygame, sys, pygame.mixer, os\n'), ((2286, 2321), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(350, 550)'], {}), '((350, 550))\n', (2309, 2321), False, 'import pygame, sys, pygame.mixer, os\n'), ((2329, 2369), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/logo.png')"], {}), "(dirpath + '/logo.png')\n", (2346, 2369), False, 'import pygame, sys, pygame.mixer, os\n'), ((2480, 2518), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Player 2"""'], {}), "('Player 2')\n", (2506, 2518), False, 'import pygame, sys, pygame.mixer, os\n'), ((2635, 2654), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2652, 2654), False, 'import pygame, sys, pygame.mixer, os\n'), ((2684, 2706), 'pygame.joystick.init', 'pygame.joystick.init', ([], {}), '()\n', (2704, 2706), False, 'import pygame, sys, pygame.mixer, os\n'), ((10391, 10404), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (10402, 10404), False, 'import pygame, sys, pygame.mixer, os\n'), ((533, 546), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (541, 546), True, 'import numpy as np\n'), ((2989, 3007), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3005, 3007), False, 'import pygame, sys, pygame.mixer, os\n'), ((4169, 4196), 'pygame.joystick.get_count', 'pygame.joystick.get_count', ([], {}), '()\n', (4194, 4196), False, 'import pygame, sys, pygame.mixer, os\n'), ((10199, 10220), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (10218, 10220), False, 'import pygame, sys, pygame.mixer, os\n'), ((652, 678), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (668, 678), False, 'import pygame, sys, pygame.mixer, os\n'), ((1444, 1470), 'pygame.font.Font', 'pygame.font.Font', (['None', '(15)'], {}), '(None, 15)\n', (1460, 1470), False, 'import pygame, sys, pygame.mixer, os\n'), ((2373, 2414), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/hexP2.jpg')"], {}), "(dirpath + '/hexP2.jpg')\n", (2390, 2414), False, 'import pygame, sys, pygame.mixer, os\n'), ((2429, 2469), 'pygame.image.load', 'pygame.image.load', (["(dirpath + 
'/hexG.jpg')"], {}), "(dirpath + '/hexG.jpg')\n", (2446, 2469), False, 'import pygame, sys, pygame.mixer, os\n'), ((3316, 3337), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (3335, 3337), False, 'import pygame, sys, pygame.mixer, os\n'), ((3346, 3356), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3354, 3356), False, 'import pygame, sys, pygame.mixer, os\n'), ((4451, 4478), 'pygame.joystick.Joystick', 'pygame.joystick.Joystick', (['i'], {}), '(i)\n', (4475, 4478), False, 'import pygame, sys, pygame.mixer, os\n'), ((7106, 7246), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '(139, 5, 139)', '(False)', '((xdatarange[0], y_origin + 0.5 * yscale), (xdatarange[1], y_origin + 0.5 *\n yscale))', '(1)'], {}), '(screen, (139, 5, 139), False, ((xdatarange[0], y_origin +\n 0.5 * yscale), (xdatarange[1], y_origin + 0.5 * yscale)), 1)\n', (7123, 7246), False, 'import pygame, sys, pygame.mixer, os\n'), ((7237, 7356), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '(139, 5, 139)', '(False)', '((xdatarange[0], y_origin), (xdatarange[0], y_origin + yscale))', '(1)'], {}), '(screen, (139, 5, 139), False, ((xdatarange[0], y_origin),\n (xdatarange[0], y_origin + yscale)), 1)\n', (7254, 7356), False, 'import pygame, sys, pygame.mixer, os\n'), ((467, 484), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (481, 484), True, 'import numpy as np\n'), ((3634, 3655), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3653, 3655), False, 'import pygame, sys, pygame.mixer, os\n'), ((3982, 4003), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (4001, 4003), False, 'import pygame, sys, pygame.mixer, os\n'), ((4016, 4026), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4024, 4026), False, 'import pygame, sys, pygame.mixer, os\n'), ((7425, 7438), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (7433, 7438), True, 'import numpy as np\n'), ((7616, 7675), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '(255, 255, 255)', '(False)', 'gdata', '(1)'], {}), '(screen, (255, 255, 255), False, gdata, 1)\n', (7633, 7675), False, 'import pygame, sys, pygame.mixer, os\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import numpy as np
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
help = "Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (15, 15), 0)
cv2.imshow("Image", image)
edged = cv2.Canny(blurred, 50, 100)
cv2.imshow("Edges", edged)
"""
cv2.findContours
the first param: edged image
the second param: the type of contours (
cv2.RETR_EXTERNAL: retrieve only the outermost contours;
cv2.RETR_LIST: grab all contours
      cv2.RETR_CCOMP: retrieve all contours in a two-level hierarchy;
      cv2.RETR_TREE: retrieve all contours in a full nested hierarchy
)
the third param: the contour approximation method, e.g. cv2.CHAIN_APPROX_SIMPLE
"""
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print("I count {} coins in this image".format(len(cnts)))
coins = image.copy()
cv2.drawContours(coins, cnts, -1, (0, 255, 0), 2)
cv2.imshow("Coins", coins)
cv2.waitKey(0)
# Crop each individual coin from the image
for (i, c) in enumerate(cnts):
# boundingRect function finds the "enclosing box" that contour will fit into
(x, y, w, h) = cv2.boundingRect(c)
print("Coin #{}".format(i + 1))
coin = image[y:y + h, x: x + w]
cv2.imshow("Coin", coin)
mask = np.zeros(image.shape[:2], dtype = "uint8")
((centerX, centerY), radius) = cv2.minEnclosingCircle(c)
cv2.circle(mask, (int(centerX), int(centerY)), int(radius),
255, -1)
mask = mask[y:y + h, x:x + w]
cv2.imshow("Masked Coin", cv2.bitwise_and(coin, coin, mask = mask))
cv2.waitKey(0)
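# Hedged extra sketch (not in the original script): compare retrieval modes on
# the same edge map. RETR_LIST also keeps nested/inner contours, so its count
# is always >= the RETR_EXTERNAL count.
(all_cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
print("RETR_LIST finds {} contours vs {} outer contours".format(len(all_cnts), len(cnts)))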
|
[
"cv2.GaussianBlur",
"cv2.Canny",
"cv2.boundingRect",
"cv2.minEnclosingCircle",
"argparse.ArgumentParser",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.zeros",
"cv2.imread",
"cv2.drawContours",
"cv2.imshow"
] |
[((98, 123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (121, 123), False, 'import argparse\n'), ((244, 269), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (254, 269), False, 'import cv2\n'), ((277, 316), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (289, 316), False, 'import cv2\n'), ((327, 362), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(15, 15)', '(0)'], {}), '(gray, (15, 15), 0)\n', (343, 362), False, 'import cv2\n'), ((363, 389), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'image'], {}), "('Image', image)\n", (373, 389), False, 'import cv2\n'), ((399, 426), 'cv2.Canny', 'cv2.Canny', (['blurred', '(50)', '(100)'], {}), '(blurred, 50, 100)\n', (408, 426), False, 'import cv2\n'), ((427, 453), 'cv2.imshow', 'cv2.imshow', (['"""Edges"""', 'edged'], {}), "('Edges', edged)\n", (437, 453), False, 'import cv2\n'), ((918, 967), 'cv2.drawContours', 'cv2.drawContours', (['coins', 'cnts', '(-1)', '(0, 255, 0)', '(2)'], {}), '(coins, cnts, -1, (0, 255, 0), 2)\n', (934, 967), False, 'import cv2\n'), ((968, 994), 'cv2.imshow', 'cv2.imshow', (['"""Coins"""', 'coins'], {}), "('Coins', coins)\n", (978, 994), False, 'import cv2\n'), ((995, 1009), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1006, 1009), False, 'import cv2\n'), ((1185, 1204), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1201, 1204), False, 'import cv2\n'), ((1282, 1306), 'cv2.imshow', 'cv2.imshow', (['"""Coin"""', 'coin'], {}), "('Coin', coin)\n", (1292, 1306), False, 'import cv2\n'), ((1319, 1359), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {'dtype': '"""uint8"""'}), "(image.shape[:2], dtype='uint8')\n", (1327, 1359), True, 'import numpy as np\n'), ((1397, 1422), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (1419, 1422), False, 'import cv2\n'), ((1614, 1628), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1625, 1628), False, 'import cv2\n'), ((1568, 1606), 'cv2.bitwise_and', 'cv2.bitwise_and', (['coin', 'coin'], {'mask': 'mask'}), '(coin, coin, mask=mask)\n', (1583, 1606), False, 'import cv2\n')]
|
import numpy as _np
from numpy import ndarray as _ndarray
def accu_len_from_origin(tile_ind:int, tile_mu:float, div_RZ:_ndarray) -> float:
div_tile_len = _np.linalg.norm(div_RZ[1:]-div_RZ[:-1], axis=-1)
assert(tile_ind>=0 and tile_ind<=len(div_tile_len)-1)
return _np.sum(div_tile_len[:tile_ind]) + tile_mu*div_tile_len[tile_ind]
def accu_len_to_tile(accu_len:float, div_RZ:_ndarray) -> (int, float):
seg_len_accu_array = seg_len_accu(div_RZ)
assert(seg_len_accu_array[-1]>accu_len)
for i in range(len(seg_len_accu_array)):
if accu_len<seg_len_accu_array[i]:
tile_ind = i
if tile_ind>0: tile_len = seg_len_accu_array[i] - seg_len_accu_array[i-1]
elif tile_ind==0: tile_len = seg_len_accu_array[0]
break
if tile_ind > 0: tile_mu = (accu_len-seg_len_accu_array[tile_ind-1]) / tile_len
elif tile_ind==0: tile_mu = accu_len / tile_len
else: raise RuntimeError("Unreasonable tile_ind, please check whether your accu_len is right.")
return tile_ind, tile_mu
def tile_mu_to_RZ(tile_ind:int, tile_mu:float, div_RZ:_ndarray) -> _ndarray:
tile_init_RZ = div_RZ[tile_ind]
tile_end_RZ = div_RZ[tile_ind+1]
return tile_init_RZ + tile_mu*(tile_end_RZ-tile_init_RZ)
def accu_len_to_RZ(accu_len:float, div_RZ:_ndarray) -> _ndarray:
tile_ind, tile_mu = accu_len_to_tile(accu_len, div_RZ)
return tile_mu_to_RZ(tile_ind, tile_mu, div_RZ)
# Suppose we have a tile segment starting at $(x_1, y_1)$ and ending at $(x_2, y_2)$, and we ask which point on it is closest to $(x_0, y_0)$.
# The tile segment is parameterized as
# $$x = x_1 + \mu(x_2 - x_1)$$
# $$y = y_1 + \mu(y_2 - y_1)$$
# The point closest to $(x_0, y_0)$ must have a $\mu$ satisfying the orthogonality condition
# $$(x_1-x_0+\mu(x_2-x_1), y_1-y_0+\mu(y_2-y_1)) \cdot (x_2-x_1, y_2-y_1)=0,$$
# which gives
# $$\mu= \frac{-(x_1-x_0)(x_2-x_1)-(y_1-y_0)(y_2-y_1)}{(x_2-x_1)^2+(y_2-y_1)^2}$$
# We then do the same calculation for every tile of the divertor edge, measure the distance from each projected point to $(x_0, y_0)$, and keep the minimum.
def nearest_tile_ind_mu(point:_ndarray, div_RZ:_ndarray) -> (int, float):
point = _np.asarray(point)
mu_array = - (div_RZ[:-1,0]-point[0]) * (div_RZ[1:,0]-div_RZ[:-1,0]) - (div_RZ[:-1,1]-point[1]) * (div_RZ[1:,1]-div_RZ[:-1,1])
mu_array/= (div_RZ[1:,0]-div_RZ[:-1,0])**2 + (div_RZ[1:,1]-div_RZ[:-1,1])**2
mu_array[mu_array>1] = 1
mu_array[mu_array<0] = 0
tile_nearest_point_RZ = div_RZ[:-1] + mu_array[:,None]*(div_RZ[1:]-div_RZ[:-1])
tile_distance = tile_nearest_point_RZ - point[None,:]
tile_distance = _np.linalg.norm(tile_distance, axis=1)
tile_ind = _np.argmin(tile_distance)
return tile_ind, mu_array[tile_ind]
def nearest_point_accu_len(point:_ndarray, div_RZ:_ndarray) -> float:
tile_ind, tile_mu = nearest_tile_ind_mu(point, div_RZ)
return accu_len_from_origin(tile_ind, tile_mu, div_RZ)
def seg_len_accu(div_RZ:_ndarray) -> _ndarray:
seg_len = _np.linalg.norm(div_RZ[1:,:] - div_RZ[:-1,:], axis=1)
seg_len_accu_array = _np.zeros_like(seg_len)
seg_len_accu_array[0] = seg_len[0]
for i in range(len(seg_len)-1):
seg_len_accu_array[i+1] += seg_len_accu_array[i]+seg_len[i+1]
return seg_len_accu_array
if __name__ == "__main__":
pass
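    # Hedged demo on a hypothetical two-segment polyline (not from the
    # original module): round-trip an accumulated length through tile
    # coordinates and project a point onto the polyline.
    demo_div_RZ = _np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
    ind, mu = accu_len_to_tile(1.5, demo_div_RZ)
    print(ind, mu)                                   # -> 1 0.5 (halfway along tile 1)
    print(accu_len_to_RZ(1.5, demo_div_RZ))          # -> [1.  0.5]
    print(nearest_point_accu_len([0.5, -1.0], demo_div_RZ))  # -> 0.5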
|
[
"numpy.zeros_like",
"numpy.sum",
"numpy.asarray",
"numpy.argmin",
"numpy.linalg.norm"
] |
[((159, 209), 'numpy.linalg.norm', '_np.linalg.norm', (['(div_RZ[1:] - div_RZ[:-1])'], {'axis': '(-1)'}), '(div_RZ[1:] - div_RZ[:-1], axis=-1)\n', (174, 209), True, 'import numpy as _np\n'), ((2241, 2259), 'numpy.asarray', '_np.asarray', (['point'], {}), '(point)\n', (2252, 2259), True, 'import numpy as _np\n'), ((2692, 2730), 'numpy.linalg.norm', '_np.linalg.norm', (['tile_distance'], {'axis': '(1)'}), '(tile_distance, axis=1)\n', (2707, 2730), True, 'import numpy as _np\n'), ((2746, 2771), 'numpy.argmin', '_np.argmin', (['tile_distance'], {}), '(tile_distance)\n', (2756, 2771), True, 'import numpy as _np\n'), ((3063, 3118), 'numpy.linalg.norm', '_np.linalg.norm', (['(div_RZ[1:, :] - div_RZ[:-1, :])'], {'axis': '(1)'}), '(div_RZ[1:, :] - div_RZ[:-1, :], axis=1)\n', (3078, 3118), True, 'import numpy as _np\n'), ((3142, 3165), 'numpy.zeros_like', '_np.zeros_like', (['seg_len'], {}), '(seg_len)\n', (3156, 3165), True, 'import numpy as _np\n'), ((277, 309), 'numpy.sum', '_np.sum', (['div_tile_len[:tile_ind]'], {}), '(div_tile_len[:tile_ind])\n', (284, 309), True, 'import numpy as _np\n')]
|
# This program creates visualizations of the Iris dataset
# and exports the plot images to png files
# Author:<NAME>
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Read in the original iris.data file and print its content
iris = pd.read_csv('iris_ds.data', sep=',', header = None)
print('The original file is with no column names:\n', iris)
# This content has no header/column names only the raw data listed by Species
# Add column names to the imported datafile by creating the list [cols]
cols = ['Sepal Length', 'Sepal Width','Petal Length','Petal Width', 'Species']
# Create a new dataframe with the column names using the names argument
iris2 = pd.read_csv('iris_ds.data', sep=',', names = cols)
print("The dataframe now includes the column names: \n",iris2)
# HISTOGRAMS for each attribute variable
# COMMENTED OUT THIS SECTION:
# This solution is for individually plotting a histogram for each feature
#plt.hist(iris2["Sepal Length"])
#plt.xlabel("cm")
#plt.ylabel("Count")
#plt.title("Sepal Length")
#plt.savefig("Hist_sepal_length.png")
#plt.close()
#plt.hist(iris2["Sepal Width"])
#plt.xlabel("cm")
#plt.ylabel("Count")
#plt.title("Sepal Width")
#plt.savefig("Hist_sepal_width.png")
#plt.close()
#plt.hist(iris2["Petal Length"])
#plt.xlabel("cm")
#plt.ylabel("Count")
#plt.title("Petal Length")
#plt.savefig("Hist_petal_length.png")
#plt.close()
#plt.hist(iris2["Petal Width"])
#plt.xlabel("cm")
#plt.ylabel("Count")
#plt.title("Petal Width")
#plt.savefig("Hist_petal_width.png")
#plt.close()
# DEFINING a function to do the plotting for all 4 features and outputting into a subplot
def histplot(x):
plt.hist(iris2[x])
plt.title(x, size = 14, c="Blue") #setting the subtitles for each subplot
plt.xlabel("cm", size=10) #Labelling the axis
plt.ylabel("Count", size=10)
plt.xticks(size = 10) #Setting the text size for the axis
plt.yticks(size = 10)
plt.subplots_adjust(left=0.1,
bottom=0.1,
right=0.9,
top=0.9,
wspace=0.5,
hspace=1.0) #adjusts spacing to make the subplots more legible
# APPLYING THE FUNCTION TO PLOT INDIVIDUAL HISTOGRAMS
plt.subplot(2,2,1) #set the number of (rows, columns, location) of each plot Top left
histplot("Sepal Length")
plt.subplot(2,2,2) #Top right
histplot("Sepal Width")
plt.subplot(2,2,3) #Bottom left
histplot("Petal Length")
plt.subplot(2,2,4) #Bottom right
histplot("Petal Width")
plt.savefig("Histogram_by features.png") # save output image to png file
plt.close() # Close the plotting
# SCATTER plots for each pair of the variables using Seaborn
# DEFINING A FUNCTION TO PLOT THE DESIRED GRAPH
def scatter(x, y):
sns.set_context("notebook")
custom_palette=["red","green","purple"]
sns.set_palette(custom_palette)
g=sns.relplot(data=iris2, kind="scatter", hue="Species", x=x, y=y, height=5)
g.set(xlabel=x, ylabel=y)
sns.move_legend(g, loc="upper center", bbox_to_anchor=(.5, .9), ncol = 3)
plt.title('Iris Data Scatter Plot', color = 'blue', size = 15, y = 1.2)
plt.tight_layout()
# APPLYING THE FUNCTION TO PLOT INDIVIDUAL SCATTER PLOTS
scatter("Sepal Length", "Sepal Width")
plt.savefig('Iris_ScatterPlot1.png')
plt.close()
scatter("Petal Length", "Petal Width")
plt.savefig('Iris_ScatterPlot2.png')
plt.close()
scatter("Petal Length", "Sepal Length")
plt.savefig('Iris_ScatterPlot3.png')
plt.close()
scatter("Sepal Width", "Petal Width")
plt.savefig('Iris_ScatterPlot4.png')
plt.close()
# MATRIX OF SCATTER/HISTOGRAM PLOTS using Seaborn PairGrid solution
# Provides a faster and more comprehensive summary of the individual pair plots
# Helps understanding the data through clear visualization
# This is to separate the attribute variables into a list
features = ['Sepal Length', 'Sepal Width','Petal Length','Petal Width']
# Use of PAIRGRID setting diagonal plots to be histograms which will provide a distribution plot of each variable
# Chose scatterplot to plot each individual data by the species of the flower
g = sns.PairGrid(data=iris2, hue = "Species", vars = features, height=5, aspect=5/5) #hue is to group the data by different colours for the 3 species
g.map_diag(sns.histplot) #diagonal plots to be histograms
g.map_offdiag(sns.scatterplot) #other plots to be scatter plots
g.add_legend() # add legend to understand the colours
g.fig.suptitle("Iris Data PairGrid Plot") #add title to the pairplot
plt.tight_layout()
plt.savefig('Iris_Features_PairPlot.png') #save image to png file
plt.close()
# BOXPLOT presentation of individual species
#
# Initially the individual boxplots were generated by repeating the same commands
# in 4 near-identical blocks.
# I replaced them with a boxplot-plotting function and a for loop that iterates
# through the list of features and saves each plot as a figure named
# Boxplot_<feature>.png
# DEFINING BOXPLOT FUNCTION
def boxplot(y):
g = sns.catplot(data=iris2, kind="box", x="Species",y=y)
plt.title("Iris Boxplot", c="Blue", size=15)
plt.xticks(size=10)
plt.yticks(size=10)
plt.tight_layout()
plt.savefig("Boxplot_"+y+".png")
plt.close()
# USE OF FOR LOOP WHEN CALLING BOXPLOT() FUNCTION
index = 0
for feature in features:
index += 1
y = feature
boxplot(y)
# COMMENTED OUT THIS SECTION AS REPLACED BY FUNCTION AND FOR LOOP
# g=sns.catplot(data=iris2, kind="box", x="Species",y=features[0])
# plt.title("Iris Boxplot", c="Blue", size=15)
# plt.xticks(size=10)
# plt.yticks(size=10)
# plt.tight_layout()
# plt.savefig('Iris_BP_by_Species1.png')
# plt.close()
# g=sns.catplot(data=iris2, kind="box", x="Species",y=features[1])
# plt.title("Iris Boxplot 2", c="Blue", size=15)
# plt.xticks(size=10)
# plt.yticks(size=10)
# plt.tight_layout()
# plt.savefig('Iris_BP_by_Species2.png')
# plt.close()
# g=sns.catplot(data=iris2, kind="box", x="Species",y=features[2])
# plt.title("Iris Boxplot 3", c="Blue", size=15)
# plt.xticks(size=10)
# plt.yticks(size=10)
# plt.tight_layout()
# plt.savefig('Iris_BP_by_Species3.png')
# plt.close()
# g=sns.catplot(data=iris2, kind="box", x="Species",y=features[3])
# plt.title("Iris Boxplot 4", c="Blue", size=15)
# plt.xticks(size=10)
# plt.yticks(size=10)
# plt.tight_layout()
# plt.savefig('Iris_BP_by_Species4.png')
# plt.close()
# ANOTHER WAY TO PREVIOUS SOLUTION FOR BOXPLOT VISUALIZATION
# By using the pd.melt() function available in Pandas which allows to
# Change the format of the dataframe and differentiate between identifier variables (Iris Species)
# and measured variables (Iris Features)
# Creating new dataframe "iris3" to use for visualization purposes
iris3 = pd.melt(iris2, id_vars=["Species"], value_vars=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"])
# print(iris3.loc[::50]) - printed to see how the new df looked like, excluding as it is not required for this exercise
# print(iris3.head()) - printed to see first 5 lines, excluding as it is not required for this exercise
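# Hedged illustration (assuming the standard 150-row UCI iris file): iris3 is
# now in long format with 600 rows (150 samples x 4 features) and the columns
# ["Species", "variable", "value"], e.g. row 0 is (Iris-setosa, Sepal Length, 5.1)
assert list(iris3.columns) == ["Species", "variable", "value"]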
# BOXPLOT VISUALIZATION using "iris3" dataframe
sns.set_context("notebook") #set the scale of the plot
sns.set_style("whitegrid") # set gridlines to run across the graph
ax = sns.boxplot(data=iris3, x="variable", y="value", hue="Species") # plotting data as boxplots in one plot
# Move the legend off the plot "ax" using the bbox_to_anchor function, arranging the three species in one row (ncol=3)
sns.move_legend(ax, "upper center", bbox_to_anchor=(.5, 1.1), ncol = 3, title=None, frameon = False)
# Setting the title of the plot with offset of "y" to space it away from the graph
sns.despine()
plt.title("Iris - Features Side-by-Side Boxplots", y=1.1)
# using the tight layout function to ensure that all data/labels are included in the plot
plt.tight_layout()
plt.yticks(np.arange(0, 10, step=1))
plt.savefig('Iris_SidebySide_Boxplot.png')
plt.close()
# HEATMAP is useful when trying to establish correlation between variables.
# Used Seaborn to generate the heatmap, with formatting to make it more presentable
# Note: "Species" is non-numeric, so on pandas >= 2.0 this needs iris2.corr(numeric_only=True)
sns.heatmap(iris2.corr(), annot=True, cmap="Purples", linewidths=1.5, linecolor="white", xticklabels=True, yticklabels=True)
sns.set_context("notebook", font_scale=1)
plt.title("Iris Attributes - Heatmap", c="Blue", size=15)
plt.xticks(size=10)
plt.yticks(rotation=90, size=10) # rotation of ylabel text as it was not showing on the plot with the full text length
plt.savefig('Iris_Heatmap.png')
plt.close()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"numpy.arange",
"seaborn.relplot",
"seaborn.move_legend",
"seaborn.PairGrid",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"seaborn.set_context",
"seaborn.set_style",
"seaborn.boxplot",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"pandas.melt",
"seaborn.set_palette",
"matplotlib.pyplot.subplot",
"seaborn.catplot",
"matplotlib.pyplot.hist",
"seaborn.despine",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((360, 409), 'pandas.read_csv', 'pd.read_csv', (['"""iris_ds.data"""'], {'sep': '""","""', 'header': 'None'}), "('iris_ds.data', sep=',', header=None)\n", (371, 409), True, 'import pandas as pd\n'), ((784, 832), 'pandas.read_csv', 'pd.read_csv', (['"""iris_ds.data"""'], {'sep': '""","""', 'names': 'cols'}), "('iris_ds.data', sep=',', names=cols)\n", (795, 832), True, 'import pandas as pd\n'), ((2366, 2386), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2377, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2522), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2513, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2582, 2602), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2593, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2685), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (2676, 2685), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2788), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Histogram_by features.png"""'], {}), "('Histogram_by features.png')\n", (2759, 2788), True, 'import matplotlib.pyplot as plt\n'), ((2824, 2835), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2833, 2835), True, 'import matplotlib.pyplot as plt\n'), ((3520, 3556), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_ScatterPlot1.png"""'], {}), "('Iris_ScatterPlot1.png')\n", (3531, 3556), True, 'import matplotlib.pyplot as plt\n'), ((3557, 3568), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3566, 3568), True, 'import matplotlib.pyplot as plt\n'), ((3609, 3645), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_ScatterPlot2.png"""'], {}), "('Iris_ScatterPlot2.png')\n", (3620, 3645), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3657), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3655, 3657), True, 'import matplotlib.pyplot as plt\n'), ((3699, 3735), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_ScatterPlot3.png"""'], {}), "('Iris_ScatterPlot3.png')\n", (3710, 3735), True, 'import matplotlib.pyplot as plt\n'), ((3736, 3747), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3745, 3747), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3823), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_ScatterPlot4.png"""'], {}), "('Iris_ScatterPlot4.png')\n", (3798, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3824, 3835), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3833, 3835), True, 'import matplotlib.pyplot as plt\n'), ((4373, 4451), 'seaborn.PairGrid', 'sns.PairGrid', ([], {'data': 'iris2', 'hue': '"""Species"""', 'vars': 'features', 'height': '(5)', 'aspect': '(5 / 5)'}), "(data=iris2, hue='Species', vars=features, height=5, aspect=5 / 5)\n", (4385, 4451), True, 'import seaborn as sns\n'), ((4775, 4793), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4791, 4793), True, 'import matplotlib.pyplot as plt\n'), ((4794, 4835), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_Features_PairPlot.png"""'], {}), "('Iris_Features_PairPlot.png')\n", (4805, 4835), True, 'import matplotlib.pyplot as plt\n'), ((4862, 4873), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4871, 4873), True, 'import matplotlib.pyplot as plt\n'), ((6994, 7108), 'pandas.melt', 'pd.melt', (['iris2'], {'id_vars': "['Species']", 'value_vars': "['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width']"}), 
"(iris2, id_vars=['Species'], value_vars=['Sepal Length',\n 'Sepal Width', 'Petal Length', 'Petal Width'])\n", (7001, 7108), True, 'import pandas as pd\n'), ((7381, 7408), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (7396, 7408), True, 'import seaborn as sns\n'), ((7440, 7466), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (7453, 7466), True, 'import seaborn as sns\n'), ((7520, 7583), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'iris3', 'x': '"""variable"""', 'y': '"""value"""', 'hue': '"""Species"""'}), "(data=iris3, x='variable', y='value', hue='Species')\n", (7531, 7583), True, 'import seaborn as sns\n'), ((7725, 7826), 'seaborn.move_legend', 'sns.move_legend', (['ax', '"""upper center"""'], {'bbox_to_anchor': '(0.5, 1.1)', 'ncol': '(3)', 'title': 'None', 'frameon': '(False)'}), "(ax, 'upper center', bbox_to_anchor=(0.5, 1.1), ncol=3,\n title=None, frameon=False)\n", (7740, 7826), True, 'import seaborn as sns\n'), ((7910, 7923), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (7921, 7923), True, 'import seaborn as sns\n'), ((7924, 7981), 'matplotlib.pyplot.title', 'plt.title', (['"""Iris - Features Side-by-Side Boxplots"""'], {'y': '(1.1)'}), "('Iris - Features Side-by-Side Boxplots', y=1.1)\n", (7933, 7981), True, 'import matplotlib.pyplot as plt\n'), ((8072, 8090), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8088, 8090), True, 'import matplotlib.pyplot as plt\n'), ((8128, 8170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_SidebySide_Boxplot.png"""'], {}), "('Iris_SidebySide_Boxplot.png')\n", (8139, 8170), True, 'import matplotlib.pyplot as plt\n'), ((8171, 8182), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8180, 8182), True, 'import matplotlib.pyplot as plt\n'), ((8465, 8506), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1)'}), "('notebook', font_scale=1)\n", (8480, 8506), True, 'import seaborn as sns\n'), ((8507, 8564), 'matplotlib.pyplot.title', 'plt.title', (['"""Iris Attributes - Heatmap"""'], {'c': '"""Blue"""', 'size': '(15)'}), "('Iris Attributes - Heatmap', c='Blue', size=15)\n", (8516, 8564), True, 'import matplotlib.pyplot as plt\n'), ((8565, 8584), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(10)'}), '(size=10)\n', (8575, 8584), True, 'import matplotlib.pyplot as plt\n'), ((8585, 8617), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'rotation': '(90)', 'size': '(10)'}), '(rotation=90, size=10)\n', (8595, 8617), True, 'import matplotlib.pyplot as plt\n'), ((8704, 8735), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Iris_Heatmap.png"""'], {}), "('Iris_Heatmap.png')\n", (8715, 8735), True, 'import matplotlib.pyplot as plt\n'), ((8736, 8747), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8745, 8747), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1778), 'matplotlib.pyplot.hist', 'plt.hist', (['iris2[x]'], {}), '(iris2[x])\n', (1768, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1814), 'matplotlib.pyplot.title', 'plt.title', (['x'], {'size': '(14)', 'c': '"""Blue"""'}), "(x, size=14, c='Blue')\n", (1792, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1888), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""cm"""'], {'size': '(10)'}), "('cm', size=10)\n", (1873, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1951), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {'size': '(10)'}), "('Count', size=10)\n", (1933, 1951), True, 
'import matplotlib.pyplot as plt\n'), ((1956, 1975), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(10)'}), '(size=10)\n', (1966, 1975), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2051), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(10)'}), '(size=10)\n', (2042, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2147), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.1)', 'right': '(0.9)', 'top': '(0.9)', 'wspace': '(0.5)', 'hspace': '(1.0)'}), '(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5,\n hspace=1.0)\n', (2077, 2147), True, 'import matplotlib.pyplot as plt\n'), ((3023, 3050), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (3038, 3050), True, 'import seaborn as sns\n'), ((3099, 3130), 'seaborn.set_palette', 'sns.set_palette', (['custom_palette'], {}), '(custom_palette)\n', (3114, 3130), True, 'import seaborn as sns\n'), ((3137, 3211), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'iris2', 'kind': '"""scatter"""', 'hue': '"""Species"""', 'x': 'x', 'y': 'y', 'height': '(5)'}), "(data=iris2, kind='scatter', hue='Species', x=x, y=y, height=5)\n", (3148, 3211), True, 'import seaborn as sns\n'), ((3246, 3319), 'seaborn.move_legend', 'sns.move_legend', (['g'], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, 0.9)', 'ncol': '(3)'}), "(g, loc='upper center', bbox_to_anchor=(0.5, 0.9), ncol=3)\n", (3261, 3319), True, 'import seaborn as sns\n'), ((3324, 3389), 'matplotlib.pyplot.title', 'plt.title', (['"""Iris Data Scatter Plot"""'], {'color': '"""blue"""', 'size': '(15)', 'y': '(1.2)'}), "('Iris Data Scatter Plot', color='blue', size=15, y=1.2)\n", (3333, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3418), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3416, 3418), True, 'import matplotlib.pyplot as plt\n'), ((5275, 5328), 'seaborn.catplot', 'sns.catplot', ([], {'data': 'iris2', 'kind': '"""box"""', 'x': '"""Species"""', 'y': 'y'}), "(data=iris2, kind='box', x='Species', y=y)\n", (5286, 5328), True, 'import seaborn as sns\n'), ((5332, 5376), 'matplotlib.pyplot.title', 'plt.title', (['"""Iris Boxplot"""'], {'c': '"""Blue"""', 'size': '(15)'}), "('Iris Boxplot', c='Blue', size=15)\n", (5341, 5376), True, 'import matplotlib.pyplot as plt\n'), ((5381, 5400), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(10)'}), '(size=10)\n', (5391, 5400), True, 'import matplotlib.pyplot as plt\n'), ((5405, 5424), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(10)'}), '(size=10)\n', (5415, 5424), True, 'import matplotlib.pyplot as plt\n'), ((5429, 5447), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5445, 5447), True, 'import matplotlib.pyplot as plt\n'), ((5452, 5488), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Boxplot_' + y + '.png')"], {}), "('Boxplot_' + y + '.png')\n", (5463, 5488), True, 'import matplotlib.pyplot as plt\n'), ((5489, 5500), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5498, 5500), True, 'import matplotlib.pyplot as plt\n'), ((8102, 8126), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {'step': '(1)'}), '(0, 10, step=1)\n', (8111, 8126), True, 'import numpy as np\n')]
|
from typing import Tuple
import numpy as np
def camera_from_world_transform(d: float = 1.0) -> np.ndarray:
"""Define a transformation matrix in homogeneous coordinates that
transforms coordinates from world space to camera space, according
to the coordinate systems in Question 1.
Args:
d (float, optional): Total distance of displacement between world and camera
origins. Will always be greater than or equal to zero. Defaults to 1.0.
Returns:
T (np.ndarray): Left-hand transformation matrix, such that c = Tw
for world coordinate w and camera coordinate c as column vectors.
Shape = (4,4) where 4 means 3D+1 for homogeneous.
"""
T = np.eye(4)
# YOUR CODE HERE
T = np.array([
[-1/np.sqrt(2), 0, 1/np.sqrt(2), 0],
[0, 1, 0, 0],
[-1/np.sqrt(2), 0, -1/np.sqrt(2), d],
[0, 0, 0, 1]
])
# END YOUR CODE
assert T.shape == (4, 4)
return T
def apply_transform(T: np.ndarray, points: np.ndarray) -> Tuple[np.ndarray]:
"""Apply a transformation matrix to a set of points.
Hint: You'll want to first convert all of the points to homogeneous coordinates.
Each point in the (3,N) shape edges is a length 3 vector for x, y, and z, so
appending a 1 after z to each point will make this homogeneous coordinates.
You shouldn't need any loops for this function.
Args:
T (np.ndarray):
Left-hand transformation matrix, such that c = Tw
for world coordinate w and camera coordinate c as column vectors.
Shape = (4,4) where 4 means 3D+1 for homogeneous.
points (np.ndarray):
Shape = (3,N) where 3 means 3D and N is the number of points to transform.
Returns:
points_transformed (np.ndarray):
Transformed points.
Shape = (3,N) where 3 means 3D and N is the number of points.
"""
N = points.shape[1]
assert points.shape == (3, N)
# You'll replace this!
points_transformed = np.vstack([points, np.ones(N)])
# YOUR CODE HERE
points_transformed = T @ points_transformed
points_transformed = points_transformed[:-1, :]
# END YOUR CODE
assert points_transformed.shape == (3, N)
return points_transformed
def intersection_from_lines(
a_0: np.ndarray, a_1: np.ndarray, b_0: np.ndarray, b_1: np.ndarray
) -> np.ndarray:
"""Find the intersection of two lines (infinite length), each defined by a
pair of points.
Args:
a_0 (np.ndarray): First point of first line; shape `(2,)`.
a_1 (np.ndarray): Second point of first line; shape `(2,)`.
b_0 (np.ndarray): First point of second line; shape `(2,)`.
b_1 (np.ndarray): Second point of second line; shape `(2,)`.
Returns:
np.ndarray: the intersection of the two lines definied by (a0, a1)
and (b0, b1).
"""
# Validate inputs
assert a_0.shape == a_1.shape == b_0.shape == b_1.shape == (2,)
    assert a_0.dtype == a_1.dtype == b_0.dtype == b_1.dtype == np.float64  # np.float was removed in NumPy 1.24
# Intersection point between lines
out = np.zeros(2)
# YOUR CODE HERE
line_one = np.cross(np.append(a_1, [[1]]), np.append(a_0, [[1]]))
line_two = np.cross(np.append(b_1, [[1]]), np.append(b_0, [[1]]))
out = np.cross(line_one, line_two)
out = out / out[-1]
out = out[:-1]
# END YOUR CODE
assert out.shape == (2,)
    assert out.dtype == np.float64
return out
def optical_center_from_vanishing_points(
v0: np.ndarray, v1: np.ndarray, v2: np.ndarray
) -> np.ndarray:
"""Compute the optical center of our camera intrinsics from three vanishing
points corresponding to mutually orthogonal directions.
Hints:
- Your `intersection_from_lines()` implementation might be helpful here.
- It might be worth reviewing vector projection with dot products.
Args:
v0 (np.ndarray): Vanishing point in image space; shape `(2,)`.
v1 (np.ndarray): Vanishing point in image space; shape `(2,)`.
v2 (np.ndarray): Vanishing point in image space; shape `(2,)`.
Returns:
np.ndarray: Optical center; shape `(2,)`.
"""
assert v0.shape == v1.shape == v2.shape == (2,), "Wrong shape!"
optical_center = np.zeros(2)
# YOUR CODE HERE
line_one = np.cross(np.append(v0.T, [[1]]), np.append(v1.T, [[1]]))
line_two = np.cross(np.append(v1.T, [[1]]), np.append(v2.T, [[1]]))
m_line_one_perp = line_one[1] / line_one[0]
m_line_two_perp = line_two[1] / line_two[0]
line_one_perp_b = v2[1] - (m_line_one_perp * v2[0])
line_two_perp_b = v0[1] - (m_line_two_perp * v0[0])
line_one_perp = np.array([-m_line_one_perp, 1, -line_one_perp_b])
line_two_perp = np.array([-m_line_two_perp, 1, -line_two_perp_b])
final = np.cross(line_one_perp, line_two_perp)
final_final = final / final[-1]
optical_center = final_final[:-1]
# END YOUR CODE
assert optical_center.shape == (2,)
return optical_center
def focal_length_from_two_vanishing_points(
v0: np.ndarray, v1: np.ndarray, optical_center: np.ndarray
) -> np.ndarray:
"""Compute focal length of camera, from two vanishing points and the
calibrated optical center.
Args:
v0 (np.ndarray): Vanishing point in image space; shape `(2,)`.
v1 (np.ndarray): Vanishing point in image space; shape `(2,)`.
optical_center (np.ndarray): Calibrated optical center; shape `(2,)`.
Returns:
float: Calibrated focal length.
"""
assert v0.shape == v1.shape == optical_center.shape == (2,), "Wrong shape!"
f = None
# YOUR CODE HERE
x_comp = (v0[0] - optical_center[0]) * (v1[0] - optical_center[0])
y_comp = (v0[1] - optical_center[1]) * (v1[1] - optical_center[1])
f = np.sqrt((x_comp + y_comp) / -1)
# END YOUR CODE
return float(f)
def physical_focal_length_from_calibration(
f: float, sensor_diagonal_mm: float, image_diagonal_pixels: float
) -> float:
"""Compute the physical focal length of our camera, in millimeters.
Args:
f (float): Calibrated focal length, using pixel units.
sensor_diagonal_mm (float): Length across the diagonal of our camera
sensor, in millimeters.
image_diagonal_pixels (float): Length across the diagonal of the
calibration image, in pixels.
Returns:
float: Calibrated focal length, in millimeters.
"""
f_mm = None
# YOUR CODE HERE
f_mm = (sensor_diagonal_mm / image_diagonal_pixels) * f
# END YOUR CODE
return f_mm
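

if __name__ == "__main__":
    # Minimal sanity check (added here; not part of the original assignment):
    # with d = 1, the world origin should land at (0, 0, 1) in camera space.
    T = camera_from_world_transform(d=1.0)
    origin = np.zeros((3, 1))
    print(apply_transform(T, origin))  # expect [[0.], [0.], [1.]]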
|
[
"numpy.cross",
"numpy.zeros",
"numpy.ones",
"numpy.append",
"numpy.array",
"numpy.eye",
"numpy.sqrt"
] |
[((721, 730), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (727, 730), True, 'import numpy as np\n'), ((3143, 3154), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3151, 3154), True, 'import numpy as np\n'), ((3327, 3355), 'numpy.cross', 'np.cross', (['line_one', 'line_two'], {}), '(line_one, line_two)\n', (3335, 3355), True, 'import numpy as np\n'), ((4296, 4307), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4304, 4307), True, 'import numpy as np\n'), ((4759, 4808), 'numpy.array', 'np.array', (['[-m_line_one_perp, 1, -line_one_perp_b]'], {}), '([-m_line_one_perp, 1, -line_one_perp_b])\n', (4767, 4808), True, 'import numpy as np\n'), ((4829, 4878), 'numpy.array', 'np.array', (['[-m_line_two_perp, 1, -line_two_perp_b]'], {}), '([-m_line_two_perp, 1, -line_two_perp_b])\n', (4837, 4878), True, 'import numpy as np\n'), ((4896, 4934), 'numpy.cross', 'np.cross', (['line_one_perp', 'line_two_perp'], {}), '(line_one_perp, line_two_perp)\n', (4904, 4934), True, 'import numpy as np\n'), ((5887, 5918), 'numpy.sqrt', 'np.sqrt', (['((x_comp + y_comp) / -1)'], {}), '((x_comp + y_comp) / -1)\n', (5894, 5918), True, 'import numpy as np\n'), ((3201, 3222), 'numpy.append', 'np.append', (['a_1', '[[1]]'], {}), '(a_1, [[1]])\n', (3210, 3222), True, 'import numpy as np\n'), ((3224, 3245), 'numpy.append', 'np.append', (['a_0', '[[1]]'], {}), '(a_0, [[1]])\n', (3233, 3245), True, 'import numpy as np\n'), ((3271, 3292), 'numpy.append', 'np.append', (['b_1', '[[1]]'], {}), '(b_1, [[1]])\n', (3280, 3292), True, 'import numpy as np\n'), ((3294, 3315), 'numpy.append', 'np.append', (['b_0', '[[1]]'], {}), '(b_0, [[1]])\n', (3303, 3315), True, 'import numpy as np\n'), ((4354, 4376), 'numpy.append', 'np.append', (['v0.T', '[[1]]'], {}), '(v0.T, [[1]])\n', (4363, 4376), True, 'import numpy as np\n'), ((4378, 4400), 'numpy.append', 'np.append', (['v1.T', '[[1]]'], {}), '(v1.T, [[1]])\n', (4387, 4400), True, 'import numpy as np\n'), ((4426, 4448), 'numpy.append', 'np.append', (['v1.T', '[[1]]'], {}), '(v1.T, [[1]])\n', (4435, 4448), True, 'import numpy as np\n'), ((4450, 4472), 'numpy.append', 'np.append', (['v2.T', '[[1]]'], {}), '(v2.T, [[1]])\n', (4459, 4472), True, 'import numpy as np\n'), ((2067, 2077), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (2074, 2077), True, 'import numpy as np\n'), ((783, 793), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (790, 793), True, 'import numpy as np\n'), ((800, 810), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (807, 810), True, 'import numpy as np\n'), ((850, 860), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (857, 860), True, 'import numpy as np\n'), ((868, 878), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (875, 878), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
from common.nets import LinearNet
from common.modules.NoisyLinear import NoisyLinear
def fanin_init(size, fanin=None):
    # uniform initialization in [-1/sqrt(fan-in), 1/sqrt(fan-in)],
    # as used for the hidden layers in the DDPG paper (Lillicrap et al., 2015)
    fanin = fanin or size[0]
    v = 1. / np.sqrt(fanin)
    return torch.Tensor(size).uniform_(-v, v)
class Actor(nn.Module):
def __init__(self, n_observation, n_action,
layers, activation=torch.nn.ELU,
layer_norm=False,
parameters_noise=False, parameters_noise_factorised=False,
last_activation=torch.nn.Tanh, init_w=3e-3):
super(Actor, self).__init__()
if parameters_noise:
def linear_layer(x_in, x_out):
return NoisyLinear(x_in, x_out, factorised=parameters_noise_factorised)
else:
linear_layer = nn.Linear
self.feature_net = LinearNet(
layers=[n_observation] + layers,
activation=activation,
layer_norm=layer_norm,
linear_layer=linear_layer)
self.policy_net = LinearNet(
layers=[self.feature_net.output_shape, n_action],
activation=last_activation,
layer_norm=False
)
self.init_weights(init_w)
def init_weights(self, init_w):
for layer in self.feature_net.net:
if isinstance(layer, nn.Linear):
layer.weight.data = fanin_init(layer.weight.data.size())
for layer in self.policy_net.net:
if isinstance(layer, nn.Linear):
layer.weight.data.uniform_(-init_w, init_w)
def forward(self, observation):
x = observation
x = self.feature_net.forward(x)
x = self.policy_net.forward(x)
return x
class Critic(nn.Module):
def __init__(self, n_observation, n_action,
layers, activation=torch.nn.ELU,
layer_norm=False,
parameters_noise=False, parameters_noise_factorised=False,
init_w=3e-3):
super(Critic, self).__init__()
if parameters_noise:
def linear_layer(x_in, x_out):
return NoisyLinear(x_in, x_out, factorised=parameters_noise_factorised)
else:
linear_layer = nn.Linear
self.feature_net = LinearNet(
layers=[n_observation + n_action] + layers,
activation=activation,
layer_norm=layer_norm,
linear_layer=linear_layer)
self.value_net = nn.Linear(self.feature_net.output_shape, 1)
self.init_weights(init_w)
def init_weights(self, init_w):
for layer in self.feature_net.net:
if isinstance(layer, nn.Linear):
layer.weight.data = fanin_init(layer.weight.data.size())
self.value_net.weight.data.uniform_(-init_w, init_w)
def forward(self, observation, action):
x = torch.cat((observation, action), dim=1)
x = self.feature_net.forward(x)
x = self.value_net.forward(x)
return x
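

if __name__ == "__main__":
    # Hedged smoke test (assumes the local `common` package is importable and
    # that LinearNet builds an MLP from the keyword arguments used above).
    actor = Actor(n_observation=8, n_action=2, layers=[64, 64])
    critic = Critic(n_observation=8, n_action=2, layers=[64, 64])
    obs = torch.randn(4, 8)
    act = actor(obs)  # shape (4, 2), squashed into [-1, 1] by the Tanh head
    print(act.shape, critic(obs, act).shape)  # torch.Size([4, 2]) torch.Size([4, 1])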
|
[
"common.modules.NoisyLinear.NoisyLinear",
"torch.cat",
"common.nets.LinearNet",
"torch.Tensor",
"torch.nn.Linear",
"numpy.sqrt"
] |
[((218, 232), 'numpy.sqrt', 'np.sqrt', (['fanin'], {}), '(fanin)\n', (225, 232), True, 'import numpy as np\n'), ((854, 973), 'common.nets.LinearNet', 'LinearNet', ([], {'layers': '([n_observation] + layers)', 'activation': 'activation', 'layer_norm': 'layer_norm', 'linear_layer': 'linear_layer'}), '(layers=[n_observation] + layers, activation=activation,\n layer_norm=layer_norm, linear_layer=linear_layer)\n', (863, 973), False, 'from common.nets import LinearNet\n'), ((1045, 1155), 'common.nets.LinearNet', 'LinearNet', ([], {'layers': '[self.feature_net.output_shape, n_action]', 'activation': 'last_activation', 'layer_norm': '(False)'}), '(layers=[self.feature_net.output_shape, n_action], activation=\n last_activation, layer_norm=False)\n', (1054, 1155), False, 'from common.nets import LinearNet\n'), ((2280, 2410), 'common.nets.LinearNet', 'LinearNet', ([], {'layers': '([n_observation + n_action] + layers)', 'activation': 'activation', 'layer_norm': 'layer_norm', 'linear_layer': 'linear_layer'}), '(layers=[n_observation + n_action] + layers, activation=activation,\n layer_norm=layer_norm, linear_layer=linear_layer)\n', (2289, 2410), False, 'from common.nets import LinearNet\n'), ((2481, 2524), 'torch.nn.Linear', 'nn.Linear', (['self.feature_net.output_shape', '(1)'], {}), '(self.feature_net.output_shape, 1)\n', (2490, 2524), True, 'import torch.nn as nn\n'), ((2876, 2915), 'torch.cat', 'torch.cat', (['(observation, action)'], {'dim': '(1)'}), '((observation, action), dim=1)\n', (2885, 2915), False, 'import torch\n'), ((244, 262), 'torch.Tensor', 'torch.Tensor', (['size'], {}), '(size)\n', (256, 262), False, 'import torch\n'), ((710, 774), 'common.modules.NoisyLinear.NoisyLinear', 'NoisyLinear', (['x_in', 'x_out'], {'factorised': 'parameters_noise_factorised'}), '(x_in, x_out, factorised=parameters_noise_factorised)\n', (721, 774), False, 'from common.modules.NoisyLinear import NoisyLinear\n'), ((2136, 2200), 'common.modules.NoisyLinear.NoisyLinear', 'NoisyLinear', (['x_in', 'x_out'], {'factorised': 'parameters_noise_factorised'}), '(x_in, x_out, factorised=parameters_noise_factorised)\n', (2147, 2200), False, 'from common.modules.NoisyLinear import NoisyLinear\n')]
|
from pyannote.audio.pipeline.speech_turn_assignment import SpeechTurnClosestAssignment
import torch
import itertools
import torch.nn.functional as F
from pyannote.pipeline.parameter import Uniform
from pyannote.core.utils.distance import cdist
from pyannote.core.utils.distance import dist_range
from pyannote.core.utils.distance import l2_normalize
from pyannote.core import Annotation
from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels
from pyannote.audio.features.wrapper import Wrapper, Wrappable
from typing import Optional
import numpy as np
import warnings
from pyannote.pipeline import Pipeline
class ClosestAssignment(Pipeline):
"""Assign each sample to the closest target
Parameters
----------
metric : `str`, optional
Distance metric. Defaults to 'cosine'
normalize : `bool`, optional
L2 normalize vectors before clustering.
Hyper-parameters
----------------
threshold : `float`
Do not assign if distance greater than `threshold`.
"""
def __init__(self, metric: Optional[str] = 'cosine',
normalize: Optional[bool] = False):
super().__init__()
self.metric = metric
self.normalize = normalize
min_dist, max_dist = dist_range(metric=self.metric,
normalize=self.normalize)
if not np.isfinite(max_dist):
            # this is arbitrary and might lead to suboptimal results
max_dist = 1e6
msg = (f'bounding distance threshold to {max_dist:g}: '
f'this might lead to suboptimal results.')
warnings.warn(msg)
self.threshold = Uniform(min_dist, max_dist)
def __call__(self, X_target, X):
"""Assign each sample to its closest class (if close enough)
Parameters
----------
X_target : `np.ndarray`
(n_targets, n_dimensions) target embeddings
X : `np.ndarray`
(n_samples, n_dimensions) sample embeddings
Returns
-------
assignments : `np.ndarray`
(n_samples, ) sample assignments
"""
if self.normalize:
X_target = l2_normalize(X_target)
X = l2_normalize(X)
distance = cdist(X_target, X, metric=self.metric)
idx = np.argsort(distance, axis=0)
for i, k in enumerate(idx[0]):
if distance[k, i] > self.threshold:
                # do not assign: store a non-positive index so callers can
                # filter out unassigned samples with `if not k < 0`
idx[0][i] = -i
return idx
class ClosestAssignmentAlwaysAssign(ClosestAssignment):
def __call__(self, X_target, X):
if self.normalize:
X_target = l2_normalize(X_target)
X = l2_normalize(X)
distance = cdist(X_target, X, metric=self.metric)
idx = np.argsort(distance, axis=0)
return idx
class SpeechTurnClosestAssignmentNew(SpeechTurnClosestAssignment):
def __init__(self, embedding: Wrappable = None, metric: Optional[str] = "cosine"):
super().__init__(embedding=embedding, metric=metric)
self.closest_assignment = ClosestAssignment(metric=self.metric)
def __call__(
self, current_file: dict, speech_turns: Annotation, targets: Annotation
) -> Annotation:
"""Assign each speech turn to closest target (if close enough)
Parameters
----------
current_file : `dict`
File as provided by a pyannote.database protocol.
speech_turns : `Annotation`
Speech turns. Should only contain `int` labels.
targets : `Annotation`
Targets. Should only contain `str` labels.
Returns
-------
assigned : `Annotation`
Assigned speech turns.
"""
assert_string_labels(targets, "targets")
assert_int_labels(speech_turns, "speech_turns")
embedding = self._embedding(current_file)
# gather targets embedding
labels = targets.labels()
X_targets, targets_labels = [], []
for l, label in enumerate(labels):
timeline = targets.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
continue
targets_labels.append(label)
X_targets.append(np.mean(x, axis=0))
# gather speech turns embedding
labels = speech_turns.labels()
X, assigned_labels, skipped_labels = [], [], []
for l, label in enumerate(labels):
timeline = speech_turns.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
skipped_labels.append(label)
continue
assigned_labels.append(label)
X.append(np.mean(x, axis=0))
# assign speech turns to closest class
assignments = self.closest_assignment(np.vstack(X_targets), np.vstack(X))
mapping = {
label: targets_labels[k]
for label, k in zip(assigned_labels, assignments[0])
if not k < 0
}
mapping1 = {
label: targets_labels[k]
for label, k in zip(assigned_labels, assignments[1])
if not k < 0
}
return speech_turns.rename_labels(mapping=mapping), speech_turns.copy().rename_labels(mapping=mapping1)
class SpeechTurnClosestAssignmentMultiSpeaker(SpeechTurnClosestAssignment):
def __init__(self, gnet, device, embedding: Wrappable = None, metric: Optional[str] = "cosine"):
super(SpeechTurnClosestAssignmentMultiSpeaker, self).__init__(embedding, metric)
self.closest_assignment = ClosestAssignmentAlwaysAssign(metric=self.metric)
self.g_net = gnet.to(device)
self.device = device
def __call__(
self, current_file: dict, speech_turns: Annotation, targets: Annotation
) -> Annotation:
assert_string_labels(targets, "targets")
assert_int_labels(speech_turns, "speech_turns")
embedding = self._embedding(current_file)
# gather targets embedding
labels = targets.labels()
X_targets, targets_labels = [], []
for l, label in enumerate(labels):
timeline = targets.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
continue
targets_labels.append(label)
X_targets.append(np.mean(x, axis=0))
# gather speech turns embedding
labels = speech_turns.labels()
X, assigned_labels, skipped_labels = [], [], []
for l, label in enumerate(labels):
timeline = speech_turns.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
skipped_labels.append(label)
continue
assigned_labels.append(label)
X.append(np.mean(x, axis=0))
# assign speech turns to closest class
targets = np.vstack(X_targets)
num_targets = len(targets)
targets_tensor = torch.tensor(targets).to(self.device).float()
if targets_tensor.size(0) > 1:
# targets_tensor = F.normalize(torch.tensor(targets).to(device).float())
combinations = torch.tensor(list(itertools.combinations(list(range(num_targets)),2)))
comb2_a = targets_tensor[combinations.transpose(-2, -1)[0]]
comb2_b = targets_tensor[combinations.transpose(-2, -1)[1]]
merged2 = self.g_net(comb2_a, comb2_b)
new_targets = torch.cat([targets_tensor, merged2], 0).cpu().detach().numpy()
else:
new_targets = targets
for comb in list(itertools.combinations(list(range(num_targets)),2)):
targets_labels.append(f'{targets_labels[comb[0]]}_{targets_labels[comb[1]]}')
assignments = self.closest_assignment(new_targets, np.vstack(X))
mapping = {
label: targets_labels[k]
for label, k in zip(assigned_labels, assignments[0])
if not k < 0
}
return speech_turns.rename_labels(mapping=mapping)
class SpeechTurnClosestAssignmentMerge(SpeechTurnClosestAssignment):
def __init__(self, gnet, device, embedding: Wrappable = None, metric: Optional[str] = "cosine"):
super(SpeechTurnClosestAssignmentMerge, self).__init__(embedding, metric)
self.g_net = gnet.to(device)
self.device = device
self.closest_assignment = ClosestAssignmentAlwaysAssign(metric=self.metric)
def __call__(
self, current_file: dict, speech_turns: Annotation, targets: Annotation
) -> Annotation:
assert_string_labels(targets, "targets")
assert_int_labels(speech_turns, "speech_turns")
embedding = self._embedding(current_file)
# gather targets embedding
labels = targets.labels()
X_targets, targets_labels = [], []
for l, label in enumerate(labels):
timeline = targets.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
continue
targets_labels.append(label)
X_targets.append(np.mean(x, axis=0))
# gather speech turns embedding
labels = speech_turns.labels()
X, assigned_labels, skipped_labels = [], [], []
for l, label in enumerate(labels):
timeline = speech_turns.label_timeline(label, copy=False)
# be more and more permissive until we have
# at least one embedding for current speech turn
for mode in ["strict", "center", "loose"]:
x = embedding.crop(timeline, mode=mode)
if len(x) > 0:
break
# skip labels so small we don't have any embedding for it
if len(x) < 1:
skipped_labels.append(label)
continue
assigned_labels.append(label)
X.append(np.mean(x, axis=0))
assignments_original = self.closest_assignment(np.vstack(X_targets), np.vstack(X))
mapping_original = {
label: targets_labels[k]
for label, k in zip(assigned_labels, assignments_original[0])
if not k < 0
}
speech_turn_original = speech_turns.copy().rename_labels(mapping=mapping_original)
# assign speech turns to closest class
targets = np.vstack(X_targets)
num_targets = len(targets)
targets_tensor = torch.tensor(targets).to(self.device).float()
# targets_tensor = F.normalize(torch.tensor(targets).to(device).float())
if num_targets > 1:
combinations = torch.tensor(list(itertools.combinations(list(range(num_targets)),2)))
comb2_a = targets_tensor[combinations.transpose(-2, -1)[0]]
comb2_b = targets_tensor[combinations.transpose(-2, -1)[1]]
merged2 = self.g_net(comb2_a, comb2_b)
new_targets = merged2.cpu().detach().numpy()
targets_labels_merge = []
for comb in list(itertools.combinations(list(range(num_targets)),2)):
targets_labels_merge.append(f'{targets_labels[comb[0]]}_{targets_labels[comb[1]]}')
else:
new_targets = targets
targets_labels_merge = [f'{targets_labels[0]}_{targets_labels[0]}']
assignments = self.closest_assignment(new_targets, np.vstack(X))
mapping = {
label: targets_labels_merge[k]
for label, k in zip(assigned_labels, assignments[0])
if not k < 0
}
speech_turn_merge = speech_turns.rename_labels(mapping=mapping)
return speech_turn_original, speech_turn_merge
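

if __name__ == "__main__":
    # Hedged smoke test of ClosestAssignment (assumes pyannote.pipeline's
    # `instantiate` method for freezing the `threshold` hyper-parameter).
    pipeline = ClosestAssignment(metric="euclidean")
    pipeline.instantiate({"threshold": 1.0})
    X_target = np.array([[0.0, 0.0], [10.0, 10.0]])
    X = np.array([[0.1, 0.0], [10.2, 9.9], [4.0, 4.0]])
    # expect [0, 1, -2]: the last sample is farther than the threshold from
    # every target, so it is marked with a non-positive index
    print(pipeline(X_target, X)[0])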
|
[
"pyannote.pipeline.parameter.Uniform",
"pyannote.core.utils.distance.l2_normalize",
"pyannote.core.utils.distance.dist_range",
"pyannote.core.utils.distance.cdist",
"numpy.isfinite",
"torch.cat",
"numpy.argsort",
"pyannote.audio.pipeline.utils.assert_string_labels",
"numpy.mean",
"pyannote.audio.pipeline.utils.assert_int_labels",
"warnings.warn",
"torch.tensor",
"numpy.vstack"
] |
[((1279, 1335), 'pyannote.core.utils.distance.dist_range', 'dist_range', ([], {'metric': 'self.metric', 'normalize': 'self.normalize'}), '(metric=self.metric, normalize=self.normalize)\n', (1289, 1335), False, 'from pyannote.core.utils.distance import dist_range\n'), ((1695, 1722), 'pyannote.pipeline.parameter.Uniform', 'Uniform', (['min_dist', 'max_dist'], {}), '(min_dist, max_dist)\n', (1702, 1722), False, 'from pyannote.pipeline.parameter import Uniform\n'), ((2289, 2327), 'pyannote.core.utils.distance.cdist', 'cdist', (['X_target', 'X'], {'metric': 'self.metric'}), '(X_target, X, metric=self.metric)\n', (2294, 2327), False, 'from pyannote.core.utils.distance import cdist\n'), ((2342, 2370), 'numpy.argsort', 'np.argsort', (['distance'], {'axis': '(0)'}), '(distance, axis=0)\n', (2352, 2370), True, 'import numpy as np\n'), ((2761, 2799), 'pyannote.core.utils.distance.cdist', 'cdist', (['X_target', 'X'], {'metric': 'self.metric'}), '(X_target, X, metric=self.metric)\n', (2766, 2799), False, 'from pyannote.core.utils.distance import cdist\n'), ((2814, 2842), 'numpy.argsort', 'np.argsort', (['distance'], {'axis': '(0)'}), '(distance, axis=0)\n', (2824, 2842), True, 'import numpy as np\n'), ((3775, 3815), 'pyannote.audio.pipeline.utils.assert_string_labels', 'assert_string_labels', (['targets', '"""targets"""'], {}), "(targets, 'targets')\n", (3795, 3815), False, 'from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels\n'), ((3824, 3871), 'pyannote.audio.pipeline.utils.assert_int_labels', 'assert_int_labels', (['speech_turns', '"""speech_turns"""'], {}), "(speech_turns, 'speech_turns')\n", (3841, 3871), False, 'from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels\n'), ((6536, 6576), 'pyannote.audio.pipeline.utils.assert_string_labels', 'assert_string_labels', (['targets', '"""targets"""'], {}), "(targets, 'targets')\n", (6556, 6576), False, 'from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels\n'), ((6585, 6632), 'pyannote.audio.pipeline.utils.assert_int_labels', 'assert_int_labels', (['speech_turns', '"""speech_turns"""'], {}), "(speech_turns, 'speech_turns')\n", (6602, 6632), False, 'from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels\n'), ((8260, 8280), 'numpy.vstack', 'np.vstack', (['X_targets'], {}), '(X_targets)\n', (8269, 8280), True, 'import numpy as np\n'), ((9926, 9966), 'pyannote.audio.pipeline.utils.assert_string_labels', 'assert_string_labels', (['targets', '"""targets"""'], {}), "(targets, 'targets')\n", (9946, 9966), False, 'from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels\n'), ((9975, 10022), 'pyannote.audio.pipeline.utils.assert_int_labels', 'assert_int_labels', (['speech_turns', '"""speech_turns"""'], {}), "(speech_turns, 'speech_turns')\n", (9992, 10022), False, 'from pyannote.audio.pipeline.utils import assert_int_labels, assert_string_labels\n'), ((12009, 12029), 'numpy.vstack', 'np.vstack', (['X_targets'], {}), '(X_targets)\n', (12018, 12029), True, 'import numpy as np\n'), ((1391, 1412), 'numpy.isfinite', 'np.isfinite', (['max_dist'], {}), '(max_dist)\n', (1402, 1412), True, 'import numpy as np\n'), ((1651, 1669), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (1664, 1669), False, 'import warnings\n'), ((2214, 2236), 'pyannote.core.utils.distance.l2_normalize', 'l2_normalize', (['X_target'], {}), '(X_target)\n', (2226, 2236), False, 'from pyannote.core.utils.distance import l2_normalize\n'), ((2253, 2268), 
'pyannote.core.utils.distance.l2_normalize', 'l2_normalize', (['X'], {}), '(X)\n', (2265, 2268), False, 'from pyannote.core.utils.distance import l2_normalize\n'), ((2686, 2708), 'pyannote.core.utils.distance.l2_normalize', 'l2_normalize', (['X_target'], {}), '(X_target)\n', (2698, 2708), False, 'from pyannote.core.utils.distance import l2_normalize\n'), ((2725, 2740), 'pyannote.core.utils.distance.l2_normalize', 'l2_normalize', (['X'], {}), '(X)\n', (2737, 2740), False, 'from pyannote.core.utils.distance import l2_normalize\n'), ((5527, 5547), 'numpy.vstack', 'np.vstack', (['X_targets'], {}), '(X_targets)\n', (5536, 5547), True, 'import numpy as np\n'), ((5549, 5561), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (5558, 5561), True, 'import numpy as np\n'), ((9165, 9177), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (9174, 9177), True, 'import numpy as np\n'), ((11640, 11660), 'numpy.vstack', 'np.vstack', (['X_targets'], {}), '(X_targets)\n', (11649, 11660), True, 'import numpy as np\n'), ((11662, 11674), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (11671, 11674), True, 'import numpy as np\n'), ((13002, 13014), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (13011, 13014), True, 'import numpy as np\n'), ((4625, 4643), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4632, 4643), True, 'import numpy as np\n'), ((5413, 5431), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5420, 5431), True, 'import numpy as np\n'), ((7386, 7404), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (7393, 7404), True, 'import numpy as np\n'), ((8174, 8192), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (8181, 8192), True, 'import numpy as np\n'), ((10776, 10794), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (10783, 10794), True, 'import numpy as np\n'), ((11564, 11582), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (11571, 11582), True, 'import numpy as np\n'), ((8341, 8362), 'torch.tensor', 'torch.tensor', (['targets'], {}), '(targets)\n', (8353, 8362), False, 'import torch\n'), ((12090, 12111), 'torch.tensor', 'torch.tensor', (['targets'], {}), '(targets)\n', (12102, 12111), False, 'import torch\n'), ((8826, 8865), 'torch.cat', 'torch.cat', (['[targets_tensor, merged2]', '(0)'], {}), '([targets_tensor, merged2], 0)\n', (8835, 8865), False, 'import torch\n')]
|
import numpy as np
def calculate_perplexity(log_probs):
# https://web.stanford.edu/class/cs124/lec/languagemodeling.pdf
perp = 0
for p in log_probs:
perp += -p
return np.exp(perp / len(log_probs))
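# Hedged sanity check (not part of the original file): for a uniform
# distribution over k symbols the perplexity should equal k exactly, e.g.
# log-probs of log(1/4) give perplexity 4.
if __name__ == '__main__':
    _uniform_log_probs = [np.log(0.25)] * 10
    assert abs(calculate_perplexity(_uniform_log_probs) - 4.0) < 1e-9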
def sample(a, temperature=1.0):
# helper function to sample an index from a probability array
# from https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
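# Hedged illustration (not part of the original file): lowering the temperature
# sharpens the distribution, so draws from [0.1, 0.2, 0.7] at temperature 0.1
# should pick index 2 almost every time.
if __name__ == '__main__':
    _picks = [sample([0.1, 0.2, 0.7], temperature=0.1) for _ in range(100)]
    print('index 2 drawn %d/100 times' % _picks.count(2))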
def corpus_iterator(raw_data, batch_size, num_steps):
# Pulled from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py#L82
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i * num_steps:(i + 1) * num_steps]
y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
yield (x, y)
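# Hedged illustration (not part of the original file): batching the token ids
# 0..9 with batch_size=2 and num_steps=2 yields windows where each target y is
# the input x shifted left by one position.
if __name__ == '__main__':
    for _x, _y in corpus_iterator(list(range(10)), batch_size=2, num_steps=2):
        print(_x.tolist(), _y.tolist())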
import os
def temp():
for folder, subs, files in os.walk('E:/JLM/train/experiments'):
for filename in files:
if 'cout.txt' in filename:
print(folder)
path = os.path.join(folder, filename)
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if 'Validation perplexity' in line:
print(line.strip('\n').split(':')[1].strip())
temp()
|
[
"numpy.log",
"numpy.random.multinomial",
"os.walk",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"os.path.join"
] |
[((715, 749), 'numpy.array', 'np.array', (['raw_data'], {'dtype': 'np.int32'}), '(raw_data, dtype=np.int32)\n', (723, 749), True, 'import numpy as np\n'), ((829, 878), 'numpy.zeros', 'np.zeros', (['[batch_size, batch_len]'], {'dtype': 'np.int32'}), '([batch_size, batch_len], dtype=np.int32)\n', (837, 878), True, 'import numpy as np\n'), ((1346, 1381), 'os.walk', 'os.walk', (['"""E:/JLM/train/experiments"""'], {}), "('E:/JLM/train/experiments')\n", (1353, 1381), False, 'import os\n'), ((419, 428), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (425, 428), True, 'import numpy as np\n'), ((451, 460), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (457, 460), True, 'import numpy as np\n'), ((502, 532), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'a', '(1)'], {}), '(1, a, 1)\n', (523, 532), True, 'import numpy as np\n'), ((470, 479), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (476, 479), True, 'import numpy as np\n'), ((1522, 1552), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (1534, 1552), False, 'import os\n')]
|
import numpy as np
from SoftThreshold import soft_threshold
def shrunken_centroids_fit(model, X_train, y_train, _lambda):
"""对收缩质心模型进行训练
关于该模型训练的详细原理可参考书籍
《The Elements of Statistical Learning》(ESL)2nd editor,section 18.2
Input:
model: DiscrimModel实例
Xtrain: 设计矩阵, shape=(n_samples, dim)
ytrain: 类标签索引值, shape=(n_samples,)
_lambda: 用于Cross-Validation
Output:
model
"""
    X_train = np.array(X_train)  # convert the data to numpy arrays
    y_train = np.array(y_train)
    n_classes = len(np.unique(y_train))  # number of classes
    n_samples, dim = X_train.shape  # number of samples and feature dimension
    ns_per_class = np.empty((n_classes, ))  # number of samples in each class
    # compute the pooled standard deviation
    x_bar = np.mean(X_train, axis=0)  # shape = (dim,)
    s_error = np.zeros((dim, ))  # initialize the accumulated squared error
for c in range(n_classes):
index = (y_train == c)
        ns_per_class[c] = np.sum(index)  # number of samples in class c
        # if class c has no samples, use the global mean x_bar as its centroid
if ns_per_class[c] == 0:
centroid = x_bar
else:
centroid = np.mean(X_train[index.flatten()], axis=0)
temp1 = X_train[index.flatten()]
temp2 = centroid[np.newaxis, :]
temp3 = np.power(temp1 - temp2, 2)
s_error = s_error + np.sum(temp3, axis=0)
    sigma = np.power(s_error/(n_samples - n_classes), 0.5)  # pooled standard deviation, shape=(dim,)
    s0 = np.median(sigma)  # median of the pooled standard deviations
mu = model.mu # shape = (n_classes,dim)
m = np.empty((n_classes, ))
offset = np.empty((n_classes, dim))
for c in range(n_classes):
if ns_per_class[c] == 0:
m[c] = 0
else:
            # Eq. 18.4 in ESL
            m[c] = np.power((1/ns_per_class[c] - 1/n_samples), 0.5)
        # Eq. 18.4 in ESL
        offset[c, :] = np.true_divide(mu[c, :] - x_bar[np.newaxis, :], m[c]*(sigma + s0))
        # Eq. 18.5 in ESL (soft thresholding)
        offset[c, :] = soft_threshold(offset[c, :], _lambda)
        # Eq. 18.7 in ESL (shrunken centroid)
mu[c, :] = x_bar + m[c]*(sigma+s0)*offset[c, :]
model.mu = mu
model.sigma_pooled_diag = np.power(sigma, 2)
model.shrunken_centroids = offset
return model
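# Hedged usage sketch (not part of the original file): `DiscrimModel` is not
# defined in this file, so a minimal stand-in object carrying only the `mu`
# attribute is used here; a real model would come from the surrounding library.
if __name__ == '__main__':
    class _StubModel(object):
        pass

    _X = np.array([[0.0, 0.0], [0.2, 0.1], [1.0, 1.0], [1.2, 0.9]])
    _y = np.array([0, 0, 1, 1])
    _model = _StubModel()
    _model.mu = np.zeros((2, 2))  # one initial centroid per class
    _model = shrunken_centroids_fit(_model, _X, _y, _lambda=0.1)
    print(_model.mu)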
|
[
"numpy.sum",
"numpy.true_divide",
"numpy.median",
"numpy.empty",
"numpy.power",
"numpy.zeros",
"SoftThreshold.soft_threshold",
"numpy.mean",
"numpy.array",
"numpy.unique"
] |
[((432, 449), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (440, 449), True, 'import numpy as np\n'), ((494, 511), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (502, 511), True, 'import numpy as np\n'), ((649, 671), 'numpy.empty', 'np.empty', (['(n_classes,)'], {}), '((n_classes,))\n', (657, 671), True, 'import numpy as np\n'), ((715, 739), 'numpy.mean', 'np.mean', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (722, 739), True, 'import numpy as np\n'), ((780, 796), 'numpy.zeros', 'np.zeros', (['(dim,)'], {}), '((dim,))\n', (788, 796), True, 'import numpy as np\n'), ((1312, 1360), 'numpy.power', 'np.power', (['(s_error / (n_samples - n_classes))', '(0.5)'], {}), '(s_error / (n_samples - n_classes), 0.5)\n', (1320, 1360), True, 'import numpy as np\n'), ((1391, 1407), 'numpy.median', 'np.median', (['sigma'], {}), '(sigma)\n', (1400, 1407), True, 'import numpy as np\n'), ((1545, 1567), 'numpy.empty', 'np.empty', (['(n_classes,)'], {}), '((n_classes,))\n', (1553, 1567), True, 'import numpy as np\n'), ((1582, 1608), 'numpy.empty', 'np.empty', (['(n_classes, dim)'], {}), '((n_classes, dim))\n', (1590, 1608), True, 'import numpy as np\n'), ((2123, 2141), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (2131, 2141), True, 'import numpy as np\n'), ((532, 550), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (541, 550), True, 'import numpy as np\n'), ((909, 922), 'numpy.sum', 'np.sum', (['index'], {}), '(index)\n', (915, 922), True, 'import numpy as np\n'), ((1223, 1249), 'numpy.power', 'np.power', (['(temp1 - temp2)', '(2)'], {}), '(temp1 - temp2, 2)\n', (1231, 1249), True, 'import numpy as np\n'), ((1846, 1914), 'numpy.true_divide', 'np.true_divide', (['(mu[c, :] - x_bar[np.newaxis, :])', '(m[c] * (sigma + s0))'], {}), '(mu[c, :] - x_bar[np.newaxis, :], m[c] * (sigma + s0))\n', (1860, 1914), True, 'import numpy as np\n'), ((1958, 1995), 'SoftThreshold.soft_threshold', 'soft_threshold', (['offset[c, :]', '_lambda'], {}), '(offset[c, :], _lambda)\n', (1972, 1995), False, 'from SoftThreshold import soft_threshold\n'), ((1278, 1299), 'numpy.sum', 'np.sum', (['temp3'], {'axis': '(0)'}), '(temp3, axis=0)\n', (1284, 1299), True, 'import numpy as np\n'), ((1752, 1802), 'numpy.power', 'np.power', (['(1 / ns_per_class[c] - 1 / n_samples)', '(0.5)'], {}), '(1 / ns_per_class[c] - 1 / n_samples, 0.5)\n', (1760, 1802), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyAMG: Algebraic Multigrid Solvers in Python
PyAMG is a library of Algebraic Multigrid (AMG)
solvers with a convenient Python interface.
PyAMG features implementations of:
- Ruge-Stuben (RS) or Classical AMG
- AMG based on Smoothed Aggregation (SA)
- Adaptive Smoothed Aggregation (αSA)
- Compatible Relaxation (CR)
- Krylov methods such as CG, GMRES, FGMRES, BiCGStab, MINRES, etc
PyAMG is primarily written in Python with
supporting C++ code for performance critical operations.
"""
import os
import sys
import subprocess
import setuptools
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
version = '4.1.0'
isreleased = False
install_requires = (
'numpy>=1.7.0',
'scipy>=0.12.0',
'pytest>=2',
)
# set the version information
# https://github.com/numpy/numpy/commits/master/setup.py
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
out = _minimal_ext_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
GIT_BRANCH = out.strip().decode('ascii')
except OSError:
GIT_REVISION = 'Unknown'
GIT_BRANCH = ''
return GIT_REVISION
def set_version_info(VERSION, ISRELEASED):
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('pyamg/version.py'):
try:
import imp
version = imp.load_source("pyamg.version", "pyamg/version.py")
GIT_REVISION = version.git_revision
except ImportError:
raise ImportError('Unable to read version information.')
else:
GIT_REVISION = 'Unknown'
GIT_BRANCH = ''
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.dev0' + '+' + GIT_REVISION[:7]
print(GIT_REVISION)
print(FULLVERSION)
return FULLVERSION, GIT_REVISION
def write_version_py(VERSION,
FULLVERSION,
GIT_REVISION,
ISRELEASED,
filename='pyamg/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
fullversion, git_revision = set_version_info(version, isreleased)
write_version_py(version, fullversion, git_revision, isreleased,
filename='pyamg/version.py')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
pytest.main(self.test_args)
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
    C++14 is preferred over C++11 (when it is available).
"""
if has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc'],
'unix': [],
}
l_opts = {
'msvc': [],
'unix': [],
}
if sys.platform == 'darwin':
l_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
def build_extensions(self):
try:
self.compiler.compiler_so.remove("-Wstrict-prototypes")
except (AttributeError, ValueError):
pass
ct = self.compiler.compiler_type
c_opts = self.c_opts.get(ct, [])
l_opts = self.l_opts.get(ct, [])
if ct == 'unix':
c_opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
c_opts.append('-fvisibility=hidden')
for ext in self.extensions:
ext.extra_compile_args = c_opts
ext.extra_link_args = l_opts
ext.define_macros = [('VERSION_INFO', '"{}"'.format(self.distribution.get_version()))]
build_ext.build_extensions(self)
# identify extension modules
# since numpy is needed (for the path), need to bootstrap the setup
# http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
def finalize_options(self):
build_ext.finalize_options(self)
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
# The issue:
# https://github.com/pybind/pybind11/issues/1067
#
# pybind11 will install files to
# TMP/pybind11-version.egg/*.h
# TMP/pybind11-version.egg/detail/*.h
#
# We need this to look like
# TMP/pybind11/*.h
# TMP/pybind11/detail/*.h
# TMPDIR/pybind11-2.2.4-py3.7.egg/pybind11/__init__.py
f = pybind11.__file__
# TMPDIR/pybind11-2.2.4-py3.7.egg/pybind11/
d = os.path.dirname(f)
# TMPDIR/pybind11-2.2.4-py3.7.egg
dd = os.path.dirname(d)
# TMPDIR
tmpdir = os.path.dirname(dd)
# check if not a half-install
if not os.path.exists(os.path.join(dd, 'pybind11.h')):
return pybind11.get_include(self.user)
# if it *is* a half-install
# Then copy all files to
# TMPDIR/pybind11
if not os.path.isdir(os.path.join(tmpdir, 'pybind11')):
import shutil
shutil.copytree(dd, os.path.join(tmpdir, 'pybind11'))
return tmpdir
amg_core_headers = ['evolution_strength.h',
'graph.h',
'krylov.h',
'linalg.h',
'relaxation.h',
'ruge_stuben.h',
'smoothed_aggregation.h']
amg_core_headers = [f.replace('.h', '') for f in amg_core_headers]
ext_modules = [Extension('pyamg.amg_core.%s' % f,
sources=['pyamg/amg_core/%s_bind.cpp' % f],
include_dirs=[get_pybind_include(), get_pybind_include(user=True)],
undef_macros=['NDEBUG'],
language='c++') for f in amg_core_headers]
ext_modules += [Extension('pyamg.amg_core.tests.bind_examples',
sources=['pyamg/amg_core/tests/bind_examples_bind.cpp'],
include_dirs=[get_pybind_include(), get_pybind_include(user=True)],
language='c++')]
setup(
name='pyamg',
version=fullversion,
keywords=['algebraic multigrid AMG sparse matrix preconditioning'],
author='<NAME>, <NAME>, and <NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/pyamg/pyamg',
download_url='https://github.com/pyamg/pyamg/releases',
license='MIT',
platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
    description=__doc__.strip().split('\n')[0],
long_description=__doc__,
#
packages=find_packages(exclude=['doc']),
package_data={'pyamg': ['gallery/example_data/*.mat', 'gallery/mesh_data/*.npz']},
include_package_data=False,
install_requires=install_requires,
zip_safe=False,
#
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt, 'test': PyTest},
setup_requires=['numpy', 'pybind11'],
#
tests_require=['pytest'],
#
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
[
"tempfile.NamedTemporaryFile",
"subprocess.Popen",
"pybind11.get_include",
"os.path.dirname",
"os.path.exists",
"setuptools.command.build_ext.build_ext.finalize_options",
"pytest.main",
"os.environ.get",
"imp.load_source",
"setuptools.command.test.test.finalize_options",
"numpy.get_include",
"setuptools.command.build_ext.build_ext.build_extensions",
"os.path.join",
"setuptools.find_packages"
] |
[((1906, 1928), 'os.path.exists', 'os.path.exists', (['""".git"""'], {}), "('.git')\n", (1920, 1928), False, 'import os\n'), ((1976, 2010), 'os.path.exists', 'os.path.exists', (['"""pyamg/version.py"""'], {}), "('pyamg/version.py')\n", (1990, 2010), False, 'import os\n'), ((3476, 3510), 'setuptools.command.test.test.finalize_options', 'TestCommand.finalize_options', (['self'], {}), '(self)\n', (3504, 3510), True, 'from setuptools.command.test import test as TestCommand\n'), ((3686, 3713), 'pytest.main', 'pytest.main', (['self.test_args'], {}), '(self.test_args)\n', (3697, 3713), False, 'import pytest\n'), ((3980, 4027), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {'suffix': '""".cpp"""'}), "('w', suffix='.cpp')\n", (4007, 4027), False, 'import tempfile\n'), ((5824, 5856), 'setuptools.command.build_ext.build_ext.build_extensions', 'build_ext.build_extensions', (['self'], {}), '(self)\n', (5850, 5856), False, 'from setuptools.command.build_ext import build_ext\n'), ((6101, 6133), 'setuptools.command.build_ext.build_ext.finalize_options', 'build_ext.finalize_options', (['self'], {}), '(self)\n', (6127, 6133), False, 'from setuptools.command.build_ext import build_ext\n'), ((7101, 7119), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (7116, 7119), False, 'import os\n'), ((7176, 7194), 'os.path.dirname', 'os.path.dirname', (['d'], {}), '(d)\n', (7191, 7194), False, 'import os\n'), ((7230, 7249), 'os.path.dirname', 'os.path.dirname', (['dd'], {}), '(dd)\n', (7245, 7249), False, 'import os\n'), ((9147, 9177), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['doc']"}), "(exclude=['doc'])\n", (9160, 9177), False, 'from setuptools import setup, find_packages, Extension\n'), ((1173, 1190), 'os.environ.get', 'os.environ.get', (['k'], {}), '(k)\n', (1187, 1190), False, 'import os\n'), ((6233, 6252), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (6250, 6252), False, 'import numpy\n'), ((7371, 7402), 'pybind11.get_include', 'pybind11.get_include', (['self.user'], {}), '(self.user)\n', (7391, 7402), False, 'import pybind11\n'), ((2070, 2122), 'imp.load_source', 'imp.load_source', (['"""pyamg.version"""', '"""pyamg/version.py"""'], {}), "('pyamg.version', 'pyamg/version.py')\n", (2085, 2122), False, 'import imp\n'), ((7319, 7349), 'os.path.join', 'os.path.join', (['dd', '"""pybind11.h"""'], {}), "(dd, 'pybind11.h')\n", (7331, 7349), False, 'import os\n'), ((7528, 7560), 'os.path.join', 'os.path.join', (['tmpdir', '"""pybind11"""'], {}), "(tmpdir, 'pybind11')\n", (7540, 7560), False, 'import os\n'), ((7621, 7653), 'os.path.join', 'os.path.join', (['tmpdir', '"""pybind11"""'], {}), "(tmpdir, 'pybind11')\n", (7633, 7653), False, 'import os\n'), ((1382, 1436), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'env': 'env'}), '(cmd, stdout=subprocess.PIPE, env=env)\n', (1398, 1436), False, 'import subprocess\n')]
|
import os
import argparse
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.utils.data as data
from PIL import Image
import cv2
import numpy as np
from glob import glob
import random
import sys
sys.path.append(".")
from utils import default_loader_img, default_loader_wf
def make_dataset(args, dir, training):
    # list image files ending with .png
if training:
img_paths = sorted(glob(os.path.join(dir, '{}/*.png'.format("images/train"))))
wf_paths = [p.replace('images/train', 'wireframes/train') for p in img_paths]
# print("The length of the training set is: {}".format(len(img_paths)))
else:
img_paths = sorted(glob(os.path.join(dir, '{}/*.png'.format("images/test"))))
wf_paths = [p.replace('images/test', 'wireframes/test') for p in img_paths]
# print("The length of the test set is: {}".format(len(img_paths)))
# return img-wf pairs
return img_paths, wf_paths
def custom_transform(img, wf, size):
if random.random() < 0.5:
# random crop for both img/wf
new_size = int(size*1.2)
# different interpolations can be used here, haven't thoroughly tested
img = transforms.Resize((new_size, new_size), Image.LANCZOS)(img)
wf = transforms.Resize((new_size, new_size), Image.LANCZOS)(wf)
i = random.randint(0, new_size - size)
j = random.randint(0, new_size - size)
img = img.crop((i, j, i + size, j + size))
wf = wf.crop((i, j, i + size, j + size))
else:
w, h = img.size
if h != w or h != size:
img = transforms.Resize((size, size), Image.LANCZOS)(img)
wf = transforms.Resize((size, size), Image.LANCZOS)(wf)
# optional color jitter augmentation
# img = transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0)(img)
# use the same seed to control random horizontal flip for both img and wf
seed = np.random.randint(123321)
random.seed(seed)
img = transforms.RandomHorizontalFlip(p=0.5)(img)
random.seed(seed)
wf = transforms.RandomHorizontalFlip(p=0.5)(wf)
color_histogram = img.histogram() # used in color guided rendering
color_histogram = torch.tensor(color_histogram, dtype=torch.float)/float(size*size)
img = transforms.ToTensor()(img)
wf = transforms.ToTensor()(wf)
# conventional normalization for gan models
img = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])(img)
wf = transforms.Normalize(mean=[0.5], std=[0.5])(wf)
return img, wf, color_histogram
def custom_transform_eval(img, wf, size):
w, h = img.size
if h != w or h != size:
img = transforms.Resize((size, size), Image.LANCZOS)(img)
wf = transforms.Resize((size, size), Image.LANCZOS)(wf)
color_histogram = img.histogram()
color_histogram = torch.tensor(color_histogram, dtype=torch.float)/float(size*size)
img = transforms.ToTensor()(img)
wf = transforms.ToTensor()(wf)
img = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])(img)
wf = transforms.Normalize(mean=[0.5], std=[0.5])(wf)
return img, wf, color_histogram
def get_loader(args, batch_size, shuffle=True, num_workers=16, training=True):
"""Returns torch.utils.data.DataLoader for wireframe dataset."""
dataset = WireframeDataset(args, training=training)
num_imgs = len(dataset)
if training:
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
drop_last=True)
else:
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
drop_last=False)
return data_loader, num_imgs
class WireframeDataset(data.Dataset):
"""Wireframe Dataset compatible with torch.utils.data.DataLoader."""
def __init__(self, args, training):
self.args = args
self.out_size = args.img_size
self.training = training
self.root = args.root_path
if not os.path.exists(self.root):
raise Exception("[!] {} not exists.".format(self.root))
# return paths and labels
samples_image, samples_wf = make_dataset(self.args, self.root, training=self.training)
self.images = samples_image
self.wireframes = samples_wf
def __getitem__(self, index):
"""Returns (augumented) wireframe data."""
# retrieve the img-line pairs
img_path = self.images[index]
wf_path = self.wireframes[index]
img = default_loader_img(img_path)
wf = default_loader_wf(wf_path)
if self.training:
img, wf, color_histogram = custom_transform(img, wf, size=self.out_size)
else:
img, wf, color_histogram = custom_transform_eval(img, wf, size=self.out_size)
return img, wf, color_histogram
def __repr__(self):
fmt_str = 'Dataset: Wireframes' + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
return fmt_str
def __len__(self):
return len(self.images)
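# Hedged usage sketch (not part of the original file): `args` is normally built
# by argparse; the Namespace and root_path below are placeholders and assume a
# directory containing images/{train,test} and wireframes/{train,test}.
if __name__ == '__main__':
    _args = argparse.Namespace(img_size=256, root_path='./data/wireframes')
    _loader, _num_imgs = get_loader(_args, batch_size=4, training=True)
    print('loaded %d training images' % _num_imgs)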
|
[
"sys.path.append",
"random.randint",
"torchvision.transforms.RandomHorizontalFlip",
"torch.utils.data.DataLoader",
"utils.default_loader_img",
"utils.default_loader_wf",
"os.path.exists",
"random.random",
"numpy.random.randint",
"random.seed",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((258, 278), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (273, 278), False, 'import sys\n'), ((2018, 2043), 'numpy.random.randint', 'np.random.randint', (['(123321)'], {}), '(123321)\n', (2035, 2043), True, 'import numpy as np\n'), ((2049, 2066), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2060, 2066), False, 'import random\n'), ((2127, 2144), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2138, 2144), False, 'import random\n'), ((1056, 1071), 'random.random', 'random.random', ([], {}), '()\n', (1069, 1071), False, 'import random\n'), ((1393, 1427), 'random.randint', 'random.randint', (['(0)', '(new_size - size)'], {}), '(0, new_size - size)\n', (1407, 1427), False, 'import random\n'), ((1441, 1475), 'random.randint', 'random.randint', (['(0)', '(new_size - size)'], {}), '(0, new_size - size)\n', (1455, 1475), False, 'import random\n'), ((2078, 2116), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2109, 2116), True, 'import torchvision.transforms as transforms\n'), ((2155, 2193), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2186, 2193), True, 'import torchvision.transforms as transforms\n'), ((2295, 2343), 'torch.tensor', 'torch.tensor', (['color_histogram'], {'dtype': 'torch.float'}), '(color_histogram, dtype=torch.float)\n', (2307, 2343), False, 'import torch\n'), ((2372, 2393), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2391, 2393), True, 'import torchvision.transforms as transforms\n'), ((2409, 2430), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2428, 2430), True, 'import torchvision.transforms as transforms\n'), ((2497, 2560), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (2517, 2560), True, 'import torchvision.transforms as transforms\n'), ((2576, 2619), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5]', 'std': '[0.5]'}), '(mean=[0.5], std=[0.5])\n', (2596, 2619), True, 'import torchvision.transforms as transforms\n'), ((2954, 3002), 'torch.tensor', 'torch.tensor', (['color_histogram'], {'dtype': 'torch.float'}), '(color_histogram, dtype=torch.float)\n', (2966, 3002), False, 'import torch\n'), ((3033, 3054), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3052, 3054), True, 'import torchvision.transforms as transforms\n'), ((3070, 3091), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3089, 3091), True, 'import torchvision.transforms as transforms\n'), ((3109, 3172), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (3129, 3172), True, 'import torchvision.transforms as transforms\n'), ((3188, 3231), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5]', 'std': '[0.5]'}), '(mean=[0.5], std=[0.5])\n', (3208, 3231), True, 'import torchvision.transforms as transforms\n'), ((3560, 3687), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers', 'drop_last': '(True)'}), '(dataset=dataset, batch_size=batch_size, shuffle\n =True, num_workers=num_workers, 
drop_last=True)\n', (3587, 3687), False, 'import torch\n'), ((3921, 4050), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'drop_last': '(False)'}), '(dataset=dataset, batch_size=batch_size, shuffle\n =False, num_workers=num_workers, drop_last=False)\n', (3948, 4050), False, 'import torch\n'), ((5125, 5153), 'utils.default_loader_img', 'default_loader_img', (['img_path'], {}), '(img_path)\n', (5143, 5153), False, 'from utils import default_loader_img, default_loader_wf\n'), ((5168, 5194), 'utils.default_loader_wf', 'default_loader_wf', (['wf_path'], {}), '(wf_path)\n', (5185, 5194), False, 'from utils import default_loader_img, default_loader_wf\n'), ((1247, 1301), 'torchvision.transforms.Resize', 'transforms.Resize', (['(new_size, new_size)', 'Image.LANCZOS'], {}), '((new_size, new_size), Image.LANCZOS)\n', (1264, 1301), True, 'import torchvision.transforms as transforms\n'), ((1321, 1375), 'torchvision.transforms.Resize', 'transforms.Resize', (['(new_size, new_size)', 'Image.LANCZOS'], {}), '((new_size, new_size), Image.LANCZOS)\n', (1338, 1375), True, 'import torchvision.transforms as transforms\n'), ((2773, 2819), 'torchvision.transforms.Resize', 'transforms.Resize', (['(size, size)', 'Image.LANCZOS'], {}), '((size, size), Image.LANCZOS)\n', (2790, 2819), True, 'import torchvision.transforms as transforms\n'), ((2839, 2885), 'torchvision.transforms.Resize', 'transforms.Resize', (['(size, size)', 'Image.LANCZOS'], {}), '((size, size), Image.LANCZOS)\n', (2856, 2885), True, 'import torchvision.transforms as transforms\n'), ((4595, 4620), 'os.path.exists', 'os.path.exists', (['self.root'], {}), '(self.root)\n', (4609, 4620), False, 'import os\n'), ((1666, 1712), 'torchvision.transforms.Resize', 'transforms.Resize', (['(size, size)', 'Image.LANCZOS'], {}), '((size, size), Image.LANCZOS)\n', (1683, 1712), True, 'import torchvision.transforms as transforms\n'), ((1736, 1782), 'torchvision.transforms.Resize', 'transforms.Resize', (['(size, size)', 'Image.LANCZOS'], {}), '((size, size), Image.LANCZOS)\n', (1753, 1782), True, 'import torchvision.transforms as transforms\n')]
|
import numpy as np
import matplotlib.pyplot as plt
Ts = np.arange(1.0,4.1,0.1)
entropies20 = np.loadtxt('entropies20.txt')
free20 = np.loadtxt('freeenergies20.txt')
energies20 = np.loadtxt('energies20.txt')
varenergies20 = np.loadtxt('varenergies20.txt')
n = len(Ts)
entropy = np.zeros(n)
free = np.zeros(n)
energy = np.zeros(n)
varenergy = np.zeros(n)
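# Assumed log format (inferred from the parsing below, not stated in the
# original file): each line looks like
# "<label> | <v0> <v1> <energy> <var-energy> <free-energy> <entropy> ...",
# i.e. one temperature per line with whitespace-separated floats after "|".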
with open('scan50.log') as f:
for i, ln in enumerate(f):
_, data = ln.strip().split('|')
data = [float(s) for s in data.split()]
free[i] = data[4]
entropy[i] = data[5]
energy[i] = data[2]
varenergy[i] = data[3]
print(data)
entropyaug = np.zeros(n)
freeaug = np.zeros(n)
with open('scan50augment.log') as f:
for i, ln in enumerate(f):
_, data = ln.strip().split('|')
data = [float(s) for s in data.split()]
freeaug[i] = data[4]
entropyaug[i] = data[5]
plt.figure()
plt.plot(Ts, entropies20, 'k', label='exact L=20 2D Ising')
plt.plot(Ts, entropyaug, 'mo', label='xgboost entropy augmented')
plt.plot(Ts, entropy, 'ro', label='xgboost entropy')
plt.xlabel('T')
plt.ylabel('S')
plt.legend()
plt.figure()
plt.plot(Ts, energies20, 'k', label='exact L=20 2D Ising')
plt.plot(Ts, energy, 'ro', label='MC energy')
plt.xlabel('T')
plt.ylabel('E')
plt.legend()
plt.figure()
plt.plot(Ts, varenergies20, 'k', label='exact L=20 2D Ising')
plt.plot(Ts, varenergy, 'ro', label='MC energy variance')
plt.xlabel('T')
plt.ylabel('var E')
plt.legend()
plt.figure()
plt.plot(Ts, free20, 'k', label='exact L=20 2D Ising')
plt.plot(Ts, freeaug, 'mo', label='xgboost entropy augmented + MC energy')
plt.plot(Ts, free, 'ro', label='xgboost entropy + MC energy')
plt.xlabel('T')
plt.ylabel('F')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((57, 81), 'numpy.arange', 'np.arange', (['(1.0)', '(4.1)', '(0.1)'], {}), '(1.0, 4.1, 0.1)\n', (66, 81), True, 'import numpy as np\n'), ((96, 125), 'numpy.loadtxt', 'np.loadtxt', (['"""entropies20.txt"""'], {}), "('entropies20.txt')\n", (106, 125), True, 'import numpy as np\n'), ((135, 167), 'numpy.loadtxt', 'np.loadtxt', (['"""freeenergies20.txt"""'], {}), "('freeenergies20.txt')\n", (145, 167), True, 'import numpy as np\n'), ((181, 209), 'numpy.loadtxt', 'np.loadtxt', (['"""energies20.txt"""'], {}), "('energies20.txt')\n", (191, 209), True, 'import numpy as np\n'), ((226, 257), 'numpy.loadtxt', 'np.loadtxt', (['"""varenergies20.txt"""'], {}), "('varenergies20.txt')\n", (236, 257), True, 'import numpy as np\n'), ((282, 293), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (290, 293), True, 'import numpy as np\n'), ((301, 312), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (309, 312), True, 'import numpy as np\n'), ((322, 333), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (330, 333), True, 'import numpy as np\n'), ((346, 357), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (354, 357), True, 'import numpy as np\n'), ((656, 667), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (664, 667), True, 'import numpy as np\n'), ((678, 689), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (686, 689), True, 'import numpy as np\n'), ((910, 922), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (920, 922), True, 'import matplotlib.pyplot as plt\n'), ((923, 982), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'entropies20', '"""k"""'], {'label': '"""exact L=20 2D Ising"""'}), "(Ts, entropies20, 'k', label='exact L=20 2D Ising')\n", (931, 982), True, 'import matplotlib.pyplot as plt\n'), ((983, 1048), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'entropyaug', '"""mo"""'], {'label': '"""xgboost entropy augmented"""'}), "(Ts, entropyaug, 'mo', label='xgboost entropy augmented')\n", (991, 1048), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1101), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'entropy', '"""ro"""'], {'label': '"""xgboost entropy"""'}), "(Ts, entropy, 'ro', label='xgboost entropy')\n", (1057, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1102, 1117), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (1112, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1118, 1133), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""S"""'], {}), "('S')\n", (1128, 1133), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1146), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1144, 1146), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1160), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1158, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1219), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'energies20', '"""k"""'], {'label': '"""exact L=20 2D Ising"""'}), "(Ts, energies20, 'k', label='exact L=20 2D Ising')\n", (1169, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1220, 1265), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'energy', '"""ro"""'], {'label': '"""MC energy"""'}), "(Ts, energy, 'ro', label='MC energy')\n", (1228, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (1276, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""E"""'], {}), "('E')\n", (1292, 1297), True, 'import matplotlib.pyplot as plt\n'), ((1298, 1310), 'matplotlib.pyplot.legend', 
'plt.legend', ([], {}), '()\n', (1308, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1322, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1325, 1386), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'varenergies20', '"""k"""'], {'label': '"""exact L=20 2D Ising"""'}), "(Ts, varenergies20, 'k', label='exact L=20 2D Ising')\n", (1333, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1444), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'varenergy', '"""ro"""'], {'label': '"""MC energy variance"""'}), "(Ts, varenergy, 'ro', label='MC energy variance')\n", (1395, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1460), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (1455, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1480), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""var E"""'], {}), "('var E')\n", (1471, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1493), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1491, 1493), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1507), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1505, 1507), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1562), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'free20', '"""k"""'], {'label': '"""exact L=20 2D Ising"""'}), "(Ts, free20, 'k', label='exact L=20 2D Ising')\n", (1516, 1562), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1637), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'freeaug', '"""mo"""'], {'label': '"""xgboost entropy augmented + MC energy"""'}), "(Ts, freeaug, 'mo', label='xgboost entropy augmented + MC energy')\n", (1571, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1699), 'matplotlib.pyplot.plot', 'plt.plot', (['Ts', 'free', '"""ro"""'], {'label': '"""xgboost entropy + MC energy"""'}), "(Ts, free, 'ro', label='xgboost entropy + MC energy')\n", (1646, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1700, 1715), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (1710, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1731), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F"""'], {}), "('F')\n", (1726, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1744), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1755, 1757), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import json
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
def compute_iou(box_1, box_2):
'''
    This function takes a pair of bounding boxes and returns their
    intersection-over-union (IoU).
'''
# Get coordinates of intersecting box
tli_row = max(box_1[0], box_2[0])
tli_col = max(box_1[1], box_2[1])
bri_row = min(box_1[2], box_2[2])
bri_col = min(box_1[3], box_2[3])
    # Calculate area of intersecting box
heighti = max(bri_row - tli_row, 0)
widthi = max(bri_col - tli_col, 0)
intersection_area = heighti * widthi
if intersection_area == 0:
return 0
# Get area of union
box_1_height = box_1[2] - box_1[0]
box_1_width = box_1[3] - box_1[1]
box_2_height = box_2[2] - box_2[0]
    box_2_width = box_2[3] - box_2[1]
box_1_area = box_1_height * box_1_width
box_2_area = box_2_height * box_2_width
iou = float(intersection_area) / (box_1_area + box_2_area - intersection_area)
assert (iou >= 0) and (iou <= 1.0)
return iou
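# Hedged sanity check (not part of the original file): boxes are given as
# (top row, left col, bottom row, right col). Box (0, 0, 2, 2) and box
# (1, 1, 3, 3) overlap in a 1x1 square, so IoU = 1 / (4 + 4 - 1) = 1/7.
if __name__ == '__main__':
    assert abs(compute_iou([0, 0, 2, 2], [1, 1, 3, 3]) - 1.0 / 7.0) < 1e-9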
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
'''
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<gts> is a dictionary containing ground truth bounding boxes for a
collection of images.
'''
TP = 0
FP = 0
FN = 0
'''
BEGIN YOUR CODE
'''
for pred_file, pred in preds.items():
gt = gts[pred_file]
pred = [x for x in pred if float(x[4]) >= conf_thr]
for i in range(len(gt)):
if len(pred) == 0:
FN += (len(gt) - i)
break
ious = np.zeros(len(pred))
for j in range(len(pred)):
iou = compute_iou(pred[j][:4], gt[i])
ious[j] = iou
# get position of max
max_iou = max(ious)
max_iou_ix = ious.tolist().index(max_iou)
if max_iou > iou_thr:
# match the gt to this pred as a TP
TP += 1
# remove this pred from preds so we don't double count it
pred.pop(max_iou_ix)
else:
# if not, this gt is a FN
FN += 1
# at the end, number of extra preds is FP
FP += len(pred)
'''
END YOUR CODE
'''
return TP, FP, FN
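# Hedged sanity check (not part of the original file): one image whose single
# prediction (confidence 0.9) matches its only ground-truth box should yield
# exactly one true positive and no false positives or false negatives.
if __name__ == '__main__':
    _preds = {'img0.jpg': [[0, 0, 10, 10, 0.9]]}
    _gts = {'img0.jpg': [[0, 0, 10, 10]]}
    assert compute_counts(_preds, _gts, iou_thr=0.5, conf_thr=0.5) == (1, 0, 0)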
# set a path for predictions and annotations:
preds_path = '../data/hw02_preds'
gts_path = '../data/hw02_annotations'
# load splits:
split_path = '../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
# Set this parameter to True when you're done with algorithm development:
done_tweaking = True
'''
Load training data.
'''
with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
preds_train = json.load(f)
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
gts_train = json.load(f)
if done_tweaking:
'''
Load test data.
'''
with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
preds_test = json.load(f)
with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
gts_test = json.load(f)
# For each fixed IoU threshold, vary the confidence threshold.
# The code below sweeps the training set over three IoU thresholds.
iou_threshs = [0.25, 0.5, 0.75]
confidence_thrs = []
for fname in preds_train:
for box in preds_train[fname]:
confidence_thrs.append(float(box[4]))
confidence_thrs = np.random.choice(confidence_thrs, 100)
confidence_thrs = np.sort(confidence_thrs)
for iou_thresh in tqdm(iou_threshs):
tp_train = np.zeros(len(confidence_thrs))
fp_train = np.zeros(len(confidence_thrs))
fn_train = np.zeros(len(confidence_thrs))
for i, conf_thr in tqdm(enumerate(confidence_thrs), leave=False):
tp_train[i], fp_train[i], fn_train[i] = compute_counts(preds_train, gts_train, iou_thr=iou_thresh, conf_thr=conf_thr)
# Plot training set PR curves
P = tp_train / (tp_train + fp_train)
R = tp_train / (tp_train + fn_train)
plt.plot(R,P, '-o', markersize=2)
plt.legend(["IOU Thresh 0.25", "IOU Thresh 0.5", "IOU Thresh 0.75"])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.savefig('train_PR_curve.png')
if done_tweaking:
print('Code for plotting test set PR curves.')
plt.figure()
    # For each fixed IoU threshold, vary the confidence threshold.
    # The code below sweeps the test set over three IoU thresholds.
iou_threshs = [0.25, 0.5, 0.75]
confidence_thrs = []
for fname in preds_test:
for box in preds_test[fname]:
confidence_thrs.append(float(box[4]))
confidence_thrs = np.random.choice(confidence_thrs, 100)
confidence_thrs = np.sort(confidence_thrs)
for iou_thresh in tqdm(iou_threshs):
tp_test = np.zeros(len(confidence_thrs))
fp_test = np.zeros(len(confidence_thrs))
fn_test = np.zeros(len(confidence_thrs))
for i, conf_thr in tqdm(enumerate(confidence_thrs), leave=False):
tp_test[i], fp_test[i], fn_test[i] = compute_counts(preds_test, gts_test, iou_thr=iou_thresh, conf_thr=conf_thr)
        # Plot test set PR curves
P = tp_test / (tp_test + fp_test)
R = tp_test / (tp_test + fn_test)
plt.plot(R,P, '-o', markersize=2)
plt.legend(["IOU Thresh 0.25", "IOU Thresh 0.5", "IOU Thresh 0.75"])
plt.savefig('test_PR_curve.png')
|
[
"tqdm.tqdm",
"json.load",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.sort",
"matplotlib.pyplot.figure",
"numpy.random.choice",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.savefig"
] |
[((3893, 3931), 'numpy.random.choice', 'np.random.choice', (['confidence_thrs', '(100)'], {}), '(confidence_thrs, 100)\n', (3909, 3931), True, 'import numpy as np\n'), ((3950, 3974), 'numpy.sort', 'np.sort', (['confidence_thrs'], {}), '(confidence_thrs)\n', (3957, 3974), True, 'import numpy as np\n'), ((3994, 4011), 'tqdm.tqdm', 'tqdm', (['iou_threshs'], {}), '(iou_threshs)\n', (3998, 4011), False, 'from tqdm import tqdm\n'), ((4504, 4572), 'matplotlib.pyplot.legend', 'plt.legend', (["['IOU Thresh 0.25', 'IOU Thresh 0.5', 'IOU Thresh 0.75']"], {}), "(['IOU Thresh 0.25', 'IOU Thresh 0.5', 'IOU Thresh 0.75'])\n", (4514, 4572), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4593), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (4583, 4593), True, 'import matplotlib.pyplot as plt\n'), ((4594, 4617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4604, 4617), True, 'import matplotlib.pyplot as plt\n'), ((4618, 4651), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""train_PR_curve.png"""'], {}), "('train_PR_curve.png')\n", (4629, 4651), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2914), 'os.path.join', 'os.path.join', (['split_path', '"""file_names_train.npy"""'], {}), "(split_path, 'file_names_train.npy')\n", (2878, 2914), False, 'import os\n'), ((2941, 2988), 'os.path.join', 'os.path.join', (['split_path', '"""file_names_test.npy"""'], {}), "(split_path, 'file_names_test.npy')\n", (2953, 2988), False, 'import os\n'), ((3197, 3209), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3206, 3209), False, 'import json\n'), ((3297, 3309), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3306, 3309), False, 'import json\n'), ((4469, 4503), 'matplotlib.pyplot.plot', 'plt.plot', (['R', 'P', '"""-o"""'], {'markersize': '(2)'}), "(R, P, '-o', markersize=2)\n", (4477, 4503), True, 'import matplotlib.pyplot as plt\n'), ((4726, 4738), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4736, 4738), True, 'import matplotlib.pyplot as plt\n'), ((5086, 5124), 'numpy.random.choice', 'np.random.choice', (['confidence_thrs', '(100)'], {}), '(confidence_thrs, 100)\n', (5102, 5124), True, 'import numpy as np\n'), ((5147, 5171), 'numpy.sort', 'np.sort', (['confidence_thrs'], {}), '(confidence_thrs)\n', (5154, 5171), True, 'import numpy as np\n'), ((5195, 5212), 'tqdm.tqdm', 'tqdm', (['iou_threshs'], {}), '(iou_threshs)\n', (5199, 5212), False, 'from tqdm import tqdm\n'), ((5731, 5799), 'matplotlib.pyplot.legend', 'plt.legend', (["['IOU Thresh 0.25', 'IOU Thresh 0.5', 'IOU Thresh 0.75']"], {}), "(['IOU Thresh 0.25', 'IOU Thresh 0.5', 'IOU Thresh 0.75'])\n", (5741, 5799), True, 'import matplotlib.pyplot as plt\n'), ((5804, 5836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test_PR_curve.png"""'], {}), "('test_PR_curve.png')\n", (5815, 5836), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3168), 'os.path.join', 'os.path.join', (['preds_path', '"""preds_train.json"""'], {}), "(preds_path, 'preds_train.json')\n", (3136, 3168), False, 'import os\n'), ((3221, 3269), 'os.path.join', 'os.path.join', (['gts_path', '"""annotations_train.json"""'], {}), "(gts_path, 'annotations_train.json')\n", (3233, 3269), False, 'import os\n'), ((3456, 3468), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3465, 3468), False, 'import json\n'), ((3562, 3574), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3571, 3574), False, 'import json\n'), ((5692, 5726), 'matplotlib.pyplot.plot', 'plt.plot', (['R', 'P', '"""-o"""'], 
{'markersize': '(2)'}), "(R, P, '-o', markersize=2)\n", (5700, 5726), True, 'import matplotlib.pyplot as plt\n'), ((3381, 3424), 'os.path.join', 'os.path.join', (['preds_path', '"""preds_test.json"""'], {}), "(preds_path, 'preds_test.json')\n", (3393, 3424), False, 'import os\n'), ((3484, 3531), 'os.path.join', 'os.path.join', (['gts_path', '"""annotations_test.json"""'], {}), "(gts_path, 'annotations_test.json')\n", (3496, 3531), False, 'import os\n')]
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
# Adopted from https://github.com/pytorch/pytorch/blob/master/torch/nn/intrinsic/qat/modules/conv_fused.py
class _ConvBnNd(nn.modules.conv._ConvNd):
_version = 2
def __init__(
self,
# ConvNd args
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels,
kernel_size, stride, padding, dilation,
transposed, output_padding, groups, False,
padding_mode)
assert qconfig, 'qconfig must be provided for QAT module'
self.frozen = freeze_bn if self.training else True
self.bn = nn.BatchNorm2d(out_channels, eps, momentum, True, True)
self.weight_quantizer = qconfig.weight
self.bias_quantizer = qconfig.bias
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
# as they modify the same state
if self.training:
if freeze_bn:
self.freeze_bn()
else:
self.update_bn()
else:
self.freeze_bn()
def reset_running_stats(self):
self.bn.reset_running_stats()
def reset_bn_parameters(self):
self.bn.reset_running_stats()
init.uniform_(self.bn.weight)
init.zeros_(self.bn.bias)
    # note: below is actually for conv, not BN
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def batch_stats(self, x, bias=None):
"""Get the batch mean and variance of x and updates the BatchNorm's running mean and average.
Args:
x (torch.Tensor): input batch.
bias (torch.Tensor): the bias that is to be applied to the batch.
Returns:
(mean, variance)
Note:
In case of `nn.Linear`, x may be of shape (N, C, L) or (N, L)
where N is batch size, C is number of channels, L is the features size.
The batch norm computes the stats over C in the first case or L on the second case.
The batch normalization layer is
(`nn.BatchNorm1d`)[https://pytorch.org/docs/stable/nn.html#batchnorm1d]
In case of `nn.Conv2d`, x is of shape (N, C, H, W)
where H,W are the image dimensions, and the batch norm computes the stats over C.
The batch normalization layer is
(`nn.BatchNorm2d`)[https://pytorch.org/docs/stable/nn.html#batchnorm2d]
"""
channel_size = self.bn.num_features
self.bn.num_batches_tracked += 1
# Calculate current batch stats
batch_mean = x.transpose(0, 1).contiguous().view(channel_size, -1).mean(1)
# BatchNorm currently uses biased variance (without Bessel's correction) as was discussed at
# https://github.com/pytorch/pytorch/issues/1410
#
# also see the source code itself:
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L216
batch_var = x.transpose(0, 1).contiguous().view(channel_size, -1).var(
1, unbiased=False)
# Update running stats
with torch.no_grad():
biased_batch_mean = batch_mean + (bias if bias is not None else 0)
# However - running_var is updated using unbiased variance!
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L223
n = x.numel() / channel_size
corrected_var = batch_var * (n / float(n - 1))
momentum = self.bn.momentum
if momentum is None:
# momentum is None - we compute a cumulative moving average
# as noted in https://pytorch.org/docs/stable/nn.html#batchnorm2d
momentum = 1. / float(self.bn.num_batches_tracked)
self.bn.running_mean.mul_(1 - momentum).add_(momentum * biased_batch_mean)
self.bn.running_var.mul_(1 - momentum).add_(momentum * corrected_var)
return batch_mean, batch_var
def reset_parameters(self):
super(_ConvBnNd, self).reset_parameters()
def update_bn(self):
self.frozen = False
self.bn.training = True
return self
def freeze_bn(self):
if self.frozen:
return
with torch.no_grad():
# The same implementation as nndct_shared/optimzation/fuse_conv_bn.py
      # is used so that the test accuracy is the same as the deployable model.
gamma = self.bn.weight.detach().cpu().numpy()
beta = self.bn.bias.detach().cpu().numpy()
running_var = self.bn.running_var.detach().cpu().numpy()
running_mean = self.bn.running_mean.detach().cpu().numpy()
epsilon = self.bn.eps
scale = gamma / np.sqrt(running_var + epsilon)
offset = beta - running_mean * scale
weight = self.weight.detach().cpu().numpy()
weight = np.multiply(
weight.transpose(1, 2, 3, 0), scale).transpose(3, 0, 1, 2)
self.weight.copy_(torch.from_numpy(weight))
      bias = self.bias.detach().cpu().numpy() if self.bias is not None else 0
bias = torch.from_numpy(bias * scale + offset)
if self.bias is not None:
self.bias.copy_(bias)
else:
self.bias = nn.Parameter(bias)
self.frozen = True
self.bn.training = False
return
def broadcast_correction(self, c: torch.Tensor):
"""Broadcasts a correction factor to the output for elementwise operations."""
expected_output_dim = 4
view_fillers_dim = expected_output_dim - c.dim() - 1
view_filler = (1,) * view_fillers_dim
expected_view_shape = c.shape + view_filler
return c.view(*expected_view_shape)
def broadcast_correction_weight(self, c):
"""Broadcasts a correction factor to the weight."""
if c.dim() != 1:
raise ValueError("Correction factor needs to have a single dimension")
expected_weight_dim = 4
view_fillers_dim = expected_weight_dim - c.dim()
view_filler = (1,) * view_fillers_dim
expected_view_shape = c.shape + view_filler
return c.view(*expected_view_shape)
def extra_repr(self):
return super(_ConvBnNd, self).extra_repr()
def forward(self, x):
gamma, beta = self.bn.weight, self.bn.bias
if self.frozen:
quantized_weight = self.weight_quantizer(self.weight)
quantized_bias = self.bias_quantizer(self.bias)
return self._conv_forward(x, quantized_weight, quantized_bias)
if self.training:
batch_mean, batch_var = self.batch_stats(
self._conv_forward(x, self.weight), self.bias)
recip_sigma_batch = torch.rsqrt(batch_var + self.bn.eps)
with torch.no_grad():
sigma_running = torch.sqrt(self.bn.running_var + self.bn.eps)
w_corrected = self.weight * self.broadcast_correction_weight(
gamma / sigma_running)
w_quantized = self.weight_quantizer(w_corrected)
recip_c = self.broadcast_correction(sigma_running * recip_sigma_batch)
bias_corrected = beta - gamma * batch_mean * recip_sigma_batch
bias_quantized = self.broadcast_correction(
self.bias_quantizer(bias_corrected))
y = self._conv_forward(x, w_quantized, None)
y.mul_(recip_c).add_(bias_quantized)
else:
with torch.no_grad():
recip_sigma_running = torch.rsqrt(self.bn.running_var + self.bn.eps)
w_corrected = self.weight * self.broadcast_correction_weight(
gamma * recip_sigma_running)
w_quantized = self.weight_quantizer(w_corrected)
corrected_mean = self.bn.running_mean - (
self.bias if self.bias is not None else 0)
bias_corrected = beta - gamma * corrected_mean * recip_sigma_running
bias_quantized = self.bias_quantizer(bias_corrected)
y = self._conv_forward(x, w_quantized, bias_quantized)
#print('w_quantized:', w_quantized.sum())
#print('bias_quantized:', bias_quantized.sum())
#print('conv2d output:', y.sum())
return y
def train(self, mode=True):
"""Batchnorm's training behavior is using the self.training flag. Prevent
changing it if BN is frozen. This makes sure that calling `model.train()`
on a model with a frozen BN will behave properly.
"""
self.training = mode
if not self.frozen:
for module in self.children():
module.train(mode)
return self
# ===== Serialization version history =====
#
# Version 1/None
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- gamma : Tensor
# |--- beta : Tensor
# |--- running_mean : Tensor
# |--- running_var : Tensor
# |--- num_batches_tracked : Tensor
#
# Version 2
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- bn : Module
# |--- weight : Tensor (moved from v1.self.gamma)
# |--- bias : Tensor (moved from v1.self.beta)
# |--- running_mean : Tensor (moved from v1.self.running_mean)
# |--- running_var : Tensor (moved from v1.self.running_var)
# |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
if version is None or version == 1:
# BN related parameters and buffers were moved into the BN module for v2
v2_to_v1_names = {
'bn.weight': 'gamma',
'bn.bias': 'beta',
'bn.running_mean': 'running_mean',
'bn.running_var': 'running_var',
'bn.num_batches_tracked': 'num_batches_tracked',
}
for v2_name, v1_name in v2_to_v1_names.items():
if prefix + v1_name in state_dict:
state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
state_dict.pop(prefix + v1_name)
elif strict:
missing_keys.append(prefix + v2_name)
super(_ConvBnNd,
self)._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
@classmethod
def from_float(cls, conv, bn, qconfig):
"""Create a qat module from a float module."""
assert qconfig, 'Input float module must have a valid qconfig'
convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,
conv.stride, conv.padding, conv.dilation, conv.groups,
conv.bias is not None, conv.padding_mode, bn.eps, bn.momentum,
False, qconfig)
convbn.weight = conv.weight
convbn.bias = conv.bias
convbn.bn.weight = bn.weight
convbn.bn.bias = bn.bias
convbn.bn.running_mean = bn.running_mean
convbn.bn.running_var = bn.running_var
convbn.bn.num_batches_tracked = bn.num_batches_tracked
convbn.bn.eps = bn.eps
return convbn
class ConvBatchNorm2d(_ConvBnNd, nn.Conv2d):
"""A ConvBatchNorm2d module is a module fused from Conv2d and BatchNorm2d,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d`.
Implementation details: https://arxiv.org/pdf/1806.08342.pdf section 3.2.2
Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
to default.
Attributes:
    freeze_bn: whether BN statistics are frozen (see freeze_bn())
activation_quant_fn: fake quant module for output activation
weight_fake_quant: fake quant module for weight
"""
def __init__(
self,
# ConvNd args
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
_ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, False, _pair(0), groups, bias,
padding_mode, eps, momentum, freeze_bn, qconfig)
def _conv_forward(self, input, w, b=None):
return F.conv2d(input, w, b, self.stride, self.padding, self.dilation,
self.groups)
# TODO(yuwang): Move to top api for user.
def update_bn(mod):
  if type(mod) in (ConvBatchNorm2d,):
    mod.update_bn()
def freeze_bn(mod):
  if type(mod) in (ConvBatchNorm2d,):
    mod.freeze_bn()
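# Hedged usage sketch (not part of the original file): fuse a float Conv2d /
# BatchNorm2d pair for quantization-aware training. The qconfig below is an
# assumption (any qconfig accepted by _ConvBnNd would do), and freeze_bn() is
# the module method the helper above dispatches to.
if __name__ == '__main__':
  float_conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)
  float_bn = nn.BatchNorm2d(16)
  qat_qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')  # assumed
  qat_conv = ConvBatchNorm2d.from_float(float_conv, float_bn, qat_qconfig)
  out = qat_conv(torch.randn(2, 3, 32, 32))
  qat_conv.apply(freeze_bn)  # freeze BN statistics once training has converged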
|
[
"torch.nn.Parameter",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.from_numpy",
"math.sqrt",
"torch.sqrt",
"torch.nn.init.uniform_",
"torch.nn.functional.conv2d",
"torch.nn.init.zeros_",
"torch.nn.BatchNorm2d",
"torch.Tensor",
"torch.rsqrt",
"torch.no_grad",
"numpy.sqrt",
"torch.nn.modules.utils._pair",
"torch.nn.modules.conv._ConvNd.__init__"
] |
[((2010, 2180), 'torch.nn.modules.conv._ConvNd.__init__', 'nn.modules.conv._ConvNd.__init__', (['self', 'in_channels', 'out_channels', 'kernel_size', 'stride', 'padding', 'dilation', 'transposed', 'output_padding', 'groups', '(False)', 'padding_mode'], {}), '(self, in_channels, out_channels,\n kernel_size, stride, padding, dilation, transposed, output_padding,\n groups, False, padding_mode)\n', (2042, 2180), True, 'import torch.nn as nn\n'), ((2415, 2470), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels', 'eps', 'momentum', '(True)', '(True)'], {}), '(out_channels, eps, momentum, True, True)\n', (2429, 2470), True, 'import torch.nn as nn\n'), ((3080, 3109), 'torch.nn.init.uniform_', 'init.uniform_', (['self.bn.weight'], {}), '(self.bn.weight)\n', (3093, 3109), False, 'from torch.nn import init\n'), ((3114, 3139), 'torch.nn.init.zeros_', 'init.zeros_', (['self.bn.bias'], {}), '(self.bn.bias)\n', (3125, 3139), False, 'from torch.nn import init\n'), ((13697, 13715), 'torch.nn.modules.utils._pair', '_pair', (['kernel_size'], {}), '(kernel_size)\n', (13702, 13715), False, 'from torch.nn.modules.utils import _pair\n'), ((13729, 13742), 'torch.nn.modules.utils._pair', '_pair', (['stride'], {}), '(stride)\n', (13734, 13742), False, 'from torch.nn.modules.utils import _pair\n'), ((13757, 13771), 'torch.nn.modules.utils._pair', '_pair', (['padding'], {}), '(padding)\n', (13762, 13771), False, 'from torch.nn.modules.utils import _pair\n'), ((13787, 13802), 'torch.nn.modules.utils._pair', '_pair', (['dilation'], {}), '(dilation)\n', (13792, 13802), False, 'from torch.nn.modules.utils import _pair\n'), ((14082, 14158), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'w', 'b', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(input, w, b, self.stride, self.padding, self.dilation, self.groups)\n', (14090, 14158), True, 'import torch.nn.functional as F\n'), ((3234, 3281), 'torch.nn.init._calculate_fan_in_and_fan_out', 'init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (3268, 3281), False, 'from torch.nn import init\n'), ((3324, 3363), 'torch.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (3337, 3363), False, 'from torch.nn import init\n'), ((4948, 4963), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4961, 4963), False, 'import torch\n'), ((5980, 5995), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5993, 5995), False, 'import torch\n'), ((6790, 6829), 'torch.from_numpy', 'torch.from_numpy', (['(bias * scale + offset)'], {}), '(bias * scale + offset)\n', (6806, 6829), False, 'import torch\n'), ((8268, 8304), 'torch.rsqrt', 'torch.rsqrt', (['(batch_var + self.bn.eps)'], {}), '(batch_var + self.bn.eps)\n', (8279, 8304), False, 'import torch\n'), ((13929, 13937), 'torch.nn.modules.utils._pair', '_pair', (['(0)'], {}), '(0)\n', (13934, 13937), False, 'from torch.nn.modules.utils import _pair\n'), ((2596, 2622), 'torch.Tensor', 'torch.Tensor', (['out_channels'], {}), '(out_channels)\n', (2608, 2622), False, 'import torch\n'), ((3300, 3317), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (3309, 3317), False, 'import math\n'), ((6428, 6458), 'numpy.sqrt', 'np.sqrt', (['(running_var + epsilon)'], {}), '(running_var + epsilon)\n', (6435, 6458), True, 'import numpy as np\n'), ((6674, 6698), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (6690, 6698), False, 'import torch\n'), ((6924, 6942), 'torch.nn.Parameter', 'nn.Parameter', (['bias'], 
{}), '(bias)\n', (6936, 6942), True, 'import torch.nn as nn\n'), ((8316, 8331), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8329, 8331), False, 'import torch\n'), ((8357, 8402), 'torch.sqrt', 'torch.sqrt', (['(self.bn.running_var + self.bn.eps)'], {}), '(self.bn.running_var + self.bn.eps)\n', (8367, 8402), False, 'import torch\n'), ((8919, 8934), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8932, 8934), False, 'import torch\n'), ((8966, 9012), 'torch.rsqrt', 'torch.rsqrt', (['(self.bn.running_var + self.bn.eps)'], {}), '(self.bn.running_var + self.bn.eps)\n', (8977, 9012), False, 'import torch\n')]
|
#!/usr/bin/env python
"""
Pipeline to prepare data from new patients:
1) smooth the data
2) combat normalise (make sure your combat parameters & info dict are updated)
3) inter & intra normalisation
"""
# Import packages
import os
import argparse
import pandas as pd
import numpy as np
from meld_classifier.meld_cohort import MeldCohort, MeldSubject
from meld_classifier.data_preprocessing import Preprocess, Feature
from meld_classifier.paths import BASE_PATH, NORM_CONTROLS_PARAMS_FILE, COMBAT_PARAMS_FILE, NEWSUBJECTS_DATASET
def create_dataset_file(subjects, output_path):
    df = pd.DataFrame()
    df['subject_id'] = list(subjects)
    df['split'] = ['test'] * len(subjects)
    df.to_csv(output_path)
if __name__ == "__main__":
#parse commandline arguments
parser = argparse.ArgumentParser(description='data-processing on new subject ')
parser.add_argument('-ids','--list_ids',
help='Subject ID.',
required=True,)
parser.add_argument('-d', '--output_dir',
type=str,
help='path to store hdf5 files',
default=BASE_PATH)
parser.add_argument("--withoutflair",
action="store_true",
default=False,
help="do not use flair information")
args = parser.parse_args()
subject_ids = np.array(np.loadtxt(args.list_ids, dtype='str',ndmin=1))
output_dir = args.output_dir
dataset_newSubject = os.path.join(BASE_PATH, NEWSUBJECTS_DATASET)
# Set features and smoothed values
if args.withoutflair:
features = {
".on_lh.thickness.mgh": 10,
".on_lh.w-g.pct.mgh" : 10,
".on_lh.pial.K_filtered.sm20.mgh": None,
'.on_lh.sulc.mgh' : 5,
'.on_lh.curv.mgh' : 5,
}
else:
features = {
".on_lh.thickness.mgh": 10,
".on_lh.w-g.pct.mgh" : 10,
".on_lh.pial.K_filtered.sm20.mgh": None,
'.on_lh.sulc.mgh' : 5,
'.on_lh.curv.mgh' : 5,
'.on_lh.gm_FLAIR_0.25.mgh' : 10,
'.on_lh.gm_FLAIR_0.5.mgh' : 10,
'.on_lh.gm_FLAIR_0.75.mgh' : 10,
".on_lh.gm_FLAIR_0.mgh": 10,
'.on_lh.wm_FLAIR_0.5.mgh' : 10,
'.on_lh.wm_FLAIR_1.mgh' : 10,
}
feat = Feature()
features_smooth = [feat.smooth_feat(feature, features[feature]) for feature in features]
features_combat = [feat.combat_feat(feature) for feature in features_smooth]
### INITIALISE ###
#create dataset
create_dataset_file(subject_ids, dataset_newSubject)
### SMOOTH DATA ###
#-----------------------------------------------------------------------------------------------
print('PROCESS 1 : SMOOTHING')
#create cohort for the new subject
c_raw = MeldCohort(hdf5_file_root='{site_code}_{group}_featurematrix.hdf5', dataset=dataset_newSubject, data_dir=output_dir)
#create object smoothing
smoothing = Preprocess(c_raw,
write_hdf5_file_root='{site_code}_{group}_featurematrix_smoothed.hdf5',
data_dir=output_dir)
for feature in np.sort(list(set(features))):
print(feature)
smoothing.smooth_data(feature, features[feature])
### COMBAT DATA ###
#-----------------------------------------------------------------------------------------------
print('PROCESS 2 : COMBAT')
#create cohort for the new subject
c_smooth = MeldCohort(hdf5_file_root='{site_code}_{group}_featurematrix_smoothed.hdf5', dataset=dataset_newSubject)
#get combat parameters
combat_params_file = os.path.join(BASE_PATH, COMBAT_PARAMS_FILE)
#create object combat
    combat = Preprocess(c_smooth,
write_hdf5_file_root='{site_code}_{group}_featurematrix_combat.hdf5',
data_dir=output_dir)
#features names
for feature in features_smooth:
print(feature)
combat.combat_new_subject(feature, combat_params_file)
    ### INTRA, INTER & ASYMMETRY ###
    #-----------------------------------------------------------------------------------------------
    print('PROCESS 3 : INTRA, INTER & ASYMMETRY')
#create cohort to normalise
c_combat = MeldCohort(hdf5_file_root='{site_code}_{group}_featurematrix_combat.hdf5', dataset=dataset_newSubject, data_dir=output_dir)
# provide mean and std parameter for normalisation by controls
param_norms_file = os.path.join(BASE_PATH, NORM_CONTROLS_PARAMS_FILE)
# create object normalisation
norm = Preprocess(c_combat,
write_hdf5_file_root='{site_code}_{group}_featurematrix_combat.hdf5',
data_dir=output_dir)
# call functions to normalise data
for feature in features_combat:
print(feature)
        norm.intra_inter_subject(feature, params_norm=param_norms_file)
        norm.asymmetry_subject(feature, params_norm=param_norms_file)
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"meld_classifier.meld_cohort.MeldCohort",
"numpy.loadtxt",
"meld_classifier.data_preprocessing.Preprocess",
"meld_classifier.data_preprocessing.Feature",
"os.path.join"
] |
[((580, 594), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (592, 594), True, 'import pandas as pd\n'), ((831, 901), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""data-processing on new subject """'}), "(description='data-processing on new subject ')\n", (854, 901), False, 'import argparse\n'), ((1514, 1558), 'os.path.join', 'os.path.join', (['BASE_PATH', 'NEWSUBJECTS_DATASET'], {}), '(BASE_PATH, NEWSUBJECTS_DATASET)\n', (1526, 1558), False, 'import os\n'), ((2207, 2216), 'meld_classifier.data_preprocessing.Feature', 'Feature', ([], {}), '()\n', (2214, 2216), False, 'from meld_classifier.data_preprocessing import Preprocess, Feature\n'), ((2712, 2833), 'meld_classifier.meld_cohort.MeldCohort', 'MeldCohort', ([], {'hdf5_file_root': '"""{site_code}_{group}_featurematrix.hdf5"""', 'dataset': 'dataset_newSubject', 'data_dir': 'output_dir'}), "(hdf5_file_root='{site_code}_{group}_featurematrix.hdf5', dataset\n =dataset_newSubject, data_dir=output_dir)\n", (2722, 2833), False, 'from meld_classifier.meld_cohort import MeldCohort, MeldSubject\n'), ((2874, 2989), 'meld_classifier.data_preprocessing.Preprocess', 'Preprocess', (['c_raw'], {'write_hdf5_file_root': '"""{site_code}_{group}_featurematrix_smoothed.hdf5"""', 'data_dir': 'output_dir'}), "(c_raw, write_hdf5_file_root=\n '{site_code}_{group}_featurematrix_smoothed.hdf5', data_dir=output_dir)\n", (2884, 2989), False, 'from meld_classifier.data_preprocessing import Preprocess, Feature\n'), ((3382, 3490), 'meld_classifier.meld_cohort.MeldCohort', 'MeldCohort', ([], {'hdf5_file_root': '"""{site_code}_{group}_featurematrix_smoothed.hdf5"""', 'dataset': 'dataset_newSubject'}), "(hdf5_file_root='{site_code}_{group}_featurematrix_smoothed.hdf5',\n dataset=dataset_newSubject)\n", (3392, 3490), False, 'from meld_classifier.meld_cohort import MeldCohort, MeldSubject\n'), ((3539, 3582), 'os.path.join', 'os.path.join', (['BASE_PATH', 'COMBAT_PARAMS_FILE'], {}), '(BASE_PATH, COMBAT_PARAMS_FILE)\n', (3551, 3582), False, 'import os\n'), ((3621, 3737), 'meld_classifier.data_preprocessing.Preprocess', 'Preprocess', (['c_smooth'], {'write_hdf5_file_root': '"""{site_code}_{group}_featurematrix_combat.hdf5"""', 'data_dir': 'output_dir'}), "(c_smooth, write_hdf5_file_root=\n '{site_code}_{group}_featurematrix_combat.hdf5', data_dir=output_dir)\n", (3631, 3737), False, 'from meld_classifier.data_preprocessing import Preprocess, Feature\n'), ((4161, 4288), 'meld_classifier.meld_cohort.MeldCohort', 'MeldCohort', ([], {'hdf5_file_root': '"""{site_code}_{group}_featurematrix_combat.hdf5"""', 'dataset': 'dataset_newSubject', 'data_dir': 'output_dir'}), "(hdf5_file_root='{site_code}_{group}_featurematrix_combat.hdf5',\n dataset=dataset_newSubject, data_dir=output_dir)\n", (4171, 4288), False, 'from meld_classifier.meld_cohort import MeldCohort, MeldSubject\n'), ((4375, 4425), 'os.path.join', 'os.path.join', (['BASE_PATH', 'NORM_CONTROLS_PARAMS_FILE'], {}), '(BASE_PATH, NORM_CONTROLS_PARAMS_FILE)\n', (4387, 4425), False, 'import os\n'), ((4471, 4587), 'meld_classifier.data_preprocessing.Preprocess', 'Preprocess', (['c_combat'], {'write_hdf5_file_root': '"""{site_code}_{group}_featurematrix_combat.hdf5"""', 'data_dir': 'output_dir'}), "(c_combat, write_hdf5_file_root=\n '{site_code}_{group}_featurematrix_combat.hdf5', data_dir=output_dir)\n", (4481, 4587), False, 'from meld_classifier.data_preprocessing import Preprocess, Feature\n'), ((1405, 1452), 'numpy.loadtxt', 'np.loadtxt', (['args.list_ids'], {'dtype': '"""str"""', 'ndmin': 
'(1)'}), "(args.list_ids, dtype='str', ndmin=1)\n", (1415, 1452), True, 'import numpy as np\n')]
|
""" Main lib for project_watson Project
"""
import multiprocessing
import time
import warnings
from tempfile import mkdtemp
import joblib
import mlflow
import pandas as pd
import numpy as np
import sys
from project_watson.data import get_data, get_snli
from project_watson.params import MLFLOW_URI
from project_watson.utils import simple_time_tracker
from project_watson.model import *
import tensorflow as tf  # train() needs tf for TPU initialisation
from keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
class Configuration():
"""
All configuration for running an experiment
"""
def __init__(
self,
model_name,
translation = True,
max_length = 64,
padding = True,
batch_size = 128,
epochs = 5,
learning_rate = 1e-5,
metrics = ["sparse_categorical_accuracy"],
verbose = 1,
train_splits = 5,
accelerator = "TPU",
myluckynumber = 13
):
# seed and accelerator
self.SEED = myluckynumber
# paths
self.PATH_TRAIN = "project_watson/data/train.csv"
self.PATH_TEST = "project_watson/data/test.csv"
# splits
self.TRAIN_SPLITS = train_splits
# mapping of language
self.LANGUAGE_MAP = {
"English" : 0,
"Chinese" : 1,
"Arabic" : 2,
"French" : 3,
"Swahili" : 4,
"Urdu" : 5,
"Vietnamese": 6,
"Russian" : 7,
"Hindi" : 8,
"Greek" : 9,
"Thai" : 10,
"Spanish" : 11,
"German" : 12,
"Turkish" : 13,
"Bulgarian" : 14
}
self.INVERSE_LANGUAGE_MAP = {v: k for k, v in self.LANGUAGE_MAP.items()}
# model configuration
self.MODEL_NAME = model_name
self.TRANSLATION = translation
# self.TOKENIZER = AutoTokenizer.from_pretrained(self.MODEL_NAME)
# model hyperparameters
self.MAX_LENGTH = max_length
self.PAD_TO_MAX_LENGTH = padding
self.BATCH_SIZE = batch_size
self.EPOCHS = epochs
self.LEARNING_RATE = learning_rate
self.METRICS = metrics
self.VERBOSE = verbose
# initializing accelerator
# self.initialize_accelerator()
# def initialize_accelerator(self):
# """
# Initializing accelerator
# """
# # checking TPU first
# if self.ACCELERATOR == "TPU":
# print("Connecting to TPU")
# try:
# tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
# print(f"Running on TPU {tpu.master()}")
# except ValueError:
# print("Could not connect to TPU")
# tpu = None
# if tpu:
# try:
# print("Initializing TPU")
# tf.config.experimental_connect_to_cluster(tpu)
# tf.tpu.experimental.initialize_tpu_system(tpu)
# self.strategy = tf.distribute.experimental.TPUStrategy(tpu)
# self.tpu = tpu
# print("TPU initialized")
# except:
# e = sys.exc_info()[0]
# print( "Error TPU not initialized: %s" % e )
# else:
# print("Unable to initialize TPU")
# self.ACCELERATOR = "GPU"
# # default for CPU and GPU
# if self.ACCELERATOR != "TPU":
# print("Using default strategy for CPU and single GPU")
# self.strategy = tf.distribute.get_strategy()
# # checking GPUs
# if self.ACCELERATOR == "GPU":
# print(f"GPUs Available: {len(tf.config.experimental.list_physical_devices('GPU'))}")
# # defining replicas
# self.AUTO = tf.data.experimental.AUTOTUNE
# self.REPLICAS = self.strategy.num_replicas_in_sync
# print(f"REPLICAS: {self.REPLICAS}")
def train(self):
try:
print("Initializing TPU")
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
print("TPU initialized")
        except Exception:
            e = sys.exc_info()[0]
            print("Error TPU not initialized: %s" % e)
params = dict(
model_name="bert-base-multilingual-cased",
max_len=50,
)
self.model = build_model(**params)
df = get_data()
X = df.drop(columns=['label'], axis=1)
y = df['label']
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.3, random_state=42)
tokenizer = create_tokenizer()
train_input = bert_encode(X_train.premise.values, X_train.hypothesis.values, tokenizer)
test_input = bert_encode(X_test.premise.values, X_test.hypothesis.values, tokenizer)
        # learning_rate belongs to the optimizer, not fit(); passing it here raises a TypeError
        self.model.fit(train_input, y_train, epochs=5, verbose=2, batch_size=32,
                       validation_split=0.3)
    def pred(self, X_pred):
        predictions = [np.argmax(i) for i in self.model.predict(X_pred)]
        return predictions
def accuracy(self, y_pred, y_true):
return sum(y_pred == y_true) / len(y_pred)
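    # Hedged sanity check for accuracy() (not in the original):
    #   y_pred = np.array([0, 1, 2, 1]); y_true = np.array([0, 2, 2, 1])
    #   3 of 4 entries match, so accuracy(y_pred, y_true) == 0.75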
if __name__ == '__main__':
conf = Configuration(
model_name = 'bert-base-multilingual-cased',
translation = True,
max_length = 64,
padding = True,
batch_size = 128,
epochs = 3,
metrics = ["sparse_categorical_accuracy"],
verbose = 1,
)
conf.train()
# test = pd.read_csv("data/test.csv")
# test_input = bert_encode(test.premise.values, test.hypothesis.values, tokenizer)
# y_pred = conf.pred(test_input)
# acc = conf.accuracy(y_pred, y_true)
|
[
"sklearn.model_selection.train_test_split",
"project_watson.data.get_data",
"sys.exc_info",
"numpy.argmax"
] |
[((4590, 4600), 'project_watson.data.get_data', 'get_data', ([], {}), '()\n', (4598, 4600), False, 'from project_watson.data import get_data, get_snli\n'), ((4716, 4770), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X, y, test_size=0.3, random_state=42)\n', (4732, 4770), False, 'from sklearn.model_selection import train_test_split\n'), ((5184, 5196), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (5193, 5196), True, 'import numpy as np\n'), ((4352, 4366), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4364, 4366), False, 'import sys\n')]
|
#
# Copyright (C) 2019-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# This file is based in part on deepspeech_openvino_0.5.py by <NAME> at
# https://github.com/openvinotoolkit/open_model_zoo/pull/419, commit 529805d011d9b405f142b2b40f4d202bd403a4f1 on Sep 19, 2019.
#
from copy import deepcopy
import numpy as np
from asr_utils.pipelines import BlockedSeqPipelineStage
class RnnSeqPipelineStage(BlockedSeqPipelineStage):
def __init__(self, profile, ie, model, device='CPU'):
"""
        Load the IE IR file with the network, compile it for the target device,
        and initialize the pipeline stage.
        profile (dict), a dict with pre/post-processing parameters, see profiles.py
        ie (IECore), IECore object for model loading/compilation/inference
        model (str), filename of .xml IR file
        device (str), inference device
        """
self.p = deepcopy(profile)
assert self.p['num_context_frames'] % 2 == 1, "num_context_frames must be odd"
padding_len = self.p['num_context_frames'] // 2
super().__init__(
block_len=16, context_len=self.p['num_context_frames'] - 1,
left_padding_len=padding_len, right_padding_len=padding_len,
padding_shape=(self.p['num_mfcc_dct_coefs'],), cut_alignment=True)
net = ie.read_network(model=model)
self.exec_net = ie.load_network(network=net, device_name=device)
def _reset_state(self):
super()._reset_state()
self._rnn_state = None
def process_data(self, data, finish=False):
if data is not None:
assert len(data.shape) == 2
return super().process_data(data, finish=finish)
def _process_blocks(self, buffer, finish=False):
assert buffer.shape[0] >= self._block_len + self._context_len
processed = []
for start_pos in range(self._context_len, buffer.shape[0] - self._block_len + 1, self._block_len):
block = buffer[start_pos - self._context_len:start_pos + self._block_len]
processed.append(self._process_block(block, finish=finish and start_pos + self._block_len >= buffer.shape[0]))
assert not self._cut_alignment or processed[-1].shape[0] == self._block_len, "Networks with stride != 1 are not supported"
# Here start_pos is its value on the last iteration of the loop
buffer_skip_len = start_pos + self._block_len - self._context_len
return processed, buffer_skip_len
def _process_block(self, mfcc_features, finish=False):
assert mfcc_features.shape[0] == self._block_len + self._context_len, "Wrong data length: _process_block() accepts a single block of data"
# Create a view into the array with overlapping strides to simulate convolution with FC.
# NB: Replacing this and the first FC layer with conv1d may improve speed a little.
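        # Hedged toy illustration: with block_len=2, context_len=2 and one
        # coefficient, a buffer [[0],[1],[2],[3]] is viewed as two overlapping
        # windows [[0],[1],[2]] and [[1],[2],[3]] without copying any data.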
mfcc_features = np.lib.stride_tricks.as_strided(
mfcc_features,
(self._block_len, self._context_len + 1, self.p['num_mfcc_dct_coefs']),
(mfcc_features.strides[0], mfcc_features.strides[0], mfcc_features.strides[1]),
            writeable=False,
)
if self._rnn_state is None:
state_h = np.zeros(self.exec_net.input_info[self.p['in_state_h']].input_data.shape)
state_c = np.zeros(self.exec_net.input_info[self.p['in_state_c']].input_data.shape)
else:
state_h, state_c = self._rnn_state
infer_res = self.exec_net.infer(inputs={
self.p['in_state_c']: state_c,
self.p['in_state_h']: state_h,
self.p['in_data']: [mfcc_features],
})
state_c = infer_res[self.p['out_state_c']]
state_h = infer_res[self.p['out_state_h']]
self._rnn_state = (state_h, state_c)
probs = infer_res[self.p['out_data']].squeeze(1)
return probs
|
[
"copy.deepcopy",
"numpy.lib.stride_tricks.as_strided",
"numpy.zeros"
] |
[((898, 915), 'copy.deepcopy', 'deepcopy', (['profile'], {}), '(profile)\n', (906, 915), False, 'from copy import deepcopy\n'), ((2896, 3121), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['mfcc_features', "(self._block_len, self._context_len + 1, self.p['num_mfcc_dct_coefs'])", '(mfcc_features.strides[0], mfcc_features.strides[0], mfcc_features.strides[1])'], {'writeable': '(False)'}), "(mfcc_features, (self._block_len, self.\n _context_len + 1, self.p['num_mfcc_dct_coefs']), (mfcc_features.strides\n [0], mfcc_features.strides[0], mfcc_features.strides[1]), writeable=False)\n", (2927, 3121), True, 'import numpy as np\n'), ((3232, 3305), 'numpy.zeros', 'np.zeros', (["self.exec_net.input_info[self.p['in_state_h']].input_data.shape"], {}), "(self.exec_net.input_info[self.p['in_state_h']].input_data.shape)\n", (3240, 3305), True, 'import numpy as np\n'), ((3328, 3401), 'numpy.zeros', 'np.zeros', (["self.exec_net.input_info[self.p['in_state_c']].input_data.shape"], {}), "(self.exec_net.input_info[self.p['in_state_c']].input_data.shape)\n", (3336, 3401), True, 'import numpy as np\n')]
|
from Arch1 import Arch1CNN
from Arch2 import Arch2CNN
import torch
import numpy as np
import torchvision
from torchvision import models, transforms
import torch.nn as nn
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset,DataLoader
from xray_dataloader_zscored import ChestXrayDataset
batch_size = 16
p_val= 0.1
p_test = 0.2
transform = transforms.Compose([transforms.Resize((256,256)),transforms.ToTensor()])
transform1 = transforms.Compose([transforms.Resize((512,512)),transforms.ToTensor()])
arch1model = Arch1CNN()
arch2model = Arch2CNN()
arch1model.load_state_dict(torch.load('arch1_dropout.pt'))
arch2model.load_state_dict(torch.load('arch2_new.pt'))
arch1model.eval()
arch2model.eval()
use_cuda = torch.cuda.is_available()
if use_cuda:
computing_device = torch.device("cuda")
num_workers = 1
pin_memory = True
print("Testing on GPU")
else:
computing_device = torch.device("cpu")
num_workers = 0
pin_memory = False
print("Testing on CPU")
test_ind = np.loadtxt("test_ind.txt").astype(np.int32)
dataset = ChestXrayDataset(transform)
sample_test = SubsetRandomSampler(test_ind)
test_loader = DataLoader(dataset, batch_size=batch_size,
sampler=sample_test, num_workers=num_workers, pin_memory= pin_memory)
dataset2 = ChestXrayDataset(transform1)
test_loader2 = DataLoader(dataset2,batch_size = batch_size,sampler = sample_test,num_workers = num_workers, pin_memory = pin_memory)
models = [arch1model, arch2model]
confusionMatrix = np.zeros((15,15))
minibatch_number = 0
for (images1,labels),(images2,labels2) in zip(test_loader,test_loader2):
print("Minibatch number",minibatch_number)
minibatch_number += 1
images1, labels = images1.to(computing_device), labels.to(computing_device)
arch1model.to(computing_device)
logitsarch1 = arch1model(images1)
predictionarch1 = (logitsarch1.cpu().detach().numpy() > 0).astype(np.int32)
del logitsarch1
arch1model.cpu()
arch2model.to(computing_device)
images2 = images2.to(computing_device)
logitsarch2 = arch2model(images2)
predictionarch2 = (logitsarch2.cpu().detach().numpy() > 0).astype(np.int32)
del logitsarch2
arch2model.cpu()
# Taking the union to retain as much predictions as possible
# Each model could have learned a feature better and thus would predict some classes better
prediction = predictionarch1+predictionarch2
prediction[prediction == 2] = 1
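    # Hedged example: [1,0,1] + [0,0,1] = [1,0,2], clamped back to the union [1,0,1].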
labelsArray = (labels.cpu().detach().numpy()).astype(np.int32)
for row in range(len(prediction)):
indexPrediction = np.where(prediction[row] == 1)[0] + 1
indexLabels = np.where(labelsArray[row] == 1)[0] + 1
#Remove common elements
commonElements = np.intersect1d(indexPrediction, indexLabels)
for i in commonElements:
confusionMatrix[i,i] += 1
excessPrediction = np.setdiff1d(indexPrediction,commonElements)
excessLabels = np.setdiff1d(indexLabels,commonElements)
#Zero pad if either of the two arrays is null
        if len(excessPrediction) == 0:
            excessPrediction = np.zeros(1, np.int32)
        if len(excessLabels) == 0:
            excessLabels = np.zeros(1, np.int32)
for i in excessPrediction:
for j in excessLabels:
confusionMatrix[i,j] += 1
np.savetxt('Confusion_matrix_Ensemble.txt',confusionMatrix)
|
[
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.savetxt",
"numpy.zeros",
"numpy.setdiff1d",
"torchvision.transforms.ToTensor",
"xray_dataloader_zscored.ChestXrayDataset",
"numpy.where",
"torch.cuda.is_available",
"numpy.loadtxt",
"torch.device",
"numpy.intersect1d",
"Arch2.Arch2CNN",
"Arch1.Arch1CNN",
"torchvision.transforms.Resize"
] |
[((583, 593), 'Arch1.Arch1CNN', 'Arch1CNN', ([], {}), '()\n', (591, 593), False, 'from Arch1 import Arch1CNN\n'), ((607, 617), 'Arch2.Arch2CNN', 'Arch2CNN', ([], {}), '()\n', (615, 617), False, 'from Arch2 import Arch2CNN\n'), ((782, 807), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (805, 807), False, 'import torch\n'), ((1123, 1150), 'xray_dataloader_zscored.ChestXrayDataset', 'ChestXrayDataset', (['transform'], {}), '(transform)\n', (1139, 1150), False, 'from xray_dataloader_zscored import ChestXrayDataset\n'), ((1165, 1194), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['test_ind'], {}), '(test_ind)\n', (1184, 1194), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((1209, 1325), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'sample_test', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory'}), '(dataset, batch_size=batch_size, sampler=sample_test, num_workers\n =num_workers, pin_memory=pin_memory)\n', (1219, 1325), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1362, 1390), 'xray_dataloader_zscored.ChestXrayDataset', 'ChestXrayDataset', (['transform1'], {}), '(transform1)\n', (1378, 1390), False, 'from xray_dataloader_zscored import ChestXrayDataset\n'), ((1406, 1522), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset2'], {'batch_size': 'batch_size', 'sampler': 'sample_test', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory'}), '(dataset2, batch_size=batch_size, sampler=sample_test,\n num_workers=num_workers, pin_memory=pin_memory)\n', (1416, 1522), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1577, 1595), 'numpy.zeros', 'np.zeros', (['(15, 15)'], {}), '((15, 15))\n', (1585, 1595), True, 'import numpy as np\n'), ((3410, 3470), 'numpy.savetxt', 'np.savetxt', (['"""Confusion_matrix_Ensemble.txt"""', 'confusionMatrix'], {}), "('Confusion_matrix_Ensemble.txt', confusionMatrix)\n", (3420, 3470), True, 'import numpy as np\n'), ((645, 675), 'torch.load', 'torch.load', (['"""arch1_dropout.pt"""'], {}), "('arch1_dropout.pt')\n", (655, 675), False, 'import torch\n'), ((704, 730), 'torch.load', 'torch.load', (['"""arch2_new.pt"""'], {}), "('arch2_new.pt')\n", (714, 730), False, 'import torch\n'), ((845, 865), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (857, 865), False, 'import torch\n'), ((965, 984), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (977, 984), False, 'import torch\n'), ((430, 459), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (447, 459), False, 'from torchvision import models, transforms\n'), ((459, 480), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (478, 480), False, 'from torchvision import models, transforms\n'), ((516, 545), 'torchvision.transforms.Resize', 'transforms.Resize', (['(512, 512)'], {}), '((512, 512))\n', (533, 545), False, 'from torchvision import models, transforms\n'), ((545, 566), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (564, 566), False, 'from torchvision import models, transforms\n'), ((1069, 1095), 'numpy.loadtxt', 'np.loadtxt', (['"""test_ind.txt"""'], {}), "('test_ind.txt')\n", (1079, 1095), True, 'import numpy as np\n'), ((2817, 2861), 'numpy.intersect1d', 'np.intersect1d', (['indexPrediction', 'indexLabels'], {}), '(indexPrediction, indexLabels)\n', (2831, 2861), True, 'import numpy as np\n'), ((2960, 3005), 
'numpy.setdiff1d', 'np.setdiff1d', (['indexPrediction', 'commonElements'], {}), '(indexPrediction, commonElements)\n', (2972, 3005), True, 'import numpy as np\n'), ((3028, 3069), 'numpy.setdiff1d', 'np.setdiff1d', (['indexLabels', 'commonElements'], {}), '(indexLabels, commonElements)\n', (3040, 3069), True, 'import numpy as np\n'), ((3194, 3215), 'numpy.zeros', 'np.zeros', (['(1)', 'np.int32'], {}), '(1, np.int32)\n', (3202, 3215), True, 'import numpy as np\n'), ((3277, 3298), 'numpy.zeros', 'np.zeros', (['(1)', 'np.int32'], {}), '(1, np.int32)\n', (3285, 3298), True, 'import numpy as np\n'), ((2660, 2690), 'numpy.where', 'np.where', (['(prediction[row] == 1)'], {}), '(prediction[row] == 1)\n', (2668, 2690), True, 'import numpy as np\n'), ((2720, 2751), 'numpy.where', 'np.where', (['(labelsArray[row] == 1)'], {}), '(labelsArray[row] == 1)\n', (2728, 2751), True, 'import numpy as np\n')]
|
import numpy
import collections
import torch
Transition = collections.namedtuple("Transition", ("observation", "q_values", "action", "reward", "done"))
class Buffer():
def __init__(self, size, gamma, steps, observation_shape, actions_count):
self.size = size
self.gamma = gamma
self.steps = steps
self.observation_shape = observation_shape
self.actions_count = actions_count
self.ptr = 0
self.buffer = []
def _init_zeros(self):
for _ in range(0, self.size):
observation = numpy.zeros(self.observation_shape)
q_values = numpy.zeros(self.actions_count)
self.buffer.append(Transition(observation, q_values, 0, 0.0, True))
def length(self):
return len(self.buffer)
def add(self, observation, q_values, action, reward, done):
if self.length() == 0:
self._init_zeros()
self.buffer[self.ptr] = Transition(observation.copy(), q_values.copy(), action, reward, done)
self.ptr = (self.ptr+1)%self.size
def _print(self):
for i in range(self.length()):
#print(self.buffer[i].observation, end = " ")
print(self.buffer[i].q_values, end = " ")
print(self.buffer[i].action, end = " ")
print(self.buffer[i].reward, end = " ")
print(self.buffer[i].done, end = " ")
print("\n")
def get_random_batch(self, batch_size, device):
observation_shape = self.buffer[0].observation.shape
state_shape = (batch_size, ) + observation_shape[0:]
actions_count = len(self.buffer[0].q_values)
q_values_shape = (batch_size, ) + (actions_count, )
input = torch.zeros(state_shape, dtype=torch.float32).to(device)
target = torch.zeros(q_values_shape, dtype=torch.float32).to(device)
for i in range(0, batch_size):
n = numpy.random.randint(self.length() - self.steps - 1)
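            # Build an n-step discounted return; gamma_ is zeroed once a `done`
            # transition is hit so rewards and the bootstrap value below do not
            # leak across episode boundaries.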
gamma_ = self.gamma
reward_sum = 0.0
for k in range(self.steps):
if self.buffer[n + k].done:
gamma_ = 0.0
reward_sum+= self.buffer[n + k].reward*(gamma_**k)
if self.buffer[n + self.steps].done:
gamma_ = 0.0
q_values = self.buffer[n].q_values.copy()
action = self.buffer[n].action
q_values[action] = reward_sum + gamma_*numpy.max(self.buffer[n + self.steps].q_values)
input[i] = torch.from_numpy(self.buffer[n].observation).to(device)
target[i] = torch.from_numpy(q_values).to(device)
return input, target
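# Hedged usage sketch (not in the original): exercise the buffer end to end
# with random 4-dim observations and 2 actions.
if __name__ == "__main__":
    buf = Buffer(size=128, gamma=0.99, steps=3,
                 observation_shape=(4,), actions_count=2)
    for _ in range(64):
        buf.add(numpy.random.rand(4), numpy.random.rand(2), 0, 1.0, False)
    states, targets = buf.get_random_batch(8, torch.device("cpu"))
    print(states.shape, targets.shape)  # torch.Size([8, 4]) torch.Size([8, 2])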
|
[
"numpy.zeros",
"numpy.max",
"collections.namedtuple",
"torch.zeros",
"torch.from_numpy"
] |
[((60, 157), 'collections.namedtuple', 'collections.namedtuple', (['"""Transition"""', "('observation', 'q_values', 'action', 'reward', 'done')"], {}), "('Transition', ('observation', 'q_values', 'action',\n 'reward', 'done'))\n", (82, 157), False, 'import collections\n'), ((575, 610), 'numpy.zeros', 'numpy.zeros', (['self.observation_shape'], {}), '(self.observation_shape)\n', (586, 610), False, 'import numpy\n'), ((637, 668), 'numpy.zeros', 'numpy.zeros', (['self.actions_count'], {}), '(self.actions_count)\n', (648, 668), False, 'import numpy\n'), ((1757, 1802), 'torch.zeros', 'torch.zeros', (['state_shape'], {'dtype': 'torch.float32'}), '(state_shape, dtype=torch.float32)\n', (1768, 1802), False, 'import torch\n'), ((1837, 1885), 'torch.zeros', 'torch.zeros', (['q_values_shape'], {'dtype': 'torch.float32'}), '(q_values_shape, dtype=torch.float32)\n', (1848, 1885), False, 'import torch\n'), ((2509, 2556), 'numpy.max', 'numpy.max', (['self.buffer[n + self.steps].q_values'], {}), '(self.buffer[n + self.steps].q_values)\n', (2518, 2556), False, 'import numpy\n'), ((2594, 2638), 'torch.from_numpy', 'torch.from_numpy', (['self.buffer[n].observation'], {}), '(self.buffer[n].observation)\n', (2610, 2638), False, 'import torch\n'), ((2674, 2700), 'torch.from_numpy', 'torch.from_numpy', (['q_values'], {}), '(q_values)\n', (2690, 2700), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""NNratio_1D.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/13zC6nTSvjEFa_-FFy6-Rpy5zH8bUUlMJ
"""
from __future__ import print_function
from __future__ import division
import keras
import os
from keras.models import Sequential
from keras import layers
from keras import backend as K
from keras.layers import Activation, Dense
from keras.constraints import Constraint
from keras.constraints import max_norm
from math import *
import numpy as np
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import scipy.integrate as integrate
import scipy.stats as st
import time
##!cat /proc/cpuinfo
##!cat /proc/meminfo
#!apt-get -qq install -y graphviz && pip install -q pydot
#import pydot
"""## Connect to Google Drive for storage
### Pydrive
"""
# install PyDrive
#!pip install -U -q PyDrive
#from pydrive.auth import GoogleAuth
#from pydrive.drive import GoogleDrive
#from google.colab import auth
#from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
#auth.authenticate_user()
#gauth = GoogleAuth()
#gauth.credentials = GoogleCredentials.get_application_default()
#drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html
'''
# 2. Create & upload a file
uploaded = drive.CreateFile({'title': 'Sample upload.txt'})
uploaded.SetContentString('Sample upload file content')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# 3. Load a file by ID and print its contents.
downloaded = drive.CreateFile({'id': uploaded.get('id')})
print('Downloaded content "{}"'.format(downloaded.GetContentString()))
'''
"""# Main"""
#sigmoid function
def sigmoid(x):
return 1 / (1 + exp(-x))
#@title Global Parameters
epochs = 100000 #@param {type:"integer"}
batch_size = 1000 #@param {type:"integer"}
#Geteps = 0.005 #@param {type:"number"}
Geteps = 0.0015811 #@param {type:"number"}
Getmu = 0.8 #@param {type:"number"}
Getsigma = 0.02 #@param {type:"number"}
cut = 0. #@param {type:"number"}
gcut = 0. #@param {type:"number"}
def SM(x):
return exp(-8*x)
def BSM(x):
return exp(-(x-Getmu)**2/(2*Getsigma**2))
#Normalize distribution
SM_norm = integrate.quad(lambda y :SM(y),cut,1)
BSM_norm = integrate.quad(lambda y :BSM(y),cut,1)
#SM_norm_c = integrate.quad(lambda y :SMn(y),gcut,1)
#normalized distribution
def SMn(x):
return exp(-8*x)/SM_norm[0]
def BSMn(x):
return (SMn(x)+Geteps*BSM(x)/BSM_norm[0])/(1+Geteps)
SM_norm_c = integrate.quad(lambda y :SM(y),gcut,1)
def SMnc(x):
return exp(-8*x)/SM_norm_c[0]
#define probability distribution function
class P_SM(st.rv_continuous):
def _pdf(self,x):
return SMn(x)
class P_BSM(st.rv_continuous):
def _pdf(self,x):
return BSMn(x)
class P_SMc(st.rv_continuous):
def _pdf(self,x):
return SMnc(x)
SM_gen = P_SM(a=cut,b=1,name='sm_sample')
BSM_gen = P_BSM(a=cut,b=1,name='bsm_sample')
SMc_gen = P_SMc(a=gcut,b=1,name='smc_sample')
NRef=200000
Nbsm=20032
NR =20000
#load data
samples = np.load('Sample200k_2k_R1.npz')
Ref_sample=samples['Ref_sample']
bsm_sample=samples['bsm_sample']
#Data and References
#Ref_sample = SM_gen.rvs(size=NRef)
#bsm_sample= BSM_gen.rvs(size=Nbsm)
#sm_target = np.zeros(NRef)
#bsm_target = np.ones(Nbsm)
#x_train = np.append(Ref_sample,bsm_sample)
#y_train = np.append(sm_target,bsm_target)
rfw = NRef/NR
"""## Binning the Ref sample"""
Nbins=1000
H1d,edge = np.histogram(Ref_sample,bins=Nbins,range=(0,1))
#H1d_bsm, edge_bsm = np.histogram(bsm_sample, bins=Nbins, range=(0,1))
def moving_average(a, n=2) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
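# With n=2 this maps the Nbins+1 histogram edges to Nbins bin centres,
# e.g. moving_average([0.0, 0.5, 1.0]) gives [0.25, 0.75].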
xpos = moving_average(edge,2);
#xpos_bsm = moving_average(edge_bsm,2);
wlist = []
wlist_bsm = []
for xidx, xval in enumerate(xpos):
wlist.append(H1d[xidx])
#for xidxb, xvalb in enumerate(xpos_bsm):
# wlist_bsm.append(H1d_bsm[xidxb])
x_train = np.append(xpos,bsm_sample)
#x_train = np.append(xpos,xpos_bsm)
sm_target = np.zeros(Nbins)
bsm_target = np.ones(Nbsm)
#bsm_target = np.ones(Nbins)
y_train = np.append(sm_target,bsm_target)
#weightloss1 = np.append(np.asarray(wlist),np.asarray(wlist_bsm))
weightloss1 = np.append(np.asarray(wlist),bsm_target)
weightloss = K.variable(value=weightloss1.reshape((Nbsm+Nbins,1)))
#from pydrive.auth import GoogleAuth
#from pydrive.drive import GoogleDrive
#from google.colab import auth
#from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
#auth.authenticate_user()
#gauth = GoogleAuth()
#gauth.credentials = GoogleCredentials.get_application_default()
#drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html
# 2. Create & upload a file
#uploaded = drive.CreateFile({'title': 'Samples200k_2k_R2.npz'})
#uploaded.SetContentFile('Samples200k_2k_R2.npz')
#uploaded.Upload()
#print('Uploaded file with ID {}'.format(uploaded.get('id')))
"""## Build NN and Train"""
#define custom loss function
def customloss(yTrue,yPred):
return yTrue*K.log(1+K.exp(-yPred))+1/rfw*(1-yTrue)*K.log(1+K.exp(yPred))
def customlossML(sw):
Nt = Nbsm+Nbins
def lossML(yTrue,yPred):
sw_rs = K.reshape(sw,(Nt,1))
ytrue_rs = K.reshape(yTrue,(Nt,1))
ypred_rs = K.reshape(yPred,(Nt,1))
return -K.sum(ytrue_rs*sw_rs*ypred_rs)+K.sum((1-ytrue_rs)*sw_rs*(K.exp(ypred_rs)-1))/rfw
return lossML
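# Hedged reading of lossML above: with per-point weights w and network output f,
#   L = -sum_data(w * f) + (1 / rfw) * sum_ref(w * (exp(f) - 1)),
# a maximum-likelihood-ratio style loss in which the reference term is
# down-weighted by 1/rfw = NR/NRef.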
def customlossMLws(yTrue,yPred):
return -yTrue*yPred+1/rfw*(1-yTrue)*(K.exp(yPred)-1)
rmsprop = keras.optimizers.RMSprop()
class WeightClip(Constraint):
'''Clips the weights incident to each hidden unit to be inside a range
'''
def __init__(self, c=2):
self.c = c
def __call__(self, p):
return K.clip(p, -self.c, self.c)
def get_config(self):
return {'name': self.__class__.__name__,
'c': self.c}
# build the model
model = Sequential()
model.add(Dense(4, activation='sigmoid',input_shape=(1,),kernel_regularizer=keras.regularizers.l2(0.),W_constraint = WeightClip(40),b_constraint=WeightClip(40)))
#model.add(Dense(4, activation='sigmoid',input_shape=(1,),kernel_regularizer=keras.regularizers.l2(0.0),kernel_constraint=max_norm(10.),bias_constraint=max_norm(10.)))
#model.add(Dense(4, activation='sigmoid',input_shape=(1,),kernel_regularizer=keras.regularizers.l2(0.0001)))
#model.add(Dense(3, activation='sigmoid'))
#model.add(Dense(3, activation='sigmoid'))
model.add(Dense(1,W_constraint=WeightClip(40)))
# compile the model
model.compile(loss=customlossML(weightloss),
optimizer='rmsprop')
#model.save_weights("model_4_1d.h5")
'''
#Visualize network
#from keras.utils import plot_model
#plot_model(model,to_file='model.png',show_shapes='True')
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
'''
#load weight and continue training
#import h5py
#sample1
#model.load_weights("modelML_9_cut5_D400_200_L2R0_MN0_sigmoid_2000k.h5")
#sample2
#model.load_weights("model_5_R2_R500k.h5")
"""### test statistics vs. Run"""
def t_vs_run(drun,ite):
t_v_array=[]
i=0;
while i < ite:
model.fit(x_train, y_train,
batch_size=len(x_train),
epochs=drun,
shuffle=False,
verbose=0);
#tary = 2*(model.predict(np.column_stack((bsm_cut,anglesBSM_c)))).flatten();
tary1 = 2*(model.predict(bsm_sample)).flatten();
tary3 = 2*np.vectorize(exp)(model.predict(Ref_sample)).flatten();
tary2 = -2*(model.evaluate(x=x_train,y=y_train,batch_size=len(x_train))).flatten();
#tary3 = -2*(model.evaluate(x=bsm_sample,y=bsm_target,batch_size=len(bsm_sample))).flatten();
t_v_array = np.append(t_v_array,np.array([np.sum(tary1),np.sum(tary2),np.sum(tary3)]));
i +=1
#model.save_weights("model1d_4.h5")
return t_v_array
#R14
#t = time.process_time()
#ta = t_vs_run(100000,5)
#print(time.process_time() - t)
#distribution of t
#Nsmb = 2000;
#rfw_tc = len(Ref_sample)/Nsmb;
# NB: this first customlossMLc is dead code: it is immediately shadowed by the
# definition below, and rfw_tc is only defined in the commented-out lines above.
#def customlossMLc(yTrue,yPred):
#    return -yTrue*yPred+1/rfw_tc*(1-yTrue)*(K.exp(yPred)-1)
def customlossMLc(sw,Nt):
def lossML(yTrue,yPred):
sw_rs = K.reshape(sw,(Nt,1))
ytrue_rs = K.reshape(yTrue,(Nt,1))
ypred_rs = K.reshape(yPred,(Nt,1))
return -K.sum(ytrue_rs*sw_rs*ypred_rs)+K.sum((1-ytrue_rs)*sw_rs*(K.exp(ypred_rs)-1))/rfw
return lossML
class test_statistics:
'''
Compute test statistics given data sample(bsm or sm)
'''
def __init__(self,NNmodel):
#self.input_sample = input_sample
#self.ref_sample = ref_sample
self.NNmodel = NNmodel
#self.lossfunc = lossfunc
#self.data_sample_t = SMc_gen.rvs(size=2000)
#self.wlist_d = []
#self.anglesSM_t = angle_gen.rvs(size=Nsmb)
#train NN with data(input) sample and ref sample
#training variables
#generate sm sample
def train_t(self,epoch):
Nsmb = np.random.poisson(Nbsm,1)
#Nsmb = np.random.poisson(NR,1)
#sm ref data sample
#self.data_sample_t = SMc_gen.rvs(size=Nsmb[0])
#bsm data sample
#self.data_sample_t = BSM_gen.rvs(size=Nsmb[0])
self.data_sample_t = BSM_gen.rvs(size=Nbsm)
#bin the data sample
self.H1d_d, self.edge_d = np.histogram(self.data_sample_t, bins=Nbins, range=(0,1))
self.xpos_d = moving_average(self.edge_d,2);
#bsm_target_tc = np.ones(Nsmb[0])
bsm_target_tc = np.ones(Nbins)
#x_train_t = np.append(xpos,self.data_sample_t)
x_train_t = np.append(xpos,self.xpos_d)
self.wlist_d = []
for xidx, xval in enumerate(self.xpos_d):
self.wlist_d.append(self.H1d_d[xidx])
#bsm_target_t = np.ones(Nbsm_c)
y_train_t = np.append(sm_target,bsm_target_tc)
weightloss_t = np.append(np.asarray(wlist),self.wlist_d)
#weightloss_t = np.append(np.asarray(wlist),bsm_target_tc)
#weightlossrs = K.variable(value=weightloss_t.reshape((Nsmb[0]+Nbins,1)))
weightlossrs = K.variable(value=weightloss_t.reshape((2*Nbins,1)))
self.NNmodel.compile(loss=customlossMLc(weightlossrs,2*Nbins),optimizer=rmsprop)
#self.NNmodel.load_weights("modelML_4_1d_init.h5")
self.NNmodel.load_weights("model_4_1d.h5")
self.NNmodel.fit(x_train_t, y_train_t,
batch_size=len(x_train_t),
epochs=epoch,
shuffle=False,
verbose=0);
#tary = 2*(self.NNmodel.predict(self.sm_sample_t)).flatten();
tary = -2*(model.evaluate(x=x_train_t,y=y_train_t,batch_size=len(x_train_t))).flatten();
return (np.sum(tary))
    def train_result(self):
        # NB: this method needs matplotlib (`plt`, commented out in the imports),
        # a global `xinput` grid not defined in this file, and self.sm_sample_t,
        # which this class never sets.
        yorig = []
for xi in xinput:
yorig.append(SMn(xi))
ypred = np.vectorize(exp)(model.predict(xinput));
ypred = ypred.flatten()*np.vectorize(SMn)(xinput);
#fig, ax = plt.subplots(projection='3d')
fig, ax = plt.subplots()
ax.plot(xinput,yorig,'*')
ax.plot(xinput,ypred,'.')
ax.hist(self.sm_sample_t, bins=25)
plt.yscale('log', nonposy='clip')
plt.title("SM: 1k, BSM: 100, Sample 1, 1M Rounds")
def t_vs_run(self,drun,ite):
Nsmb = np.random.poisson(Nbsm,1)
x_train_t = np.append(xpos,self.sm_sample_t)
bsm_target_tc = np.ones(Nsmb[0])
y_train_t = np.append(sm_target,bsm_target_tc)
weightloss_t = np.append(wlist,bsm_target_tc)
weightlossrs = K.variable(value=weightloss_t.reshape((Nsmb[0]+Nbins,1)))
self.NNmodel.compile(loss=customlossMLc(weightlossrs,Nsmb[0]+Nbins),optimizer=rmsprop)
t_v_array=[]
i=0;
while i < ite:
self.NNmodel.fit(x_train_t, y_train_t,
batch_size=len(x_train_t),
epochs=drun,
shuffle=False,
verbose=0);
#tary = 2*(self.NNmodel.predict(self.sm_sample_t)).flatten();
tary = -2*(model.evaluate(x=x_train_t,y=y_train_t,batch_size=len(x_train_t))).flatten();
t_v_array = np.append(t_v_array,np.sum(tary));
i +=1
return t_v_array
def t_value(self):
tary = 2*(self.NNmodel.predict(np.column_stack((sm_sample_t,anglesSM_t)))).flatten();
return (np.sum(tary))
"""# Save and Continue training"""
#load weight and continue training
#model.load_weights("model_5_3.h5")
#Save weight to HDF5
#model.save_weights("model2.h5")
#print("Save model to disk")
#sfilename = 't_value'
#Sample200k_4k.npz
#np.savez(sfilename,t_value=ta)
'''
model.fit(x_train, y_train,
batch_size=len(x_train),
epochs=15000,
sample_weight=weightloss,
verbose=0)
#tary = 2*(model.predict(np.column_stack((bsm_cut,anglesBSM_c)))).flatten();
tary = 2*(model.predict(bsm_sample)).flatten()
#print(np.sum(tary))
'''
#model_weight=model.get_weights()
#smtrain1 = test_statistics(model,customlossMLc)
#ta=smtrain1.t_vs_run(10000,200)
def get_tsm(Nsample):
tvalue_array = []
smtrain = test_statistics(model)
i=0
while i < Nsample:
tvalue_array.append(smtrain.train_t(800000))
i += 1
return tvalue_array
tarrayR1 = get_tsm(5)
#f = open('tbsm20kbin_Ref1kbin_R1M_08.txt','w')
f = open('tbsm20kbin_np_fixed_p_Ref1kbin_R1M_02.txt','w')
#f.write('{}'.format(model_weight))
#f.write('{}'.format(ta))
f.write('{}'.format(tarrayR1))
f.close()
|
[
"keras.regularizers.l2",
"numpy.load",
"numpy.sum",
"numpy.vectorize",
"keras.backend.reshape",
"keras.backend.exp",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"keras.backend.sum",
"numpy.append",
"numpy.histogram",
"numpy.cumsum",
"numpy.random.poisson",
"numpy.column_stack",
"keras.backend.clip",
"keras.models.Sequential",
"keras.optimizers.RMSprop"
] |
[((3180, 3211), 'numpy.load', 'np.load', (['"""Sample200k_2k_R1.npz"""'], {}), "('Sample200k_2k_R1.npz')\n", (3187, 3211), True, 'import numpy as np\n'), ((3588, 3638), 'numpy.histogram', 'np.histogram', (['Ref_sample'], {'bins': 'Nbins', 'range': '(0, 1)'}), '(Ref_sample, bins=Nbins, range=(0, 1))\n', (3600, 3638), True, 'import numpy as np\n'), ((4085, 4112), 'numpy.append', 'np.append', (['xpos', 'bsm_sample'], {}), '(xpos, bsm_sample)\n', (4094, 4112), True, 'import numpy as np\n'), ((4160, 4175), 'numpy.zeros', 'np.zeros', (['Nbins'], {}), '(Nbins)\n', (4168, 4175), True, 'import numpy as np\n'), ((4189, 4202), 'numpy.ones', 'np.ones', (['Nbsm'], {}), '(Nbsm)\n', (4196, 4202), True, 'import numpy as np\n'), ((4243, 4275), 'numpy.append', 'np.append', (['sm_target', 'bsm_target'], {}), '(sm_target, bsm_target)\n', (4252, 4275), True, 'import numpy as np\n'), ((5688, 5714), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {}), '()\n', (5712, 5714), False, 'import keras\n'), ((6079, 6091), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6089, 6091), False, 'from keras.models import Sequential\n'), ((3747, 3772), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (3756, 3772), True, 'import numpy as np\n'), ((4365, 4382), 'numpy.asarray', 'np.asarray', (['wlist'], {}), '(wlist)\n', (4375, 4382), True, 'import numpy as np\n'), ((5380, 5402), 'keras.backend.reshape', 'K.reshape', (['sw', '(Nt, 1)'], {}), '(sw, (Nt, 1))\n', (5389, 5402), True, 'from keras import backend as K\n'), ((5416, 5441), 'keras.backend.reshape', 'K.reshape', (['yTrue', '(Nt, 1)'], {}), '(yTrue, (Nt, 1))\n', (5425, 5441), True, 'from keras import backend as K\n'), ((5455, 5480), 'keras.backend.reshape', 'K.reshape', (['yPred', '(Nt, 1)'], {}), '(yPred, (Nt, 1))\n', (5464, 5480), True, 'from keras import backend as K\n'), ((5920, 5946), 'keras.backend.clip', 'K.clip', (['p', '(-self.c)', 'self.c'], {}), '(p, -self.c, self.c)\n', (5926, 5946), True, 'from keras import backend as K\n'), ((8381, 8403), 'keras.backend.reshape', 'K.reshape', (['sw', '(Nt, 1)'], {}), '(sw, (Nt, 1))\n', (8390, 8403), True, 'from keras import backend as K\n'), ((8417, 8442), 'keras.backend.reshape', 'K.reshape', (['yTrue', '(Nt, 1)'], {}), '(yTrue, (Nt, 1))\n', (8426, 8442), True, 'from keras import backend as K\n'), ((8456, 8481), 'keras.backend.reshape', 'K.reshape', (['yPred', '(Nt, 1)'], {}), '(yPred, (Nt, 1))\n', (8465, 8481), True, 'from keras import backend as K\n'), ((9104, 9130), 'numpy.random.poisson', 'np.random.poisson', (['Nbsm', '(1)'], {}), '(Nbsm, 1)\n', (9121, 9130), True, 'import numpy as np\n'), ((9418, 9476), 'numpy.histogram', 'np.histogram', (['self.data_sample_t'], {'bins': 'Nbins', 'range': '(0, 1)'}), '(self.data_sample_t, bins=Nbins, range=(0, 1))\n', (9430, 9476), True, 'import numpy as np\n'), ((9583, 9597), 'numpy.ones', 'np.ones', (['Nbins'], {}), '(Nbins)\n', (9590, 9597), True, 'import numpy as np\n'), ((9666, 9694), 'numpy.append', 'np.append', (['xpos', 'self.xpos_d'], {}), '(xpos, self.xpos_d)\n', (9675, 9694), True, 'import numpy as np\n'), ((9863, 9898), 'numpy.append', 'np.append', (['sm_target', 'bsm_target_tc'], {}), '(sm_target, bsm_target_tc)\n', (9872, 9898), True, 'import numpy as np\n'), ((10680, 10692), 'numpy.sum', 'np.sum', (['tary'], {}), '(tary)\n', (10686, 10692), True, 'import numpy as np\n'), ((11225, 11251), 'numpy.random.poisson', 'np.random.poisson', (['Nbsm', '(1)'], {}), '(Nbsm, 1)\n', (11242, 11251), True, 'import numpy as np\n'), 
((11267, 11300), 'numpy.append', 'np.append', (['xpos', 'self.sm_sample_t'], {}), '(xpos, self.sm_sample_t)\n', (11276, 11300), True, 'import numpy as np\n'), ((11320, 11336), 'numpy.ones', 'np.ones', (['Nsmb[0]'], {}), '(Nsmb[0])\n', (11327, 11336), True, 'import numpy as np\n'), ((11353, 11388), 'numpy.append', 'np.append', (['sm_target', 'bsm_target_tc'], {}), '(sm_target, bsm_target_tc)\n', (11362, 11388), True, 'import numpy as np\n'), ((11407, 11438), 'numpy.append', 'np.append', (['wlist', 'bsm_target_tc'], {}), '(wlist, bsm_target_tc)\n', (11416, 11438), True, 'import numpy as np\n'), ((12190, 12202), 'numpy.sum', 'np.sum', (['tary'], {}), '(tary)\n', (12196, 12202), True, 'import numpy as np\n'), ((6168, 6194), 'keras.regularizers.l2', 'keras.regularizers.l2', (['(0.0)'], {}), '(0.0)\n', (6189, 6194), False, 'import keras\n'), ((9927, 9944), 'numpy.asarray', 'np.asarray', (['wlist'], {}), '(wlist)\n', (9937, 9944), True, 'import numpy as np\n'), ((10805, 10822), 'numpy.vectorize', 'np.vectorize', (['exp'], {}), '(exp)\n', (10817, 10822), True, 'import numpy as np\n'), ((5491, 5525), 'keras.backend.sum', 'K.sum', (['(ytrue_rs * sw_rs * ypred_rs)'], {}), '(ytrue_rs * sw_rs * ypred_rs)\n', (5496, 5525), True, 'from keras import backend as K\n'), ((5661, 5673), 'keras.backend.exp', 'K.exp', (['yPred'], {}), '(yPred)\n', (5666, 5673), True, 'from keras import backend as K\n'), ((8299, 8311), 'keras.backend.exp', 'K.exp', (['yPred'], {}), '(yPred)\n', (8304, 8311), True, 'from keras import backend as K\n'), ((8492, 8526), 'keras.backend.sum', 'K.sum', (['(ytrue_rs * sw_rs * ypred_rs)'], {}), '(ytrue_rs * sw_rs * ypred_rs)\n', (8497, 8526), True, 'from keras import backend as K\n'), ((10875, 10892), 'numpy.vectorize', 'np.vectorize', (['SMn'], {}), '(SMn)\n', (10887, 10892), True, 'import numpy as np\n'), ((12009, 12021), 'numpy.sum', 'np.sum', (['tary'], {}), '(tary)\n', (12015, 12021), True, 'import numpy as np\n'), ((5246, 5259), 'keras.backend.exp', 'K.exp', (['(-yPred)'], {}), '(-yPred)\n', (5251, 5259), True, 'from keras import backend as K\n'), ((5285, 5297), 'keras.backend.exp', 'K.exp', (['yPred'], {}), '(yPred)\n', (5290, 5297), True, 'from keras import backend as K\n'), ((7946, 7959), 'numpy.sum', 'np.sum', (['tary1'], {}), '(tary1)\n', (7952, 7959), True, 'import numpy as np\n'), ((7960, 7973), 'numpy.sum', 'np.sum', (['tary2'], {}), '(tary2)\n', (7966, 7973), True, 'import numpy as np\n'), ((7974, 7987), 'numpy.sum', 'np.sum', (['tary3'], {}), '(tary3)\n', (7980, 7987), True, 'import numpy as np\n'), ((7652, 7669), 'numpy.vectorize', 'np.vectorize', (['exp'], {}), '(exp)\n', (7664, 7669), True, 'import numpy as np\n'), ((12123, 12165), 'numpy.column_stack', 'np.column_stack', (['(sm_sample_t, anglesSM_t)'], {}), '((sm_sample_t, anglesSM_t))\n', (12138, 12165), True, 'import numpy as np\n'), ((5548, 5563), 'keras.backend.exp', 'K.exp', (['ypred_rs'], {}), '(ypred_rs)\n', (5553, 5563), True, 'from keras import backend as K\n'), ((8549, 8564), 'keras.backend.exp', 'K.exp', (['ypred_rs'], {}), '(ypred_rs)\n', (8554, 8564), True, 'from keras import backend as K\n')]
|
import pytest
import numpy as np
@pytest.fixture
def color_values():
N = 16384
a = np.zeros((N,3), dtype=float)
for i in range(3):
a[:,i] = np.roll(np.linspace(0., 1., N), i)
return a
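# Hedged example of a test consuming the fixture (test name is a placeholder):
def test_color_values_in_unit_range(color_values):
    assert color_values.shape == (16384, 3)
    assert color_values.min() >= 0.0
    assert color_values.max() <= 1.0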
|
[
"numpy.zeros",
"numpy.linspace"
] |
[((92, 121), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {'dtype': 'float'}), '((N, 3), dtype=float)\n', (100, 121), True, 'import numpy as np\n'), ((169, 193), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'N'], {}), '(0.0, 1.0, N)\n', (180, 193), True, 'import numpy as np\n')]
|
import numpy as np
from copy import deepcopy
class Position(object):
"""
Position
Simple type for 4D space-time vector manipulation. The dimension order
in the vector is (t,x,y,z).
Access to dimensions should be only through the __getattr__ and __setattr__
methods. For example to set the x dim value to 14.0 : position.x = 14.0 .
/!\ Trying to access directly Position.data will raise a ValueError
exception as if data were not a Position attribute (see it as a strong
private attribute).
More details on __setattr__ and __getattr__ in their respective definitions.
Attributes
----------
data : numpy.array (shape=(4,)), private attribute
Hold the (t,x,y,z) values. Rational of having a vector instead of
separated t,x,y,z attributes is to be able to use numpy algebra.
However setting or getting t,x,y,z values is done as if
        self.t, self.x, self.y, self.z were attributes of Position thanks
to the __setattr__ and __getattr__ methods. Example : setting the x
dimension value to 14.0 is done as such : position.x = 14.0
/!\ Trying to access directly Position.data will raise a ValueError
exception as if data were not a Position attribute (see it as a strong
private attribute).
Methods
-------
__add__(other), __sub__(other) -> Position:
Addition or subtraction of self with other (type(other) == Position).
    __mul__(other) -> scalar or Position:
        Dot product of self and other, or scalar multiplication,
        depending on type(other).
__eq__, __ne__ -> bool:
        Comparison operators. Two Positions are equal if and only
if norm(v1 - v2) = 0.0 . (type(other) == Position).
    __getattr__(name) -> scalar:
Get a dimension value. (ex: xvalue = position.x, effectively call
xvalue = position.__getattr__('x'), and is equivalent to
xvalue = position.data[1]).
__setattr__(name, value) -> None:
Set a dimension value. (ex: position.x = 14.0 will effectively call
position.__setattr__('x', 14.0), and is equivalent to
position.data[1] = 14.0).
to_list() -> list(scalar):
Returns [self.t, self.x, self.y, self.z].
"""
def __init__(self, t=0.0, x=0.0, y=0.0, z=0.0):
"""
Parameters
----------
t : scalar, or Position, or list, or numpy.array .
scalar : self.t (self.data[0]) set to t.
Position : self.data is set to t.data (x,y,z ignored).
list : self.data is set to numpy.array(t) (x,y,z ignored).
numpy.array : self.data is set to t.
raise Exception if t.shape != (4,).
x,y,z : scalar
self.x (self.data[1]) set to x.
self.y (self.data[2]) set to y.
self.z (self.data[3]) set to z.
Ignored if t is not a scalar.
"""
if type(t) == list:
self.__init__(np.array(t))
elif type(t) == Position:
self.__init__(t.data)
elif type(t) == np.ndarray:
if not t.shape == (4,):
raise Exception("Position : invalid vector shape as constructor argument")
super().__setattr__('data', np.array(t))
else:
super().__setattr__('data', np.array([float(t),float(x),float(y),float(z)]))
def __repr__(self):
return "Position (t,x,y,z)"
def __str__(self):
return ('(t: ' + str(self.t) +
', x: ' + str(self.x) +
', y: ' + str(self.y) +
', z: ' + str(self.z) + ')')
def __add__(self, other):
"""Add two Position."""
return Position(self.data + other.data)
def __sub__(self, other):
"""Subtract two Position."""
return Position(self.data - other.data)
def __mul__(self, other):
"""Dot product of two Position, or matrix multiplication"""
if type(other) == Position:
return np.dot(self.data, other.data)
else:
return Position(self.data*other)
def __eq__(self, other):
"""Compare two Position. (True if norm(self - other) == 0.0)"""
return np.array_equal(self.data, other.data)
def __ne__(self, other):
"""Compare two Position. (True if norm(self - other) != 0.0)"""
return not np.array_equal(self.data, other.data)
def __getattr__(self, name):
"""Get self.{name}. name can only be 't','x','y' or 'z'"""
if name == 't':
return self.data[0]
if name == 'x':
return self.data[1]
if name == 'y':
return self.data[2]
if name == 'z':
return self.data[3]
raise ValueError("Position has no attribute '" + name + "'")
def __setattr__(self, name, value):
"""Set self.{name} to value. name can only be 't','x','y' or 'z'"""
if name == 't':
self.data[0] = value
elif name == 'x':
self.data[1] = value
elif name == 'y':
self.data[2] = value
elif name == 'z':
self.data[3] = value
else:
raise ValueError("'Position' object has no attribute '" + name + "'")
def __getstate__(self):
"""For serialization (pickling) purposes."""
return self.data
def __setstate__(self, data):
"""For serialization (unpickling) purposes."""
super().__setattr__('data', data)
def to_list(self):
"""Returns [self.t, self.x, self.y, self.z]"""
return list(self.data)
def copy(self):
return deepcopy(self)
    def __copy__(self):
        return Position(self.t, self.x, self.y, self.z)
    def __deepcopy__(self, memo):
        return Position(deepcopy(self.data, memo))
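# Example usage (a quick sketch, not part of the original module): attribute
# access routes through __getattr__/__setattr__ while the algebra uses numpy.
if __name__ == "__main__":
    p = Position(0.0, 14.0, 0.0, 0.0)
    q = Position([1.0, 2.0, 3.0, 4.0])
    p.x = 15.0                       # writes p.data[1]
    assert (p + q).x == 17.0         # element-wise addition
    assert p * q == 30.0             # dot product (other is a Position)
    assert (p * 2.0).x == 30.0       # scalar multiplication otherwise
    assert p.copy() == p             # deepcopy-based copy compares equal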
|
[
"numpy.array_equal",
"numpy.dot",
"copy.deepcopy",
"numpy.array"
] |
[((4230, 4267), 'numpy.array_equal', 'np.array_equal', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (4244, 4267), True, 'import numpy as np\n'), ((5660, 5674), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (5668, 5674), False, 'from copy import deepcopy\n'), ((4023, 4052), 'numpy.dot', 'np.dot', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (4029, 4052), True, 'import numpy as np\n'), ((4390, 4427), 'numpy.array_equal', 'np.array_equal', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (4404, 4427), True, 'import numpy as np\n'), ((5824, 5849), 'copy.deepcopy', 'deepcopy', (['self.data', 'memo'], {}), '(self.data, memo)\n', (5832, 5849), False, 'from copy import deepcopy\n'), ((2987, 2998), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2995, 2998), True, 'import numpy as np\n'), ((3275, 3286), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (3283, 3286), True, 'import numpy as np\n')]
|
import sys
import numpy as np
import cv2
REMAP_INTERPOLATION = cv2.INTER_LINEAR
DEPTH_VISUALIZATION_SCALE = 2048
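# Note (an assumption about why this constant was chosen): StereoBM returns
# fixed-point disparities scaled by 16, so dividing by 16 * numDisparities
# (16 * 128 = 2048) maps the map roughly into [0, 1] for cv2.imshow below.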
if len(sys.argv) != 2:
print("Syntax: {0} CALIBRATION_FILE".format(sys.argv[0]))
sys.exit(1)
calibration = np.load(sys.argv[1], allow_pickle=False)
imageSize = tuple(calibration["imageSize"])
leftMapX = calibration["leftMapX"]
leftMapY = calibration["leftMapY"]
leftROI = tuple(calibration["leftROI"])
rightMapX = calibration["rightMapX"]
rightMapY = calibration["rightMapY"]
rightROI = tuple(calibration["rightROI"])
CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720
# TODO: Use more stable identifiers
left = cv2.VideoCapture(0)
right = cv2.VideoCapture(2)
# Increase the resolution
left.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
left.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
right.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
right.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
# Use MJPEG to avoid overloading the USB 2.0 bus at this resolution
left.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
right.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
# The distortion in the left and right edges prevents a good calibration, so
# discard the edges
CROP_WIDTH = 960
def cropHorizontal(image):
return image[:,
int((CAMERA_WIDTH - CROP_WIDTH) / 2):
int(CROP_WIDTH + (CAMERA_WIDTH - CROP_WIDTH) / 2)]
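# With CAMERA_WIDTH = 1280 and CROP_WIDTH = 960 this keeps columns 160:1120.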
# TODO: Why these values in particular?
# TODO: Try applying brightness/contrast/gamma adjustments to the images
stereoMatcher = cv2.StereoBM_create()
stereoMatcher.setMinDisparity(4)
stereoMatcher.setNumDisparities(128)
stereoMatcher.setBlockSize(21)
stereoMatcher.setROI1(leftROI)
stereoMatcher.setROI2(rightROI)
stereoMatcher.setSpeckleRange(16)
stereoMatcher.setSpeckleWindowSize(45)
# Grab both frames first, then retrieve to minimize latency between cameras
while True:
if not left.grab() or not right.grab():
print("No more frames")
break
_, leftFrame = left.retrieve()
leftFrame = cropHorizontal(leftFrame)
leftHeight, leftWidth = leftFrame.shape[:2]
_, rightFrame = right.retrieve()
rightFrame = cropHorizontal(rightFrame)
rightHeight, rightWidth = rightFrame.shape[:2]
if (leftWidth, leftHeight) != imageSize:
print("Left camera has different size than the calibration data")
break
if (rightWidth, rightHeight) != imageSize:
print("Right camera has different size than the calibration data")
break
fixedLeft = cv2.remap(leftFrame, leftMapX, leftMapY, REMAP_INTERPOLATION)
fixedRight = cv2.remap(rightFrame, rightMapX, rightMapY, REMAP_INTERPOLATION)
grayLeft = cv2.cvtColor(fixedLeft, cv2.COLOR_BGR2GRAY)
grayRight = cv2.cvtColor(fixedRight, cv2.COLOR_BGR2GRAY)
depth = stereoMatcher.compute(grayLeft, grayRight)
cv2.imshow('left', fixedLeft)
cv2.imshow('right', fixedRight)
cv2.imshow('depth', depth / DEPTH_VISUALIZATION_SCALE)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
left.release()
right.release()
cv2.destroyAllWindows()
|
[
"cv2.StereoBM_create",
"numpy.load",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.remap",
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"sys.exit"
] |
[((244, 284), 'numpy.load', 'np.load', (['sys.argv[1]'], {'allow_pickle': '(False)'}), '(sys.argv[1], allow_pickle=False)\n', (251, 284), True, 'import numpy as np\n'), ((653, 672), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (669, 672), False, 'import cv2\n'), ((682, 701), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(2)'], {}), '(2)\n', (698, 701), False, 'import cv2\n'), ((1570, 1591), 'cv2.StereoBM_create', 'cv2.StereoBM_create', ([], {}), '()\n', (1589, 1591), False, 'import cv2\n'), ((3138, 3161), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3159, 3161), False, 'import cv2\n'), ((215, 226), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (223, 226), False, 'import sys\n'), ((1039, 1070), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (1061, 1070), False, 'import cv2\n'), ((1104, 1135), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (1126, 1135), False, 'import cv2\n'), ((2585, 2646), 'cv2.remap', 'cv2.remap', (['leftFrame', 'leftMapX', 'leftMapY', 'REMAP_INTERPOLATION'], {}), '(leftFrame, leftMapX, leftMapY, REMAP_INTERPOLATION)\n', (2594, 2646), False, 'import cv2\n'), ((2665, 2729), 'cv2.remap', 'cv2.remap', (['rightFrame', 'rightMapX', 'rightMapY', 'REMAP_INTERPOLATION'], {}), '(rightFrame, rightMapX, rightMapY, REMAP_INTERPOLATION)\n', (2674, 2729), False, 'import cv2\n'), ((2748, 2791), 'cv2.cvtColor', 'cv2.cvtColor', (['fixedLeft', 'cv2.COLOR_BGR2GRAY'], {}), '(fixedLeft, cv2.COLOR_BGR2GRAY)\n', (2760, 2791), False, 'import cv2\n'), ((2809, 2853), 'cv2.cvtColor', 'cv2.cvtColor', (['fixedRight', 'cv2.COLOR_BGR2GRAY'], {}), '(fixedRight, cv2.COLOR_BGR2GRAY)\n', (2821, 2853), False, 'import cv2\n'), ((2917, 2946), 'cv2.imshow', 'cv2.imshow', (['"""left"""', 'fixedLeft'], {}), "('left', fixedLeft)\n", (2927, 2946), False, 'import cv2\n'), ((2952, 2983), 'cv2.imshow', 'cv2.imshow', (['"""right"""', 'fixedRight'], {}), "('right', fixedRight)\n", (2962, 2983), False, 'import cv2\n'), ((2989, 3043), 'cv2.imshow', 'cv2.imshow', (['"""depth"""', '(depth / DEPTH_VISUALIZATION_SCALE)'], {}), "('depth', depth / DEPTH_VISUALIZATION_SCALE)\n", (2999, 3043), False, 'import cv2\n'), ((3052, 3066), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3063, 3066), False, 'import cv2\n')]
|
import re
import torch
import itertools
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from typing import Callable, Tuple, List, Iterable, Dict, Union, Optional, Iterable
from collections import OrderedDict
from pathlib import Path
from tqdm import tqdm
from src.utils import FileHandler, mask2geojson, mask2mat, label_sem_map
from src.patching import TilerStitcherTorch
from src.metrics import Benchmarker
from src.dl.utils import tensor_to_ndarray
from .post_processing.processor_builder import PostProcBuilder
from .predictor import Predictor
SUFFIXES = (".jpeg", ".jpg", ".tif", ".tiff", ".png")
class FolderDataset(Dataset, FileHandler):
def __init__(
self,
folder_path: Union[str, Path],
pattern: Optional[str]="*",
sort_by_y: Optional[bool]=False,
xmax: Optional[int]=None,
ymax: Optional[int]=None,
auto_range: bool=False,
tile_size: Optional[Tuple[int, int]]=(1000, 1000)
) -> None:
"""
Simple pytorch folder dataset. Assumes that
folder_path contains only image files which are readable
by cv2.
Args:
----------
folder_path (Union[str, Path]):
            path to the folder containing tile/image files
pattern (str, optional, default="*"):
file pattern for filtering only the files that contain
the pattern.
sort_by_y (bool, optional, default=False):
sorts a folder (containing tiles extracted by histoprep
package) by the y-coord rather than the x-coord
xmax (int, optional, default=None):
filters all the tile-files that contain x-coord less
or equal to this param in their filename. Works with
tiles extracted with histoprep.
See https://github.com/jopo666/HistoPrep
ymax (int, optional, default=None):
filters all the tile-files that contain y-coord less
or equal to this param in their filename. Works with
tiles extracted with histoprep.
See https://github.com/jopo666/HistoPrep
auto_range (bool, default=False):
Automatically filter tiles that contain ONE tissue
section rather than every redundant tissue section in
the wsi.
tile_size (Tuple[int, int], optional, default=(1000, 1000)):
size of the input tiles in the folder. Optional.
"""
super(FolderDataset, self).__init__()
self.tile_size = tile_size
folder_path = Path(folder_path)
assert folder_path.exists(), f"folder: {folder_path} does not exist"
assert folder_path.is_dir(), f"path: {folder_path} is not a folder"
assert all([f.suffix in SUFFIXES for f in folder_path.iterdir()]),(
f"files formats in given folder need to be in {SUFFIXES}"
)
# sort files
if sort_by_y:
self.fnames = sorted(
folder_path.glob(pattern),
key=lambda x: self._get_xy_coords(x.name)[1]
)
else:
self.fnames = sorted(folder_path.glob(pattern))
        # filter by xy-coordinates encoded in the filename
if xmax is not None:
self.fnames = [
f for f in self.fnames
if self._get_xy_coords(f.name)[0] <= xmax
]
if ymax is not None and not auto_range:
self.fnames = [
f for f in self.fnames
if self._get_xy_coords(f.name)[1] <= ymax
]
if auto_range:
ymin, ymax = self._get_auto_range(coord="y") # only y-axis for now
self.fnames = [
f for f in self.fnames
if ymin <= self._get_xy_coords(f.name)[1] <= ymax
]
def _get_xy_coords(self, fname: str) -> List[int]:
"""
Extract xy-coords from files named with x- and y- coordinates
in their file name.
example filename: "sumthing_4955_x-47000_y-25000.png
"""
assert re.findall(r"(x-\d+_y-\d+)", fname), (
"fname not in 'sumthing_x-[coord1]_y-[coord2]'-format",
"Set auto_range to False if filenames are not in this format"
)
xy_str = re.findall(r"(x-\d+_y-\d+)", fname)
xy = [int(c) for c in re.findall(r"\d+", xy_str[0])]
return xy
def _get_auto_range(
self,
coord: str="y",
section_ix: int=0,
section_length: int=6000
) -> Tuple[int, int]:
"""
Automatically extract a range of tiles that contain a section
of tissue in a whole slide image. This is pretty ad hoc
and requires histoprep extracted tiles and that the slides
contain many tissue sections. Use with care.
Args:
---------
coord (str, default="y"):
specify the range in either x- or y direction
section_ix (int, default=0):
the nth tissue section in the wsi in the direction of
the `coord` param. Starts from 0th index. E.g. If
`coord='y'` the 0th index is the upmost tissue section.
section_length (int, default=6000):
Threshold to concentrate only on tissue sections that
are larger than 6000 pixels
Returns:
--------
Tuple[int, int]: The start and end point of the tissue
section in the specified direction
"""
ix = 1 if coord == "y" else 0
coords = sorted(
set([self._get_xy_coords(f.name)[ix] for f in self.fnames])
)
try:
splits = []
split = []
for i in range(len(coords)-1):
if coords[i + 1] - coords[i] == self.tile_size[ix]:
split.append(coords[i])
else:
if i < len(coords) - 1:
split.append(coords[i])
splits.append(split)
split = []
ret_splits = [
split for split in splits
if len(split) >= section_length//self.tile_size[ix]
]
ret_split = ret_splits[section_ix]
return ret_split[0], ret_split[-1]
        except IndexError:
            # if there is only one tissue section, return min and max of the
            # sorted scalar coordinates
            return coords[0], coords[-1]
def __len__(self) -> int:
return len(self.fnames)
def __getitem__(self, index: int) -> torch.Tensor:
fn = self.fnames[index]
im = FileHandler.read_img(fn.as_posix())
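        # HWC image array -> CHW tensor layout expected by the model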
im = torch.from_numpy(im.transpose(2, 0, 1))
return {
"im":im,
"file":fn.name[:-4]
}
class Inferer(FileHandler):
def __init__(
self,
model: pl.LightningModule,
in_data_dir: str,
gt_mask_dir: str=None,
tta: bool=False,
model_weights: str="last",
loader_batch_size: int=8,
loader_num_workers: int=8,
patch_size: Tuple[int, int]=(256, 256),
stride_size: int=128,
model_batch_size: int=None,
thresh_method: str="naive",
thresh: float=0.5,
apply_weights: bool=False,
post_proc_method: str=None,
n_images: int=None,
fn_pattern: str="*",
xmax: Optional[int]=None,
ymax: Optional[int]=None,
auto_range: Optional[bool]=False,
**kwargs
) -> None:
"""
Class to perform inference and post-processing
Args:
-----------
model (pl.LightningModule):
Input SegModel (lightning model)
in_data_dir (str):
This directory will be used as the input data directory.
Assumes that the directory contains only cv2 readable
image files: .png, .tif, etc
gt_mask_dir (str, default=None):
The directory of the test ground truth masks. Needed for
benchmarking only. The GT-masks need to be in .mat files
tta (bool, default=False):
If True, performs test time augmentation. Inference time
goes up with often marginal performance improvements.
model_weights (str, default="last"):
pytorch lightning saves the weights of the model for the
last epoch and best epoch (based on validation data).
One of ("best", "last").
loader_batch_size (int, default=8):
Number of images loaded from the input folder by the
workers per dataloader iteration. This is the DataLoader
batch size, NOT the batch size that is used during the
forward pass of the model.
loader_num_workers (int, default=8):
Number of threads/workers for torch dataloader
patch_size (Tuple[int, int], default=(256, 256)):
The size of the input patches that are fed to the
segmentation model.
stride_size (int, default=128):
If input images are larger than the model input image
size (patch_size), the images are tiled with a sliding
window into small patches with overlap. This param is
the stride size used in the sliding window operation.
Small stride for the sliding window results in less
artefacts and checkerboard effect in the resulting
prediction when the patches are stitched back to the
input image size. On the other hand small stride_size
means more patches and larger number of patches leads to
slower inference time and larger memory consumption.
model_batch_size (int, default=None):
The batch size that is used when the input is fed to the
            model (the actual model batch size). Use this if the input
            images need patching and the training batch size is too
            large, i.e. you get a CUDA out-of-memory error.
thresh_method (str, default="naive"):
Thresholding method for the soft masks from the instance
            branch. One of ("naive", "argmax", "sauvola", "niblack").
thresh (float, default = 0.5):
threshold probability value. Only used if method="naive"
        apply_weights (bool, default=False):
            After a prediction, apply a weight matrix that assigns
            bigger weight on pixels in center and less weight to
            pixels on prediction boundaries. Helps dealing with
            prediction artefacts on tile/patch boundaries. NOTE:
This is only applied at the auxiliary branch prediction
since there tiling effect has the most impact.
(Especially, in HoVer-maps)
post_proc_method (str, default=None):
Defines the post-processing pipeline. If this is None,
then the post-processing pipeline is defined by the
aux_type of the model. If the aux_type of the model is
None, then the basic watershed post-processing pipeline
is used. The post-processing method is always specific
to the auxiliary maps that the model outputs so if the
aux_type == "hover", then the HoVer-Net and CellPose
pipelines can be used. One of: None, "hover","cellpose",
"drfns", "dcan", "dran".
n_images (int, default=None):
Number of images inferred before clearing the memory.
Useful if there is a large number of images in a folder.
The segmentation results are saved after n_images are
segmented and memory cleared for a new set of images.
        fn_pattern (str, default="*"):
A pattern in file names in the in_data_dir. For example,
for pannuke dataset you can run inference for only
images of specific tissue e.g. pattern = *_Adrenal_*.
xmax (int, optional, default=None):
Filters all the file names in the input directory that
contain x-coordinate less than this param in their
filename. I.e. the tiles in the folder need to contain
the x- and y- coordinates (in xy- order) in the filename
Example tile filename: "x-45000_y-50000.png".
ymax (int, optional, default=None):
Filters all the file names in the input directory that
contain y-coord less than this param in their filename.
I.e. the tiles in the folder need to contain the x- and
y- coords (in xy- order) in the filename. Example tile
filename: "x-45000_y-50000.png".
auto_range (bool, optional, default=False):
Automatically filter tiles from a folder to contain
only ONE tissue section rather than every redundant
tissue section in the wsi. The tiles in the folder need
to contain the x- and y-coords (in xy- order) in the
filename. Example tile filename: "x-45000_y-50000.png".
"""
assert isinstance(model, pl.LightningModule), (
"Input model needs to be a lightning model"
)
assert stride_size <= patch_size[0], (
f"stride_size: {stride_size} > {patch_size[0]}"
)
assert model_weights in ("best", "last")
# set model to device and to inference mode
self.model = model
self.model.cuda() if torch.cuda.is_available() else self.model.cpu()
self.model.eval()
        # inference itself runs under a `with torch.no_grad():` context in
        # run_inference, so gradients are never tracked during prediction
# Load trained weights for the model
self.exp_name = self.model.experiment_name
self.exp_version = self.model.experiment_version
ckpt_path = self.get_model_checkpoint(
experiment=self.exp_name,
version=self.exp_version,
which=model_weights
)
checkpoint = torch.load(
ckpt_path, map_location = lambda storage, loc: storage
)
self.model.load_state_dict(
checkpoint['state_dict'],
strict=False
)
self.patch_size = patch_size
self.stride_size = stride_size
# Set input data folder
self.in_data_dir = in_data_dir
# Set num images inferred before clearing mem (chunk size)
# By default there is no chunking.
self.n_images = len(list(Path(self.in_data_dir).iterdir()))
if n_images is not None:
self.n_images = n_images
# set gt mask folder
self.gt_mask_dir = None
if gt_mask_dir:
self.gt_mask_dir = sorted(
Path(gt_mask_dir).glob(fn_pattern)
)
# Batch sizes
self.model_batch_size = model_batch_size
self.loader_batch_size = loader_batch_size
# Set dataset dataloader
self.folderset = FolderDataset(
self.in_data_dir,
pattern=fn_pattern,
xmax=xmax,
ymax=ymax,
auto_range=auto_range
)
self.dataloader = DataLoader(
self.folderset,
batch_size=loader_batch_size,
shuffle=False, pin_memory=True,
num_workers=loader_num_workers
)
        # set apply weights flag for aux branch and predictor class
self.apply_weights = apply_weights
self.predictor = Predictor(self.model, self.patch_size)
# set the post-processing pipeline. Defaults to
# model.aux_type if model has an auxiliary branch
self.post_proc_method = post_proc_method
if self.post_proc_method is None:
self.post_proc_method = "basic"
if self.model.aux_branch:
self.post_proc_method = self.model.aux_type
# Quick checks that a valid post-proc-method is used
msg = (
"post_proc_method does not match to model config. ",
f"set to: {self.post_proc_method} while the model ",
f"decoder_aux_branch is: {self.model.decoder_aux_branch}"
)
if self.model.decoder_aux_branch:
if self.model.decoder_aux_branch == "hover":
allowed = ("hover", "cellpose", "basic")
elif self.model.decoder_aux_branch == "dist":
allowed = ("drfns", "basic")
elif self.model.decoder_aux_branch == "contour":
allowed = ("dcan", "dran", "basic")
assert self.post_proc_method in allowed, msg
# init the post-processor
self.post_processor = PostProcBuilder.set_postprocessor(
post_proc_method=self.post_proc_method,
thresh_method=thresh_method,
thresh=thresh
)
# input norm flag and train data stats
self.norm = self.model.normalize_input
# self.stats = self.get_dataset_stats(
# self.model.train_data.as_posix()
# )
def _apply(
self,
var: Union[torch.Tensor, None],
op: Callable,
**kwargs
) -> Union[torch.Tensor, None]:
"""
Applies the given torch operation `op` to the given variable
`var`. This exists to catch memory errors if you're wondering
why...
Basically, if some cumulative torch operation overflows the GPU
memory, this catches the error, detaches the input tensor from
gpu and continues executing the operation on the cpu side. If
the `var` is None or an empty list/string etc. then this returns
None for convenience.
Args:
--------
var: (torch.Tensor or None):
The torch.Tensor or list of tensors that should be
detached and moved to cpu before applying operations
op (Callable):
the torch function/callable that causes the mem error
Returns:
--------
        torch.Tensor or None: the output tensor or None
"""
if not isinstance(var, torch.Tensor) and not var:
return None
try:
var = op(var, **kwargs)
        except RuntimeError as e:
            if 'out of memory' in str(e):
                if isinstance(var, list):
                    new_var = []
                    for elem in var:
                        elem = elem.detach()
                        if elem.is_cuda:
                            elem = elem.cpu()
                        new_var.append(elem)
                elif isinstance(var, torch.Tensor):
                    var = var.detach()
                    if var.is_cuda:
                        var = var.cpu()
                    new_var = var
                var = op(new_var, **kwargs)
            else:
                # re-raise errors that are not CUDA OOM instead of silently
                # returning the unprocessed input
                raise
        return var
def _get_batch(
self,
patches: torch.Tensor,
batch_size: int
) -> torch.Tensor:
"""
Divide a set of patches into batches of patches
Args:
---------
patches (torch.Tensor):
Batch of patches in. Shape (C, num_patches, pH, pW)
batch_size (int):
size of the batch
Yields:
---------
torch.Tensor of shape (batch_size, C, H, W)
"""
for i in range(0, patches.shape[1], batch_size):
batch = patches[:, i:i+batch_size, ...].permute(1, 0, 2, 3)
yield batch
def _predict_batch(
self,
batch: torch.Tensor
) -> Tuple[Union[torch.Tensor, None]]:
"""
Forward pass + classify. Handles missing branches in the model.
Args:
---------
batch (torch.Tensor):
A batch of patches. Shape (B, C, patch_size, patch_size)
Returns:
---------
        A tuple of tensors containing the predictions. If the network
        does not contain an aux or type branch, those predictions are None
"""
# TODO: tta
# pred = self.predictor.forward_pass(batch, norm=self.norm, mean=self.stats[0], std=self.stats[1])
pred = self.predictor.forward_pass(batch, norm=self.norm)
insts = self.predictor.classify(pred["instances"], act="softmax")
types = None
if pred["types"] is not None:
types = self.predictor.classify(pred["types"], act="softmax")
aux = None
if pred["aux"] is not None:
aux = self.predictor.classify(
pred["aux"], act=None, apply_weights=self.apply_weights
)
sem = None
if pred["sem"] is not None:
sem = self.predictor.classify(
pred["sem"], act="softmax", apply_weights=False
)
return insts, types, aux, sem
def _infer_large_img_batch(
self,
batch: torch.Tensor,
names: Tuple[str],
batch_loader: Iterable=None,
) -> Tuple[Iterable[Tuple[str, np.ndarray]]]:
"""
Run inference on large images that require tiling and back
stitching. I.e. For images larger than the model input size.
Args:
--------
batch (torch.Tensor):
A batch of patches. Shape (B, C, patch_size, patch_size)
names (Tuple[str]):
filenames of the different images (without the suffices)
batch_loader (Iterable, default=None):
tqdm loader object
Returns:
--------
Tuple: of Zip objects containing (name, np.ndarray) pairs
"""
# Tile the image into patches
tilertorch = TilerStitcherTorch(
batch.shape,
self.patch_size,
self.stride_size,
padding=True
)
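        # (B, C, H, W) batch -> (B, C, n_patches, pH, pW) patches; the
        # predictions are stitched back to the original size further below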
patches = tilertorch.extract_patches_from_batched(batch)
# (for tqdm logging)
n_batches_inferred = 0
n_patches_total = patches.shape[2]*self.loader_batch_size
batch_instances = []
batch_types = []
batch_aux = []
batch_sem = []
# model batch size
batch_size = self.model.batch_size
if self.model_batch_size is not None:
batch_size = self.model_batch_size
# Loop the B in batched patches (B, C, n_patches, patch_h, patch_w)
for j in range(patches.shape[0]):
pred_inst = []
pred_type = []
pred_aux = []
pred_sem = []
# Divide patches into batches that can be used as input to model
for batch in self._get_batch(patches[j, ...], batch_size):
insts, types, aux, sem = self._predict_batch(batch)
pred_inst.append(insts)
                if types is not None:
                    pred_type.append(types)
                if aux is not None:
                    pred_aux.append(aux)
                if sem is not None:
                    pred_sem.append(sem)
n_batches_inferred += batch.shape[0]
if batch_loader is not None:
batch_loader.set_postfix(
patches=f"{n_batches_inferred}/{n_patches_total}"
)
# catch runtime error if preds take too much GPU mem and
# move to cpu with the _apply method.
pred_inst = self._apply(var=pred_inst, op=torch.cat, dim=0)
pred_type = self._apply(var=pred_type, op=torch.cat, dim=0)
pred_aux = self._apply(var=pred_aux, op=torch.cat, dim=0)
pred_sem = self._apply(var=pred_sem, op=torch.cat, dim=0)
batch_instances.append(pred_inst)
batch_types.append(pred_type)
batch_aux.append(pred_aux)
batch_sem.append(pred_sem)
# Stitch the patches back to the orig img size
insts = torch.stack(batch_instances, dim=0).permute(0, 2, 1, 3, 4)
insts = self._apply(insts, tilertorch.stitch_batched_patches)
insts = zip(names, tensor_to_ndarray(insts))
types = zip(names, [None]*len(names))
if all(e is not None for e in batch_types):
types = torch.stack(batch_types, dim=0).permute(0, 2, 1, 3, 4)
types = self._apply(types, tilertorch.stitch_batched_patches)
types = zip(names, tensor_to_ndarray(types))
aux = zip(names, [None]*len(names))
if all(e is not None for e in batch_aux):
aux = torch.stack(batch_aux, dim=0).permute(0, 2, 1, 3, 4)
aux = self._apply(aux, tilertorch.stitch_batched_patches)
aux = zip(names, tensor_to_ndarray(aux))
sem = zip(names, [None]*len(names))
if all(e is not None for e in batch_sem):
sem = torch.stack(batch_sem, dim=0).permute(0, 2, 1, 3, 4)
sem = self._apply(sem, tilertorch.stitch_batched_patches)
sem = zip(names, tensor_to_ndarray(sem))
return insts, types, aux, sem
def _infer_img_batch(
self,
batch: Tuple[torch.Tensor],
names: Tuple[str],
batch_loader: Iterable=None
) -> Tuple[Iterable[Tuple[str, np.ndarray]]]:
"""
Run inference on a batch of images that do not require tiling
and stitching. I.e. For images of the same size as the model
input size.
Args:
--------
batch (torch.Tensor):
A batch of patches. Shape (B, C, patch_size, patch_size)
names (Tuple[str]):
filenames of the different images (without the suffices)
batch_loader (Iterable, default=None):
tqdm loader object
Returns:
--------
Tuple: of Zip objects containing (name, np.ndarray) pairs
"""
n_batches_inferred = 0
pred_insts, pred_types, pred_aux, pred_sem = self._predict_batch(batch)
insts = zip(names, tensor_to_ndarray(pred_insts))
types = zip(names, [None]*len(names))
if pred_types is not None:
types = zip(names, tensor_to_ndarray(pred_types))
aux = zip(names, [None]*len(names))
if pred_aux is not None:
aux = zip(names, tensor_to_ndarray(pred_aux))
sem = zip(names, [None]*len(names))
if pred_sem is not None:
sem = zip(names, tensor_to_ndarray(pred_sem))
n_batches_inferred += batch.shape[0]
if batch_loader is not None:
batch_loader.set_postfix(
patches=f"{n_batches_inferred}/{len(self.folderset.fnames)}"
)
return insts, types, aux, sem
def _chunks(self, iterable: Iterable, size: int) -> Iterable:
"""
Generate adjacent chunks of an iterable
This is used to chunk the folder dataset for lower mem footprint
Args:
---------
iterable (Iterable):
Input iterable (FolderDataset)
size (int):
size of one chunk.
Returns:
---------
Iterable chunk of filenames
"""
it = iter(iterable)
return iter(lambda: tuple(itertools.islice(it, size)), ())
def _infer(self, chunked_dataloader: Iterable) -> None:
"""
Run inference on input images.
Args:
---------
chunked_dataloader (Iterable, default=None):
A chunked dataloader object
"""
# Start pipeline
soft_instances = []
soft_types = []
aux_maps = []
soft_areas = []
with tqdm(chunked_dataloader, unit="batch") as batch_loader:
for data in batch_loader:
# Get data
batch = data["im"]
names = data["file"]
batch_loader.set_description(f"Running inference for {names}")
# Set patching flag (most datasets require patching),
# Assumes square patches
requires_patching = False
if batch.shape[-1] > self.patch_size[0]:
requires_patching = True
# predict soft maps
if requires_patching:
insts, types, aux, sem = self._infer_large_img_batch(
batch, names, batch_loader
)
else:
insts, types, aux, sem = self._infer_img_batch(
batch, names, batch_loader
)
soft_instances.extend(insts)
soft_types.extend(types)
aux_maps.extend(aux)
soft_areas.extend(sem)
# save intermediate results to mem if save_dir not specified
self.soft_insts = OrderedDict(soft_instances)
self.soft_types = OrderedDict(soft_types)
self.aux_maps = OrderedDict(aux_maps)
self.soft_areas = OrderedDict(soft_areas)
def _post_process(self):
"""
Run the post processing pipeline
"""
assert "soft_insts" in self.__dict__.keys(), (
"No predictions found, run inference first."
)
maps = self.post_processor.run_post_processing(
inst_probs=self.soft_insts,
type_probs=self.soft_types,
sem_probs=self.soft_areas,
aux_maps=self.aux_maps,
)
# save to containers
self.inst_maps = OrderedDict()
self.type_maps = OrderedDict()
self.sem_maps = OrderedDict()
for res in maps:
name = res[0]
self.inst_maps[name] = res[1].astype("int32")
tmap = None
if self.model.decoder_type_branch:
tmap = res[2].astype("int32")
smap = None
if self.model.decoder_sem_branch:
smap = res[3].astype("int32")
self.type_maps[name] = tmap
self.sem_maps[name] = smap
def run_inference(
self,
save_dir: Union[Path, str]=None,
fformat: str=None,
offsets: bool=False,
classes_type: Dict[str, int]=None,
classes_sem: Dict[str, int]=None,
) -> None:
"""
Run inference and post processing in chunks
Args:
---------
save_dir (Path or str, default=None):
directory where the .mat/geojson files are saved
        fformat (str, default=None):
            file format for the masks. One of ".mat", "geojson", None
offsets (bool, default=False):
If True, geojson coords are shifted by the offsets that
are encoded in the filenames (e.g. "x-1000_y-4000.png")
classes_type (Dict[str, int], default=None):
class dict for the cell types.
e.g. {"inflam":1, "epithelial":2, "connec":3}
This is required if masks are saved to geojson.
classes_sem (Dict[str, int], default=None):
class dict for the area types.
e.g. {"inflam":1, "epithelial":2, "connec":3}
This is required if masks are saved to geojson.
"""
# assertions before lengthy processing
if save_dir is not None:
save_dir = Path(save_dir)
assert save_dir.exists(), f"{save_dir} not found"
assert fformat in ("geojson", ".mat", None)
if fformat == "geojson":
assert classes_type is not None, (
"cell type classes needed for geojson format."
)
if self.model.decoder_sem_branch:
assert classes_sem is not None, (
"area classes needed for geojson format."
)
n_images_real = int(np.ceil(self.n_images / self.loader_batch_size))
n_chunks = int(np.ceil(len(self.folderset.fnames) / self.n_images))
loader = self._chunks(iterable=self.dataloader, size=n_images_real)
with torch.no_grad():
for _ in range(n_chunks):
self._infer(next(loader))
self._post_process()
# save results to files
if save_dir is not None:
for name, inst_map in self.inst_maps.items():
if fformat == "geojson":
# parse the offset coords from the inst key
x_off, y_off = (
int(c) for c in re.findall(r"\d+", name)
) if offsets else (0, 0)
mask2geojson(
inst_map=inst_map,
type_map=self.type_maps[name],
fname=f"{name}_cells",
save_dir=Path(save_dir / "cells"),
x_offset=x_off,
y_offset=y_off,
classes=classes_type
)
if self.model.decoder_sem_branch:
mask2geojson(
inst_map=label_sem_map(self.sem_maps[name]),
type_map=self.sem_maps[name],
fname=f"{name}_areas",
save_dir=Path(save_dir / "areas"),
x_offset=x_off,
y_offset=y_off,
classes=classes_sem
)
elif fformat == ".mat":
# TODO add sem classes
mask2mat(
inst_map=inst_map.astype("int32"),
type_map=self.type_maps[name].astype("int32"),
fname=name,
save_dir=save_dir
)
# clear memory
self.soft_insts.clear()
self.soft_types.clear()
self.aux_maps.clear()
self.inst_maps.clear()
self.type_maps.clear()
torch.cuda.empty_cache()
def benchmark_insts(
self,
pattern_list: Optional[List[str]]=None,
file_prefix: Optional[str]=""
) -> pd.DataFrame:
"""
        Run benchmarking metrics for instance maps only and save them
        into a csv file. The file is written into the "results"
        directory of the repository. This requires that the `gt_mask_dir`
arg is given
Args:
---------
pattern_list (List[str], optional, default=None):
A list of string patterns used for filtering files in
the input data folder
file_prefix (str, optional, default=""):
prefix to give to the csv filename that contains the
benchmarking results
Returns:
----------
pd.DataFrame: a df containing the benchmarking results
"""
assert self.gt_mask_dir is not None, "gt_mask_dir is None"
        assert len(self.gt_mask_dir) > 0, "no ground truth mask files found"
assert "inst_maps" in self.__dict__.keys(), (
"No instance maps found, run inference first."
)
gt_masks = OrderedDict(
[
(f.name[:-4], self.read_mask(f, "inst_map"))
for f in self.gt_mask_dir
]
)
exp_dir = self.get_experiment_dir(self.exp_name, self.exp_version)
bm = Benchmarker()
scores = bm.benchmark_insts(
inst_maps=self.inst_maps,
gt_masks=gt_masks,
pattern_list=pattern_list,
save_dir=exp_dir,
prefix=file_prefix
)
return scores
def benchmark_types(
self,
classes: Dict[str, int],
pattern_list: Optional[List[str]]=None,
file_prefix: Optional[str]=""
) -> pd.DataFrame:
"""
Run benchmarking for inst_maps & type maps and save them into a
csv file. The file is written into the "results" directory of
        the repository. This requires that the `gt_mask_dir` arg is given
Args:
---------
classes (Dict[str, int]):
The class dict e.g. {bg: 0, immune: 1, epithel: 2}.
background must be the 0 class
pattern_list (List[str], optional, default=None):
A list of string patterns used for filtering files in
the input data folder
file_prefix (str, optional, default=""):
prefix to give to the csv filename that contains the
benchmarking results
Returns:
----------
pd.DataFrame: A df containing the benchmarking results
"""
assert self.gt_mask_dir is not None, "gt_mask_dir is None"
        assert len(self.gt_mask_dir) > 0, "no ground truth mask files found"
assert "inst_maps" in self.__dict__.keys(), (
"No instance maps found, run inference first"
)
assert "type_maps" in self.__dict__.keys(), (
"No type maps found, run inference first."
)
assert self.model.decoder_type_branch, (
"the network model does not contain type branch"
)
gt_mask_insts = OrderedDict(
[
(f.name[:-4], FileHandler.read_mask(f, "inst_map"))
for f in self.gt_mask_dir
]
)
gt_mask_types = OrderedDict(
[
(f.name[:-4], FileHandler.read_mask(f, "type_map"))
for f in self.gt_mask_dir
]
)
exp_dir = self.get_experiment_dir(self.exp_name, self.exp_version)
bm = Benchmarker()
scores = bm.benchmark_per_type(
inst_maps=self.inst_maps,
type_maps=self.type_maps,
gt_mask_insts=gt_mask_insts,
gt_mask_types=gt_mask_types,
pattern_list=pattern_list,
classes=classes,
save_dir=exp_dir,
prefix=file_prefix
)
return scores
|
[
"tqdm.tqdm",
"numpy.ceil",
"torch.utils.data.DataLoader",
"torch.stack",
"src.dl.utils.tensor_to_ndarray",
"src.utils.label_sem_map",
"torch.load",
"src.patching.TilerStitcherTorch",
"pathlib.Path",
"re.findall",
"src.metrics.Benchmarker",
"torch.cuda.is_available",
"itertools.islice",
"torch.cuda.empty_cache",
"collections.OrderedDict",
"src.utils.FileHandler.read_mask",
"torch.no_grad"
] |
[((2724, 2741), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2728, 2741), False, 'from pathlib import Path\n'), ((4264, 4300), 're.findall', 're.findall', (['"""(x-\\\\d+_y-\\\\d+)"""', 'fname'], {}), "('(x-\\\\d+_y-\\\\d+)', fname)\n", (4274, 4300), False, 'import re\n'), ((4481, 4517), 're.findall', 're.findall', (['"""(x-\\\\d+_y-\\\\d+)"""', 'fname'], {}), "('(x-\\\\d+_y-\\\\d+)', fname)\n", (4491, 4517), False, 'import re\n'), ((14287, 14302), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14300, 14302), False, 'import torch\n'), ((14644, 14708), 'torch.load', 'torch.load', (['ckpt_path'], {'map_location': '(lambda storage, loc: storage)'}), '(ckpt_path, map_location=lambda storage, loc: storage)\n', (14654, 14708), False, 'import torch\n'), ((15820, 15944), 'torch.utils.data.DataLoader', 'DataLoader', (['self.folderset'], {'batch_size': 'loader_batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': 'loader_num_workers'}), '(self.folderset, batch_size=loader_batch_size, shuffle=False,\n pin_memory=True, num_workers=loader_num_workers)\n', (15830, 15944), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((22400, 22485), 'src.patching.TilerStitcherTorch', 'TilerStitcherTorch', (['batch.shape', 'self.patch_size', 'self.stride_size'], {'padding': '(True)'}), '(batch.shape, self.patch_size, self.stride_size, padding=True\n )\n', (22418, 22485), False, 'from src.patching import TilerStitcherTorch\n'), ((29539, 29566), 'collections.OrderedDict', 'OrderedDict', (['soft_instances'], {}), '(soft_instances)\n', (29550, 29566), False, 'from collections import OrderedDict\n'), ((29593, 29616), 'collections.OrderedDict', 'OrderedDict', (['soft_types'], {}), '(soft_types)\n', (29604, 29616), False, 'from collections import OrderedDict\n'), ((29641, 29662), 'collections.OrderedDict', 'OrderedDict', (['aux_maps'], {}), '(aux_maps)\n', (29652, 29662), False, 'from collections import OrderedDict\n'), ((29689, 29712), 'collections.OrderedDict', 'OrderedDict', (['soft_areas'], {}), '(soft_areas)\n', (29700, 29712), False, 'from collections import OrderedDict\n'), ((30207, 30220), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30218, 30220), False, 'from collections import OrderedDict\n'), ((30246, 30259), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30257, 30259), False, 'from collections import OrderedDict\n'), ((30284, 30297), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30295, 30297), False, 'from collections import OrderedDict\n'), ((36581, 36594), 'src.metrics.Benchmarker', 'Benchmarker', ([], {}), '()\n', (36592, 36594), False, 'from src.metrics import Benchmarker\n'), ((38855, 38868), 'src.metrics.Benchmarker', 'Benchmarker', ([], {}), '()\n', (38866, 38868), False, 'from src.metrics import Benchmarker\n'), ((14205, 14230), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14228, 14230), False, 'import torch\n'), ((24756, 24780), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['insts'], {}), '(insts)\n', (24773, 24780), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((26674, 26703), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['pred_insts'], {}), '(pred_insts)\n', (26691, 26703), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((28348, 28386), 'tqdm.tqdm', 'tqdm', (['chunked_dataloader'], {'unit': '"""batch"""'}), "(chunked_dataloader, unit='batch')\n", (28352, 28386), False, 'from tqdm import tqdm\n'), ((32080, 32094), 
'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (32084, 32094), False, 'from pathlib import Path\n'), ((32608, 32655), 'numpy.ceil', 'np.ceil', (['(self.n_images / self.loader_batch_size)'], {}), '(self.n_images / self.loader_batch_size)\n', (32615, 32655), True, 'import numpy as np\n'), ((32823, 32838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (32836, 32838), False, 'import torch\n'), ((4547, 4576), 're.findall', 're.findall', (['"""\\\\d+"""', 'xy_str[0]'], {}), "('\\\\d+', xy_str[0])\n", (4557, 4576), False, 'import re\n'), ((24600, 24635), 'torch.stack', 'torch.stack', (['batch_instances'], {'dim': '(0)'}), '(batch_instances, dim=0)\n', (24611, 24635), False, 'import torch\n'), ((25061, 25085), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['types'], {}), '(types)\n', (25078, 25085), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((25352, 25374), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['aux'], {}), '(aux)\n', (25369, 25374), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((25641, 25663), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['sem'], {}), '(sem)\n', (25658, 25663), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((26818, 26847), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['pred_types'], {}), '(pred_types)\n', (26835, 26847), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((26956, 26983), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['pred_aux'], {}), '(pred_aux)\n', (26973, 26983), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((27092, 27119), 'src.dl.utils.tensor_to_ndarray', 'tensor_to_ndarray', (['pred_sem'], {}), '(pred_sem)\n', (27109, 27119), False, 'from src.dl.utils import tensor_to_ndarray\n'), ((24901, 24932), 'torch.stack', 'torch.stack', (['batch_types'], {'dim': '(0)'}), '(batch_types, dim=0)\n', (24912, 24932), False, 'import torch\n'), ((25200, 25229), 'torch.stack', 'torch.stack', (['batch_aux'], {'dim': '(0)'}), '(batch_aux, dim=0)\n', (25211, 25229), False, 'import torch\n'), ((25489, 25518), 'torch.stack', 'torch.stack', (['batch_sem'], {'dim': '(0)'}), '(batch_sem, dim=0)\n', (25500, 25518), False, 'import torch\n'), ((27920, 27946), 'itertools.islice', 'itertools.islice', (['it', 'size'], {}), '(it, size)\n', (27936, 27946), False, 'import itertools\n'), ((35148, 35172), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (35170, 35172), False, 'import torch\n'), ((38475, 38511), 'src.utils.FileHandler.read_mask', 'FileHandler.read_mask', (['f', '"""inst_map"""'], {}), "(f, 'inst_map')\n", (38496, 38511), False, 'from src.utils import FileHandler, mask2geojson, mask2mat, label_sem_map\n'), ((38661, 38697), 'src.utils.FileHandler.read_mask', 'FileHandler.read_mask', (['f', '"""type_map"""'], {}), "(f, 'type_map')\n", (38682, 38697), False, 'from src.utils import FileHandler, mask2geojson, mask2mat, label_sem_map\n'), ((15145, 15167), 'pathlib.Path', 'Path', (['self.in_data_dir'], {}), '(self.in_data_dir)\n', (15149, 15167), False, 'from pathlib import Path\n'), ((15391, 15408), 'pathlib.Path', 'Path', (['gt_mask_dir'], {}), '(gt_mask_dir)\n', (15395, 15408), False, 'from pathlib import Path\n'), ((33681, 33705), 'pathlib.Path', 'Path', (["(save_dir / 'cells')"], {}), "(save_dir / 'cells')\n", (33685, 33705), False, 'from pathlib import Path\n'), ((33348, 33372), 're.findall', 're.findall', (['"""\\\\d+"""', 'name'], {}), "('\\\\d+', name)\n", (33358, 33372), False, 'import re\n'), ((34040, 34074), 
'src.utils.label_sem_map', 'label_sem_map', (['self.sem_maps[name]'], {}), '(self.sem_maps[name])\n', (34053, 34074), False, 'from src.utils import FileHandler, mask2geojson, mask2mat, label_sem_map\n'), ((34248, 34272), 'pathlib.Path', 'Path', (["(save_dir / 'areas')"], {}), "(save_dir / 'areas')\n", (34252, 34272), False, 'from pathlib import Path\n')]
|
""" Unit tests for Fourier transform processors
"""
import logging
import unittest
from functools import partial
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import pixel_to_skycoord
from arl.data.polarisation import PolarisationFrame
from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image
from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, \
predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, \
invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, \
create_image_from_visibility, predict_skycomponent_visibility, \
predict_facets_wstack, invert_facets_wstack, \
predict_facets_wprojection, invert_facets_wprojection
from arl.imaging.weighting import weight_visibility
from arl.skycomponent.operations import create_skycomponent, find_skycomponents, find_nearest_component, \
insert_skycomponent
from arl.util.testing_support import create_named_configuration
from arl.visibility.base import create_visibility
from arl.visibility.operations import sum_visibility
log = logging.getLogger(__name__)
class TestImaging(unittest.TestCase):
def _checkdirty(self, vis, name='test_invert_2d_dirty', fluxthreshold=1.0):
# Make the dirty image
self.params['imaginary'] = False
dirty = create_empty_image_like(self.model)
dirty, sumwt = invert_2d(vis=vis, im=dirty, dopsf=False, normalize=True, **self.params)
export_image_to_fits(dirty, '%s/%s_dirty.fits' % (self.dir, name))
maxabs = numpy.max(numpy.abs(dirty.data))
assert maxabs < fluxthreshold, "%s, abs max %f exceeds flux threshold" % (name, maxabs)
def _checkcomponents(self, dirty, fluxthreshold=5.0, positionthreshold=1.0):
comps = find_skycomponents(dirty, fwhm=1.0, threshold=fluxthreshold, npixels=5)
assert len(comps) == len(self.components), "Different number of components found: original %d, recovered %d" % \
(len(self.components), len(comps))
cellsize = abs(dirty.wcs.wcs.cdelt[0])
# Check for agreement between image and DFT
for comp in comps:
sflux = sum_visibility(self.componentvis, comp.direction)[0]
assert abs(comp.flux[0, 0] - sflux[0, 0]) < fluxthreshold, \
"Fitted and DFT flux differ %s %s" % (comp.flux[0, 0], sflux[0, 0])
# Check for agreement in direction
ocomp = find_nearest_component(comp.direction, self.components)
radiff = abs(comp.direction.ra.deg - ocomp.direction.ra.deg) / cellsize
            assert radiff < positionthreshold, "Component differs in ra %.3f pixels" % radiff
decdiff = abs(comp.direction.dec.deg - ocomp.direction.dec.deg) / cellsize
assert decdiff < positionthreshold, "Component differs in dec %.3f pixels" % decdiff
def setUp(self):
import os
self.dir = './test_results'
os.makedirs(self.dir, exist_ok=True)
self.params = {'npixel': 512,
'nchan': 1,
'reffrequency': 1e8,
'facets': 8,
'padding': 2,
'oversampling': 2,
'timeslice': 1000.0}
def actualSetUp(self, time=None, frequency=None, dospectral=False, dopol=False):
self.lowcore = create_named_configuration('LOWBD2-CORE')
self.times = (numpy.pi / 12.0) * numpy.linspace(-3.0, 3.0, 5)
if time is not None:
self.times = time
log.info("Times are %s" % (self.times))
if dospectral:
self.nchan = 3
self.frequency = numpy.array([0.9e8, 1e8, 1.1e8])
self.channel_bandwidth = numpy.array([1e7, 1e7, 1e7])
else:
self.frequency = numpy.array([1e8])
self.channel_bandwidth = numpy.array([1e7])
if dopol:
self.vis_pol = PolarisationFrame('linear')
self.image_pol = PolarisationFrame('stokesIQUV')
else:
self.vis_pol = PolarisationFrame('stokesI')
self.image_pol = PolarisationFrame('stokesI')
if dopol:
f = numpy.array([100.0, 20.0, -10.0, 1.0])
else:
f = numpy.array([100.0])
if dospectral:
flux = numpy.array([f, 0.8 * f, 0.6 * f])
else:
flux = numpy.array([f])
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
self.componentvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=self.vis_pol)
self.uvw = self.componentvis.data['uvw']
self.componentvis.data['vis'] *= 0.0
# Create model
self.model = create_image_from_visibility(self.componentvis, npixel=512, cellsize=0.001,
nchan=len(self.frequency),
polarisation_frame=self.image_pol)
# Fill the visibility with exactly computed point sources. These are chosen to lie
# on grid points.
spacing_pixels = 512 // 8
log.info('Spacing in pixels = %s' % spacing_pixels)
centers = [(x, x) for x in numpy.linspace(-3.0, +3.0, 7)]
for x in numpy.linspace(-3.0, +3.0, 7):
centers.append((-x, x))
centers.append((1e-7, 1e-7))
centers.append((1.1, 2.2))
# Make the list of components
rpix = self.model.wcs.wcs.crpix
self.components = []
for center in centers:
ix, iy = center
# The phase center in 0-relative coordinates is n // 2 so we centre the grid of
# components on ny // 2, nx // 2. The wcs must be defined consistently.
p = int(round(rpix[0] + ix * spacing_pixels * numpy.sign(self.model.wcs.wcs.cdelt[0]))), \
int(round(rpix[1] + iy * spacing_pixels * numpy.sign(self.model.wcs.wcs.cdelt[1])))
sc = pixel_to_skycoord(p[0], p[1], self.model.wcs, origin=0)
log.info("Component at (%f, %f) [0-rel] %s" % (p[0], p[1], str(sc)))
if ix != 0 and iy != 0:
# Channel images
comp = create_skycomponent(flux=flux, frequency=self.frequency, direction=sc,
polarisation_frame=self.image_pol)
self.components.append(comp)
# Predict the visibility from the components exactly.
self.componentvis.data['vis'] *= 0.0
predict_skycomponent_visibility(self.componentvis, self.components)
insert_skycomponent(self.model, self.components)
# Calculate the model convolved with a Gaussian.
self.cmodel = smooth_image(self.model)
export_image_to_fits(self.model, '%s/test_model.fits' % self.dir)
export_image_to_fits(self.cmodel, '%s/test_cmodel.fits' % self.dir)
def test_findcomponents(self):
# Check that the components are where we expected them to be after insertion
self.actualSetUp()
self._checkcomponents(self.cmodel)
def test_findcomponents_spectral_pol(self):
# Check that the components are where we expected them to be after insertion
self.actualSetUp(dospectral=True, dopol=True)
self._checkcomponents(self.cmodel)
def test_predict_2d(self):
# Test if the 2D prediction works
#
# Set w=0 so that the two-dimensional transform should agree exactly with the component transform.
# Good check on the grid correction in the image->vis direction
# Set all w to zero
self.actualSetUp()
self.componentvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0)
self.componentvis.data['uvw'][:, 2] = 0.0
# Predict the visibility using direct evaluation
predict_skycomponent_visibility(self.componentvis, self.components)
self.modelvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=self.vis_pol)
self.modelvis.data['uvw'][:, 2] = 0.0
predict_2d(self.modelvis, self.model, **self.params)
self.residualvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=self.vis_pol)
self.residualvis.data['uvw'][:, 2] = 0.0
self.residualvis.data['vis'] = self.modelvis.data['vis'] - self.componentvis.data['vis']
self._checkdirty(self.residualvis, 'test_predict_2d', fluxthreshold=4.0)
def _predict_base(self, predict, fluxthreshold=1.0):
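        # Predict with the transform under test, subtract the exactly computed
        # component visibilities, and require the residual dirty image to stay
        # below fluxthreshold.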
self.modelvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=self.vis_pol)
self.modelvis.data['vis'] *= 0.0
predict(self.modelvis, self.model, **self.params)
self.residualvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=self.vis_pol)
self.residualvis.data['uvw'][:, 2] = 0.0
self.residualvis.data['vis'] = self.modelvis.data['vis'] - self.componentvis.data['vis']
self._checkdirty(self.residualvis, 'test_%s' % predict.__name__, fluxthreshold=fluxthreshold)
def test_predict_facets(self):
self.actualSetUp()
self.params['facets'] = 2
self._predict_base(predict_facets, fluxthreshold=numpy.infty)
def test_predict_timeslice(self):
# This works poorly because of the poor interpolation accuracy for point sources. The corresponding
# invert works well particularly if the beam sampling is high
self.actualSetUp()
self._predict_base(predict_timeslice, fluxthreshold=numpy.infty)
def test_predict_timeslice_wprojection(self):
self.actualSetUp()
self.params['kernel'] = 'wprojection'
self.params['wstep'] = 2.0
self._predict_base(predict_timeslice, fluxthreshold=numpy.infty)
def test_predict_wstack(self):
self.actualSetUp()
self.params['wstack'] = 2.0
self._predict_base(predict_wstack, fluxthreshold=5.0)
def test_predict_facets_wstack(self):
self.actualSetUp()
self.params['wstack'] = 2.0
self.params['facets'] = 2
self._predict_base(predict_facets_wstack, fluxthreshold=5.6)
def test_predict_facets_wstack_spectral(self):
self.actualSetUp(dospectral=True)
self.params['wstack'] = 2.0
self.params['facets'] = 2
self._predict_base(predict_facets_wstack, fluxthreshold=5.8)
def test_predict_facets_wstack_spectral_pol(self):
self.actualSetUp(dospectral=True, dopol=True)
self.params['wstack'] = 2.0
self.params['facets'] = 2
self._predict_base(predict_facets_wstack, fluxthreshold=5.8)
def test_predict_wstack_wprojection(self):
self.actualSetUp()
self.params['wstack'] = 5 * 2.0
self.params['wstep'] = 2.0
self._predict_base(predict_wprojection_wstack, fluxthreshold=4.4)
def test_predict_facets_wprojection(self):
self.actualSetUp()
self.params['wstep'] = 2.0
self.params['facets'] = 2
self._predict_base(predict_facets_wprojection, fluxthreshold=7.5)
def test_predict_wprojection(self):
self.actualSetUp()
self.params['wstep'] = 2.0
self._predict_base(predict_wprojection, fluxthreshold=2.0)
def test_invert_2d(self):
# Test if the 2D invert works with w set to zero
# Set w=0 so that the two-dimensional transform should agree exactly with the model.
# Good check on the grid correction in the vis->image direction
self.actualSetUp()
self.componentvis = create_visibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,
weight=1.0, polarisation_frame=self.vis_pol)
self.componentvis.data['uvw'][:, 2] = 0.0
self.componentvis.data['vis'] *= 0.0
# Predict the visibility using direct evaluation
for comp in self.components:
predict_skycomponent_visibility(self.componentvis, comp)
psf2d = create_empty_image_like(self.model)
        psf2d, sumwt = invert_2d(self.componentvis, psf2d, dopsf=True, **self.params)
export_image_to_fits(psf2d, '%s/test_invert_2d_psf.fits' % self.dir)
dirty2d = create_empty_image_like(self.model)
dirty2d, sumwt = invert_2d(self.componentvis, dirty2d, **self.params)
export_image_to_fits(dirty2d, '%s/test_invert_2d_dirty.fits' % self.dir)
self._checkcomponents(dirty2d, fluxthreshold=20.0, positionthreshold=1.0)
def _invert_base(self, invert, fluxthreshold=20.0, positionthreshold=1.0, check_components=True):
dirty = create_empty_image_like(self.model)
dirty, sumwt = invert(self.componentvis, dirty, **self.params)
        assert (sumwt > 0.0).all()
export_image_to_fits(dirty, '%s/test_%s_dirty.fits' % (self.dir, invert.__name__))
if check_components:
self._checkcomponents(dirty, fluxthreshold, positionthreshold)
def test_invert_facets(self):
self.actualSetUp()
self.params['facets'] = 2
self._invert_base(invert_facets, positionthreshold=6.0, check_components=False)
def test_invert_facets_wprojection(self):
self.actualSetUp()
self.params['facets'] = 2
self.params['wstep'] = 4.0
self._invert_base(invert_facets_wprojection, positionthreshold=1.0)
def test_invert_wstack(self):
self.actualSetUp()
self.params['wstack'] = 4.0
self._invert_base(invert_wstack, positionthreshold=1.0)
def test_invert_wstack_spectral(self):
self.actualSetUp(dospectral=True)
self.params['wstack'] = 4.0
self._invert_base(invert_wstack, positionthreshold=1.0)
def test_invert_wstack_spectral_pol(self):
self.actualSetUp(dospectral=True, dopol=True)
self.params['wstack'] = 4.0
self._invert_base(invert_wstack, positionthreshold=1.0)
def test_invert_facets_wstack(self):
self.actualSetUp()
self.params['wstack'] = 4.0
self.params['facets'] = 4
self._invert_base(invert_facets_wstack, positionthreshold=1.0)
def test_invert_wprojection_wstack(self):
self.actualSetUp()
self.params['wstack'] = 5 * 4.0
self.params['wstep'] = 4.0
self._invert_base(invert_wprojection_wstack, positionthreshold=1.0)
def test_invert_wprojection(self):
self.actualSetUp()
self.params['wstep'] = 4.0
self._invert_base(invert_wprojection, positionthreshold=1.0)
def test_invert_timeslice(self):
self.actualSetUp()
self._invert_base(invert_timeslice, positionthreshold=8.0, check_components=False)
def test_weighting(self):
self.actualSetUp()
vis, density, densitygrid = weight_visibility(self.componentvis, self.model, weighting='uniform')
assert vis.nvis == self.componentvis.nvis
assert len(density) == vis.nvis
assert numpy.std(vis.imaging_weight) > 0.0
assert densitygrid.data.shape == self.model.data.shape
vis, density, densitygrid = weight_visibility(self.componentvis, self.model, weighting='natural')
assert density is None
assert densitygrid is None
def test_create_image_from_visibility(self):
self.actualSetUp()
self.componentvis = create_visibility(self.lowcore, self.times, self.frequency,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=self.vis_pol,
channel_bandwidth=self.channel_bandwidth)
im = create_image_from_visibility(self.componentvis, nchan=1, npixel=128)
assert im.data.shape == (1, 1, 128, 128)
im = create_image_from_visibility(self.componentvis, frequency=self.frequency, npixel=128)
assert im.data.shape == (len(self.frequency), 1, 128, 128)
im = create_image_from_visibility(self.componentvis, frequency=self.frequency, npixel=128,
nchan=1)
assert im.data.shape == (1, 1, 128, 128)
if __name__ == '__main__':
unittest.main()
|
[
"arl.image.operations.export_image_to_fits",
"numpy.abs",
"arl.visibility.base.create_visibility",
"arl.skycomponent.operations.insert_skycomponent",
"arl.imaging.invert_2d",
"arl.data.polarisation.PolarisationFrame",
"astropy.wcs.utils.pixel_to_skycoord",
"unittest.main",
"arl.imaging.weighting.weight_visibility",
"numpy.std",
"arl.skycomponent.operations.find_skycomponents",
"numpy.linspace",
"arl.skycomponent.operations.find_nearest_component",
"arl.skycomponent.operations.create_skycomponent",
"arl.imaging.predict_2d",
"arl.image.operations.create_empty_image_like",
"arl.util.testing_support.create_named_configuration",
"arl.visibility.operations.sum_visibility",
"arl.image.operations.smooth_image",
"os.makedirs",
"arl.imaging.create_image_from_visibility",
"arl.imaging.predict_skycomponent_visibility",
"numpy.array",
"numpy.sign",
"astropy.coordinates.SkyCoord",
"logging.getLogger"
] |
[((1186, 1213), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1203, 1213), False, 'import logging\n'), ((17796, 17811), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17809, 17811), False, 'import unittest\n'), ((1422, 1457), 'arl.image.operations.create_empty_image_like', 'create_empty_image_like', (['self.model'], {}), '(self.model)\n', (1445, 1457), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((1481, 1553), 'arl.imaging.invert_2d', 'invert_2d', ([], {'vis': 'vis', 'im': 'dirty', 'dopsf': '(False)', 'normalize': '(True)'}), '(vis=vis, im=dirty, dopsf=False, normalize=True, **self.params)\n', (1490, 1553), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((1562, 1628), 'arl.image.operations.export_image_to_fits', 'export_image_to_fits', (['dirty', "('%s/%s_dirty.fits' % (self.dir, name))"], {}), "(dirty, '%s/%s_dirty.fits' % (self.dir, name))\n", (1582, 1628), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((1877, 1948), 'arl.skycomponent.operations.find_skycomponents', 'find_skycomponents', (['dirty'], {'fwhm': '(1.0)', 'threshold': 'fluxthreshold', 'npixels': '(5)'}), '(dirty, fwhm=1.0, threshold=fluxthreshold, npixels=5)\n', (1895, 1948), False, 'from arl.skycomponent.operations import create_skycomponent, find_skycomponents, find_nearest_component, insert_skycomponent\n'), ((3086, 3122), 'os.makedirs', 'os.makedirs', (['self.dir'], {'exist_ok': '(True)'}), '(self.dir, exist_ok=True)\n', (3097, 3122), False, 'import os\n'), ((3512, 3553), 'arl.util.testing_support.create_named_configuration', 'create_named_configuration', (['"""LOWBD2-CORE"""'], {}), "('LOWBD2-CORE')\n", (3538, 3553), False, 'from arl.util.testing_support import create_named_configuration\n'), ((4601, 4678), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(+180.0 * u.deg)', 'dec': '(-60.0 * u.deg)', 'frame': '"""icrs"""', 'equinox': '"""J2000"""'}), "(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')\n", (4609, 4678), False, 'from astropy.coordinates import SkyCoord\n'), ((4707, 4891), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0, polarisation_frame=self.vis_pol)\n', (4724, 4891), False, 'from arl.visibility.base import create_visibility\n'), ((5674, 5703), 'numpy.linspace', 'numpy.linspace', (['(-3.0)', '(+3.0)', '(7)'], {}), '(-3.0, +3.0, 7)\n', (5688, 5703), False, 'import numpy\n'), ((6966, 7033), 'arl.imaging.predict_skycomponent_visibility', 'predict_skycomponent_visibility', (['self.componentvis', 'self.components'], {}), '(self.componentvis, self.components)\n', (6997, 7033), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((7042, 7090), 'arl.skycomponent.operations.insert_skycomponent', 'insert_skycomponent', (['self.model', 'self.components'], {}), '(self.model, self.components)\n', (7061, 7090), False, 'from arl.skycomponent.operations import create_skycomponent, find_skycomponents, find_nearest_component, insert_skycomponent\n'), ((7179, 7203), 'arl.image.operations.smooth_image', 'smooth_image', (['self.model'], {}), '(self.model)\n', (7191, 7203), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((7212, 7277), 'arl.image.operations.export_image_to_fits', 'export_image_to_fits', (['self.model', "('%s/test_model.fits' % self.dir)"], {}), "(self.model, '%s/test_model.fits' % self.dir)\n", (7232, 7277), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((7286, 7353), 'arl.image.operations.export_image_to_fits', 'export_image_to_fits', (['self.cmodel', "('%s/test_cmodel.fits' % self.dir)"], {}), "(self.cmodel, '%s/test_cmodel.fits' % self.dir)\n", (7306, 7353), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((8122, 8273), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0)\n', (8139, 8273), False, 'from arl.visibility.base import create_visibility\n'), ((8473, 8540), 'arl.imaging.predict_skycomponent_visibility', 'predict_skycomponent_visibility', (['self.componentvis', 'self.components'], {}), '(self.componentvis, self.components)\n', (8504, 8540), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((8574, 8758), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0, polarisation_frame=self.vis_pol)\n', (8591, 8758), False, 'from arl.visibility.base import create_visibility\n'), ((8889, 8941), 'arl.imaging.predict_2d', 'predict_2d', (['self.modelvis', 'self.model'], {}), '(self.modelvis, self.model, **self.params)\n', (8899, 8941), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((8969, 9153), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0, polarisation_frame=self.vis_pol)\n', (8986, 9153), False, 'from arl.visibility.base import create_visibility\n'), ((9603, 9787), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0, polarisation_frame=self.vis_pol)\n', (9620, 9787), False, 'from arl.visibility.base import create_visibility\n'), ((9990, 10174), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0, polarisation_frame=self.vis_pol)\n', (10007, 10174), False, 'from arl.visibility.base import create_visibility\n'), ((13055, 13239), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'channel_bandwidth': 'self.channel_bandwidth', 'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol'}), '(self.lowcore, self.times, self.frequency,\n channel_bandwidth=self.channel_bandwidth, phasecentre=self.phasecentre,\n weight=1.0, polarisation_frame=self.vis_pol)\n', (13072, 13239), False, 'from arl.visibility.base import create_visibility\n'), ((13599, 13634), 'arl.image.operations.create_empty_image_like', 'create_empty_image_like', (['self.model'], {}), '(self.model)\n', (13622, 13634), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((13658, 13720), 'arl.imaging.invert_2d', 'invert_2d', (['self.componentvis', 'psf2d'], {'dopsf': '(True)'}), '(self.componentvis, psf2d, dopsf=True, **self.params)\n', (13667, 13720), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((13731, 13799), 'arl.image.operations.export_image_to_fits', 'export_image_to_fits', (['psf2d', "('%s/test_invert_2d_psf.fits' % self.dir)"], {}), "(psf2d, '%s/test_invert_2d_psf.fits' % self.dir)\n", (13751, 13799), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((13819, 13854), 'arl.image.operations.create_empty_image_like', 'create_empty_image_like', (['self.model'], {}), '(self.model)\n', (13842, 13854), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((13880, 13932), 'arl.imaging.invert_2d', 'invert_2d', (['self.componentvis', 'dirty2d'], {}), '(self.componentvis, dirty2d, **self.params)\n', (13889, 13932), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((13950, 14022), 'arl.image.operations.export_image_to_fits', 'export_image_to_fits', (['dirty2d', "('%s/test_invert_2d_dirty.fits' % self.dir)"], {}), "(dirty2d, '%s/test_invert_2d_dirty.fits' % self.dir)\n", (13970, 14022), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((14237, 14272), 'arl.image.operations.create_empty_image_like', 'create_empty_image_like', (['self.model'], {}), '(self.model)\n', (14260, 14272), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((14385, 14472), 'arl.image.operations.export_image_to_fits', 'export_image_to_fits', (['dirty', "('%s/test_%s_dirty.fits' % (self.dir, invert.__name__))"], {}), "(dirty, '%s/test_%s_dirty.fits' % (self.dir, invert.\n __name__))\n", (14405, 14472), False, 'from arl.image.operations import export_image_to_fits, create_empty_image_like, smooth_image\n'), ((16393, 16462), 'arl.imaging.weighting.weight_visibility', 'weight_visibility', (['self.componentvis', 'self.model'], {'weighting': '"""uniform"""'}), "(self.componentvis, self.model, weighting='uniform')\n", (16410, 16462), False, 'from arl.imaging.weighting import weight_visibility\n'), ((16703, 16772), 'arl.imaging.weighting.weight_visibility', 'weight_visibility', (['self.componentvis', 'self.model'], {'weighting': '"""natural"""'}), "(self.componentvis, self.model, weighting='natural')\n", (16720, 16772), False, 'from arl.imaging.weighting import weight_visibility\n'), ((16948, 17133), 'arl.visibility.base.create_visibility', 'create_visibility', (['self.lowcore', 'self.times', 'self.frequency'], {'phasecentre': 'self.phasecentre', 'weight': '(1.0)', 'polarisation_frame': 'self.vis_pol', 'channel_bandwidth': 'self.channel_bandwidth'}), '(self.lowcore, self.times, self.frequency, phasecentre=\n self.phasecentre, weight=1.0, polarisation_frame=self.vis_pol,\n channel_bandwidth=self.channel_bandwidth)\n', (16965, 17133), False, 'from arl.visibility.base import create_visibility\n'), ((17276, 17344), 'arl.imaging.create_image_from_visibility', 'create_image_from_visibility', (['self.componentvis'], {'nchan': '(1)', 'npixel': '(128)'}), '(self.componentvis, nchan=1, npixel=128)\n', (17304, 17344), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((17407, 17496), 'arl.imaging.create_image_from_visibility', 'create_image_from_visibility', (['self.componentvis'], {'frequency': 'self.frequency', 'npixel': '(128)'}), '(self.componentvis, frequency=self.frequency,\n npixel=128)\n', (17435, 17496), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((17573, 17671), 'arl.imaging.create_image_from_visibility', 'create_image_from_visibility', (['self.componentvis'], {'frequency': 'self.frequency', 'npixel': '(128)', 'nchan': '(1)'}), '(self.componentvis, frequency=self.frequency,\n npixel=128, nchan=1)\n', (17601, 17671), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((1656, 1677), 'numpy.abs', 'numpy.abs', (['dirty.data'], {}), '(dirty.data)\n', (1665, 1677), False, 'import numpy\n'), ((2579, 2634), 'arl.skycomponent.operations.find_nearest_component', 'find_nearest_component', (['comp.direction', 'self.components'], {}), '(comp.direction, self.components)\n', (2601, 2634), False, 'from arl.skycomponent.operations import create_skycomponent, find_skycomponents, find_nearest_component, insert_skycomponent\n'), ((3595, 3623), 'numpy.linspace', 'numpy.linspace', (['(-3.0)', '(3.0)', '(5)'], {}), '(-3.0, 3.0, 5)\n', (3609, 3623), False, 'import numpy\n'), ((3828, 3879), 'numpy.array', 'numpy.array', (['[90000000.0, 100000000.0, 110000000.0]'], {}), '([90000000.0, 100000000.0, 110000000.0])\n', (3839, 3879), False, 'import numpy\n'), ((3898, 3947), 'numpy.array', 'numpy.array', (['[10000000.0, 10000000.0, 10000000.0]'], {}), '([10000000.0, 10000000.0, 10000000.0])\n', (3909, 3947), False, 'import numpy\n'), ((3970, 3996), 'numpy.array', 'numpy.array', (['[100000000.0]'], {}), '([100000000.0])\n', (3981, 3996), False, 'import numpy\n'), ((4026, 4051), 'numpy.array', 'numpy.array', (['[10000000.0]'], {}), '([10000000.0])\n', (4037, 4051), False, 'import numpy\n'), ((4103, 4130), 'arl.data.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""linear"""'], {}), "('linear')\n", (4120, 4130), False, 'from arl.data.polarisation import PolarisationFrame\n'), ((4160, 4191), 'arl.data.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesIQUV"""'], {}), "('stokesIQUV')\n", (4177, 4191), False, 'from arl.data.polarisation import PolarisationFrame\n'), ((4233, 4261), 'arl.data.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (4250, 4261), False, 'from arl.data.polarisation import PolarisationFrame\n'), ((4291, 4319), 'arl.data.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (4308, 4319), False, 'from arl.data.polarisation import PolarisationFrame\n'), ((4355, 4393), 'numpy.array', 'numpy.array', (['[100.0, 20.0, -10.0, 1.0]'], {}), '([100.0, 20.0, -10.0, 1.0])\n', (4366, 4393), False, 'import numpy\n'), ((4424, 4444), 'numpy.array', 'numpy.array', (['[100.0]'], {}), '([100.0])\n', (4435, 4444), False, 'import numpy\n'), ((4488, 4522), 'numpy.array', 'numpy.array', (['[f, 0.8 * f, 0.6 * f]'], {}), '([f, 0.8 * f, 0.6 * f])\n', (4499, 4522), False, 'import numpy\n'), ((4556, 4572), 'numpy.array', 'numpy.array', (['[f]'], {}), '([f])\n', (4567, 4572), False, 'import numpy\n'), ((6389, 6444), 'astropy.wcs.utils.pixel_to_skycoord', 'pixel_to_skycoord', (['p[0]', 'p[1]', 'self.model.wcs'], {'origin': '(0)'}), '(p[0], p[1], self.model.wcs, origin=0)\n', (6406, 6444), False, 'from astropy.wcs.utils import pixel_to_skycoord\n'), ((13525, 13581), 'arl.imaging.predict_skycomponent_visibility', 'predict_skycomponent_visibility', (['self.componentvis', 'comp'], {}), '(self.componentvis, comp)\n', (13556, 13581), False, 'from arl.imaging import predict_2d, predict_wstack, predict_wprojection, predict_facets, predict_timeslice, predict_wprojection_wstack, invert_wprojection_wstack, invert_2d, invert_wstack, invert_wprojection, invert_facets, invert_timeslice, create_image_from_visibility, predict_skycomponent_visibility, predict_facets_wstack, invert_facets_wstack, predict_facets_wprojection, invert_facets_wprojection\n'), ((16568, 16597), 'numpy.std', 'numpy.std', (['vis.imaging_weight'], {}), '(vis.imaging_weight)\n', (16577, 16597), False, 'import numpy\n'), ((2302, 2351), 'arl.visibility.operations.sum_visibility', 'sum_visibility', (['self.componentvis', 'comp.direction'], {}), '(self.componentvis, comp.direction)\n', (2316, 2351), False, 'from arl.visibility.operations import sum_visibility\n'), ((5625, 5654), 'numpy.linspace', 'numpy.linspace', (['(-3.0)', '(+3.0)', '(7)'], {}), '(-3.0, +3.0, 7)\n', (5639, 5654), False, 'import numpy\n'), ((6648, 6757), 'arl.skycomponent.operations.create_skycomponent', 'create_skycomponent', ([], {'flux': 'flux', 'frequency': 'self.frequency', 'direction': 'sc', 'polarisation_frame': 'self.image_pol'}), '(flux=flux, frequency=self.frequency, direction=sc,\n polarisation_frame=self.image_pol)\n', (6667, 6757), False, 'from arl.skycomponent.operations import create_skycomponent, find_skycomponents, find_nearest_component, insert_skycomponent\n'), ((6227, 6266), 'numpy.sign', 'numpy.sign', (['self.model.wcs.wcs.cdelt[0]'], {}), '(self.model.wcs.wcs.cdelt[0])\n', (6237, 6266), False, 'import numpy\n'), ((6330, 6369), 'numpy.sign', 'numpy.sign', (['self.model.wcs.wcs.cdelt[1]'], {}), '(self.model.wcs.wcs.cdelt[1])\n', (6340, 6369), False, 'import numpy\n')]
|
from abc import ABC, abstractmethod
import numpy as np
from enum import Enum
from learning.normalizer import Normalizer
class Env(ABC):
class Terminate(Enum):
Null = 0
Fail = 1
Succ = 2
def __init__(self, args, enable_draw):
self.enable_draw = enable_draw
return
@abstractmethod
def update(self, timestep):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def get_time(self):
pass
@abstractmethod
def get_name(self):
pass
# rendering and UI interface
def draw(self):
pass
def keyboard(self, key, x, y):
pass
def mouse_click(self, button, state, x, y):
pass
def mouse_move(self, x, y):
pass
def reshape(self, w, h):
pass
def shutdown(self):
pass
def is_done(self):
return False
def set_playback_speed(self, speed):
pass
def set_updates_per_sec(self, updates_per_sec):
pass
@abstractmethod
def get_win_width(self):
pass
@abstractmethod
def get_win_height(self):
pass
def get_num_update_substeps(self):
return 1
# rl interface
@abstractmethod
def is_rl_scene(self):
return False
@abstractmethod
def get_num_agents(self):
return 0
@abstractmethod
def need_new_action(self, agent_id):
return False
@abstractmethod
def record_state(self, agent_id):
pass
@abstractmethod
def record_goal(self, agent_id):
pass
@abstractmethod
def set_action(self, agent_id):
pass
@abstractmethod
def get_action_space(self, agent_id):
pass
@abstractmethod
def get_state_size(self, agent_id):
pass
@abstractmethod
def get_goal_size(self, agent_id):
pass
@abstractmethod
def get_action_size(self, agent_id):
pass
@abstractmethod
def get_num_actions(self, agent_id):
pass
@abstractmethod
def log_val(self, agent_id, val):
pass
def build_state_offset(self, agent_id):
state_size = self.get_state_size(agent_id)
return np.zeros(state_size)
def build_state_scale(self, agent_id):
state_size = self.get_state_size(agent_id)
return np.ones(state_size)
def build_goal_offset(self, agent_id):
goal_size = self.get_goal_size(agent_id)
return np.zeros(goal_size)
def build_goal_scale(self, agent_id):
goal_size = self.get_goal_size(agent_id)
return np.ones(goal_size)
def build_action_offset(self, agent_id):
        action_size = self.get_action_size(agent_id)
return np.zeros(action_size)
def build_action_scale(self, agent_id):
        action_size = self.get_action_size(agent_id)
return np.ones(action_size)
def build_action_bound_min(self, agent_id):
        action_size = self.get_action_size(agent_id)
        return -np.inf * np.ones(action_size)
def build_action_bound_max(self, agent_id):
        action_size = self.get_action_size(agent_id)
        return np.inf * np.ones(action_size)
def build_state_norm_groups(self, agent_id):
state_size = self.get_state_size(agent_id)
return Normalizer.NORM_GROUP_SINGLE * np.ones(state_size, dtype=np.int32)
def build_goal_norm_groups(self, agent_id):
goal_size = self.get_goal_size(agent_id)
return Normalizer.NORM_GROUP_SINGLE * np.ones(goal_size, dtype=np.int32)
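    # Note (added): the offset/scale builders above follow the usual normalizer
    # convention, i.e. a feature x is presumably normalized as (x + offset) * scale.
    # The defaults (zero offset, unit scale) leave features untouched, and
    # NORM_GROUP_SINGLE assigns every dimension to one shared normalization group.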
@abstractmethod
def calc_reward(self, agent_id):
return 0
@abstractmethod
def get_reward_min(self, agent_id):
return 0
@abstractmethod
def get_reward_max(self, agent_id):
return 1
@abstractmethod
def get_reward_fail(self, agent_id):
return self.get_reward_min(agent_id)
@abstractmethod
def get_reward_succ(self, agent_id):
return self.get_reward_max(agent_id)
@abstractmethod
def is_episode_end(self):
return False
@abstractmethod
def check_terminate(self, agent_id):
        return self.Terminate.Null
@abstractmethod
def check_valid_episode(self):
return True
@abstractmethod
def set_sample_count(self, count):
pass
@abstractmethod
def set_mode(self, mode):
pass
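# Minimal sketch (added, not part of the original file): a concrete Env must
# override every @abstractmethod before it can be instantiated. The hypothetical
# subclass below stubs only a representative few; the remaining abstract
# methods would have to be overridden the same way before instantiation.
class _StubEnv(Env):
    def update(self, timestep):
        pass  # advance the simulation by `timestep` seconds
    def reset(self):
        pass  # restore the initial scene state
    def get_time(self):
        return 0.0
    def get_name(self):
        return "stub"
    def get_num_agents(self):
        return 1
    def get_state_size(self, agent_id):
        return 4
    def calc_reward(self, agent_id):
        return 0.0
    # ... override the remaining abstract methods before instantiating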
|
[
"numpy.zeros",
"numpy.ones"
] |
[((2222, 2242), 'numpy.zeros', 'np.zeros', (['state_size'], {}), '(state_size)\n', (2230, 2242), True, 'import numpy as np\n'), ((2353, 2372), 'numpy.ones', 'np.ones', (['state_size'], {}), '(state_size)\n', (2360, 2372), True, 'import numpy as np\n'), ((2481, 2500), 'numpy.zeros', 'np.zeros', (['goal_size'], {}), '(goal_size)\n', (2489, 2500), True, 'import numpy as np\n'), ((2608, 2626), 'numpy.ones', 'np.ones', (['goal_size'], {}), '(goal_size)\n', (2615, 2626), True, 'import numpy as np\n'), ((2733, 2754), 'numpy.zeros', 'np.zeros', (['action_size'], {}), '(action_size)\n', (2741, 2754), True, 'import numpy as np\n'), ((2860, 2880), 'numpy.ones', 'np.ones', (['action_size'], {}), '(action_size)\n', (2867, 2880), True, 'import numpy as np\n'), ((2997, 3017), 'numpy.ones', 'np.ones', (['action_size'], {}), '(action_size)\n', (3004, 3017), True, 'import numpy as np\n'), ((3133, 3153), 'numpy.ones', 'np.ones', (['action_size'], {}), '(action_size)\n', (3140, 3153), True, 'import numpy as np\n'), ((3301, 3336), 'numpy.ones', 'np.ones', (['state_size'], {'dtype': 'np.int32'}), '(state_size, dtype=np.int32)\n', (3308, 3336), True, 'import numpy as np\n'), ((3481, 3515), 'numpy.ones', 'np.ones', (['goal_size'], {'dtype': 'np.int32'}), '(goal_size, dtype=np.int32)\n', (3488, 3515), True, 'import numpy as np\n')]
|
import numpy as np
from pettingzoo.mappo_ssd import escalation_gw_v1
def main():
env = escalation_gw_v1.parallel_env(max_frames=20, share_reward=False, shape_reward=False, shape_beta=0.8)
obs = env.reset()[0]
obs = np.array([obs])
print(obs)
print(env.env.obs_to_all_symmetries_agent(obs, 0))
actions = np.array([0, 1, 2, 3, 4])
print(env.env.actions_to_all_symmetries_agent(actions, 0))
if __name__ == "__main__":
main()
|
[
"pettingzoo.mappo_ssd.escalation_gw_v1.parallel_env",
"numpy.array"
] |
[((94, 198), 'pettingzoo.mappo_ssd.escalation_gw_v1.parallel_env', 'escalation_gw_v1.parallel_env', ([], {'max_frames': '(20)', 'share_reward': '(False)', 'shape_reward': '(False)', 'shape_beta': '(0.8)'}), '(max_frames=20, share_reward=False,\n shape_reward=False, shape_beta=0.8)\n', (123, 198), False, 'from pettingzoo.mappo_ssd import escalation_gw_v1\n'), ((230, 245), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (238, 245), True, 'import numpy as np\n'), ((331, 356), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (339, 356), True, 'import numpy as np\n')]
|
import os
import glob
import pandas as pd
import numpy as np
from chainercv.transforms import resize
from chainercv.utils import read_image,write_image
import pydicom as dicom
import argparse
def img2var(img):
    # clip to the [-1024, 1024] Hounsfield range and rescale linearly to [-1, 1]
    return 2 * (np.clip(img, -1024, 1024) + 1024) / 2048 - 1.0
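# Worked example (added): img2var maps the clipped range [-1024, 1024]
# linearly onto [-1, 1], e.g.
#   img2var(np.array([-2000., 0., 2000.])) -> array([-1., 0., 1.])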
parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
parser.add_argument('--root', '-R', help='input dir containing images')
parser.add_argument('--out', '-o', help='output dir')
parser.add_argument('--noise', '-n', default=100, type=float, help='strength of Poisson noise')
parser.add_argument('--imgtype', '-it', default="jpg", help="image file type (file extension)")
args = parser.parse_args()
os.makedirs(os.path.join(args.out,"trainA"), exist_ok=True)
os.makedirs(os.path.join(args.out,"trainB"), exist_ok=True)
for fullname in sorted(glob.glob(os.path.join(args.root,"**/*.{}".format(args.imgtype)), recursive=True)):
fn = os.path.basename(fullname)
fn,ext = os.path.splitext(fn)
if args.imgtype == 'dcm':
subdirname = os.path.basename(os.path.dirname(fullname))
ref_dicom_in = dicom.read_file(fullname, force=True)
ref_dicom_in.file_meta.TransferSyntaxUID = dicom.uid.ImplicitVRLittleEndian
dt=ref_dicom_in.pixel_array.dtype
fileB = "trainB/{}_{}_clean.dcm".format(subdirname,fn)
ref_dicom_in.save_as(os.path.join(args.out,fileB))
dat = ref_dicom_in.pixel_array
# print(np.min(dat),np.max(dat))
# noise
dat = (ref_dicom_in.pixel_array + np.random.poisson(args.noise,ref_dicom_in.pixel_array.shape)).astype(dt)
# print(np.min(dat),np.max(dat))
        ref_dicom_in.PixelData = dat.tobytes()
fileA = "trainA/{}_{}_noise.dcm".format(subdirname,fn)
ref_dicom_in.save_as(os.path.join(args.out,fileA))
else:
dat = read_image(fullname)
c,h,w = dat.shape
fileB = "trainB/{}_clean.jpg".format(fn)
write_image(dat,os.path.join(args.out,fileB))
dat += np.random.poisson(args.noise,dat.shape)
fileA = "trainA/{}_noise.jpg".format(fn)
write_image(dat,os.path.join(args.out,fileA))
print("{}\t{}".format(fileA,fileB))
|
[
"argparse.ArgumentParser",
"pydicom.read_file",
"os.path.basename",
"chainercv.utils.read_image",
"os.path.dirname",
"numpy.clip",
"os.path.splitext",
"numpy.random.poisson",
"os.path.join"
] |
[((324, 396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""chainer implementation of pix2pix"""'}), "(description='chainer implementation of pix2pix')\n", (347, 396), False, 'import argparse\n'), ((755, 787), 'os.path.join', 'os.path.join', (['args.out', '"""trainA"""'], {}), "(args.out, 'trainA')\n", (767, 787), False, 'import os\n'), ((815, 847), 'os.path.join', 'os.path.join', (['args.out', '"""trainB"""'], {}), "(args.out, 'trainB')\n", (827, 847), False, 'import os\n'), ((980, 1006), 'os.path.basename', 'os.path.basename', (['fullname'], {}), '(fullname)\n', (996, 1006), False, 'import os\n'), ((1020, 1040), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (1036, 1040), False, 'import os\n'), ((1159, 1196), 'pydicom.read_file', 'dicom.read_file', (['fullname'], {'force': '(True)'}), '(fullname, force=True)\n', (1174, 1196), True, 'import pydicom as dicom\n'), ((1889, 1909), 'chainercv.utils.read_image', 'read_image', (['fullname'], {}), '(fullname)\n', (1899, 1909), False, 'from chainercv.utils import read_image, write_image\n'), ((2054, 2094), 'numpy.random.poisson', 'np.random.poisson', (['args.noise', 'dat.shape'], {}), '(args.noise, dat.shape)\n', (2071, 2094), True, 'import numpy as np\n'), ((1109, 1134), 'os.path.dirname', 'os.path.dirname', (['fullname'], {}), '(fullname)\n', (1124, 1134), False, 'import os\n'), ((1415, 1444), 'os.path.join', 'os.path.join', (['args.out', 'fileB'], {}), '(args.out, fileB)\n', (1427, 1444), False, 'import os\n'), ((1835, 1864), 'os.path.join', 'os.path.join', (['args.out', 'fileA'], {}), '(args.out, fileA)\n', (1847, 1864), False, 'import os\n'), ((2009, 2038), 'os.path.join', 'os.path.join', (['args.out', 'fileB'], {}), '(args.out, fileB)\n', (2021, 2038), False, 'import os\n'), ((2167, 2196), 'os.path.join', 'os.path.join', (['args.out', 'fileA'], {}), '(args.out, fileA)\n', (2179, 2196), False, 'import os\n'), ((274, 299), 'numpy.clip', 'np.clip', (['img', '(-1024)', '(1024)'], {}), '(img, -1024, 1024)\n', (281, 299), True, 'import numpy as np\n'), ((1582, 1643), 'numpy.random.poisson', 'np.random.poisson', (['args.noise', 'ref_dicom_in.pixel_array.shape'], {}), '(args.noise, ref_dicom_in.pixel_array.shape)\n', (1599, 1643), True, 'import numpy as np\n')]
|
import numpy as np
from pycocotools.coco import COCO
import json
from .custom import CustomDataset
#DATASET = 'DB_4LESIONS'
#DATASET = 'ROP_9LESIONS'
DATASET = 'DB_7LESIONS'
pseudo_threds = 0.0
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
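# Worked example (added): xyxy2xywh converts corner format to COCO's
# [x, y, w, h] with inclusive pixel extents, e.g.
#   xyxy2xywh(np.array([10, 20, 30, 60])) -> [10, 20, 21, 41]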
def py_cpu_nms(dets,scores, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
    # scores = dets[:, 4]  # box scores
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # indices of scores sorted from highest to lowest
order = scores.argsort()[::-1]
    # keep collects the indices of the boxes that survive suppression
keep = []
while order.size > 0:
        # order[0] is the highest-scoring remaining box; always keep it
i = order[0]
keep.append(i)
        # compute the intersection of box i with all remaining boxes
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
        # IoU = intersection / union
ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only the boxes whose IoU with box i is at most thresh; the rest are absorbed by box i
inds = np.where(ovr <= thresh)[0]
        # ovr is one element shorter than order (box i itself is excluded), so shift indices by 1
order = order[inds + 1]
return keep
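def _demo_py_cpu_nms():
    """Tiny usage sketch (added, hypothetical helper): two heavily overlapping
    boxes plus one disjoint box; NMS keeps the higher-scoring overlapping box
    and the disjoint one."""
    dets = np.array([[0, 0, 10, 10],
                     [1, 1, 11, 11],
                     [50, 50, 60, 60]], dtype=np.float32)
    scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
    return py_cpu_nms(dets, scores, thresh=0.5)  # -> [0, 2]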
WITH_NMS=True
class CocoDataset(CustomDataset):
CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')
if DATASET=='ROP_2TISSUES':
CLASSES = ('Macula','OpticDisk')
if DATASET=='ROP_9LESIONS':
CLASSES = ('Laser Photocoagulation Spot','artifact','bleeding',
'Stage 1: demarcation line','Stage 2: ridge',
'Stage 3: ridge with neovascularization',
'proliferation','Retina detachment','carcinoma')
if DATASET=='DB_4LESIONS':
CLASSES = ('hemorrhages', 'micro-aneurysms', 'hard exudate', 'cotton wool spot')
if DATASET=='DB_7LESIONS':
CLASSES = ('1','2','3','4','5','6','7')
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
if 'ROP' in DATASET:
self.cat2label = {
cat_id+1: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
else:
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
print(self.cat2label)
self.img_ids = self.coco.getImgIds()
img_infos = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos.append(info)
return img_infos
def load_Pseudo_annotations(self,Pseudo_ann_file):
pseudo_ann_info = dict()
        with open(Pseudo_ann_file) as f:
            json_pseudo_ann = json.load(f)
for pseudo_ann in json_pseudo_ann:
pseudo_ann_info[pseudo_ann['image_name']] = pseudo_ann
return pseudo_ann_info
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return self._parse_ann_info(ann_info, self.with_mask)
def get_Pseudo_ann_info(self, image_name):
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_scores = []
pseudo_ann = self.pseudo_ann_info[image_name]
for pseudo_box in pseudo_ann['box_results']:
if pseudo_box['score']>pseudo_threds:
x1, y1, w, h = pseudo_box['bbox']
box = [int(x1), int(y1), int(x1 + w - 1), int(y1 + h - 1)]
gt_bboxes.append(box)
gt_labels.append(pseudo_box['category_id'])
gt_scores.append(pseudo_box['score'])
if gt_bboxes:
if WITH_NMS:
#print(len(gt_bboxes))
temp_gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
#temp_gt_labels = np.array(gt_labels, dtype=np.int64)
temp_gt_scores = np.array(gt_scores,dtype = np.float32)
indexes = py_cpu_nms(temp_gt_bboxes,temp_gt_scores,0.15)
temp_gt_bboxes = []
temp_gt_labels = []
for index in indexes:
temp_gt_bboxes.append(gt_bboxes[int(index)])
temp_gt_labels.append(gt_labels[int(index)])
gt_bboxes=temp_gt_bboxes
gt_labels=temp_gt_labels
#print(len(gt_bboxes))
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
pseudo_ann = dict(
bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
return pseudo_ann
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, ann_info, with_mask=True):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
if with_mask:
gt_masks = []
gt_mask_polys = []
gt_poly_lens = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann['iscrowd']:
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
if 'ROP' in DATASET:
gt_labels.append(self.cat2label[ann['category_id']+1])
else:
gt_labels.append(self.cat2label[ann['category_id']])
if with_mask:
gt_masks.append(self.coco.annToMask(ann))
mask_polys = [
p for p in ann['segmentation'] if len(p) >= 6
] # valid polygons have >= 3 points (6 coordinates)
poly_lens = [len(p) for p in mask_polys]
gt_mask_polys.append(mask_polys)
gt_poly_lens.extend(poly_lens)
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(
bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
if with_mask:
ann['masks'] = gt_masks
# poly format is not used in the current implementation
ann['mask_polys'] = gt_mask_polys
ann['poly_lens'] = gt_poly_lens
return ann
|
[
"numpy.minimum",
"numpy.maximum",
"numpy.zeros",
"pycocotools.coco.COCO",
"numpy.where",
"numpy.array"
] |
[((859, 891), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (869, 891), True, 'import numpy as np\n'), ((908, 940), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (918, 940), True, 'import numpy as np\n'), ((957, 989), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (967, 989), True, 'import numpy as np\n'), ((1006, 1038), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (1016, 1038), True, 'import numpy as np\n'), ((1056, 1086), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1066, 1086), True, 'import numpy as np\n'), ((1101, 1131), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1111, 1131), True, 'import numpy as np\n'), ((3256, 3270), 'pycocotools.coco.COCO', 'COCO', (['ann_file'], {}), '(ann_file)\n', (3260, 3270), False, 'from pycocotools.coco import COCO\n'), ((1319, 1342), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1327, 1342), True, 'import numpy as np\n'), ((5795, 5832), 'numpy.array', 'np.array', (['gt_bboxes'], {'dtype': 'np.float32'}), '(gt_bboxes, dtype=np.float32)\n', (5803, 5832), True, 'import numpy as np\n'), ((5857, 5892), 'numpy.array', 'np.array', (['gt_labels'], {'dtype': 'np.int64'}), '(gt_labels, dtype=np.int64)\n', (5865, 5892), True, 'import numpy as np\n'), ((5939, 5973), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (5947, 5973), True, 'import numpy as np\n'), ((5998, 6026), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (6006, 6026), True, 'import numpy as np\n'), ((6087, 6131), 'numpy.array', 'np.array', (['gt_bboxes_ignore'], {'dtype': 'np.float32'}), '(gt_bboxes_ignore, dtype=np.float32)\n', (6095, 6131), True, 'import numpy as np\n'), ((6177, 6211), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (6185, 6211), True, 'import numpy as np\n'), ((8677, 8714), 'numpy.array', 'np.array', (['gt_bboxes'], {'dtype': 'np.float32'}), '(gt_bboxes, dtype=np.float32)\n', (8685, 8714), True, 'import numpy as np\n'), ((8739, 8774), 'numpy.array', 'np.array', (['gt_labels'], {'dtype': 'np.int64'}), '(gt_labels, dtype=np.int64)\n', (8747, 8774), True, 'import numpy as np\n'), ((8813, 8847), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (8821, 8847), True, 'import numpy as np\n'), ((8872, 8900), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (8880, 8900), True, 'import numpy as np\n'), ((8962, 9006), 'numpy.array', 'np.array', (['gt_bboxes_ignore'], {'dtype': 'np.float32'}), '(gt_bboxes_ignore, dtype=np.float32)\n', (8970, 9006), True, 'import numpy as np\n'), ((9052, 9086), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (9060, 9086), True, 'import numpy as np\n'), ((5152, 5189), 'numpy.array', 'np.array', (['gt_bboxes'], {'dtype': 'np.float32'}), '(gt_bboxes, dtype=np.float32)\n', (5160, 5189), True, 'import numpy as np\n'), ((5293, 5330), 'numpy.array', 'np.array', (['gt_scores'], {'dtype': 'np.float32'}), '(gt_scores, dtype=np.float32)\n', (5301, 5330), True, 'import numpy as np\n')]
|
import numpy as np
import random
from ConceptSet import getConceptSet
from sklearn import metrics
def kMedoids(D, k=6, maxItr=100):
DM=np.array(D)
n = DM.shape[0]
    if k > n:
        raise ValueError('number of medoids k must not exceed number of points n')
random.seed(123)
M = np.array(list(range(n)))
np.random.shuffle(M)
M = np.sort(M[:k])
Mnew = np.copy(M)
C = {}
for t in range(maxItr):
minInd = np.argmin(DM[:,M], axis=1)
for i in range(k):
minInd[M[i]] = i
for clust_i in range(k):
C[clust_i] = np.where(minInd==clust_i)[0]
        for clust_i in range(k):
            # new medoid: the cluster member with minimal mean distance to its own cluster
            mean_dist = np.mean(DM[np.ix_(C[clust_i],C[clust_i])],axis=1)
            j = np.argmin(mean_dist)
            Mnew[clust_i] = C[clust_i][j]
        Mnew = np.sort(Mnew)  # np.sort returns a sorted copy; assign it back
if np.array_equal(M, Mnew):
break
M = np.copy(Mnew)
else:
minInd = np.argmin(DM[:,M], axis=1)
for clust_i in range(k):
C[clust_i] = np.where(minInd==clust_i)[0]
return M, C
def getSense(filename):
# L=[[0., 0.5,0.2,0.5,0.2],
# [0.5, 0.0 ,0.1,0.4,0.2],
# [0.2, 0.1, 0.0 , 0.3 ,0.4],
# [0.5 ,0.4 ,0.3 ,0.0 ,0.2],
# [0.2, 0.2, 0.4 ,0.2, 0.0 ]]
ListofConcepts,DistanceMatrix,FeatureVector = getConceptSet(filename)
M,C = kMedoids(DistanceMatrix)
Cn = {}
for k,v in C.items():
m = []
for l in v:
m.append(ListofConcepts[l])
Cn.update({k:m})
    GM = [0 for x in range(len(M))]
    for i in range(len(M)):
        GM[i] = FeatureVector[M[i]]  # feature vector of each medoid concept
return (ListofConcepts,DistanceMatrix,FeatureVector,M,C,Cn,GM)
if __name__=="__main__":
sense = getSense("test.txt")
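    # Added sketch: kMedoids can also be run directly on a small symmetric
    # distance matrix (values echo the commented-out example matrix in getSense).
    D = [[0.0, 0.5, 0.2, 0.5, 0.2],
         [0.5, 0.0, 0.1, 0.4, 0.2],
         [0.2, 0.1, 0.0, 0.3, 0.4],
         [0.5, 0.4, 0.3, 0.0, 0.2],
         [0.2, 0.2, 0.4, 0.2, 0.0]]
    M, C = kMedoids(D, k=2)
    print('medoids:', M, 'clusters:', C)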
|
[
"numpy.copy",
"ConceptSet.getConceptSet",
"numpy.ix_",
"numpy.argmin",
"numpy.sort",
"numpy.where",
"random.seed",
"numpy.array",
"numpy.array_equal",
"numpy.random.shuffle"
] |
[((138, 149), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (146, 149), True, 'import numpy as np\n'), ((204, 220), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (215, 220), False, 'import random\n'), ((252, 272), 'numpy.random.shuffle', 'np.random.shuffle', (['M'], {}), '(M)\n', (269, 272), True, 'import numpy as np\n'), ((278, 292), 'numpy.sort', 'np.sort', (['M[:k]'], {}), '(M[:k])\n', (285, 292), True, 'import numpy as np\n'), ((301, 311), 'numpy.copy', 'np.copy', (['M'], {}), '(M)\n', (308, 311), True, 'import numpy as np\n'), ((1076, 1099), 'ConceptSet.getConceptSet', 'getConceptSet', (['filename'], {}), '(filename)\n', (1089, 1099), False, 'from ConceptSet import getConceptSet\n'), ((357, 384), 'numpy.argmin', 'np.argmin', (['DM[:, M]'], {'axis': '(1)'}), '(DM[:, M], axis=1)\n', (366, 384), True, 'import numpy as np\n'), ((646, 659), 'numpy.sort', 'np.sort', (['Mnew'], {}), '(Mnew)\n', (653, 659), True, 'import numpy as np\n'), ((665, 688), 'numpy.array_equal', 'np.array_equal', (['M', 'Mnew'], {}), '(M, Mnew)\n', (679, 688), True, 'import numpy as np\n'), ((705, 718), 'numpy.copy', 'np.copy', (['Mnew'], {}), '(Mnew)\n', (712, 718), True, 'import numpy as np\n'), ((737, 764), 'numpy.argmin', 'np.argmin', (['DM[:, M]'], {'axis': '(1)'}), '(DM[:, M], axis=1)\n', (746, 764), True, 'import numpy as np\n'), ((593, 610), 'numpy.argmin', 'np.argmin', (['minInd'], {}), '(minInd)\n', (602, 610), True, 'import numpy as np\n'), ((468, 495), 'numpy.where', 'np.where', (['(minInd == clust_i)'], {}), '(minInd == clust_i)\n', (476, 495), True, 'import numpy as np\n'), ((807, 834), 'numpy.where', 'np.where', (['(minInd == clust_i)'], {}), '(minInd == clust_i)\n', (815, 834), True, 'import numpy as np\n'), ((547, 577), 'numpy.ix_', 'np.ix_', (['C[clust_i]', 'C[clust_i]'], {}), '(C[clust_i], C[clust_i])\n', (553, 577), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.optimize import _lbfgsb
def objfun(x):
"""simplified objective func to test lbfgsb bound violation"""
x0 = [0.8750000000000278,
0.7500000000000153,
0.9499999999999722,
0.8214285714285992,
0.6363636363636085]
x1 = [1.0, 0.0, 1.0, 0.0, 0.0]
x2 = [1.0,
0.0,
0.9889733043149325,
0.0,
0.026353554421041155]
x3 = [1.0,
0.0,
0.9889917442915558,
0.0,
0.020341986743231205]
f0 = 5163.647901211178
f1 = 5149.8181642072905
f2 = 5149.379332309634
f3 = 5149.374490771297
g0 = np.array([-0.5934820547965749,
1.6251549718258351,
-71.99168459202559,
5.346636965797545,
37.10732723092604])
g1 = np.array([-0.43295349282641515,
1.008607936794592,
18.223666726602975,
31.927010036981997,
-19.667512518739386])
g2 = np.array([-0.4699874455100256,
0.9466285353668347,
-0.016874360242016825,
48.44999161133457,
5.819631620590712])
g3 = np.array([-0.46970678696829116,
0.9612719312174818,
0.006129809488833699,
48.43557729419473,
6.005481418498221])
if np.allclose(x, x0):
f = f0
g = g0
elif np.allclose(x, x1):
f = f1
g = g1
elif np.allclose(x, x2):
f = f2
g = g2
elif np.allclose(x, x3):
f = f3
g = g3
else:
raise ValueError(
'Simplified objective function not defined '
'at requested point')
return (np.copy(f), np.copy(g))
def test_setulb_floatround():
"""test if setulb() violates bounds
checks for violation due to floating point rounding error
"""
n = 5
m = 10
factr = 1e7
pgtol = 1e-5
maxls = 20
iprint = -1
nbd = np.full((n,), 2)
low_bnd = np.zeros(n, np.float64)
upper_bnd = np.ones(n, np.float64)
x0 = np.array(
[0.8750000000000278,
0.7500000000000153,
0.9499999999999722,
0.8214285714285992,
0.6363636363636085])
x = np.copy(x0)
f = np.array(0.0, np.float64)
g = np.zeros(n, np.float64)
fortran_int = _lbfgsb.types.intvar.dtype
wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64)
iwa = np.zeros(3*n, fortran_int)
task = np.zeros(1, 'S60')
csave = np.zeros(1, 'S60')
lsave = np.zeros(4, fortran_int)
isave = np.zeros(44, fortran_int)
dsave = np.zeros(29, np.float64)
task[:] = b'START'
for n_iter in range(7): # 7 steps required to reproduce error
f, g = objfun(x)
_lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
pgtol, wa, iwa, task, iprint, csave, lsave,
isave, dsave, maxls)
assert (x <= upper_bnd).all() and (x >= low_bnd).all(), (
"_lbfgsb.setulb() stepped to a point outside of the bounds")
|
[
"numpy.full",
"scipy.optimize._lbfgsb.setulb",
"numpy.copy",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"numpy.array"
] |
[((657, 771), 'numpy.array', 'np.array', (['[-0.5934820547965749, 1.6251549718258351, -71.99168459202559, \n 5.346636965797545, 37.10732723092604]'], {}), '([-0.5934820547965749, 1.6251549718258351, -71.99168459202559, \n 5.346636965797545, 37.10732723092604])\n', (665, 771), True, 'import numpy as np\n'), ((852, 969), 'numpy.array', 'np.array', (['[-0.43295349282641515, 1.008607936794592, 18.223666726602975, \n 31.927010036981997, -19.667512518739386]'], {}), '([-0.43295349282641515, 1.008607936794592, 18.223666726602975, \n 31.927010036981997, -19.667512518739386])\n', (860, 969), True, 'import numpy as np\n'), ((1050, 1167), 'numpy.array', 'np.array', (['[-0.4699874455100256, 0.9466285353668347, -0.016874360242016825, \n 48.44999161133457, 5.819631620590712]'], {}), '([-0.4699874455100256, 0.9466285353668347, -0.016874360242016825, \n 48.44999161133457, 5.819631620590712])\n', (1058, 1167), True, 'import numpy as np\n'), ((1248, 1365), 'numpy.array', 'np.array', (['[-0.46970678696829116, 0.9612719312174818, 0.006129809488833699, \n 48.43557729419473, 6.005481418498221]'], {}), '([-0.46970678696829116, 0.9612719312174818, 0.006129809488833699, \n 48.43557729419473, 6.005481418498221])\n', (1256, 1365), True, 'import numpy as np\n'), ((1445, 1463), 'numpy.allclose', 'np.allclose', (['x', 'x0'], {}), '(x, x0)\n', (1456, 1463), True, 'import numpy as np\n'), ((2074, 2090), 'numpy.full', 'np.full', (['(n,)', '(2)'], {}), '((n,), 2)\n', (2081, 2090), True, 'import numpy as np\n'), ((2105, 2128), 'numpy.zeros', 'np.zeros', (['n', 'np.float64'], {}), '(n, np.float64)\n', (2113, 2128), True, 'import numpy as np\n'), ((2145, 2167), 'numpy.ones', 'np.ones', (['n', 'np.float64'], {}), '(n, np.float64)\n', (2152, 2167), True, 'import numpy as np\n'), ((2178, 2293), 'numpy.array', 'np.array', (['[0.8750000000000278, 0.7500000000000153, 0.9499999999999722, \n 0.8214285714285992, 0.6363636363636085]'], {}), '([0.8750000000000278, 0.7500000000000153, 0.9499999999999722, \n 0.8214285714285992, 0.6363636363636085])\n', (2186, 2293), True, 'import numpy as np\n'), ((2342, 2353), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (2349, 2353), True, 'import numpy as np\n'), ((2363, 2388), 'numpy.array', 'np.array', (['(0.0)', 'np.float64'], {}), '(0.0, np.float64)\n', (2371, 2388), True, 'import numpy as np\n'), ((2397, 2420), 'numpy.zeros', 'np.zeros', (['n', 'np.float64'], {}), '(n, np.float64)\n', (2405, 2420), True, 'import numpy as np\n'), ((2477, 2537), 'numpy.zeros', 'np.zeros', (['(2 * m * n + 5 * n + 11 * m * m + 8 * m)', 'np.float64'], {}), '(2 * m * n + 5 * n + 11 * m * m + 8 * m, np.float64)\n', (2485, 2537), True, 'import numpy as np\n'), ((2536, 2564), 'numpy.zeros', 'np.zeros', (['(3 * n)', 'fortran_int'], {}), '(3 * n, fortran_int)\n', (2544, 2564), True, 'import numpy as np\n'), ((2574, 2592), 'numpy.zeros', 'np.zeros', (['(1)', '"""S60"""'], {}), "(1, 'S60')\n", (2582, 2592), True, 'import numpy as np\n'), ((2605, 2623), 'numpy.zeros', 'np.zeros', (['(1)', '"""S60"""'], {}), "(1, 'S60')\n", (2613, 2623), True, 'import numpy as np\n'), ((2636, 2660), 'numpy.zeros', 'np.zeros', (['(4)', 'fortran_int'], {}), '(4, fortran_int)\n', (2644, 2660), True, 'import numpy as np\n'), ((2673, 2698), 'numpy.zeros', 'np.zeros', (['(44)', 'fortran_int'], {}), '(44, fortran_int)\n', (2681, 2698), True, 'import numpy as np\n'), ((2711, 2735), 'numpy.zeros', 'np.zeros', (['(29)', 'np.float64'], {}), '(29, np.float64)\n', (2719, 2735), True, 'import numpy as np\n'), ((1504, 1522), 'numpy.allclose', 'np.allclose', (['x', 'x1'], {}), '(x, x1)\n', (1515, 1522), True, 'import numpy as np\n'), ((1811, 1821), 'numpy.copy', 'np.copy', (['f'], {}), '(f)\n', (1818, 1821), True, 'import numpy as np\n'), ((1823, 1833), 'numpy.copy', 'np.copy', (['g'], {}), '(g)\n', (1830, 1833), True, 'import numpy as np\n'), ((2862, 2989), 'scipy.optimize._lbfgsb.setulb', '_lbfgsb.setulb', (['m', 'x', 'low_bnd', 'upper_bnd', 'nbd', 'f', 'g', 'factr', 'pgtol', 'wa', 'iwa', 'task', 'iprint', 'csave', 'lsave', 'isave', 'dsave', 'maxls'], {}), '(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa, iwa,\n task, iprint, csave, lsave, isave, dsave, maxls)\n', (2876, 2989), False, 'from scipy.optimize import _lbfgsb\n'), ((1563, 1581), 'numpy.allclose', 'np.allclose', (['x', 'x2'], {}), '(x, x2)\n', (1574, 1581), True, 'import numpy as np\n'), ((1622, 1640), 'numpy.allclose', 'np.allclose', (['x', 'x3'], {}), '(x, x3)\n', (1633, 1640), True, 'import numpy as np\n')]
|
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
    A package which provides tools for generating pixel-level thermal maps that can be
    used as states in reinforcement learning for autonomous vehicles.
Author:<NAME>
"""
import numpy as np
from msgs.log import logger
from obj_state import ego_vehicle as ego_v
from obj_state import road_obj as road_o
import math
from situation_assessment import _assess_one_obj_safety
from kp2hm_utils import heat_map
##########################
######### CONFIG #########
##########################
heat_map_size = (300, 300) ##(h, w)
##########################
def get_pose(objs_info):
"""get pose heat map, the pose head map will include road_obj.position,
road_obj.orientation and road_obj.size info.
Args:
objs_info: a list of road_obj.
Return:
A ndarray with the shape (heat_map_size[0], heat_map_size[1]),
represents the pose heat map
"""
p_heat_map = np.zeros(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)
for obj_info in objs_info:
        if obj_info is not None:
            position = obj_info.get_position()  ## (x, y, z) ##
            orientation_e = obj_info.get_orientation(format='rpy')  ## euler angle (r, p, y) ##
            orientation_q = obj_info.get_orientation(format='xyzw')  ## quaternion (x, y, z, w) ##
            size = obj_info.get_size()  ## obj size (height, width, depth) ##
        else:
            ## something is wrong with the input
            raise ValueError("Got a None obj_info.")
#########################################
    #### to do (create the pose heat map) ####
#########################################
return p_heat_map
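## A minimal sketch of how the pose heat map could be filled in, assuming the
## kp2hm_utils.heat_map signature used in produce_heat_map below, that
## get_position() exposes .x/.y attributes (as in get_linear), and that
## get_size() returns a (height, width, depth) tuple; this is an illustrative
## stand-in for the to-do above, not the project's final method.
def _example_pose_heat_map(objs_info):
    hm = np.zeros(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)
    for obj_info in objs_info:
        position = obj_info.get_position()
        size = obj_info.get_size()
        sigma = math.sqrt(size[0]**2 + size[1]**2)  ## spread scales with the obj footprint
        hm += heat_map(heat_map_size, points=[[position.x, position.y]], sigma=sigma)
    return hm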
def get_linear(objs_info, direction):
"""get linear velocity heat map
Args:
objs_info: a list of road_obj.
        direction: a str indicating the direction, 'x' or 'y'
Return:
A ndarray with the shape (heat_map_size[0], heat_map_size[1]),
represents the linear vel heat map
"""
## assert ##
try:
assert direction.lower() in ['x', 'y']
except AssertionError:
logger.error('Pls ensure direction is "x" or "y", but i get '+'"%s"'%(direction))
exit(1)
## init heat map##
vel_heat_map = np.zeros(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)
    ## create heat map ##
for obj_info in objs_info:
        if obj_info is not None:
            position = obj_info.get_position()  ## (x, y, z) ##
            orientation_e = obj_info.get_orientation(format='rpy')  ## euler angle (r, p, y) ##
            orientation_q = obj_info.get_orientation(format='xyzw')  ## quaternion (x, y, z, w) ##
            size = obj_info.get_size()  ## obj size (height, width, depth) ##
            if direction.lower() == 'x':
                vel = obj_info.get_linear().x  ## a float means velocity in direction x ##
            else:
                vel = obj_info.get_linear().y  ## a float means velocity in direction y ##
        else:
            ## something is wrong with the input
            raise ValueError("Got a None obj_info.")
####################################
    ## to do (create the vel heat map) ##
####################################
return vel_heat_map
def get_angular(objs_info, axis='z'):
"""get angular rate heat map
Args:
objs_info: a list of road_obj.
        axis: a str indicating the axis, 'x', 'y' or 'z'
    Return:
        A ndarray with the shape (heat_map_size[0], heat_map_size[1]),
        represents the angular rate heat map
"""
## assert ##
try:
assert axis.lower() in ['x', 'y', 'z']
except AssertionError:
logger.error('Pls ensure direction is "x", "y", "z"')
exit(1)
## init heat map##
ang_heat_map = np.zeros(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)
    ## create heat map ##
for obj_info in objs_info:
        if obj_info is not None:
            position = obj_info.get_position()  ## (x, y, z) ##
            orientation_e = obj_info.get_orientation(format='rpy')  ## euler angle (r, p, y) ##
            orientation_q = obj_info.get_orientation(format='xyzw')  ## quaternion (x, y, z, w) ##
            size = obj_info.get_size()  ## obj size (height, width, depth) ##
            if axis.lower() == 'z':
                ang = obj_info.get_angular().z  ## a float means angular rate about axis z ##
            elif axis.lower() == 'x':
                ang = obj_info.get_angular().x  ## a float means angular rate about axis x ##
            else:
                ang = obj_info.get_angular().y  ## a float means angular rate about axis y ##
        else:
            ## something is wrong with the input
            raise ValueError("Got a None obj_info.")
####################################
    ## to do (create the ang heat map) ##
####################################
return ang_heat_map
def produce_heat_map(ego, others, h_type, hm_size=(224, 224), consider_range=50):
"""produce heat map for each safety degree
Args:
        ego: ego vehicle in carla
        others: other actors in carla
Return:
heat map
"""
assert h_type in ['danger', 'attentive', 'safe']
ego_location = ego.get_location()
ego_location = (ego_location.x, ego_location.y, ego_location.z)
ego_size = ego.bounding_box.extent
ego_size = (ego_size.x, ego_size.y, ego_size.z)
ego_velocity = ego.get_velocity()
ego_velocity = (ego_velocity.x, ego_velocity.y, ego_velocity.z)
t = ego.get_transform()
f_v = t.get_forward_vector()
cos_theta = (f_v.x*0 + f_v.y*1)/math.sqrt(f_v.x**2+ f_v.y**2)
a = math.acos(cos_theta)
if f_v.x > 0:
a = -a
    r_matrix = np.array([[math.cos(a), -math.sin(a)], [math.sin(a), math.cos(a)]])
# points = []
# sizes = []
hms = []
for vehicle in others:
location = vehicle.get_location()
location = (location.x, location.y, location.z)
distance = math.sqrt((ego_location[0] - location[0]) ** 2 + (ego_location[1] - location[1]) ** 2)
# print(vehicle, distance)
# print(distance)
if distance <= consider_range:
size = vehicle.bounding_box.extent
size = (size.x, size.y, size.z)
velocity = vehicle.get_velocity()
velocity = (velocity.x, velocity.y, velocity.z)
ego_v_state = ego_v.ego_vehicle()
ego_v_state.set_position(position=ego_location)
ego_v_state.set_linear(linear=ego_velocity)
ego_v_state.set_size(size=ego_size)
road_obj_state = road_o.road_obj()
road_obj_state.set_position(position=location)
road_obj_state.set_linear(linear=velocity)
road_obj_state.set_size(size=size)
safety_degree = _assess_one_obj_safety(ego_vehicle=ego_v_state, road_obj=road_obj_state)
max_index = np.argmax(np.array(safety_degree))
if max_index == ['danger', 'attentive', 'safe'].index(h_type):
relative_x = int(location[0] - ego_location[0])*(hm_size[1]//consider_range//2)
relative_y = int(location[1] - ego_location[1])*(hm_size[0]//consider_range//2)
                point = np.matmul(np.array([relative_x, relative_y]), r_matrix)
point_x = min(hm_size[1]-1, max(-point[0] + hm_size[1]//2, 0))
point_y = min(hm_size[0]-1, max(-point[1] + hm_size[0]//2, 0))
size = vehicle.bounding_box.extent
size = math.sqrt(size.x**2+size.y**2)
hm = heat_map(hm_size, points=[[point_x, point_y]], sigma=size*2)
hm *= safety_degree[max_index]
hms.append(hm)
if len(hms) > 0:
hm = np.sum(np.array(hms), axis=0)
return hm
else:
return np.zeros(hm_size)
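## Hedged usage sketch: assumes a running carla client with an ego vehicle
## and other vehicle actors already spawned (the variable names below are
## placeholders, not part of this module):
# others = [a for a in world.get_actors().filter('vehicle.*') if a.id != ego.id]
# danger_hm = produce_heat_map(ego, others, h_type='danger',
#                              hm_size=heat_map_size, consider_range=50)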
|
[
"obj_state.road_obj.road_obj",
"situation_assessment._assess_one_obj_safety",
"math.sqrt",
"numpy.zeros",
"math.sin",
"math.acos",
"numpy.array",
"math.cos",
"msgs.log.logger.error",
"obj_state.ego_vehicle.ego_vehicle",
"kp2hm_utils.heat_map"
] |
[((1026, 1096), 'numpy.zeros', 'np.zeros', ([], {'shape': '(heat_map_size[0], heat_map_size[1])', 'dtype': 'np.float32'}), '(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)\n', (1034, 1096), True, 'import numpy as np\n'), ((2332, 2402), 'numpy.zeros', 'np.zeros', ([], {'shape': '(heat_map_size[0], heat_map_size[1])', 'dtype': 'np.float32'}), '(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)\n', (2340, 2402), True, 'import numpy as np\n'), ((3883, 3953), 'numpy.zeros', 'np.zeros', ([], {'shape': '(heat_map_size[0], heat_map_size[1])', 'dtype': 'np.float32'}), '(shape=(heat_map_size[0], heat_map_size[1]), dtype=np.float32)\n', (3891, 3953), True, 'import numpy as np\n'), ((5710, 5730), 'math.acos', 'math.acos', (['cos_theta'], {}), '(cos_theta)\n', (5719, 5730), False, 'import math\n'), ((5672, 5706), 'math.sqrt', 'math.sqrt', (['(f_v.x ** 2 + f_v.y ** 2)'], {}), '(f_v.x ** 2 + f_v.y ** 2)\n', (5681, 5706), False, 'import math\n'), ((6040, 6130), 'math.sqrt', 'math.sqrt', (['((ego_location[0] - location[0]) ** 2 + (ego_location[1] - location[1]) ** 2)'], {}), '((ego_location[0] - location[0]) ** 2 + (ego_location[1] -\n location[1]) ** 2)\n', (6049, 6130), False, 'import math\n'), ((7885, 7902), 'numpy.zeros', 'np.zeros', (['hm_size'], {}), '(hm_size)\n', (7893, 7902), True, 'import numpy as np\n'), ((2191, 2278), 'msgs.log.logger.error', 'logger.error', (['(\'Pls ensure direction is "x" or "y", but i get \' + \'"%s"\' % direction)'], {}), '(\'Pls ensure direction is "x" or "y", but i get \' + \'"%s"\' %\n direction)\n', (2203, 2278), False, 'from msgs.log import logger\n'), ((3770, 3823), 'msgs.log.logger.error', 'logger.error', (['"""Pls ensure direction is "x", "y", "z\\""""'], {}), '(\'Pls ensure direction is "x", "y", "z"\')\n', (3782, 3823), False, 'from msgs.log import logger\n'), ((6451, 6470), 'obj_state.ego_vehicle.ego_vehicle', 'ego_v.ego_vehicle', ([], {}), '()\n', (6468, 6470), True, 'from obj_state import ego_vehicle as ego_v\n'), ((6665, 6682), 'obj_state.road_obj.road_obj', 'road_o.road_obj', ([], {}), '()\n', (6680, 6682), True, 'from obj_state import road_obj as road_o\n'), ((6873, 6945), 'situation_assessment._assess_one_obj_safety', '_assess_one_obj_safety', ([], {'ego_vehicle': 'ego_v_state', 'road_obj': 'road_obj_state'}), '(ego_vehicle=ego_v_state, road_obj=road_obj_state)\n', (6895, 6945), False, 'from situation_assessment import _assess_one_obj_safety\n'), ((7819, 7832), 'numpy.array', 'np.array', (['hms'], {}), '(hms)\n', (7827, 7832), True, 'import numpy as np\n'), ((5790, 5801), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (5798, 5801), False, 'import math\n'), ((5819, 5830), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (5827, 5830), False, 'import math\n'), ((5832, 5843), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (5840, 5843), False, 'import math\n'), ((6980, 7003), 'numpy.array', 'np.array', (['safety_degree'], {}), '(safety_degree)\n', (6988, 7003), True, 'import numpy as np\n'), ((7586, 7622), 'math.sqrt', 'math.sqrt', (['(size.x ** 2 + size.y ** 2)'], {}), '(size.x ** 2 + size.y ** 2)\n', (7595, 7622), False, 'import math\n'), ((7639, 7701), 'kp2hm_utils.heat_map', 'heat_map', (['hm_size'], {'points': '[[point_x, point_y]]', 'sigma': '(size * 2)'}), '(hm_size, points=[[point_x, point_y]], sigma=size * 2)\n', (7647, 7701), False, 'from kp2hm_utils import heat_map\n'), ((5804, 5815), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (5812, 5815), False, 'import math\n'), ((7307, 7341), 'numpy.array', 'np.array', (['[relative_x, relative_y]'], {}), '([relative_x, relative_y])\n', (7315, 7341), True, 'import numpy as np\n')]
|
# Copyright 2016-2019 <NAME> <<EMAIL>>
# Copyright 2013 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import lilv
import os
import sys
import unittest
path = os.path.abspath("bindings/bindings_test_plugin.lv2/")
if sys.version_info[0] == 2:
    import urllib
    import urlparse
    location = urlparse.urljoin("file:", urllib.pathname2url(path) + "/")
else:
from urllib.parse import urljoin
from urllib.request import pathname2url
location = urljoin("file:", pathname2url(path) + "/")
class NodeTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
def testNodes(self):
aint = self.world.new_int(1)
aint2 = self.world.new_int(1)
aint3 = self.world.new_int(3)
afloat = self.world.new_float(2.0)
atrue = self.world.new_bool(True)
afalse = self.world.new_bool(False)
auri = self.world.new_uri("http://example.org")
afile = self.world.new_file_uri(None, "/foo/bar")
astring = self.world.new_string("hello")
self.assertEqual(auri.get_turtle_token(), "<http://example.org>")
self.assertTrue(aint.is_int())
self.assertTrue(afloat.is_float())
self.assertTrue(auri.is_uri())
self.assertTrue(astring.is_string())
self.assertTrue(astring.is_literal())
self.assertFalse(auri.is_blank())
self.assertTrue(int(aint) == 1)
self.assertTrue(float(afloat) == 2.0)
self.assertTrue(bool(atrue))
self.assertFalse(bool(afalse))
self.assertEqual(afile.get_path(), "/foo/bar")
self.assertTrue(aint == aint2)
self.assertTrue(aint != aint3)
self.assertTrue(aint != afloat)
with self.assertRaises(ValueError):
int(atrue)
with self.assertRaises(ValueError):
float(aint)
with self.assertRaises(ValueError):
bool(astring)
class UriTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.world.load_all()
def testInvalidURI(self):
with self.assertRaises(ValueError):
self.plugin_uri = self.world.new_uri("invalid_uri")
def testNonExistentURI(self):
self.plugin_uri = self.world.new_uri("exist:does_not")
self.plugin = self.world.get_all_plugins().get_by_uri(self.plugin_uri)
self.assertEqual(self.plugin, None)
def testPortTypes(self):
self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_INPUT_PORT))
def testPortTypes2(self):
self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_OUTPUT_PORT))
def testPortTypes3(self):
self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_AUDIO_PORT))
def testPortTypes4(self):
self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_CONTROL_PORT))
class PluginClassTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
def testPluginClasses(self):
pclass = self.world.get_plugin_class()
self.assertIsNotNone(pclass)
self.assertIsNone(pclass.get_parent_uri())
self.assertIsNotNone(pclass.get_uri())
self.assertIsNotNone(pclass.get_label())
self.assertEqual(str(pclass.get_uri()), str(pclass))
for i in pclass.get_children():
self.assertIsNotNone(i)
self.assertIsNotNone(i.get_uri())
self.assertIsNotNone(i.get_label())
class PluginClassesTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.world.load_all()
def testPluginClasses(self):
classes = self.world.get_plugin_classes()
pclass = self.world.get_plugin_class()
self.assertIsNotNone(classes)
self.assertIsNotNone(pclass)
self.assertTrue(pclass in classes)
self.assertTrue(pclass.get_uri() in classes)
self.assertGreater(len(classes), 1)
self.assertIsNotNone(classes[0])
self.assertIsNotNone(classes[pclass.get_uri()])
with self.assertRaises(KeyError):
classes["http://example.org/notaclass"].get_uri()
class LoadTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.bundle_uri = self.world.new_uri(location)
self.world.load_specifications()
self.world.load_plugin_classes()
def testLoadUnload(self):
self.world.load_bundle(self.bundle_uri)
plugins = self.world.get_all_plugins()
plugin = plugins.get(plugins.begin())
self.world.load_resource(plugin)
self.world.unload_resource(plugin)
self.world.unload_bundle(self.bundle_uri)
class PluginTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.world.set_option(
lilv.OPTION_FILTER_LANG, self.world.new_bool(True)
)
self.bundle_uri = self.world.new_uri(location)
self.assertIsNotNone(
self.bundle_uri, "Invalid URI: '" + location + "'"
)
self.world.load_bundle(self.bundle_uri)
self.plugins = self.world.get_all_plugins()
self.plugin = self.plugins.get(self.plugins.begin())
self.assertTrue(self.plugin.verify())
self.assertTrue(self.plugin in self.plugins)
self.assertTrue(self.plugin.get_uri() in self.plugins)
self.assertEqual(self.plugins[self.plugin.get_uri()], self.plugin)
with self.assertRaises(KeyError):
self.plugins["http://example.org/notaplugin"].get_uri()
self.assertIsNotNone(
self.plugin,
msg="Test plugin not found at location: '" + location + "'",
)
self.assertEqual(location, str(self.plugin.get_bundle_uri()))
self.plugin_uri = self.plugin.get_uri()
self.assertEqual(
self.plugin.get_uri(), self.plugin_uri, "URI equality broken"
)
self.lv2_InputPort = self.world.new_uri(lilv.LILV_URI_INPUT_PORT)
self.lv2_OutputPort = self.world.new_uri(lilv.LILV_URI_OUTPUT_PORT)
self.lv2_AudioPort = self.world.new_uri(lilv.LILV_URI_AUDIO_PORT)
self.lv2_ControlPort = self.world.new_uri(lilv.LILV_URI_CONTROL_PORT)
def testGetters(self):
self.assertEqual(
self.world.get_symbol(self.plugin), "lilv_bindings_test_plugin"
)
self.assertIsNotNone(self.plugin.get_bundle_uri())
self.assertGreater(len(self.plugin.get_data_uris()), 0)
self.assertIsNotNone(self.plugin.get_library_uri())
self.assertTrue(self.plugin.get_name().is_string())
self.assertTrue(self.plugin.get_class().get_uri().is_uri())
self.assertEqual(
len(self.plugin.get_value(self.world.ns.doap.license)), 1
)
licenses = self.plugin.get_value(self.world.ns.doap.license)
features = self.plugin.get_value(self.world.ns.lv2.optionalFeature)
self.assertEqual(len(licenses), 1)
self.assertTrue(licenses[0] in licenses)
with self.assertRaises(IndexError):
self.assertIsNone(licenses[len(licenses)])
self.assertEqual(
len(licenses) + len(features), len(licenses.merge(features))
)
self.assertEqual(
licenses.get(licenses.begin()),
self.world.new_uri("http://opensource.org/licenses/isc"),
)
self.assertEqual(licenses[0], licenses.get(licenses.begin()))
self.assertTrue(
self.plugin.has_feature(self.world.ns.lv2.hardRTCapable)
)
self.assertEqual(len(self.plugin.get_supported_features()), 1)
self.assertEqual(len(self.plugin.get_optional_features()), 1)
self.assertEqual(len(self.plugin.get_required_features()), 0)
self.assertFalse(
self.plugin.has_extension_data(
self.world.new_uri("http://example.org/nope")
)
)
self.assertEqual(len(self.plugin.get_extension_data()), 0)
self.assertEqual(len(self.plugin.get_extension_data()), 0)
self.assertFalse(self.plugin.has_latency())
self.assertIsNone(self.plugin.get_latency_port_index())
def testPorts(self):
self.assertEqual(self.plugin.get_num_ports(), 4)
self.assertIsNotNone(self.plugin.get_port(0))
self.assertIsNotNone(self.plugin.get_port(1))
self.assertIsNotNone(self.plugin.get_port(2))
self.assertIsNotNone(self.plugin.get_port(3))
self.assertIsNone(self.plugin.get_port_by_index(4))
self.assertIsNotNone(self.plugin.get_port("input"))
self.assertIsNotNone(self.plugin.get_port("output"))
self.assertIsNotNone(self.plugin.get_port("audio_input"))
self.assertIsNotNone(self.plugin.get_port("audio_output"))
self.assertIsNone(self.plugin.get_port_by_symbol("nonexistent"))
self.assertIsNone(
self.plugin.get_port_by_designation(
self.world.ns.lv2.InputPort, self.world.ns.lv2.control
)
)
self.assertIsNone(self.plugin.get_project())
self.assertIsNone(self.plugin.get_author_name())
self.assertIsNone(self.plugin.get_author_email())
self.assertIsNone(self.plugin.get_author_homepage())
self.assertFalse(self.plugin.is_replaced())
self.assertEqual(
0,
len(
self.plugin.get_related(
self.world.new_uri("http://example.org/Type")
)
),
)
self.assertEqual(
1,
self.plugin.get_num_ports_of_class(
self.lv2_InputPort, self.lv2_AudioPort
),
)
port = self.plugin.get_port("input")
self.assertEqual(self.world.get_symbol(port), "input")
self.assertTrue(port.get_node().is_blank())
self.assertEqual(0, port.get(self.world.ns.lv2.index))
self.assertEqual(1, len(port.get_value(self.world.ns.lv2.symbol)))
self.assertEqual(port.get_value(self.world.ns.lv2.symbol)[0], "input")
self.assertFalse(port.has_property(self.world.ns.lv2.latency))
self.assertFalse(port.supports_event(self.world.ns.midi.MidiEvent))
self.assertEqual(0, port.get_index())
self.assertEqual("input", port.get_symbol())
self.assertEqual("Input", port.get_name())
self.assertEqual(
[
str(self.world.ns.lv2.ControlPort),
str(self.world.ns.lv2.InputPort),
],
sorted(list(map(str, port.get_classes()))),
)
self.assertTrue(port.is_a(self.world.ns.lv2.ControlPort))
self.assertFalse(port.is_a(self.world.ns.lv2.AudioPort))
self.assertEqual((0.5, 0.0, 1.0), port.get_range())
self.assertEqual(0, len(port.get_properties()))
def testScalePoints(self):
port = self.plugin.get_port("input")
points = port.get_scale_points()
point_dict = {
float(points[0].get_value()): points[0].get_label(),
float(points[1].get_value()): points[1].get_label(),
}
self.assertEqual(point_dict, {0.0: "off", 1.0: "on"})
def testPortCount(self):
self.assertEqual(
1,
self.plugin.get_num_ports_of_class(
self.lv2_OutputPort, self.lv2_AudioPort
),
)
self.assertEqual(
1,
self.plugin.get_num_ports_of_class(
self.lv2_OutputPort, self.lv2_ControlPort
),
)
self.assertEqual(
1,
self.plugin.get_num_ports_of_class(
self.lv2_InputPort, self.lv2_AudioPort
),
)
self.assertEqual(
1,
self.plugin.get_num_ports_of_class(
self.lv2_InputPort, self.lv2_ControlPort
),
)
class QueryTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.world.load_all()
self.bundle_uri = self.world.new_uri(location)
self.world.load_bundle(self.bundle_uri)
self.plugins = self.world.get_all_plugins()
self.plugin = self.plugins.get(self.plugins.begin())
def testNamespaces(self):
self.assertEqual(self.world.ns.lv2, "http://lv2plug.in/ns/lv2core#")
self.assertEqual(
self.world.ns.lv2.Plugin, "http://lv2plug.in/ns/lv2core#Plugin"
)
def testQuery(self):
self.assertTrue(
self.world.ask(
None, self.world.ns.rdf.type, self.world.ns.lv2.Plugin
)
)
self.assertLess(
0,
len(
self.world.find_nodes(
None, self.world.ns.rdf.type, self.world.ns.lv2.Plugin
)
),
)
self.assertEqual(
self.plugin.get_uri(),
self.world.get(
None, self.world.ns.rdf.type, self.world.ns.lv2.Plugin
),
)
class InstanceTests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.bundle_uri = self.world.new_uri(location)
self.world.load_bundle(self.bundle_uri)
self.plugins = self.world.get_all_plugins()
self.plugin = self.plugins[0]
self.instance = lilv.Instance(self.plugin, 48000)
self.assertEqual(self.plugin.get_uri(), self.instance.get_uri())
self.assertIsNone(
self.instance.get_extension_data(
self.world.new_uri("http://example.org/ext")
)
)
self.assertIsNone(
self.instance.get_extension_data("http://example.org/ext")
)
def testRun(self):
try:
import numpy
except ImportError:
sys.stderr.write("warning: Missing numpy, not testing instance\n")
return
n_samples = 100
buf = numpy.zeros(n_samples)
with self.assertRaises(Exception):
self.instance.connect_port(0, "hello")
self.instance.connect_port(0, None)
self.instance.connect_port(0, None)
self.instance.connect_port(2, buf)
self.instance.connect_port(3, buf)
self.instance.activate()
self.instance.run(n_samples)
self.instance.deactivate()
class UITests(unittest.TestCase):
def setUp(self):
self.world = lilv.World()
self.bundle_uri = self.world.new_uri(location)
self.world.load_bundle(self.bundle_uri)
self.plugins = self.world.get_all_plugins()
self.plugin = self.plugins[0]
def testUI(self):
uis = self.plugin.get_uis()
ui_uri = self.world.new_uri(
"http://example.org/lilv-bindings-test-plugin-ui"
)
self.assertEqual(1, len(uis))
self.assertEqual(str(uis[0]), str(ui_uri))
with self.assertRaises(KeyError):
uis["http://example.org/notaui"].get_uri()
self.assertEqual(uis[0], str(ui_uri))
self.assertEqual(uis[0].get_uri(), ui_uri)
self.assertEqual(uis[0].get_bundle_uri(), self.bundle_uri)
self.assertEqual(
uis[0].get_binary_uri(), str(self.bundle_uri) + "TODO"
)
self.assertEqual(uis[uis[0].get_uri()], uis[0])
self.assertTrue(uis[0].is_a(self.world.ns.ui.GtkUI))
self.assertTrue(uis[0] in uis)
self.assertTrue(uis[0].get_uri() in uis)
self.assertEqual([self.world.ns.ui.GtkUI], list(uis[0].get_classes()))
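# Hedged addition: a conventional entry point so the suite can also be run
# directly (the upstream build may invoke these tests through its own runner).
if __name__ == "__main__":
    unittest.main()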
|
[
"os.path.abspath",
"urllib.request.pathname2url",
"lilv.Instance",
"numpy.zeros",
"lilv.World",
"sys.stderr.write"
] |
[((852, 905), 'os.path.abspath', 'os.path.abspath', (['"""bindings/bindings_test_plugin.lv2/"""'], {}), "('bindings/bindings_test_plugin.lv2/')\n", (867, 905), False, 'import os\n'), ((1327, 1339), 'lilv.World', 'lilv.World', ([], {}), '()\n', (1337, 1339), False, 'import lilv\n'), ((2718, 2730), 'lilv.World', 'lilv.World', ([], {}), '()\n', (2728, 2730), False, 'import lilv\n'), ((3634, 3646), 'lilv.World', 'lilv.World', ([], {}), '()\n', (3644, 3646), False, 'import lilv\n'), ((4232, 4244), 'lilv.World', 'lilv.World', ([], {}), '()\n', (4242, 4244), False, 'import lilv\n'), ((4902, 4914), 'lilv.World', 'lilv.World', ([], {}), '()\n', (4912, 4914), False, 'import lilv\n'), ((5440, 5452), 'lilv.World', 'lilv.World', ([], {}), '()\n', (5450, 5452), False, 'import lilv\n'), ((12631, 12643), 'lilv.World', 'lilv.World', ([], {}), '()\n', (12641, 12643), False, 'import lilv\n'), ((13767, 13779), 'lilv.World', 'lilv.World', ([], {}), '()\n', (13777, 13779), False, 'import lilv\n'), ((13997, 14030), 'lilv.Instance', 'lilv.Instance', (['self.plugin', '(48000)'], {}), '(self.plugin, 48000)\n', (14010, 14030), False, 'import lilv\n'), ((14597, 14619), 'numpy.zeros', 'numpy.zeros', (['n_samples'], {}), '(n_samples)\n', (14608, 14619), False, 'import numpy\n'), ((15071, 15083), 'lilv.World', 'lilv.World', ([], {}), '()\n', (15081, 15083), False, 'import lilv\n'), ((1221, 1239), 'urllib.request.pathname2url', 'pathname2url', (['path'], {}), '(path)\n', (1233, 1239), False, 'from urllib.request import pathname2url\n'), ((14472, 14538), 'sys.stderr.write', 'sys.stderr.write', (['"""warning: Missing numpy, not testing instance\n"""'], {}), "('warning: Missing numpy, not testing instance\\n')\n", (14488, 14538), False, 'import sys\n')]
|
# fitsviewer.py
# Server side of fits viewer client in browser
# Copyright 2014 by <NAME>
# License: GPLv3
from __future__ import division,print_function
from random import *
import os
import math
import astropy.io.fits as pyfits
import numpy as np
import copy
import astropy.wcs as pywcs
import hashlib
# astropyp modules
import astropyp.utils.core as core
from ...utils import tools
from ..photometry import detect_sources
from ..photometry import catalog
from .fits_core import *
def getTempPath(id,tile):
try:
#hash_dir=hashlib.md5(id['userId']+id['sessionId']+tile['fileId']+str(tile['frame'])).hexdigest()
hash_dir=hashlib.md5(tile['fileId']+str(tile['frame'])).hexdigest()
except KeyError:
raise core.AstropypError("Missing parameters to generate path for temp directory")
tempPath=os.path.join(core.active_users[id['userId']].stored_dirs['session'][id['sessionId']],hash_dir)
return os.path.relpath(tempPath,core.ROOT_DIR)
def checkTempPath(id,params):
path=getTempPath(id,params)
fullpath=os.path.join(core.ROOT_DIR,path)
try:
os.makedirs(fullpath)
except OSError:
if not os.path.isdir(fullpath):
raise core.AstropypError("Could not access temp directory")
return [path,fullpath]
def getImageProperties(hdu):
"""
getImageProperties
Get important properties of a fits hdu. Many of the properties are loaded from the header but a number of them
are also derived.
Parameters
----------
hdu: astropy.io.fits.IMAGEHDU
-Frame of the fits file that data is extracted from
Returns
-------
hdu.properties: dictionary
- The function will generate the properties of an hdu and save them in the attribute hdu.properties, which is
also returned by the function
"""
try:
return hdu.properties
except AttributeError:
try:
hdu.properties={
'width':hdu.header['NAXIS1'],
'height':hdu.header['NAXIS2']
}
except KeyError:
raise core.AstropypError("Tried to load image properties for non-image hdu")
try:
        xyRanges=[list(map(int,coordRange.split(':'))) for coordRange in hdu.header['DETSEC'][1:-1].split(',')]
except KeyError:
xyRanges=[[1,hdu.properties['width']],[1,hdu.properties['height']]]
hdu.properties['minCoords']=[xyRanges[0][0],xyRanges[1][0]]
hdu.properties['maxCoords']=[xyRanges[0][1],xyRanges[1][1]]
hdu.properties['dataMin']=float(np.amin(hdu.data))
hdu.properties['dataMax']=float(np.amax(hdu.data))
try:
hdu.wcs=pywcs.WCS(hdu.header)
# Call a function that will fail if wcs was not loaded properly
        # TODO: The error given is InconsistentAxisTypesError, but
        # the documentation doesn't list the module that contains that error.
        # For completeness the exact error should be trapped.
temp=hdu.wcs.get_axis_types()
hdu.properties['wcsAvailable']=True
    except Exception:
print("WCS not available")
hdu.properties['wcsAvailable']=False
return hdu.properties
def getWindow(viewer):
"""
getWindow
To save space and ensure that zooming in and out of an image is always centered on the same location, only
the center coordinates (xCenter,yCenter) are stored for a fits viewer window. This function calculates the
upper left and lower right coordinates of a fitsviewer window based on the canvas size and scale of the image.
Parameters
----------
viewer: dictionary
- Dictionary of fitsviewer properties. Required keys:
xCenter, yCenter: int
- x and y coordinates at the center of the image
canvasWidth,canvasHeight: int
- Width and height of the client canvas displaying the fits image
scale: float
- Scale of the image (scale=1 has 1 pixel for each element of the image array)
Returns
-------
viewer: dictionary
- The same viewer sent to the function is returned with its upper left boundary (x0,y0) and lower right boundary
(xf,yf) calculated
"""
viewer['x0']=int(viewer['xCenter']-viewer['canvasWidth']/viewer['scale']/2)
viewer['y0']=int(viewer['yCenter']-viewer['canvasHeight']/viewer['scale']/2)
viewer['xf']=int(viewer['x0']+viewer['canvasWidth']/viewer['scale'])
viewer['yf']=int(viewer['y0']+viewer['canvasHeight']/viewer['scale'])
return viewer
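## Worked example with illustrative numbers (not taken from real data): for
## xCenter=500, yCenter=400, canvasWidth=600, canvasHeight=400, scale=2, the
## window is x0=500-600/2/2=350, y0=400-400/2/2=300, xf=350+600/2=650,
## yf=300+400/2=500, i.e. a 300x200 pixel region displayed at 2x zoom.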
def getBestFit(hdu,viewer):
"""
getBestFit
Calculate the scale and viewer boundaries needed to fit an entire FITS image in the clients canvas viewer.
Parameters
----------
hdu: pyfits image hdu
- The image hdu (FITS frame that contains the image data)
viewer: dictionary
- Dictionary of fitsviewer properties. Required keys:
xCenter, yCenter: int
- x and y coordinates at the center of the image
canvasWidth,canvasHeight: int
- Width and height of the client canvas displaying the fits image
scale: float
- Scale of the image (scale=1 has 1 pixel for each element of the image array)
Returns
-------
viewer: dictionary
- The same viewer sent to the function is returned with its upper left boundary (x0,y0) and lower right boundary
(xf,yf) calculated
"""
xScale=viewer['canvasWidth']/hdu.properties['width']
yScale=viewer['canvasHeight']/hdu.properties['height']
    scale=yScale
if xScale<yScale:
scale=xScale
viewer['xCenter']=hdu.properties['width']/2
viewer['yCenter']=hdu.properties['height']/2
viewer['scale']=scale
viewer=getWindow(viewer)
return viewer
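## Worked example with illustrative numbers: a 2000x1000 image in a 500x500
## canvas gives xScale=0.25 and yScale=0.5, so scale=0.25 is chosen and the
## whole image fits with the view centered at (1000, 500).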
def getMosaicWindow(viewer,minCoords):
"""
getMosaicWindow
Mosaic images should contain fields in the header that give the coordinates of the lower left-hand corner of the image.
This function uses those coordinates to calculate the position of the window so that the viewer is properly centered
on the image.
Parameters
----------
viewer: dictionary
- Dictionary of fitsviewer properties. Required keys:
xCenter, yCenter: int
- x and y coordinates at the center of the image
canvasWidth,canvasHeight: int
- Width and height of the client canvas displaying the fits image
scale: float
- Scale of the image (scale=1 has 1 pixel for each element of the image array)
minCoords: list
        - x,y physical coordinates of the lower left-hand corner of the image.
Returns
-------
viewer: dictionary
- The same viewer sent to the function is returned with its upper left boundary (x0,y0) and lower right boundary
(xf,yf) calculated
"""
    viewer['xCenter']=viewer['xCenter']-minCoords[0]
    viewer['yCenter']=viewer['yCenter']-minCoords[1]
return viewer
def loadFitsFile(id,params):
"""
loadFitsFile
Load a fits file on the server. The function first checks to see if the file has already been loaded into memory,
if it hasn't it uses astropy.io.fits (pyfits) to load the file into memory. It then sends a response to the client
that contains the properties of the fits file along with the number of image frames contained in the file.
Parameters
----------
id: dictionary
- dictionary that contains the following keys:
userId: string
- Unique identifier of the current user
sessionId: string
-Unique identifier of the current session
requestId: string
-Unique identifier for the current request sent by the client
params: dictionary
- Must contain either 'fileId' key or 'path' and 'filename' keys used to open the fits file
Returns
-------
response: dictionary
- The response is always a dictionary that contains an id field (in this case 'fits file') that identifies
the type of response the client is receiving.
- Other keys:
properties: dictionary
- Properties of the fits file. This includes all of the parameters sent to the function in params
as well as any quantities calculated in the function
"""
if 'fileId' not in params:
params['fileId']=getFileId(params)
try:
hdulist=openFits[params['fileId']]
except KeyError:
hdulist=pyfits.open(os.path.join(params['path'],params['filename']))
properties=params
properties['imageHDUlist']=[]
for n,hdu in enumerate(hdulist):
if (isinstance(hdu, pyfits.hdu.image.ImageHDU) or
isinstance(hdu, pyfits.hdu.compressed.CompImageHDU) or
len(hdulist)==1):
properties['imageHDUlist'].append(n)
properties['frames']=len(properties['imageHDUlist'])
hdulist.properties=properties
openFits[params['fileId']]=hdulist
response={
'id':"fits file",
'properties':hdulist.properties
}
print('new fits file:',response)
return response
def loadMosaic(id,params):
"""
loadMosaic
Loads mosaic information, open image hdu's, calculates the properties for each image hdu, and sends each
image to the client as a png file.
Parameters
----------
id: dictionary
- dictionary that contains the following keys:
userId: string
- Unique identifier of the current user
sessionId: string
-Unique identifier of the current session
requestId: string
-Unique identifier for the current request sent by the client
params: dictionary
- Must contain either 'fileId' key or 'path' and 'filename' keys used to open the fits file
Returns
-------
None
"""
hdulist=getHDUlist(id,params)
# We need to know the min and max values of the entire mosaic image so that each frame
# uses the same color map
if 'dataMin' not in hdulist.properties:
hdulist.properties['dataMin']=float("inf")
hdulist.properties['dataMax']=float("-inf")
hdulist.properties['minCoords']=[float("inf"),float("inf")]
hdulist.properties['maxCoords']=[float("-inf"),float("-inf")]
for n in hdulist.properties['imageHDUlist']:
core.progress_log("Loading properties for frame "+str(n), id);
hdu=hdulist[n]
getImageProperties(hdu)
print("max min for frame",n,hdu.properties['minCoords'],hdu.properties['maxCoords'])
if hdu.properties['dataMin']<hdulist.properties['dataMin']:
hdulist.properties['dataMin']=hdu.properties['dataMin']
if hdu.properties['dataMax']>hdulist.properties['dataMax']:
hdulist.properties['dataMax']=hdu.properties['dataMax']
if hdu.properties['minCoords']<hdulist.properties['minCoords']:
hdulist.properties['minCoords']=hdu.properties['minCoords']
if hdu.properties['maxCoords']>hdulist.properties['maxCoords']:
hdulist.properties['maxCoords']=hdu.properties['maxCoords']
hdulist.properties['width']=hdulist.properties['maxCoords'][0]-hdulist.properties['minCoords'][0]
hdulist.properties['height']=hdulist.properties['maxCoords'][1]-hdulist.properties['minCoords'][1]
core.respond(id, {
'id':"update fits file",
'properties':hdulist.properties
})
    if 'scale' not in params or params['scale']<0:
        params=getBestFit(hdulist,params)
    else:
        params=getWindow(params)
core.progress_log("Loading images...", id);
for n in hdulist.properties['imageHDUlist']:
hdu=hdulist[n]
minCoords=hdu.properties['minCoords']
maxCoords=hdu.properties['maxCoords']
# only load images that are within the boundaries of the fits viewer window
if (minCoords[0]<params['xf'] and maxCoords[0]>params['x0'] and
minCoords[1]<params['yf'] and maxCoords[1]>params['y0']
):
params['frame']=n
params['mosaic']=True
print("Params:",params)
core.respond(id, loadImageHDU(id,params))
else:
print("Skipped frame",n)
return {}
def loadImageHDU(id,params):
core.check4key(params,['fileId','frame','colormap','canvasWidth','canvasHeight'])
hdulist=getHDUlist(id,params)
try:
hdu=hdulist[params['frame']]
    except (KeyError, IndexError):
raise core.AstropypError("Frame not found in fits image")
path,fullpath=checkTempPath(id,params)
getImageProperties(hdu)
if 'scale' not in params.keys() or params['scale']<0:
params=getBestFit(hdu,params)
elif params['scale']>1:
params['scale']=math.floor(params['scale'])
params.update(hdu.properties)
params=getWindow(params)
if 'tile_width' not in params.keys():
params['tile_width']=DEFAULT_TILE_WIDTH
if 'tile_height' not in params.keys():
params['tile_height']=DEFAULT_TILE_HEIGHT
if 'mosaic' not in params.keys():
params['mosaic']=False
params['columns']=math.ceil(hdu.properties['width']/params['tile_width']*params['scale'])
params['rows']=math.ceil(hdu.properties['height']/params['tile_height']*params['scale'])
core.respond(id, {
'id':"image properties",
'properties':params
})
if 'clear_dir' in params:
        for root,dirs,files in os.walk(fullpath, topdown=False):
            for file in files:
                os.remove(os.path.join(root,file))
            for dir in dirs:
                os.rmdir(os.path.join(root,dir))
params.pop('clear_dir')
x0=params['x0']
y0=params['y0']
xf=params['xf']
yf=params['yf']
if params['mosaic']:
x0=x0-hdu.properties['minCoords'][0]
y0=y0-hdu.properties['minCoords'][1]
xf=xf-hdu.properties['minCoords'][0]
yf=yf-hdu.properties['minCoords'][1]
minCol=int(max(0,math.floor(x0*params['scale']/params['tile_width'])))
maxCol=int(min(params['columns'],math.ceil(xf*params['scale']/params['tile_width'])))
minRow=int(max(0,math.floor(y0*params['scale']/params['tile_height'])))
maxRow=int(min(params['rows'],math.ceil(yf*params['scale']/params['tile_height'])))
for row in range(minRow,maxRow):
for col in range(minCol,maxCol):
params['x']=int(col*params['tile_width']/params['scale'])
params['y']=int(row*params['tile_height']/params['scale'])
if params['x']<hdu.properties['width'] and params['y']<hdu.properties['height']:
params['filetype']='png'
core.respond(id, loadTile(id,params))
return {}
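## Worked example with illustrative numbers: for an image of width 2048 at
## scale=0.5 with tile_width=400, columns=ceil(2048/400*0.5)=3; a window with
## x0=100 and xf=900 then spans minCol=floor(100*0.5/400)=0 through
## maxCol=ceil(900*0.5/400)=2, so only tiles 0 and 1 in each row are loaded.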
def loadTile(id,params):
import astro_pypelines
core.check4key(params,['frame','x','y','tile_width','tile_height','scale','colormap','filetype'])
hdulist=getHDUlist(id,params)
try:
hdu=hdulist[params['frame']]
    except (KeyError, IndexError):
raise core.AstropypError("Frame not found in fits image")
if not hasattr(hdu,'properties'):
getImageProperties(hdu)
tileData=[]
tile=copy.deepcopy(params)
tile.update({
'id':"tilepng",
'tileId':getTileId(params)
})
path,fullpath=checkTempPath(id,params)
pngName=os.path.join(path,tile['tileId']+".png")
filename=os.path.join(core.ROOT_DIR,pngName)
tile['pngName']=pngName
#if (
# params['x']<0 or params['y']<0 or
# params['x']>hdu.properties['width'] or params['y']>hdu.properties['height']
#):
# raise core.AstropypError("Tile at ("+str(params['x'])+","+str(params['y'])+") is not located in the image")
xmin=max(params['x'],0)
ymin=max(params['y'], 0)
if params['scale']>1:
params['scale']=math.floor(params['scale'])
tile['tile_width']=min(int((hdu.properties['width']-xmin-1)*params['scale']),params['tile_width'])
tile['tile_height']=min(int((hdu.properties['height']-ymin-1)*params['scale']),params['tile_height'])
if tile['tile_width']<=0 or tile['tile_height']<=0:
return {}
xmax=int(xmin+tile['tile_width']/params['scale'])
ymax=int(ymin+tile['tile_height']/params['scale'])
tile['x']=xmin
tile['y']=ymin
tile['minCoords']=hdu.properties['minCoords']
tile['maxCoords']=hdu.properties['maxCoords']
if not os.path.isfile(filename):
if params['scale']==1:
tileDataArray=hdu.data[ymin:ymax,xmin:xmax]
tileData=tileDataArray.tolist()
elif params['scale']>1:
tileDataArray=hdu.data[ymin:ymax,xmin:xmax]
tileDataArray=np.kron(tileDataArray,np.ones((params['scale'],params['scale'])))
tileData=tileDataArray.tolist()
elif params['scale']<1 and params['scale']>0:
xIdx=np.linspace(xmin,xmax,tile['tile_width'])
yIdx=np.linspace(ymin,ymax,tile['tile_height'])
xIdx=np.array(xIdx,np.int)
yIdx=np.reshape(np.array(yIdx,np.int),(yIdx.size,1))
tileDataArray=hdu.data[yIdx,xIdx]
tileData=tileDataArray.tolist()
else:
raise core.AstropypError("Invalid scale sent to server")
if len(tileData)>0:
if params['filetype']=='png':
#tile['colormap']['colorFunc']="GRAY"
import astro_pypelines.pypelines.fitsviewer.png
if not astro_pypelines.pypelines.fitsviewer.png.buildImageTileC(filename,tile['colormap']['dataMin'],tile['colormap']['dataMax'],
tile['colormap']['scale'],tile['colormap']['colorFunc'],tileData):
raise core.AstropypError("Unable to load tile")
elif params['filetype']=='pixels':
tile['y']=int(tile['y']+tile['tile_height']/params['scale'])
            tile['data']=[list(map(int,row)) for row in tileData]
tile['id']='image data'
tile['dataType']=params['dataType']
return tile
else:
raise core.AstropypError("Unrecognized filetype")
else:
raise core.AstropypError("Empty dataset sent to server")
tile['y']=int(tile['y']+tile['tile_height']/params['scale'])
if params['mosaic']:
tile['x']=int(tile['x']+tile['minCoords'][0])
tile['y']=int(tile['y']+tile['minCoords'][1])
return tile
def loadHeader(id,params):
core.check4key(params,['frame','fileId'])
hdulist=getHDUlist(id,params)
try:
header=hdulist[params['frame']].header
    except (KeyError, IndexError):
raise core.AstropypError("Frame not found in fits file")
headerList=[]
for key in header.keys():
headerList.append([key,str(header[key]),header.comments[key]])
response={
'id':"fitsHeader",
'fileId':params['fileId'],
'frame':params['frame'],
'header':headerList
}
return response
def loadDatapoint(id,params):
core.check4key(params,['frame','fileId','x','y'])
hdulist=getHDUlist(id,params)
hdu=hdulist[params['frame']]
ra="pywcs must be installed"
dec="on server to activate wcs"
if hdu.properties['wcsAvailable']:
wcsArray=hdu.wcs.all_pix2world(np.array([[params['x'],params['y']]],np.float_),1)
ra=str(wcsArray[0][0])
dec=str(wcsArray[0][1])
try:
response={
'id':"dataPoint",
'dataPoint':str(hdu.data[params['y']][params['x']]),
'ra':ra,
'dec':dec
}
    except (KeyError, IndexError):
response={}
return response
def load2dGaussFit(id,params):
core.check4key(params,['fileId','frame','x','y','tile_width','tile_height'])
hdulist=getHDUlist(id,params)
try:
hdu=hdulist[params['frame']]
    except (KeyError, IndexError):
raise core.AstropypError("Frame not found in fits image")
xmin=params['x']
ymin=params['y']
xmax=min(int(hdu.properties['width']),xmin+params['tile_width'])
ymax=min(int(hdu.properties['height']),ymin+params['tile_height'])
data=hdu.data[ymin:ymax,xmin:xmax]
# Center the tile on the pixel with the highest value
yIndex,xIndex=np.unravel_index(data.argmax(),data.shape)
xmin=xmin+xIndex-(params['tile_width']>>1)
ymin=ymin+yIndex-(params['tile_height']>>1)
xmax=min(int(hdu.properties['width']),xmin+params['tile_width'])
ymax=min(int(hdu.properties['height']),ymin+params['tile_height'])
data=hdu.data[ymin:ymax,xmin:xmax]
response=params
try:
response.update(tools.getGaussFit2d(id,{'data':data})['moments'])
except RuntimeError:
raise core.AstropypError("Fit does not converge")
response['x']=int(xmin)
response['y']=int(ymin)
response['x_mean']=float(response['x_mean']+xmin)
response['y_mean']=float(response['y_mean']+ymin)
response['ra']="pywcs must be installed"
response['dec']="on server to activate wcs"
if hdu.properties['wcsAvailable']:
wcsArray=hdu.wcs.all_pix2world(np.array([[response['x_mean'],response['y_mean']]],np.float_),1)
response['ra']=str(wcsArray[0][0])
response['dec']=str(wcsArray[0][1])
return response
def wcsAlign(id,params):
core.check4key(params,['reference','openFiles'])
ref_hdulist=getHDUlist(id,params['reference'])
try:
hdu=ref_hdulist[params['reference']['frame']]
    except (KeyError, IndexError):
raise core.AstropypError("Frame not found in reference fits image")
if hdu.properties['wcsAvailable']:
wcsArray=hdu.wcs.all_pix2world(np.array([[params['reference']['xCenter'],params['reference']['yCenter']]],np.float_),1)
ra=wcsArray[0][0]
dec=wcsArray[0][1]
else:
raise core.AstropypError('Reference image has no wcs information available')
files=[]
for i,file in enumerate(params['openFiles']):
hdulist=getHDUlist(id,file)
try:
hdu=hdulist[file['frame']]
        except (KeyError, IndexError):
raise core.AstropypError("Frame not found in fits image")
if hdu.properties['wcsAvailable']:
wcsArray=hdu.wcs.wcs_world2pix(np.array([[ra,dec]],np.float_),1)
xCenter=wcsArray[0][0]
yCenter=wcsArray[0][1]
            ## only append files with wcs information; otherwise xCenter/yCenter are undefined
            files.append({
                'fileId':file['fileId'],
                'frame':file['frame'],
                'xCenter':xCenter,
                'yCenter':yCenter,
                'scale':params['reference']['scale'] # assumes all images have the same scale TODO: calculate scale
            })
response={
'id':'wcsAlignment',
'files':files
}
return response
def buildDetectParams(all_params,hdu=None):
mandatory_params=[
'apertureType',
'maxima_sigma',
'fit_method'
]
detect_params={}
for param in mandatory_params:
detect_params[param]=all_params[param]
if all_params['apertureType']=='width':
detect_params['maxima_size']=all_params['maxima_size']
elif all_params['apertureType']=='radius':
detect_params['maxima_size']=all_params['maxima_radius']
elif all_params['apertureType']=='footprint':
detect_params['maxima_footprint']=all_params['maxima_footprint']
else:
raise core.AstropypError('Invalid apertureType:'+detect_params['apertureType'])
if not all_params['auto_aperture']:
detect_params['aperture_radii']=all_params['aperture_radii']
print('radii:',all_params['aperture_radii'])
        for n,radius in enumerate(all_params['aperture_radii']):
print('radius:',radius)
if radius[1]=='px':
all_params['aperture_radii'][n]=radius[0]
else:
print('app radius:',radius[1])
raise core.AstropypError("Only aperture_radii='px' is supported at this time")
if not all_params['auto_thresh']:
detect_params['threshold']=all_params['threshold']
if all_params['saturation_method']=='fitsHeader':
detect_params['saturate']=hdu.header[all_params['saturate_key']]
elif all_params['saturation_method']=='userSpecify':
detect_params['saturate']=all_params['saturation']
elif all_params['saturation_method']!='none':
raise core.AstropypError('Invalid saturation method')
if not all_params['auto_binStruct']:
print('binStruct:',all_params['binStruct'])
detect_params['binStruct']=all_params['binStruct']
if not all_params['auto_margin']:
detect_params['margin']=all_params['margin']
return detect_params
def findStars(id,params):
core.check4key(params,['fitsInfo','detectParams','catInfo'])
core.check4key(params['catInfo'],['path','filename'])
hdulist=getHDUlist(id,params['fitsInfo'])
try:
hdu=hdulist[params['fitsInfo']['frame']]
    except (KeyError, IndexError):
raise core.AstropypError("Frame not found in fits image")
data=hdu.data
detect_params=buildDetectParams(params['detectParams'],hdu)
detect_params['id']=id
sources,badPoints=detect_sources.findStars(data,**detect_params)
print('first source:', sources[0])
print('dtypes:', sources.dtype.names)
srcCat=catalog.Catalog(
catPath=params['catInfo']['path'],
catName=params['catInfo']['filename'],
fitsPath=hdulist.properties['path'],
fitsName=hdulist.properties['filename'],
fitsFrame=params['fitsInfo']['frame'],
fields=list(sources.dtype.names),
objects=sources,
wcs=hdu.wcs
)
if params['detectParams']['use_filter']:
filter_min=params['detectParams']['filter_min']
filter_max=params['detectParams']['filter_max']
if params['detectParams']['filter_var']=='beta' or params['detectParams']['filter_var']=='fwhm':
            fvar=params['detectParams']['filter_var']
            invalid_mask=np.where((srcCat[fvar]<filter_min) | (srcCat[fvar]>filter_max))
srcCat['quality'][invalid_mask]=0
catalogId=catalog.getCatalogId(params['catInfo'])
user=core.active_users[id['userId']]
if not hasattr(user,'openCatalogs'):
user.openCatalogs={}
user.openCatalogs[catalogId]=srcCat
#saveResponse=catalog.saveCatalog(id,{
# 'path':srcCat.catPath,
# 'filename':srcCat.catName,
# 'fileType':'astro'
#})
saveResponse={}
sourceFields=['id','objType','x','y','quality']
if 'sourceFields' in params:
        sourceFields=params['sourceFields']
objects=np.array(srcCat.objects,copy=True)
objects=objects[sourceFields]
response={
'catResponse':{
'id':'catalog',
'path':srcCat.catPath,
'catalogId':catalogId,
'filename':srcCat.catName,
'name':srcCat.catName,
'fields':sourceFields,
'objects':objects.tolist(),
'coordType':'image',
'fileType':'astro'
},
'saveResponse':saveResponse
}
core.progress_log('Sending catalog... this may take a minute or two', id)
return response
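## Hedged usage sketch (the id dictionary comes from the session layer and
## the detection values below are placeholders, not recommended settings):
# response = findStars(id, {
#     'fitsInfo': {'fileId': file_id, 'frame': 1},
#     'detectParams': {'apertureType': 'radius', 'maxima_radius': 5,
#                      'maxima_sigma': 2.0, 'fit_method': 'gaussian',
#                      'auto_aperture': True, 'auto_thresh': True,
#                      'saturation_method': 'none', 'auto_binStruct': True,
#                      'auto_margin': True, 'use_filter': False},
#     'catInfo': {'path': '/tmp', 'filename': 'sources.cat'}
# })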
|
[
"os.remove",
"numpy.amin",
"os.walk",
"numpy.ones",
"os.path.isfile",
"astropyp.utils.core.progress_log",
"os.path.join",
"numpy.linspace",
"copy.deepcopy",
"math.ceil",
"astropyp.utils.core.AstropypError",
"astropyp.utils.core.check4key",
"os.makedirs",
"os.path.isdir",
"astropyp.utils.core.respond",
"math.floor",
"numpy.amax",
"astropy.wcs.WCS",
"astro_pypelines.pypelines.fitsviewer.png.buildImageTileC",
"numpy.where",
"numpy.array",
"os.path.relpath"
] |
[((828, 928), 'os.path.join', 'os.path.join', (["core.active_users[id['userId']].stored_dirs['session'][id['sessionId']]", 'hash_dir'], {}), "(core.active_users[id['userId']].stored_dirs['session'][id[\n 'sessionId']], hash_dir)\n", (840, 928), False, 'import os\n'), ((934, 974), 'os.path.relpath', 'os.path.relpath', (['tempPath', 'core.ROOT_DIR'], {}), '(tempPath, core.ROOT_DIR)\n', (949, 974), False, 'import os\n'), ((1050, 1083), 'os.path.join', 'os.path.join', (['core.ROOT_DIR', 'path'], {}), '(core.ROOT_DIR, path)\n', (1062, 1083), False, 'import os\n'), ((11964, 12006), 'astropyp.utils.core.progress_log', 'core.progress_log', (['"""Loading images..."""', 'id'], {}), "('Loading images...', id)\n", (11981, 12006), True, 'import astropyp.utils.core as core\n'), ((12660, 12750), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['fileId', 'frame', 'colormap', 'canvasWidth', 'canvasHeight']"], {}), "(params, ['fileId', 'frame', 'colormap', 'canvasWidth',\n 'canvasHeight'])\n", (12674, 12750), True, 'import astropyp.utils.core as core\n'), ((13493, 13568), 'math.ceil', 'math.ceil', (["(hdu.properties['width'] / params['tile_width'] * params['scale'])"], {}), "(hdu.properties['width'] / params['tile_width'] * params['scale'])\n", (13502, 13568), False, 'import math\n'), ((13584, 13661), 'math.ceil', 'math.ceil', (["(hdu.properties['height'] / params['tile_height'] * params['scale'])"], {}), "(hdu.properties['height'] / params['tile_height'] * params['scale'])\n", (13593, 13661), False, 'import math\n'), ((13662, 13728), 'astropyp.utils.core.respond', 'core.respond', (['id', "{'id': 'image properties', 'properties': params}"], {}), "(id, {'id': 'image properties', 'properties': params})\n", (13674, 13728), True, 'import astropyp.utils.core as core\n'), ((15097, 15206), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['frame', 'x', 'y', 'tile_width', 'tile_height', 'scale', 'colormap',\n 'filetype']"], {}), "(params, ['frame', 'x', 'y', 'tile_width', 'tile_height',\n 'scale', 'colormap', 'filetype'])\n", (15111, 15206), True, 'import astropyp.utils.core as core\n'), ((15457, 15478), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (15470, 15478), False, 'import copy\n'), ((15618, 15661), 'os.path.join', 'os.path.join', (['path', "(tile['tileId'] + '.png')"], {}), "(path, tile['tileId'] + '.png')\n", (15630, 15661), False, 'import os\n'), ((15672, 15708), 'os.path.join', 'os.path.join', (['core.ROOT_DIR', 'pngName'], {}), '(core.ROOT_DIR, pngName)\n', (15684, 15708), False, 'import os\n'), ((18729, 18772), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['frame', 'fileId']"], {}), "(params, ['frame', 'fileId'])\n", (18743, 18772), True, 'import astropyp.utils.core as core\n'), ((19273, 19326), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['frame', 'fileId', 'x', 'y']"], {}), "(params, ['frame', 'fileId', 'x', 'y'])\n", (19287, 19326), True, 'import astropyp.utils.core as core\n'), ((19933, 20019), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['fileId', 'frame', 'x', 'y', 'tile_width', 'tile_height']"], {}), "(params, ['fileId', 'frame', 'x', 'y', 'tile_width',\n 'tile_height'])\n", (19947, 20019), True, 'import astropyp.utils.core as core\n'), ((21515, 21565), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['reference', 'openFiles']"], {}), "(params, ['reference', 'openFiles'])\n", (21529, 21565), True, 'import astropyp.utils.core as core\n'), ((24959, 25022), 'astropyp.utils.core.check4key', 'core.check4key', (['params', "['fitsInfo', 'detectParams', 'catInfo']"], {}), "(params, ['fitsInfo', 'detectParams', 'catInfo'])\n", (24973, 25022), True, 'import astropyp.utils.core as core\n'), ((25024, 25079), 'astropyp.utils.core.check4key', 'core.check4key', (["params['catInfo']", "['path', 'filename']"], {}), "(params['catInfo'], ['path', 'filename'])\n", (25038, 25079), True, 'import astropyp.utils.core as core\n'), ((26882, 26917), 'numpy.array', 'np.array', (['srcCat.objects'], {'copy': '(True)'}), '(srcCat.objects, copy=True)\n', (26890, 26917), True, 'import numpy as np\n'), ((27358, 27431), 'astropyp.utils.core.progress_log', 'core.progress_log', (['"""Sending catalog... this may take a minute or two"""', 'id'], {}), "('Sending catalog... this may take a minute or two', id)\n", (27375, 27431), True, 'import astropyp.utils.core as core\n'), ((1100, 1121), 'os.makedirs', 'os.makedirs', (['fullpath'], {}), '(fullpath)\n', (1111, 1121), False, 'import os\n'), ((11664, 11742), 'astropyp.utils.core.respond', 'core.respond', (['id', "{'id': 'update fits file', 'properties': hdulist.properties}"], {}), "(id, {'id': 'update fits file', 'properties': hdulist.properties})\n", (11676, 11742), True, 'import astropyp.utils.core as core\n'), ((13811, 13828), 'os.walk', 'os.walk', (['fullpath'], {}), '(fullpath)\n', (13818, 13828), False, 'import os\n'), ((16107, 16134), 'math.floor', 'math.floor', (["params['scale']"], {}), "(params['scale'])\n", (16117, 16134), False, 'import math\n'), ((16676, 16700), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (16690, 16700), False, 'import os\n'), ((22019, 22089), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Reference image has no wcs information available"""'], {}), "('Reference image has no wcs information available')\n", (22037, 22089), True, 'import astropyp.utils.core as core\n'), ((738, 814), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Missing parameters to generate path for temp directory"""'], {}), "('Missing parameters to generate path for temp directory')\n", (756, 814), True, 'import astropyp.utils.core as core\n'), ((12857, 12908), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in fits image"""'], {}), "('Frame not found in fits image')\n", (12875, 12908), True, 'import astropyp.utils.core as core\n'), ((13128, 13155), 'math.floor', 'math.floor', (["params['scale']"], {}), "(params['scale'])\n", (13138, 13155), False, 'import math\n'), ((14311, 14366), 'math.floor', 'math.floor', (["(x0 * params['scale'] / params['tile_width'])"], {}), "(x0 * params['scale'] / params['tile_width'])\n", (14321, 14366), False, 'import math\n'), ((14402, 14456), 'math.ceil', 'math.ceil', (["(xf * params['scale'] / params['tile_width'])"], {}), "(xf * params['scale'] / params['tile_width'])\n", (14411, 14456), False, 'import math\n'), ((14476, 14532), 'math.floor', 'math.floor', (["(y0 * params['scale'] / params['tile_height'])"], {}), "(y0 * params['scale'] / params['tile_height'])\n", (14486, 14532), False, 'import math\n'), ((14565, 14620), 'math.ceil', 'math.ceil', (["(yf * params['scale'] / params['tile_height'])"], {}), "(yf * params['scale'] / params['tile_height'])\n", (14574, 14620), False, 'import math\n'), ((15310, 15361), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in fits image"""'], {}), "('Frame not found in fits image')\n", (15328, 15361), True, 'import astropyp.utils.core as core\n'), ((18432, 18482), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Empty dataset sent to server"""'], {}), "('Empty dataset sent to server')\n", (18450, 18482), True, 'import astropyp.utils.core as core\n'), ((18896, 18946), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in fits file"""'], {}), "('Frame not found in fits file')\n", (18914, 18946), True, 'import astropyp.utils.core as core\n'), ((19537, 19586), 'numpy.array', 'np.array', (["[[params['x'], params['y']]]", 'np.float_'], {}), "([[params['x'], params['y']]], np.float_)\n", (19545, 19586), True, 'import numpy as np\n'), ((20125, 20176), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in fits image"""'], {}), "('Frame not found in fits image')\n", (20143, 20176), True, 'import astropyp.utils.core as core\n'), ((20934, 20977), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Fit does not converge"""'], {}), "('Fit does not converge')\n", (20952, 20977), True, 'import astropyp.utils.core as core\n'), ((21313, 21376), 'numpy.array', 'np.array', (["[[response['x_mean'], response['y_mean']]]", 'np.float_'], {}), "([[response['x_mean'], response['y_mean']]], np.float_)\n", (21321, 21376), True, 'import numpy as np\n'), ((21713, 21774), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in reference fits image"""'], {}), "('Frame not found in reference fits image')\n", (21731, 21774), True, 'import astropyp.utils.core as core\n'), ((21853, 21944), 'numpy.array', 'np.array', (["[[params['reference']['xCenter'], params['reference']['yCenter']]]", 'np.float_'], {}), "([[params['reference']['xCenter'], params['reference']['yCenter']]],\n np.float_)\n", (21861, 21944), True, 'import numpy as np\n'), ((25217, 25268), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in fits image"""'], {}), "('Frame not found in fits image')\n", (25235, 25268), True, 'import astropyp.utils.core as core\n'), ((26182, 26314), 'numpy.where', 'np.where', (["(srcCat[params['detectParams']['filter_var'] < filter_min] | srcCat[params[\n 'detectParams']['filter_var'] > filter_max])"], {}), "(srcCat[params['detectParams']['filter_var'] < filter_min] | srcCat\n [params['detectParams']['filter_var'] > filter_max])\n", (26190, 26314), True, 'import numpy as np\n'), ((1157, 1180), 'os.path.isdir', 'os.path.isdir', (['fullpath'], {}), '(fullpath)\n', (1170, 1180), False, 'import os\n'), ((1200, 1253), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Could not access temp directory"""'], {}), "('Could not access temp directory')\n", (1218, 1253), True, 'import astropyp.utils.core as core\n'), ((2569, 2586), 'numpy.amin', 'np.amin', (['hdu.data'], {}), '(hdu.data)\n', (2576, 2586), True, 'import numpy as np\n'), ((2628, 2645), 'numpy.amax', 'np.amax', (['hdu.data'], {}), '(hdu.data)\n', (2635, 2645), True, 'import numpy as np\n'), ((2680, 2701), 'astropy.wcs.WCS', 'pywcs.WCS', (['hdu.header'], {}), '(hdu.header)\n', (2689, 2701), True, 'import astropy.wcs as pywcs\n'), ((8683, 8731), 'os.path.join', 'os.path.join', (["params['path']", "params['filename']"], {}), "(params['path'], params['filename'])\n", (8695, 8731), False, 'import os\n'), ((13957, 13972), 'os.remove', 'os.remove', (['root'], {}), '(root)\n', (13966, 13972), False, 'import os\n'), ((22284, 22335), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Frame not found in fits image"""'], {}), "('Frame not found in fits image')\n", (22302, 22335), True, 'import astropyp.utils.core as core\n'), ((22422, 22454), 'numpy.array', 'np.array', (['[[ra, dec]]', 'np.float_'], {}), '([[ra, dec]], np.float_)\n', (22430, 22454), True, 'import numpy as np\n'), ((23548, 23623), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (["('Invalid apertureType:' + detect_params['apertureType'])"], {}), "('Invalid apertureType:' + detect_params['apertureType'])\n", (23566, 23623), True, 'import astropyp.utils.core as core\n'), ((24118, 24190), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Only aperture_radii=\'px\' is supported at this time"""'], {}), '("Only aperture_radii=\'px\' is supported at this time")\n', (24136, 24190), True, 'import astropyp.utils.core as core\n'), ((24597, 24644), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Invalid saturation method"""'], {}), "('Invalid saturation method')\n", (24615, 24644), True, 'import astropyp.utils.core as core\n'), ((2094, 2164), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Tried to load image properties for non-image hdu"""'], {}), "('Tried to load image properties for non-image hdu')\n", (2112, 2164), True, 'import astropyp.utils.core as core\n'), ((13887, 13911), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (13899, 13911), False, 'import os\n'), ((16969, 17012), 'numpy.ones', 'np.ones', (["(params['scale'], params['scale'])"], {}), "((params['scale'], params['scale']))\n", (16976, 17012), True, 'import numpy as np\n'), ((17128, 17171), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', "tile['tile_width']"], {}), "(xmin, xmax, tile['tile_width'])\n", (17139, 17171), True, 'import numpy as np\n'), ((17187, 17231), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', "tile['tile_height']"], {}), "(ymin, ymax, tile['tile_height'])\n", (17198, 17231), True, 'import numpy as np\n'), ((17247, 17269), 'numpy.array', 'np.array', (['xIdx', 'np.int'], {}), '(xIdx, np.int)\n', (17255, 17269), True, 'import numpy as np\n'), ((17456, 17506), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Invalid scale sent to server"""'], {}), "('Invalid scale sent to server')\n", (17474, 17506), True, 'import astropyp.utils.core as core\n'), ((17718, 17920), 'astro_pypelines.pypelines.fitsviewer.png.buildImageTileC', 'astro_pypelines.pypelines.fitsviewer.png.buildImageTileC', (['filename', "tile['colormap']['dataMin']", "tile['colormap']['dataMax']", "tile['colormap']['scale']", "tile['colormap']['colorFunc']", 'tileData'], {}), "(filename, tile[\n 'colormap']['dataMin'], tile['colormap']['dataMax'], tile['colormap'][\n 'scale'], tile['colormap']['colorFunc'], tileData)\n", (17774, 17920), False, 'import astro_pypelines\n'), ((17966, 18007), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Unable to load tile"""'], {}), "('Unable to load tile')\n", (17984, 18007), True, 'import astropyp.utils.core as core\n'), ((18356, 18399), 'astropyp.utils.core.AstropypError', 'core.AstropypError', (['"""Unrecognized filetype"""'], {}), "('Unrecognized filetype')\n", (18374, 18399), True, 'import astropyp.utils.core as core\n'), ((17297, 17319), 'numpy.array', 'np.array', (['yIdx', 'np.int'], {}), '(yIdx, np.int)\n', (17305, 17319), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.constants import gravitational_constant as G
from scipy.integrate import odeint
import matplotlib.pyplot as plt
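# Earth's mass and radius; v0 below is the escape velocity sqrt(2*G*M/R)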
M = 5.975e24 # [kg]
R = 6.378e6 # [m]
v0 = np.sqrt(2*G*M/R) # [m/s]
T = 10000.0 # [s]
steps = 1000
t = np.linspace(0.0, T, steps)
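# Right-hand side of the first-order system y = (x1, x2, v1, v2):
# dx/dt = v, dv/dt = -G*M*x/|x|^3 (Newtonian point-mass gravity)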
def rhs(y, t):
    x, v = y[:2], y[2:]
    a = -G*M*x/np.linalg.norm(x)**3
    res = np.zeros(4)
    res[:2] = v
    res[2:] = a
return res
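# draw the Earth as a circle of radius R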
psi = np.linspace(0, 2*np.pi, 1000)
plt.plot(R*np.cos(psi), R*np.sin(psi), color="blue", label="earth")
throws = 10
label_set = False
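# integrate one trajectory per launch angle; attach legend labels only on the first pass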
for theta in np.linspace(0, np.pi/2, throws):
y0 = np.array([0.0, R, v0*np.cos(theta), v0*np.sin(theta)])
ys = odeint(rhs, y0, t)
x1, x2 = ys[:, 0], ys[:, 1]
if label_set:
plt.plot(x1, x2, color="red")
else:
plt.plot(x1, x2, color="red", label="trajectories")
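    # re-integrate the same initial conditions backwards in time
    # (odeint accepts a decreasing time array)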
y0 = np.array([0.0, R, v0*np.cos(theta), v0*np.sin(theta)])
ys = odeint(rhs, y0, -t)
x1, x2 = ys[:, 0], ys[:, 1]
if label_set:
plt.plot(x1, x2, color="green")
else:
plt.plot(x1, x2, color="green", label="trajectories back in time")
label_set = True
plt.xlabel("x")
plt.ylabel("y")
plt.title("trajectories of bodies with escape velocity")
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.sin",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((186, 208), 'numpy.sqrt', 'np.sqrt', (['(2 * G * M / R)'], {}), '(2 * G * M / R)\n', (193, 208), True, 'import numpy as np\n'), ((247, 273), 'numpy.linspace', 'np.linspace', (['(0.0)', 'T', 'steps'], {}), '(0.0, T, steps)\n', (258, 273), True, 'import numpy as np\n'), ((420, 451), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (431, 451), True, 'import numpy as np\n'), ((564, 597), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', 'throws'], {}), '(0, np.pi / 2, throws)\n', (575, 597), True, 'import numpy as np\n'), ((1141, 1156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1151, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1167, 1172), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1229), 'matplotlib.pyplot.title', 'plt.title', (['"""trajectories of bodies with escape velocity"""'], {}), "('trajectories of bodies with escape velocity')\n", (1182, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1242), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1240, 1242), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1251, 1253), True, 'import matplotlib.pyplot as plt\n'), ((360, 371), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (368, 371), True, 'import numpy as np\n'), ((670, 688), 'scipy.integrate.odeint', 'odeint', (['rhs', 'y0', 't'], {}), '(rhs, y0, t)\n', (676, 688), False, 'from scipy.integrate import odeint\n'), ((920, 939), 'scipy.integrate.odeint', 'odeint', (['rhs', 'y0', '(-t)'], {}), '(rhs, y0, -t)\n', (926, 939), False, 'from scipy.integrate import odeint\n'), ((461, 472), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (467, 472), True, 'import numpy as np\n'), ((476, 487), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (482, 487), True, 'import numpy as np\n'), ((747, 776), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'x2'], {'color': '"""red"""'}), "(x1, x2, color='red')\n", (755, 776), True, 'import matplotlib.pyplot as plt\n'), ((795, 846), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'x2'], {'color': '"""red"""', 'label': '"""trajectories"""'}), "(x1, x2, color='red', label='trajectories')\n", (803, 846), True, 'import matplotlib.pyplot as plt\n'), ((998, 1029), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'x2'], {'color': '"""green"""'}), "(x1, x2, color='green')\n", (1006, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1114), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'x2'], {'color': '"""green"""', 'label': '"""trajectories back in time"""'}), "(x1, x2, color='green', label='trajectories back in time')\n", (1056, 1114), True, 'import matplotlib.pyplot as plt\n'), ((327, 344), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (341, 344), True, 'import numpy as np\n'), ((627, 640), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (633, 640), True, 'import numpy as np\n'), ((645, 658), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (651, 658), True, 'import numpy as np\n'), ((877, 890), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (883, 890), True, 'import numpy as np\n'), ((895, 908), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (901, 908), True, 'import numpy as np\n')]
|