# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_spline_description2118
except ImportError:
bt_spline_description2118 = sys.modules[
"onshape_client.oas.models.bt_spline_description2118"
]
try:
from onshape_client.oas.models import bt_surface_description1564
except ImportError:
bt_surface_description1564 = sys.modules[
"onshape_client.oas.models.bt_surface_description1564"
]
try:
from onshape_client.oas.models import bt_sweep_description1473_all_of
except ImportError:
bt_sweep_description1473_all_of = sys.modules[
"onshape_client.oas.models.bt_sweep_description1473_all_of"
]
try:
from onshape_client.oas.models import bt_vector3d389
except ImportError:
bt_vector3d389 = sys.modules["onshape_client.oas.models.bt_vector3d389"]
class BTSweepDescription1473(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an
          allowed value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("type",): {
"PLANE": "PLANE",
"CYLINDER": "CYLINDER",
"CONE": "CONE",
"SPHERE": "SPHERE",
"TORUS": "TORUS",
"SPUN": "SPUN",
"SWEEP": "SWEEP",
"OFFSET": "OFFSET",
"BLEND": "BLEND",
"BSURFACE": "BSURFACE",
"OTHER": "OTHER",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
        This must be a class method so a model may have properties that are
        of type self; this ensures that we don't create a cyclic import.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"direction": (bt_vector3d389.BTVector3d389,), # noqa: E501
"profile": (
bt_spline_description2118.BTSplineDescription2118,
), # noqa: E501
"type": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"direction": "direction", # noqa: E501
"profile": "profile", # noqa: E501
"type": "type", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_sweep_description1473.BTSweepDescription1473 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
direction (bt_vector3d389.BTVector3d389): [optional] # noqa: E501
profile (bt_spline_description2118.BTSplineDescription2118): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
bt_surface_description1564.BTSurfaceDescription1564,
bt_sweep_description1473_all_of.BTSweepDescription1473AllOf,
],
"oneOf": [],
}
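# Illustrative usage sketch (not part of the generated client): the composed model is
# built from plain keyword arguments, as described in the __init__ docstring above.
# Keyword names follow attribute_map; "SWEEP" is one of allowed_values[("type",)],
# while the btType string below is only a hypothetical discriminator value.
def _example_build_sweep_description():
    sweep = BTSweepDescription1473(
        bt_type="BTSweepDescription-1473",  # hypothetical btType value
        type="SWEEP",  # must be one of the allowed enum values above
    )
    # Properties set via kwargs are stored on the composed instances and readable back.
    return sweep.type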
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras.engine import base_layer as keras_base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.legacy_tf_layers import base as base_layers
from tensorflow.python.keras.legacy_tf_layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
self.assertEqual(layer.trainable_variables, [])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
# updates, losses only supported in GRAPH mode
self.assertEqual(layer.updates, [])
self.assertEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInt64Layer(self):
layer = base_layers.Layer(name='my_layer', dtype='int64')
layer.add_variable('my_var', [2, 2])
self.assertEqual(layer.name, 'my_layer')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testKerasStyleAddWeight(self):
keras_layer = keras_base_layer.Layer(name='keras_layer')
with ops.name_scope('foo', skip_on_eager=False):
keras_variable = keras_layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(keras_variable.name, 'foo/my_var:0')
with ops.name_scope('baz', skip_on_eager=False):
old_style_layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = old_style_layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
with base_layers.keras_style_scope():
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
with ops.name_scope('bar', skip_on_eager=False):
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'bar/my_var:0')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertEqual(layer.variables, [variable])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertEqual(layer.variables, [variable, variable_2])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [variable_2])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
_ = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
added_variable = [False]
# Test that sync `ON_READ` variables are defaulted to be non-trainable.
variable_3 = layer.add_variable(
'sync_on_read_var', [2, 2],
initializer=init_ops.zeros_initializer(),
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3])
@def_function.function
def function_adds_weight():
if not added_variable[0]:
layer.add_variable(
'reg_var_from_function', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
added_variable[0] = True
function_adds_weight()
self.assertEqual(len(layer.losses), 2)
def testInvalidTrainableSynchronizationCombination(self):
layer = base_layers.Layer(name='my_layer')
with self.assertRaisesRegexp(
ValueError, 'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.'):
_ = layer.add_variable(
'v', [2, 2],
initializer=init_ops.zeros_initializer(),
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
@test_util.run_deprecated_v1
def testReusePartitionedVariablesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
for reuse in [False, True]:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
partitioner=partitioner,
reuse=reuse):
layer = base_layers.Layer(name='my_layer')
_ = layer.add_variable(
'reg_part_var', [4, 4],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op only supported in GRAPH mode.
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
def call(self, inputs):
return inputs
inputs = random_ops.random_uniform((5,))
default_layer = PrivateLayer()
_ = default_layer.apply(inputs)
self.assertEqual(default_layer._scope.name, 'private_layer')
default_layer1 = PrivateLayer()
default_layer1.apply(inputs)
self.assertEqual(default_layer1._scope.name, 'private_layer_1')
my_layer = PrivateLayer(name='my_layer')
my_layer.apply(inputs)
self.assertEqual(my_layer._scope.name, 'my_layer')
my_layer1 = PrivateLayer(name='my_layer')
my_layer1.apply(inputs)
self.assertEqual(my_layer1._scope.name, 'my_layer_1')
my_layer2 = PrivateLayer(name='my_layer')
my_layer2.apply(inputs)
self.assertEqual(my_layer2._scope.name, 'my_layer_2')
# Name scope shouldn't affect names.
with ops.name_scope('some_name_scope'):
default_layer2 = PrivateLayer()
default_layer2.apply(inputs)
self.assertEqual(default_layer2._scope.name, 'private_layer_2')
my_layer3 = PrivateLayer(name='my_layer')
my_layer3.apply(inputs)
self.assertEqual(my_layer3._scope.name, 'my_layer_3')
other_layer = PrivateLayer(name='other_layer')
other_layer.apply(inputs)
self.assertEqual(other_layer._scope.name, 'other_layer')
# Variable scope gets added to scope names.
with variable_scope.variable_scope('var_scope'):
default_layer_scoped = PrivateLayer()
default_layer_scoped.apply(inputs)
self.assertEqual(default_layer_scoped._scope.name,
'var_scope/private_layer')
my_layer_scoped = PrivateLayer(name='my_layer')
my_layer_scoped.apply(inputs)
self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
my_layer_scoped1 = PrivateLayer(name='my_layer')
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
layer.apply(constant_op.constant([1]))
# Note that we re-create the layer since in Eager mode, input spec checks
# only happen on first call.
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
layer.apply(constant_op.constant([1]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
layer.apply(constant_op.constant([[[1], [2]]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
layer.apply(constant_op.constant(1, dtype=dtypes.int32))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected axis'):
layer.apply(constant_op.constant([1, 2, 3]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1, 2]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = input_spec.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected shape'):
layer.apply(constant_op.constant([[1, 2]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
layer.apply(constant_op.constant(1))
# Works
if not context.executing_eagerly():
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = core_layers.Dense(16)
with self.assertRaises(ValueError):
dense.count_params()
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
def call(self, inputs):
return {'l' + key: inputs[key] for key in inputs}
layer = DictLayer()
if context.executing_eagerly():
i1 = constant_op.constant(3)
i2 = constant_op.constant(4.0)
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
self.assertEqual(3, result['label'].numpy())
self.assertEqual(4.0, result['logits'].numpy())
else:
i1 = array_ops.placeholder('int32')
i2 = array_ops.placeholder('float32')
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
@test_util.run_deprecated_v1
def testActivityRegularizer(self):
regularizer = math_ops.reduce_sum
layer = base_layers.Layer(activity_regularizer=regularizer)
x = array_ops.placeholder('int32')
layer.apply(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
def testNameScopeIsConsistentWithVariableScope(self):
# Github issue 13429.
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return math_ops.multiply(inputs, self.my_var, name='my_op')
def _gen_layer(x, name=None):
layer = MyLayer(name=name)
out = layer.apply(x)
return layer, out
# unnamed layer
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x)
layer1, op1 = _gen_layer(op)
layer2, op2 = _gen_layer(op1)
self.assertEqual(layer.my_var.name, 'my_layer/my_var:0')
self.assertEqual(op.name, 'my_layer/my_op:0')
self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0')
self.assertEqual(op1.name, 'my_layer_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0')
self.assertEqual(op2.name, 'my_layer_2/my_op:0')
# name starts from zero
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name')
layer1, op1 = _gen_layer(op, name='name_1')
layer2, op2 = _gen_layer(op1, name='name_2')
self.assertEqual(layer.my_var.name, 'name/my_var:0')
self.assertEqual(op.name, 'name/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_1/my_var:0')
self.assertEqual(op1.name, 'name_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_2/my_var:0')
self.assertEqual(op2.name, 'name_2/my_op:0')
# name starts from one
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name_1')
layer1, op1 = _gen_layer(op, name='name_2')
layer2, op2 = _gen_layer(op1, name='name_3')
self.assertEqual(layer.my_var.name, 'name_1/my_var:0')
self.assertEqual(op.name, 'name_1/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_2/my_var:0')
self.assertEqual(op1.name, 'name_2/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_3/my_var:0')
self.assertEqual(op2.name, 'name_3/my_op:0')
def testVariablesAreLiftedFromFunctionBuildingGraphs(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return inputs
outer_graph = ops.get_default_graph()
function_building_graph = ops.Graph()
function_building_graph._building_function = True
with outer_graph.as_default():
with function_building_graph.as_default():
layer = MyLayer()
# Create a variable by invoking build through __call__ and assert that
# it is both tracked and lifted into the outer graph.
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
layer.apply(inputs)
self.assertEqual(len(layer.variables), 1)
self.assertEqual(len(layer.trainable_variables), 1)
self.assertEqual(layer.variables[0].graph, outer_graph)
@test_util.run_deprecated_v1
def testGetUpdateFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(None)), 1)
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
# Call same layer on new input, creating one more conditional update
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(None)), 1)
# Check that we are successfully filtering out irrelevant updates
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
@test_util.run_deprecated_v1
def testGetLossesFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_loss(self.a)
self.built = True
def call(self, inputs):
self.add_loss(inputs, inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(None)), 1)
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
# Call same layer on new input, creating one more conditional loss
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(None)), 1)
# Check that we are successfully filtering out irrelevant losses
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
class IdentityLayer(base_layers.Layer):
"""A layer returns the identity of it's input."""
def call(self, inputs):
return inputs
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class DTypeTest(test.TestCase, parameterized.TestCase):
def _const(self, dtype):
return array_ops.constant(1, dtype=dtype)
def test_dtype_inferred_from_input(self):
# Test with Tensor input
layer = IdentityLayer()
self.assertIsNone(layer.dtype)
layer(self._const('float64'))
self.assertEqual(layer.dtype, 'float64')
# Test with Numpy input
layer = IdentityLayer()
self.assertIsNone(layer.dtype)
layer(np.array(1., dtype='float64'))
self.assertEqual(layer.dtype, 'float64')
# Test with integer input
layer = IdentityLayer()
self.assertIsNone(layer.dtype)
layer(self._const('int32'))
self.assertEqual(layer.dtype, 'int32')
# Test layer dtype doesn't change when passed a new dtype
layer = IdentityLayer()
self.assertIsNone(layer.dtype)
layer(self._const('float64'))
self.assertEqual(layer.dtype, 'float64')
layer(self._const('float16'))
self.assertEqual(layer.dtype, 'float64')
# Test layer dtype inferred from first input
layer = IdentityLayer()
layer([self._const('float32'), self._const('float64')])
self.assertEqual(layer.dtype, 'float32')
def test_passing_dtype_to_constructor(self):
layer = IdentityLayer(dtype='float64')
layer(self._const('float32'))
self.assertEqual(layer.dtype, 'float64')
layer = IdentityLayer(dtype='int32')
layer(self._const('float32'))
self.assertEqual(layer.dtype, 'int32')
layer = IdentityLayer(dtype=dtypes.float64)
layer(self._const('float32'))
self.assertEqual(layer.dtype, 'float64')
def test_inputs_not_casted(self):
layer = IdentityLayer(dtype='float32')
self.assertEqual(layer(self._const('float64')).dtype, 'float64')
if __name__ == '__main__':
test.main()
###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import gc
import numpy as np
import sklearn as sk
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence, Independent
def gaussian_log_likelihood(mu_2d, data_2d, obsrv_std, indices = None):
n_data_points = mu_2d.size()[-1]
if n_data_points > 0:
gaussian = Independent(Normal(loc = mu_2d, scale = obsrv_std.repeat(n_data_points)), 1)
log_prob = gaussian.log_prob(data_2d)
log_prob = log_prob / n_data_points
else:
log_prob = torch.zeros([1]).to(get_device(data_2d)).squeeze()
return log_prob
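# Illustrative check (added for clarity; not in the original source): with a flat
# [n_traj, n_points] layout and a one-element observation std, the function returns
# one per-point-averaged log-density per row.
def _example_gaussian_log_likelihood():
	mu_2d = torch.zeros(4, 10)           # 4 flattened trajectories, 10 observed points each
	data_2d = 0.1 * torch.randn(4, 10)   # toy observations around the mean
	obsrv_std = torch.tensor([0.1])      # repeated across the 10 points inside the function
	log_prob = gaussian_log_likelihood(mu_2d, data_2d, obsrv_std)
	return log_prob.shape                # torch.Size([4])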
def poisson_log_likelihood(masked_log_lambdas, masked_data, indices, int_lambdas):
	# masked_log_lambdas and masked_data are 1-D tensors holding the observed values
	# for a single (sample, trajectory, dimension) slice; int_lambdas[indices] is the
	# corresponding integrated intensity.
n_data_points = masked_data.size()[-1]
if n_data_points > 0:
log_prob = torch.sum(masked_log_lambdas) - int_lambdas[indices]
#log_prob = log_prob / n_data_points
else:
log_prob = torch.zeros([1]).to(get_device(masked_data)).squeeze()
return log_prob
def compute_binary_CE_loss(label_predictions, mortality_label):
#print("Computing binary classification loss: compute_CE_loss")
mortality_label = mortality_label.reshape(-1)
if len(label_predictions.size()) == 1:
label_predictions = label_predictions.unsqueeze(0)
n_traj_samples = label_predictions.size(0)
label_predictions = label_predictions.reshape(n_traj_samples, -1)
idx_not_nan = ~torch.isnan(mortality_label)
	if torch.sum(idx_not_nan) == 0:
		print("All labels are NaNs!")
		ce_loss = torch.tensor(0.).to(get_device(mortality_label))
		return ce_loss
label_predictions = label_predictions[:,idx_not_nan]
mortality_label = mortality_label[idx_not_nan]
if torch.sum(mortality_label == 0.) == 0 or torch.sum(mortality_label == 1.) == 0:
print("Warning: all examples in a batch belong to the same class -- please increase the batch size.")
assert(not torch.isnan(label_predictions).any())
assert(not torch.isnan(mortality_label).any())
# For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
mortality_label = mortality_label.repeat(n_traj_samples, 1)
ce_loss = nn.BCEWithLogitsLoss()(label_predictions, mortality_label)
# divide by number of patients in a batch
ce_loss = ce_loss / n_traj_samples
return ce_loss
def compute_multiclass_CE_loss(label_predictions, true_label, mask):
#print("Computing multi-class classification loss: compute_multiclass_CE_loss")
if (len(label_predictions.size()) == 3):
label_predictions = label_predictions.unsqueeze(0)
n_traj_samples, n_traj, n_tp, n_dims = label_predictions.size()
# assert(not torch.isnan(label_predictions).any())
# assert(not torch.isnan(true_label).any())
# For each trajectory, we get n_traj_samples samples from z0 -- compute loss on all of them
true_label = true_label.repeat(n_traj_samples, 1, 1)
label_predictions = label_predictions.reshape(n_traj_samples * n_traj * n_tp, n_dims)
true_label = true_label.reshape(n_traj_samples * n_traj * n_tp, n_dims)
# choose time points with at least one measurement
mask = torch.sum(mask, -1) > 0
# repeat the mask for each label to mark that the label for this time point is present
pred_mask = mask.repeat(n_dims, 1,1).permute(1,2,0)
label_mask = mask
pred_mask = pred_mask.repeat(n_traj_samples,1,1,1)
label_mask = label_mask.repeat(n_traj_samples,1,1,1)
pred_mask = pred_mask.reshape(n_traj_samples * n_traj * n_tp, n_dims)
label_mask = label_mask.reshape(n_traj_samples * n_traj * n_tp, 1)
if (label_predictions.size(-1) > 1) and (true_label.size(-1) > 1):
assert(label_predictions.size(-1) == true_label.size(-1))
# targets are in one-hot encoding -- convert to indices
_, true_label = true_label.max(-1)
res = []
for i in range(true_label.size(0)):
pred_masked = torch.masked_select(label_predictions[i], pred_mask[i].bool())
labels = torch.masked_select(true_label[i], label_mask[i].bool())
pred_masked = pred_masked.reshape(-1, n_dims)
if (len(labels) == 0):
continue
ce_loss = nn.CrossEntropyLoss()(pred_masked, labels.long())
res.append(ce_loss)
ce_loss = torch.stack(res, 0).to(get_device(label_predictions))
ce_loss = torch.mean(ce_loss)
# # divide by number of patients in a batch
# ce_loss = ce_loss / n_traj_samples
return ce_loss
def compute_masked_likelihood(mu, data, mask, likelihood_func):
	# Compute the likelihood per patient and per attribute so that we don't prioritize patients with more measurements
n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
res = []
for i in range(n_traj_samples):
for k in range(n_traj):
for j in range(n_dims):
data_masked = torch.masked_select(data[i,k,:,j], mask[i,k,:,j].bool())
#assert(torch.sum(data_masked == 0.) < 10)
mu_masked = torch.masked_select(mu[i,k,:,j], mask[i,k,:,j].bool())
log_prob = likelihood_func(mu_masked, data_masked, indices = (i,k,j))
res.append(log_prob)
# shape: [n_traj*n_traj_samples, 1]
res = torch.stack(res, 0).to(get_device(data))
res = res.reshape((n_traj_samples, n_traj, n_dims))
# Take mean over the number of dimensions
res = torch.mean(res, -1) # !!!!!!!!!!! changed from sum to mean
res = res.transpose(0,1)
return res
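# Illustrative usage sketch (not from the original source): score each
# (trajectory, dimension) slice with a toy likelihood under a partial observation
# mask, following the [n_traj_samples, n_traj, n_tp, n_dims] convention used above.
def _example_compute_masked_likelihood():
	mu = torch.zeros(1, 2, 5, 3)    # 1 z0 sample, 2 trajectories, 5 time points, 3 dims
	data = torch.randn(1, 2, 5, 3)
	mask = torch.ones(1, 2, 5, 3)
	mask[:, :, ::2, :] = 0.         # pretend every other time point is unobserved
	toy_ll = lambda mu_m, data_m, indices: -torch.mean((mu_m - data_m) ** 2)
	res = compute_masked_likelihood(mu, data, mask, toy_ll)
	return res.shape                # torch.Size([2, 1]) after the final transpose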
def masked_gaussian_log_density(mu, data, obsrv_std, mask = None):
# these cases are for plotting through plot_estim_density
if (len(mu.size()) == 3):
# add additional dimension for gp samples
mu = mu.unsqueeze(0)
if (len(data.size()) == 2):
# add additional dimension for gp samples and time step
data = data.unsqueeze(0).unsqueeze(2)
elif (len(data.size()) == 3):
# add additional dimension for gp samples
data = data.unsqueeze(0)
n_traj_samples, n_traj, n_timepoints, n_dims = mu.size()
assert(data.size()[-1] == n_dims)
# Shape after permutation: [n_traj, n_traj_samples, n_timepoints, n_dims]
if mask is None:
mu_flat = mu.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
data_flat = data.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
res = gaussian_log_likelihood(mu_flat, data_flat, obsrv_std)
res = res.reshape(n_traj_samples, n_traj).transpose(0,1)
else:
		# Compute the likelihood per patient so that we don't prioritize patients with more measurements
func = lambda mu, data, indices: gaussian_log_likelihood(mu, data, obsrv_std = obsrv_std, indices = indices)
res = compute_masked_likelihood(mu, data, mask, func)
return res
def mse(mu, data, indices = None):
n_data_points = mu.size()[-1]
if n_data_points > 0:
mse = nn.MSELoss()(mu, data)
else:
mse = torch.zeros([1]).to(get_device(data)).squeeze()
return mse
def compute_mse(mu, data, mask = None):
# these cases are for plotting through plot_estim_density
if (len(mu.size()) == 3):
# add additional dimension for gp samples
mu = mu.unsqueeze(0)
if (len(data.size()) == 2):
# add additional dimension for gp samples and time step
data = data.unsqueeze(0).unsqueeze(2)
elif (len(data.size()) == 3):
# add additional dimension for gp samples
data = data.unsqueeze(0)
n_traj_samples, n_traj, n_timepoints, n_dims = mu.size()
assert(data.size()[-1] == n_dims)
# Shape after permutation: [n_traj, n_traj_samples, n_timepoints, n_dims]
if mask is None:
mu_flat = mu.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
n_traj_samples, n_traj, n_timepoints, n_dims = data.size()
data_flat = data.reshape(n_traj_samples*n_traj, n_timepoints * n_dims)
res = mse(mu_flat, data_flat)
else:
		# Compute the likelihood per patient so that we don't prioritize patients with more measurements
res = compute_masked_likelihood(mu, data, mask, mse)
return res
def compute_poisson_proc_likelihood(truth, pred_y, info, mask = None):
# Compute Poisson likelihood
# https://math.stackexchange.com/questions/344487/log-likelihood-of-a-realization-of-a-poisson-process
# Sum log lambdas across all time points
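	# For an inhomogeneous Poisson process with intensity lambda(t) and observed event
	# times t_i, the log-likelihood is sum_i log(lambda(t_i)) - integral(lambda(t) dt);
	# both branches below compute this from info["log_lambda_y"] and info["int_lambda"].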
if mask is None:
poisson_log_l = torch.sum(info["log_lambda_y"], 2) - info["int_lambda"]
# Sum over data dims
poisson_log_l = torch.mean(poisson_log_l, -1)
else:
# Compute likelihood of the data under the predictions
truth_repeated = truth.repeat(pred_y.size(0), 1, 1, 1)
mask_repeated = mask.repeat(pred_y.size(0), 1, 1, 1)
		# Compute the likelihood per patient and per attribute so that we don't prioritize patients with more measurements
int_lambda = info["int_lambda"]
f = lambda log_lam, data, indices: poisson_log_likelihood(log_lam, data, indices, int_lambda)
poisson_log_l = compute_masked_likelihood(info["log_lambda_y"], truth_repeated, mask_repeated, f)
poisson_log_l = poisson_log_l.permute(1,0)
# Take mean over n_traj
#poisson_log_l = torch.mean(poisson_log_l, 1)
# poisson_log_l shape: [n_traj_samples, n_traj]
return poisson_log_l
#! /usr/bin/env python
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import nose
from nose.tools import assert_equal, assert_true
import numpy as np
import pymatgen as pmg
from sknano.core import rezero_array
from sknano.core.crystallography import Crystal2DLattice, Crystal3DLattice, \
Reciprocal2DLattice, Reciprocal3DLattice
# from sknano.core.atoms import Atom, Atoms, XAtom, XAtoms
from sknano.core.math import Point, transformation_matrix, zhat, \
rotation_matrix
from sknano.core.refdata import aCC, element_data
r_CC_vdw = element_data['C']['VanDerWaalsRadius']
def test1():
dlattice = Crystal2DLattice(a=4.0, b=8.0, gamma=120)
orientation_matrix = rotation_matrix(angle=np.pi/6, axis=zhat)
rlattice = \
Reciprocal2DLattice(a_star=dlattice.reciprocal_lattice.a_star,
b_star=dlattice.reciprocal_lattice.b_star,
gamma_star=dlattice.reciprocal_lattice.gamma_star,
orientation_matrix=orientation_matrix)
print('\ndlattice.matrix:\n{}'.format(dlattice.matrix))
print('\nrlattice.matrix:\n{}'.format(rlattice.matrix))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
print('\nrlattice.reciprocal_lattice.matrix:\n{}'.format(
rlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(dlattice.matrix,
rlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
rlattice.matrix))
def test2():
a = np.sqrt(3) * aCC
latt = Crystal2DLattice(a=a, b=a, gamma=120)
hexlatt = Crystal2DLattice.hexagonal(a)
assert_equal(latt, hexlatt)
def test3():
a = np.sqrt(3) * aCC
latt = Crystal2DLattice(a=a, b=a, gamma=90)
square = Crystal2DLattice.square(a)
assert_equal(latt, square)
def test4():
a = np.sqrt(3) * aCC
latt = Crystal2DLattice(a=a, b=a, gamma=60)
a1 = latt.a1
a2 = latt.a2
rotated_a1 = a1.copy()
rotated_a2 = a2.copy()
xfrm = transformation_matrix(angle=-np.pi / 6)
rotated_a1.rotate(transform_matrix=xfrm)
rotated_a2.rotate(transform_matrix=xfrm)
latt.rotate(angle=-np.pi / 6)
assert_equal(latt.a1, rotated_a1)
assert_equal(latt.a2, rotated_a2)
assert_true(np.allclose(latt.orientation_matrix, xfrm))
rotated_latt = Crystal2DLattice(a1=rotated_a1, a2=rotated_a2)
assert_equal(rotated_a1, rotated_latt.a1)
assert_equal(rotated_a2, rotated_latt.a2)
assert_true(np.allclose(latt.orientation_matrix,
rotated_latt.orientation_matrix))
def test5():
a = np.sqrt(3) * aCC
dlattice = Crystal2DLattice(a=a, b=a, gamma=60)
rlattice = \
Reciprocal2DLattice(cell_matrix=dlattice.reciprocal_lattice.matrix)
assert_equal(dlattice, rlattice.reciprocal_lattice)
assert_equal(dlattice.reciprocal_lattice, rlattice)
def test6():
a = np.sqrt(3) * aCC
l1 = Crystal2DLattice.square(a)
l2 = Crystal2DLattice.square(2 * a)
assert_true(l1 < l2)
assert_true(np.allclose(2 * l1.a, l2.a))
def test7():
latt = Crystal3DLattice(a=4.0, b=8.0, c=2.0, alpha=90,
beta=90, gamma=120)
assert_true(np.allclose(latt.a, 4.0))
assert_true(np.allclose(latt.b, 8.0))
assert_true(np.allclose(latt.c, 2.0))
assert_true(np.allclose(latt.alpha, 90.))
assert_true(np.allclose(latt.beta, 90.))
assert_true(np.allclose(latt.gamma, 120.))
def test8():
a = np.sqrt(3) * aCC
latt = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120)
print(latt)
a1 = latt.a1
a2 = latt.a2
a3 = latt.a3
xfrm = transformation_matrix(angle=-np.pi / 6)
rotangle = -np.pi / 6
for v in (a1, a2, a3):
v.rotate(angle=rotangle)
latt.rotate(angle=rotangle, axis='z')
print(latt)
assert_equal(latt.a1, a1)
assert_equal(latt.a2, a2)
assert_equal(latt.a3, a3)
assert_true(np.allclose(latt.orientation_matrix, xfrm))
def test9():
a = np.sqrt(3) * aCC
latt = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120)
print(latt)
hex_latt = \
Crystal3DLattice.hexagonal(a, 2 * r_CC_vdw)
print(hex_latt)
assert_equal(latt, hex_latt)
def test10():
a = np.sqrt(3) * aCC
dlattice = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120)
rlattice = \
Reciprocal3DLattice(cell_matrix=dlattice.reciprocal_lattice.matrix)
# print('\n{}'.format(np.linalg.inv(dlattice.matrix)))
# print(np.linalg.inv(dlattice.matrix) * dlattice.matrix)
# print(np.linalg.inv(dlattice.matrix).T)
# print(dlattice.reciprocal_lattice.matrix)
# print(dlattice.reciprocal_lattice.b1)
# print(dlattice.reciprocal_lattice.b2)
# print(dlattice.reciprocal_lattice.b3)
# print(rlattice.matrix)
assert_equal(dlattice, rlattice.reciprocal_lattice)
assert_equal(dlattice.reciprocal_lattice, rlattice)
assert_equal(dlattice.reciprocal_lattice.b1, rlattice.b1)
assert_equal(dlattice.reciprocal_lattice.b2, rlattice.b2)
assert_equal(dlattice.reciprocal_lattice.b3, rlattice.b3)
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
rlattice.matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.metric_tensor,
rlattice.metric_tensor))
assert_true(np.allclose(dlattice.ortho_matrix,
rlattice.reciprocal_lattice.ortho_matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.ortho_matrix,
rlattice.ortho_matrix))
assert_true(np.allclose(dlattice.matrix * rlattice.matrix.T, np.eye(3)))
assert_true(np.allclose(dlattice.metric_tensor * rlattice.metric_tensor,
np.eye(3)))
pmg_dlattice = pmg.Lattice(dlattice.matrix)
print('\npmg_dlattice.matrix:\n{}'.format(
rezero_array(pmg_dlattice.matrix)))
print('\ndlattice.matrix:\n{}'.format(rezero_array(dlattice.matrix)))
print('\npmg_dlattice.reciprocal_lattice.matrix:\n{}'.format(
rezero_array(pmg_dlattice.reciprocal_lattice.matrix)))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
rezero_array(dlattice.reciprocal_lattice.matrix)))
assert_true(np.allclose(dlattice.a, pmg_dlattice.a))
assert_true(np.allclose(dlattice.b, pmg_dlattice.b))
assert_true(np.allclose(dlattice.c, pmg_dlattice.c))
assert_true(np.allclose(np.asarray(dlattice.a1), pmg_dlattice.matrix[0]))
assert_true(np.allclose(np.asarray(dlattice.a2), pmg_dlattice.matrix[1]))
assert_true(np.allclose(np.asarray(dlattice.a3), pmg_dlattice.matrix[2]))
assert_true(np.allclose(dlattice.matrix, pmg_dlattice.matrix))
pmg_dlattice = pmg.Lattice.from_parameters(dlattice.a,
dlattice.b,
dlattice.c,
dlattice.alpha,
dlattice.beta,
dlattice.gamma)
print('\npmg_dlattice.matrix:\n{}'.format(
rezero_array(pmg_dlattice.matrix)))
print('\ndlattice.matrix:\n{}'.format(rezero_array(dlattice.matrix)))
print('\npmg_dlattice.reciprocal_lattice.matrix:\n{}'.format(
rezero_array(pmg_dlattice.reciprocal_lattice.matrix)))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
rezero_array(dlattice.reciprocal_lattice.matrix)))
assert_true(np.allclose(dlattice.a, pmg_dlattice.a))
assert_true(np.allclose(dlattice.b, pmg_dlattice.b))
assert_true(np.allclose(dlattice.c, pmg_dlattice.c))
assert_true(np.allclose(np.asarray(dlattice.a1), pmg_dlattice.matrix[0]))
assert_true(np.allclose(np.asarray(dlattice.a2), pmg_dlattice.matrix[1]))
assert_true(np.allclose(np.asarray(dlattice.a3), pmg_dlattice.matrix[2]))
assert_true(np.allclose(dlattice.matrix, pmg_dlattice.matrix))
assert_true(np.allclose(dlattice.ortho_matrix.T,
pmg.Lattice(dlattice.ortho_matrix.T).matrix))
assert_true(np.allclose(pmg.Lattice(dlattice.matrix).metric_tensor,
dlattice.metric_tensor))
assert_true(np.allclose(pmg.Lattice(dlattice.ortho_matrix.T).metric_tensor,
dlattice.metric_tensor))
assert_true(np.allclose(pmg.Lattice(dlattice.matrix).matrix,
dlattice.matrix))
assert_true(np.allclose(pmg.Lattice(dlattice.ortho_matrix.T).matrix,
dlattice.matrix))
assert_true(np.allclose(pmg.Lattice(dlattice.matrix).matrix,
rlattice.reciprocal_lattice.matrix))
pmg_rlattice = \
pmg.Lattice.from_parameters(dlattice.reciprocal_lattice.a_star,
dlattice.reciprocal_lattice.b_star,
dlattice.reciprocal_lattice.c_star,
dlattice.reciprocal_lattice.alpha_star,
dlattice.reciprocal_lattice.beta_star,
dlattice.reciprocal_lattice.gamma_star)
print('\npmg_rlattice:\n{}'.format(rezero_array(pmg_rlattice.matrix)))
print('\nrlattice:\n{}'.format(rezero_array(rlattice.matrix)))
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
rlattice.matrix))
# assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
# pmg_rlattice.matrix))
# assert_true(np.allclose(rlattice.matrix,
# pmg_rlattice.matrix))
def test11():
a = np.sqrt(3) * aCC
dlattice = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120)
orientation_matrix = rotation_matrix(angle=np.pi / 6, axis=zhat)
rlattice = \
Reciprocal3DLattice(a_star=dlattice.reciprocal_lattice.a_star,
b_star=dlattice.reciprocal_lattice.b_star,
c_star=dlattice.reciprocal_lattice.c_star,
alpha_star=dlattice.reciprocal_lattice.alpha_star,
beta_star=dlattice.reciprocal_lattice.beta_star,
gamma_star=dlattice.reciprocal_lattice.gamma_star,
orientation_matrix=orientation_matrix)
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
print('\nrlattice.matrix:\n{}'.format(rlattice.matrix))
print('\ndlattice.matrix:\n{}'.format(dlattice.matrix))
print('\nrlattice.reciprocal_lattice.matrix:\n{}'.format(
rlattice.reciprocal_lattice.matrix))
# assert_equal(dlattice.reciprocal_lattice, rlattice)
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
rlattice.matrix))
assert_true(np.allclose(dlattice.matrix,
rlattice.reciprocal_lattice.matrix))
# print('\n{}'.format(np.linalg.inv(dlattice.matrix)))
# print(np.linalg.inv(dlattice.matrix) * dlattice.matrix)
# print(np.linalg.inv(dlattice.matrix).T)
# print(dlattice.reciprocal_lattice.matrix)
# print(dlattice.reciprocal_lattice.b1)
# print(dlattice.reciprocal_lattice.b2)
# print(dlattice.reciprocal_lattice.b3)
# print(rlattice.matrix)
# print(rlattice.b1)
# print(rlattice.b2)
# print(rlattice.b3)
assert_equal(dlattice.reciprocal_lattice, rlattice)
assert_equal(dlattice.reciprocal_lattice.b1, rlattice.b1)
assert_equal(dlattice.reciprocal_lattice.b2, rlattice.b2)
assert_equal(dlattice.reciprocal_lattice.b3, rlattice.b3)
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
rlattice.matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.metric_tensor,
rlattice.metric_tensor))
pmg_dlattice = pmg.Lattice(dlattice.matrix)
print('\npmg_dlattice.matrix:\n{}'.format(pmg_dlattice.matrix))
print('\ndlattice.matrix:\n{}'.format(dlattice.matrix))
print('\npmg_dlattice.reciprocal_lattice.matrix:\n{}'.format(
pmg_dlattice.reciprocal_lattice.matrix))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(dlattice.a, pmg_dlattice.a))
assert_true(np.allclose(dlattice.b, pmg_dlattice.b))
assert_true(np.allclose(dlattice.c, pmg_dlattice.c))
assert_true(np.allclose(np.asarray(dlattice.a1), pmg_dlattice.matrix[0]))
assert_true(np.allclose(np.asarray(dlattice.a2), pmg_dlattice.matrix[1]))
assert_true(np.allclose(np.asarray(dlattice.a3), pmg_dlattice.matrix[2]))
assert_true(np.allclose(dlattice.matrix, pmg_dlattice.matrix))
assert_true(np.allclose(dlattice.ortho_matrix.T,
pmg.Lattice(dlattice.ortho_matrix.T).matrix))
assert_true(np.allclose(pmg.Lattice(dlattice.matrix).metric_tensor,
dlattice.metric_tensor))
assert_true(np.allclose(pmg.Lattice(dlattice.ortho_matrix.T).metric_tensor,
dlattice.metric_tensor))
assert_true(np.allclose(pmg.Lattice(dlattice.matrix).matrix,
dlattice.matrix))
assert_true(np.allclose(pmg.Lattice(dlattice.ortho_matrix.T).matrix,
dlattice.matrix))
# print('latt_from_inv_latt_matrix_transpose:\n{}\n'.format(
# latt_from_inv_latt_matrix_transpose.matrix))
# print('rlattice.reciprocal_lattice.matrix:\n{}'.format(
# rlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(pmg.Lattice(dlattice.matrix).matrix,
rlattice.reciprocal_lattice.matrix))
# print('rlattice.reciprocal_lattice.matrix:\n{}'.format(
# rlattice.reciprocal_lattice.matrix))
# print('pmg.Lattice(np.linalg.inv(dlattice.matrix).T).'
# 'reciprocal_lattice_crystallographic.matrix:\n{}'
# .format(pmg.Lattice(np.linalg.inv(dlattice.matrix).T)
# .reciprocal_lattice_crystallographic.matrix))
# print('pmg.Lattice.from_parameters(...):\n{}\n'.format(
# rezero_array(pmg.Lattice.from_parameters(
# dlattice.reciprocal_lattice.a_star,
# dlattice.reciprocal_lattice.b_star,
# dlattice.reciprocal_lattice.c_star,
# dlattice.reciprocal_lattice.alpha_star,
# dlattice.reciprocal_lattice.beta_star,
# dlattice.reciprocal_lattice.gamma_star)
# .reciprocal_lattice_crystallographic.matrix)))
# print('dlattice.ortho_matrix:\n{}'.format(dlattice.ortho_matrix))
# print('np.linalg.inv(dlattice.matrix).T:\n{}'.format(np.linalg.inv(
# dlattice.matrix).T))
# print('rlattice.reciprocal_lattice.ortho_matrix:\n{}'
# .format(rlattice.reciprocal_lattice.ortho_matrix))
# print('rlattice.reciprocal_lattice.ortho_matrix:\n{}\n'.
# format(rlattice.reciprocal_lattice.ortho_matrix))
# print('dlattice.reciprocal_lattice.ortho_matrix:\n{}'.format(
# dlattice.reciprocal_lattice.ortho_matrix))
# print('rlattice.ortho_matrix:\n{}'.format(
# rlattice.ortho_matrix))
# print('rlattice.ortho_matrix:\n{}\n'.format(
# rlattice.ortho_matrix))
assert_true(np.allclose(dlattice.ortho_matrix,
rlattice.reciprocal_lattice.
ortho_matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.ortho_matrix,
rlattice.ortho_matrix))
assert_equal(dlattice, rlattice.reciprocal_lattice)
assert_equal(dlattice.reciprocal_lattice, rlattice)
print(dlattice.matrix * rlattice.matrix.T)
assert_true(np.allclose(dlattice.matrix * rlattice.matrix.T,
np.eye(3)))
def test12():
a = np.sqrt(3) * aCC
cubic_latt = Crystal3DLattice(a=a, b=a, c=a, alpha=90, beta=90, gamma=90)
assert_equal(cubic_latt, Crystal3DLattice.cubic(a))
def test13():
latt = Crystal3DLattice(a=4.0, b=4.0, c=4.0,
alpha=90, beta=90, gamma=90)
print(latt)
p = [2.1, 0.9, 0.5]
assert_true(np.allclose(latt.wrap_fractional_coordinate(p),
Point((0.1, 0.9, 0.5))))
def test14():
latt = Crystal3DLattice(a=4.0, b=4.0, c=4.0,
alpha=90, beta=90, gamma=90)
print(latt)
a = latt.a1
b = latt.a2
c = latt.a3
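    # The metric tensor has entries G_ij = a_i . a_j; equivalently G = M M^T for a
    # lattice matrix M whose rows are the direct lattice vectors.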
G = np.matrix([[a.dot(a), a.dot(b), a.dot(c)],
[b.dot(a), b.dot(b), b.dot(c)],
[c.dot(a), c.dot(b), c.dot(c)]])
assert_true(np.allclose(latt.metric_tensor, G))
def test15():
latt = Crystal3DLattice(a=4.0, b=4.0, c=4.0,
alpha=90, beta=90, gamma=90)
print(latt)
recip_latt = Crystal3DLattice(a1=latt.b1, a2=latt.b2, a3=latt.b3)
print(recip_latt)
assert_equal(latt.a1, recip_latt.b1)
assert_equal(latt.a2, recip_latt.b2)
assert_equal(latt.a3, recip_latt.b3)
def test16():
a = np.sqrt(3) * aCC
assert_true(Crystal3DLattice.cubic(a) < Crystal3DLattice.cubic(2 * a))
def test17():
dlattice = Crystal3DLattice.hexagonal(np.sqrt(3) * aCC, r_CC_vdw)
rlattice = dlattice.reciprocal_lattice
assert_true(np.allclose(dlattice.volume ** 2,
np.linalg.det(dlattice.metric_tensor)))
print(dlattice.volume ** 2)
print(np.linalg.det(dlattice.metric_tensor))
print(rlattice.volume ** 2)
print(np.linalg.det(rlattice.metric_tensor))
assert_true(np.allclose(rlattice.volume ** 2,
np.linalg.det(rlattice.metric_tensor)))
print(rlattice.metric_tensor)
print(rlattice.metric_tensor.T)
assert_true(np.allclose(dlattice.volume * rlattice.volume, 1.0))
assert_true(np.allclose(rlattice.metric_tensor * dlattice.metric_tensor.T,
np.asmatrix(np.eye(3))))
def test18():
a = np.sqrt(3) * aCC
dlattice = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120)
dlattice_from_matrix = Crystal3DLattice(cell_matrix=dlattice.matrix)
assert_equal(dlattice, dlattice_from_matrix)
assert_true(np.allclose(dlattice.matrix, dlattice_from_matrix.matrix))
def test19():
a = np.sqrt(3) * aCC
dlattice = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120)
print('\ndlattice.matrix:\n{}'.format(dlattice.matrix))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
a1 = dlattice.a1
a2 = dlattice.a2
a3 = dlattice.a3
V = dlattice.volume
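    # Crystallographic reciprocal lattice vectors (no 2*pi factor):
    # b1 = (a2 x a3) / V, b2 = (a3 x a1) / V, b3 = (a1 x a2) / V, with V the cell volume.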
b1 = a2.cross(a3) / V
b2 = a3.cross(a1) / V
b3 = a1.cross(a2) / V
a_star = b1.length
b_star = b2.length
c_star = b3.length
alpha_star = np.degrees(b2.angle(b3))
beta_star = np.degrees(b3.angle(b1))
gamma_star = np.degrees(b1.angle(b2))
assert_true(np.allclose(a_star, dlattice.reciprocal_lattice.a_star))
assert_true(np.allclose(b_star, dlattice.reciprocal_lattice.b_star))
assert_true(np.allclose(c_star, dlattice.reciprocal_lattice.c_star))
assert_true(np.allclose(alpha_star,
dlattice.reciprocal_lattice.alpha_star))
assert_true(np.allclose(beta_star,
dlattice.reciprocal_lattice.beta_star))
assert_true(np.allclose(gamma_star,
dlattice.reciprocal_lattice.gamma_star))
rlattice = Reciprocal3DLattice(a_star=a_star, b_star=b_star, c_star=c_star,
alpha_star=alpha_star, beta_star=beta_star,
gamma_star=gamma_star)
print('\nrlattice.matrix:\n{}'.format(rlattice.matrix))
print('\nrlattice.reciprocal_lattice.matrix:\n{}'.format(
rlattice.reciprocal_lattice.matrix))
rlattice_from_rlattice_matrix = \
Reciprocal3DLattice(cell_matrix=rlattice.matrix)
print('\nrlattice_from_rlattice_matrix.matrix:\n{}'.format(
rlattice_from_rlattice_matrix.matrix))
print('\nrlattice_from_rlattice_matrix.reciprocal_lattice.matrix:\n{}'.
format(rlattice_from_rlattice_matrix.reciprocal_lattice.matrix))
assert_equal(rlattice, rlattice_from_rlattice_matrix)
assert_true(np.allclose(rlattice.matrix,
rlattice_from_rlattice_matrix.matrix))
rlattice_from_dlattice_rlattice_matrix = \
Reciprocal3DLattice(cell_matrix=dlattice.reciprocal_lattice.matrix)
print('\nrlattice_from_dlattice_rlattice_matrix.matrix:\n{}'.format(
rlattice_from_dlattice_rlattice_matrix.matrix))
print('\nrlattice_from_dlattice_rlattice_matrix.reciprocal_lattice.'
'matrix:\n{}'.format(rlattice_from_dlattice_rlattice_matrix.
reciprocal_lattice.matrix))
def test20():
a = np.sqrt(3) * aCC
orientation_matrix = rotation_matrix(angle=-np.pi / 6, axis=zhat)
dlattice = Crystal3DLattice(a=a, b=a, c=2 * r_CC_vdw,
alpha=90, beta=90, gamma=120,
orientation_matrix=orientation_matrix)
rlattice = \
Reciprocal3DLattice(a_star=dlattice.reciprocal_lattice.a_star,
b_star=dlattice.reciprocal_lattice.b_star,
c_star=dlattice.reciprocal_lattice.c_star,
alpha_star=dlattice.reciprocal_lattice.alpha_star,
beta_star=dlattice.reciprocal_lattice.beta_star,
gamma_star=dlattice.reciprocal_lattice.gamma_star)
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
print('\nrlattice.matrix:\n{}'.format(rlattice.matrix))
print('\ndlattice.matrix:\n{}'.format(dlattice.matrix))
print('\nrlattice.reciprocal_lattice.matrix:\n{}'.format(
rlattice.reciprocal_lattice.matrix))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
pmg_rlattice = \
pmg.Lattice.from_parameters(dlattice.reciprocal_lattice.a_star,
dlattice.reciprocal_lattice.b_star,
dlattice.reciprocal_lattice.c_star,
dlattice.reciprocal_lattice.alpha_star,
dlattice.reciprocal_lattice.beta_star,
dlattice.reciprocal_lattice.gamma_star)
print('\npmg_rlattice.matrix:\n{}'.format(pmg_rlattice.matrix))
print('\nrlattice.matrix:\n{}'.format(rlattice.matrix))
print('\npmg_rlattice.reciprocal_lattice.matrix:\n{}'.format(
pmg_rlattice.reciprocal_lattice.matrix))
print('\nrlattice.reciprocal_lattice.matrix:\n{}'.format(
rlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(rlattice.matrix,
pmg_rlattice.matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
pmg_rlattice.matrix))
if __name__ == '__main__':
nose.runmodule()
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'SyncShard'
db.delete_table('webinars_syncshard')
# Deleting model 'SyncJob'
db.delete_table('webinars_syncjob')
# Deleting model 'SyncStage'
db.delete_table('webinars_syncstage')
def backwards(self, orm):
# Adding model 'SyncShard'
db.create_table('webinars_syncshard', (
('completed_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('depth', self.gf('django.db.models.fields.IntegerField')()),
('section', self.gf('django.db.models.fields.IntegerField')()),
('created_at', self.gf('sanetime.dj.SaneTimeField')(blank=True)),
('started_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sync_job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.SyncJob'])),
))
db.send_create_signal('webinars', ['SyncShard'])
# Adding model 'SyncJob'
db.create_table('webinars_syncjob', (
('started_at', self.gf('sanetime.dj.SaneTimeField')(blank=True)),
('completed_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.Account'], null=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.SyncJob'], null=True)),
('sharded_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('auto', self.gf('django.db.models.fields.BooleanField')(default=True)),
('staged_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.Event'], null=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('webinars', ['SyncJob'])
# Adding model 'SyncStage'
db.create_table('webinars_syncstage', (
('completed_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('offset', self.gf('django.db.models.fields.IntegerField')()),
('cms_form', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.CmsForm'], null=True)),
('started_at', self.gf('sanetime.dj.SaneTimeField')(null=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('max_size', self.gf('django.db.models.fields.IntegerField')()),
('size', self.gf('django.db.models.fields.IntegerField')(null=True)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=16)),
('created_at', self.gf('sanetime.dj.SaneTimeField')(blank=True)),
('subkind', self.gf('django.db.models.fields.CharField')(max_length=16, null=True)),
('sync_job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webinars.SyncJob'])),
))
db.send_create_signal('webinars', ['SyncStage'])
models = {
'webinars.account': {
'Meta': {'object_name': 'Account'},
'account_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountType']"}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.AccountSync']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.AccountSync']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'prevent_unformed_lead_import': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'webinars.accountsync': {
'Meta': {'object_name': 'AccountSync'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.HubSync']", 'null': 'True'}),
'sharded_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.accountsyncshard': {
'Meta': {'object_name': 'AccountSyncShard'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']"}),
'section': ('django.db.models.fields.IntegerField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.accountsyncstage': {
'Meta': {'object_name': 'AccountSyncStage'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.accounttype': {
'Meta': {'object_name': 'AccountType'},
'can_api_create_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_load_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_register_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_report_views': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'extra_username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'listing_priority': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'webinars.cmsform': {
'Meta': {'object_name': 'CmsForm'},
'guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'is_sync_target': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.event': {
'Meta': {'object_name': 'Event'},
'_attended_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_attended_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_noshow_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_registered_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_registered_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'attended_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'cms_forms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['webinars.CmsForm']", 'through': "orm['webinars.EventForm']", 'symmetrical': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.EventSync']"}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.EventSync']"}),
'mothballed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'noshow_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'sync_leads_for_all_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'unknowable_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.eventform': {
'Meta': {'object_name': 'EventForm'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
'converted_at_cutoff': ('sanetime.dj.SaneTimeField', [], {'default': '0'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_last_modified_at': ('sanetime.dj.SaneTimeField', [], {'default': '0'})
},
'webinars.eventsync': {
'Meta': {'object_name': 'EventSync'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']", 'null': 'True'}),
'sharded_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.eventsyncshard': {
'Meta': {'object_name': 'EventSyncShard'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'section': ('django.db.models.fields.IntegerField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hub': {
'Meta': {'object_name': 'Hub'},
'_attended_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_attended_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_registered_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_registered_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.HubSync']"}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.HubSync']"}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'uninstalled_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.hubspoteventsyncstage': {
'Meta': {'object_name': 'HubSpotEventSyncStage'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'event_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventForm']"}),
'finish_last_modified_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_last_modified_at': ('sanetime.dj.SaneTimeField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hubspotregistrantsnapshot': {
'Meta': {'object_name': 'HubSpotRegistrantSnapshot'},
'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hubsync': {
'Meta': {'object_name': 'HubSync'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.landingpage': {
'Meta': {'object_name': 'LandingPage'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
'form_title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'webinars.registrant': {
'Meta': {'object_name': 'Registrant'},
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.stagedhubspotregistrant': {
'Meta': {'object_name': 'StagedHubSpotRegistrant'},
'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'converted_at': ('sanetime.dj.SaneTimeField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.stagedwebexevent': {
'Meta': {'object_name': 'StagedWebexEvent'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.stagedwebexregistrant': {
'Meta': {'object_name': 'StagedWebexRegistrant'},
'attendee_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.webexeventsnapshot': {
'Meta': {'object_name': 'WebexEventSnapshot'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.webexeventsyncstage': {
'Meta': {'object_name': 'WebexEventSyncStage'},
'attendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.webexregistrantsnapshot': {
'Meta': {'object_name': 'WebexRegistrantSnapshot'},
'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
}
}
complete_apps = ['webinars']
|
|
import json
from datetime import datetime
from html2text import html2text
from dojo.models import Finding
class MobSFParser(object):
def get_scan_types(self):
return ["MobSF Scan"]
def get_label_for_scan_types(self, scan_type):
return "MobSF Scan"
def get_description_for_scan_types(self, scan_type):
return "Export a JSON file using the API, api/v1/report_json."
def get_findings(self, filename, test):
tree = filename.read()
try:
data = json.loads(str(tree, 'utf-8'))
        except Exception:
data = json.loads(tree)
find_date = datetime.now()
dupes = {}
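        # Findings are first collected as plain dicts in mobsf_findings; at the
        # end they are converted to Finding objects and de-duplicated in dupes
        # keyed on severity + title, bumping nb_occurences for repeats.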
test_description = ""
if "name" in data:
test_description = "**Info:**\n"
if "packagename" in data:
test_description = "%s **Package Name:** %s\n" % (test_description, data["packagename"])
if "mainactivity" in data:
test_description = "%s **Main Activity:** %s\n" % (test_description, data["mainactivity"])
if "pltfm" in data:
test_description = "%s **Platform:** %s\n" % (test_description, data["pltfm"])
if "sdk" in data:
test_description = "%s **SDK:** %s\n" % (test_description, data["sdk"])
if "min" in data:
test_description = "%s **Min SDK:** %s\n" % (test_description, data["min"])
if "targetsdk" in data:
test_description = "%s **Target SDK:** %s\n" % (test_description, data["targetsdk"])
if "minsdk" in data:
test_description = "%s **Min SDK:** %s\n" % (test_description, data["minsdk"])
if "maxsdk" in data:
test_description = "%s **Max SDK:** %s\n" % (test_description, data["maxsdk"])
test_description = "%s\n**File Information:**\n" % (test_description)
if "name" in data:
test_description = "%s **Name:** %s\n" % (test_description, data["name"])
if "md5" in data:
test_description = "%s **MD5:** %s\n" % (test_description, data["md5"])
if "sha1" in data:
test_description = "%s **SHA-1:** %s\n" % (test_description, data["sha1"])
if "sha256" in data:
test_description = "%s **SHA-256:** %s\n" % (test_description, data["sha256"])
if "size" in data:
test_description = "%s **Size:** %s\n" % (test_description, data["size"])
if "urls" in data:
curl = ""
for url in data["urls"]:
                for found_url in url["urls"]:
                    curl += "%s\n" % (found_url)
if curl:
test_description = "%s\n**URL's:**\n %s\n" % (test_description, curl)
if "bin_anal" in data:
test_description = "%s \n**Binary Analysis:** %s\n" % (test_description, data["bin_anal"])
test.description = html2text(test_description)
mobsf_findings = []
# Mobile Permissions
if "permissions" in data:
# for permission, details in data["permissions"].items():
if type(data["permissions"]) is list:
for details in data["permissions"]:
mobsf_item = {
"category": "Mobile Permissions",
"title": details.get("name", ""),
"severity": self.getSeverityForPermission(details.get("status")),
"description": "**Permission Type:** " + details.get("name", "") + " (" + details.get("status", "") + ")\n\n**Description:** " + details.get("description", "") + "\n\n**Reason:** " + details.get("reason", ""),
"file_path": None
}
mobsf_findings.append(mobsf_item)
else:
for permission, details in list(data["permissions"].items()):
mobsf_item = {
"category": "Mobile Permissions",
"title": permission,
"severity": self.getSeverityForPermission(details.get("status", "")),
"description": "**Permission Type:** " + permission + "\n\n**Description:** " + details.get("description", ""),
"file_path": None
}
mobsf_findings.append(mobsf_item)
# Insecure Connections
if "insecure_connections" in data:
for details in data["insecure_connections"]:
insecure_urls = ""
for url in details.split(','):
insecure_urls = insecure_urls + url + "\n"
mobsf_item = {
"category": None,
"title": "Insecure Connections",
"severity": "Low",
"description": insecure_urls,
"file_path": None
}
mobsf_findings.append(mobsf_item)
# Binary Analysis
if "binary_analysis" in data:
if type(data["binary_analysis"]) is list:
for details in data["binary_analysis"]:
for binary_analysis_type in details:
if "name" != binary_analysis_type:
mobsf_item = {
"category": "Binary Analysis",
"title": details[binary_analysis_type]["description"].split(".")[0],
"severity": details[binary_analysis_type]["severity"].replace("warning", "low").title(),
"description": details[binary_analysis_type]["description"],
"file_path": details["name"]
}
mobsf_findings.append(mobsf_item)
else:
for binary_analysis_type, details in list(data["binary_analysis"].items()):
# "Binary makes use of insecure API(s)":{
# "detailed_desc":"The binary may contain the following insecure API(s) _vsprintf.",
# "severity":"high",
# "cvss":6,
# "cwe":"CWE-676 - Use of Potentially Dangerous Function",
# "owasp-mobile":"M7: Client Code Quality",
# "masvs":"MSTG-CODE-8"
# }
mobsf_item = {
"category": "Binary Analysis",
"title": details["detailed_desc"],
"severity": details["severity"].replace("good", "info").title(),
"description": details["detailed_desc"],
"file_path": None
}
mobsf_findings.append(mobsf_item)
# specific node for Android reports
if "android_api" in data:
# "android_insecure_random": {
# "files": {
# "u/c/a/b/a/c.java": "9",
# "kotlinx/coroutines/repackaged/net/bytebuddy/utility/RandomString.java": "3",
# ...
# "hu/mycompany/vbnmqweq/gateway/msg/Response.java": "13"
# },
# "metadata": {
# "id": "android_insecure_random",
# "description": "The App uses an insecure Random Number Generator.",
# "type": "Regex",
# "pattern": "java\\.util\\.Random;",
# "severity": "high",
# "input_case": "exact",
# "cvss": 7.5,
# "cwe": "CWE-330 Use of Insufficiently Random Values",
# "owasp-mobile": "M5: Insufficient Cryptography",
# "masvs": "MSTG-CRYPTO-6"
# }
# },
for api, details in list(data["android_api"].items()):
mobsf_item = {
"category": "Android API",
"title": details["metadata"]["description"],
"severity": details["metadata"]["severity"].replace("warning", "low").title(),
"description": "**API:** " + api + "\n\n**Description:** " + details["metadata"]["description"],
"file_path": None
}
mobsf_findings.append(mobsf_item)
# Manifest
if "manifest" in data:
for details in data["manifest"]:
mobsf_item = {
"category": "Manifest",
"title": details["title"],
"severity": details["stat"],
"description": details["desc"],
"file_path": None
}
mobsf_findings.append(mobsf_item)
# MobSF Findings
if "findings" in data:
for title, finding in list(data["findings"].items()):
description = title
file_path = None
if "path" in finding:
description = description + "\n\n**Files:**\n"
for path in finding["path"]:
if file_path is None:
file_path = path
description = description + " * " + path + "\n"
mobsf_item = {
"category": "Findings",
"title": title,
"severity": finding["level"],
"description": description,
"file_path": file_path
}
mobsf_findings.append(mobsf_item)
for mobsf_finding in mobsf_findings:
title = mobsf_finding["title"]
sev = self.getCriticalityRating(mobsf_finding["severity"])
description = ""
file_path = None
if mobsf_finding["category"]:
description += "**Category:** " + mobsf_finding["category"] + "\n\n"
description = description + html2text(mobsf_finding["description"])
finding = Finding(
title=title,
cwe=919, # Weaknesses in Mobile Applications
test=test,
description=description,
severity=sev,
references=None,
date=find_date,
static_finding=True,
dynamic_finding=False,
nb_occurences=1,
)
if mobsf_finding["file_path"]:
finding.file_path = mobsf_finding["file_path"]
dupe_key = sev + title
if dupe_key in dupes:
find = dupes[dupe_key]
if description is not None:
find.description += description
find.nb_occurences += 1
else:
dupes[dupe_key] = finding
return list(dupes.values())
def getSeverityForPermission(self, status):
"""Convert status for permission detection to severity
        In MobSF there are only 4 known values for a permission;
        we map them as follows:
dangerous => High (Critical?)
normal => Info
signature => Info (it's positive so... Info)
signatureOrSystem => Info (it's positive so... Info)
"""
if "dangerous" == status:
return "High"
else:
return "Info"
# Criticality rating
def getCriticalityRating(self, rating):
criticality = "Info"
if rating == "warning":
criticality = "Info"
else:
criticality = rating.capitalize()
return criticality
def suite_data(self, suites):
suite_info = ""
suite_info += suites["name"] + "\n"
suite_info += "Cipher Strength: " + str(suites["cipherStrength"]) + "\n"
if "ecdhBits" in suites:
suite_info += "ecdhBits: " + str(suites["ecdhBits"]) + "\n"
if "ecdhStrength" in suites:
suite_info += "ecdhStrength: " + str(suites["ecdhStrength"])
suite_info += "\n\n"
return suite_info
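# Hypothetical usage sketch (the file name and `test` object below are
# illustrative only, not part of this module): DefectDojo's import machinery
# invokes the parser roughly like this with a report exported via
# api/v1/report_json:
#
#     with open("mobsf_report.json", "rb") as report:
#         findings = MobSFParser().get_findings(report, test)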
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import smtplib
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from frappe.utils.verified_command import get_signed_params, verify_request
from html2text import html2text
from frappe.utils import get_url, nowdate, encode, now_datetime, add_days, split_emails
class BulkLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, subject=None, message=None, reference_doctype=None,
reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, reply_to=None, cc=(), message_id=None, send_after=None,
expose_recipients=False):
"""Add email to sending queue (Bulk Email)
:param recipients: List of recipients.
:param sender: Email sender.
:param subject: Email subject.
:param message: Email message.
:param reference_doctype: Reference DocType of caller document.
:param reference_name: Reference name of caller document.
:param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.bulk.unsubscribe`.
    :param unsubscribe_params: Additional params for unsubscribe links. Defaults are name, doctype, email.
:param attachments: Attachments to be sent.
:param reply_to: Reply to be captured here (default inbox)
:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
    :param send_after: Send this email after the given datetime. If the value is an integer, `send_after` is set to that many days from the current date.
"""
if not unsubscribe_method:
unsubscribe_method = "/api/method/frappe.email.bulk.unsubscribe"
if not recipients:
return
if isinstance(recipients, basestring):
recipients = split_emails(recipients)
if isinstance(send_after, int):
send_after = add_days(nowdate(), send_after)
if not sender or sender == "Administrator":
email_account = get_outgoing_email_account()
sender = email_account.get("sender") or email_account.email_id
check_bulk_limit(recipients)
formatted = get_formatted_html(subject, message)
try:
text_content = html2text(formatted)
except HTMLParser.HTMLParseError:
text_content = "See html attachment"
if reference_doctype and reference_name:
unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"reference_doctype": reference_doctype, "reference_name": reference_name})]
unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
{"global_unsubscribe": 1})]
else:
unsubscribed = []
recipients = [r for r in list(set(recipients)) if r and r not in unsubscribed]
for email in recipients:
email_content = formatted
email_text_context = text_content
if reference_doctype:
unsubscribe_link = get_unsubscribe_link(
reference_doctype=reference_doctype,
reference_name=reference_name,
email=email,
recipients=recipients,
expose_recipients=expose_recipients,
unsubscribe_method=unsubscribe_method,
unsubscribe_params=unsubscribe_params,
unsubscribe_message=unsubscribe_message
)
email_content = email_content.replace("<!--unsubscribe link here-->", unsubscribe_link.html)
email_text_context += unsubscribe_link.text
# add to queue
add(email, sender, subject, email_content, email_text_context, reference_doctype,
reference_name, attachments, reply_to, cc, message_id, send_after)
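# Hypothetical caller sketch (address, subject and doctype below are made up):
#
#     send(recipients="[email protected]", subject="Monthly Digest",
#          message="<h1>Hello</h1>", reference_doctype="Newsletter",
#          reference_name="NL-0001")
#
# With no sender given, the configured outgoing email account is used, and a
# per-recipient unsubscribe link is injected into the message before it is
# queued via add() below.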
def add(email, sender, subject, formatted, text_content=None,
reference_doctype=None, reference_name=None, attachments=None, reply_to=None,
cc=(), message_id=None, send_after=None):
"""add to bulk mail queue"""
e = frappe.new_doc('Bulk Email')
e.sender = sender
e.recipient = email
try:
mail = get_email(email, sender=e.sender, formatted=formatted, subject=subject,
text_content=text_content, attachments=attachments, reply_to=reply_to, cc=cc)
if message_id:
mail.set_message_id(message_id)
e.message = mail.as_string()
except frappe.InvalidEmailAddressError:
# bad email id - don't add to queue
return
e.reference_doctype = reference_doctype
e.reference_name = reference_name
e.send_after = send_after
e.insert(ignore_permissions=True)
def check_bulk_limit(recipients):
# get count of mails sent this month
this_month = frappe.db.sql("""select count(*) from `tabBulk Email` where
status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0]
# if using settings from site_config.json, check bulk limit
# No limit for own email settings
smtp_server = SMTPServer()
if (smtp_server.email_account
and getattr(smtp_server.email_account, "from_site_config", False)
or frappe.flags.in_test):
monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500
if (this_month + len(recipients)) > monthly_bulk_mail_limit:
throw(_("Email limit {0} crossed").format(monthly_bulk_mail_limit),
BulkLimitCrossedError)
def get_unsubscribe_link(reference_doctype, reference_name,
email, recipients, expose_recipients, unsubscribe_method, unsubscribe_params, unsubscribe_message):
unsubscribe_email = recipients if expose_recipients else [email]
unsubscribe_email = _("This email was sent to {0}").format(", ".join(unsubscribe_email))
if not unsubscribe_message:
unsubscribe_message = _("Unsubscribe from this list")
unsubscribe_url = get_unsubcribed_url(reference_doctype, reference_name, email,
unsubscribe_method, unsubscribe_params)
html = """<div style="margin: 15px auto; padding: 0px 7px; text-align: center; color: #8d99a6;">
{email}
<p style="margin: 15px auto;">
        <a href="{unsubscribe_url}" style="color: #8d99a6; text-decoration: underline;"
            target="_blank">{unsubscribe_message}
</a>
</p>
</div>""".format(
unsubscribe_url = unsubscribe_url,
email=unsubscribe_email,
unsubscribe_message=unsubscribe_message
)
text = "\n{email}\n\n{unsubscribe_message}: {unsubscribe_url}".format(
email=unsubscribe_email,
unsubscribe_message=unsubscribe_message,
unsubscribe_url=unsubscribe_url
)
return frappe._dict({
"html": html,
"text": text
})
def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params):
params = {"email": email.encode("utf-8"),
"doctype": reference_doctype.encode("utf-8"),
"name": reference_name.encode("utf-8")}
if unsubscribe_params:
params.update(unsubscribe_params)
query_string = get_signed_params(params)
# for test
frappe.local.flags.signed_query_string = query_string
return get_url(unsubscribe_method + "?" + get_signed_params(params))
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
    # unsubscribe from comments and communications
if not verify_request():
return
try:
frappe.get_doc({
"doctype": "Email Unsubscribe",
"email": email,
"reference_doctype": doctype,
"reference_name": name
}).insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
frappe.db.rollback()
else:
frappe.db.commit()
return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
frappe.respond_as_web_page(_("Unsubscribed"), _("{0} has left the conversation in {1} {2}").format(email, _(doctype), name))
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
smtpserver = SMTPServer()
auto_commit = not from_test
# additional check
check_bulk_limit([])
if frappe.are_emails_muted():
msgprint(_("Emails are muted"))
from_test = True
frappe.db.sql("""update `tabBulk Email` set status='Expired'
where datediff(curdate(), creation) > 3""", auto_commit=auto_commit)
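    # Anything older than 3 days was just marked Expired above, so the send
    # loop below only ever picks up reasonably fresh 'Not Sent' mails.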
for i in xrange(500):
email = frappe.db.sql("""select * from `tabBulk Email` where
status='Not Sent' and ifnull(send_after, "2000-01-01 00:00:00") < %s
order by creation asc limit 1 for update""", now_datetime(), as_dict=1)
if email:
email = email[0]
else:
break
frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""",
(email["name"],), auto_commit=auto_commit)
try:
if not from_test:
smtpserver.setup_email_account(email.reference_doctype)
smtpserver.sess.sendmail(email["sender"], email["recipient"], encode(email["message"]))
frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError):
# bad connection, retry later
frappe.db.sql("""update `tabBulk Email` set status='Not Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
# no need to attempt further
return
except Exception, e:
frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s
where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit)
def clear_outbox():
"""Remove mails older than 31 days in Outbox. Called daily via scheduler."""
frappe.db.sql("""delete from `tabBulk Email` where
datediff(now(), creation) > 31""")
|
|
#
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import unittest
import yaml
from bitarray import bitarray
from basil.dut import Dut
cnfg_yaml = """
transfer_layer:
- name : dummy_tl
type : Dummy
init:
mem : {0: 2, 14: 4} # module version for init of spi and mem bytes
hw_drivers:
- name : spi_module
type : spi
interface : dummy_tl
base_addr : 0x0
registers:
- name : TEST1
type : StdRegister
hw_driver : spi_module
size : 32
- name : TEST2
type : StdRegister
hw_driver : spi_module
size : 20
fields :
- name : VINJECT
size : 6
offset : 19
bit_order: [5,4,3,1,0,2]
default : 0
- name : VPULSE
size : 6
offset : 13
- name : EN
size : 2
offset : 7
- name : COLUMN
offset : 5
size : 3
repeat : 2
fields :
- name : EnR
size : 1
offset : 2
- name : DACR
size : 2
offset : 1
"""
class TestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
        cls.cnfg = yaml.safe_load(cnfg_yaml)  # safe_load is sufficient for this plain config
cls.dut = Dut(cls.cnfg)
cls.dut['spi_module']._require_version = "==2"
cls.dut.init()
cls.dut['spi_module']._mem_bytes = 4
def test_mem_bytes(self):
self.dut.init()
self.dut['spi_module']._mem_bytes = 4
self.assertEqual(4, self.dut['spi_module'].MEM_BYTES)
self.assertRaises(ValueError, self.dut['spi_module'].set_data, [1, 2, 3, 4, 5])
def test_init_simple(self):
self.dut['TEST1'].write()
mem = dict()
# mem[0] = 0 # reset
# mem[0] = 1
mem[16] = 0 # has an offset of 16 bytes
mem[17] = 0
mem[18] = 0
mem[19] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'][0] = 1
self.dut['TEST1'].write()
mem[19] = 1
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'][31] = 1
self.dut['TEST1'].write()
mem[16] = 0x80
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'] = 0
self.dut['TEST1'].write()
mem[16] = 0
mem[19] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'] = 0xa55a8001
self.dut['TEST1'].write()
mem[16] = 0xa5
mem[17] = 0x5a
mem[18] = 0x80
mem[19] = 0x01
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'] = 0
self.dut['TEST1'][11:4] = 0xff
self.dut['TEST1'].write()
mem[16] = 0x0
mem[17] = 0x0
mem[18] = 0x0f
mem[19] = 0xf0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'] = 0
self.dut['TEST1'][11:4] = '10000001'
self.dut['TEST1'].write()
mem[16] = 0x0
mem[17] = 0x0
mem[18] = 0x08
mem[19] = 0x10
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST1'] = 0
self.dut['TEST1'][11:4] = bitarray('00011000')
self.dut['TEST1'].write()
mem[16] = 0x0
mem[17] = 0x0
mem[18] = 0x01
mem[19] = 0x80
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_bit_order(self):
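        # VINJECT declares bit_order [5, 4, 3, 1, 0, 2], so each logical bit
        # set below should land on its remapped physical bit; the expected
        # byte at mem[16] encodes that mapping.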
self.dut['TEST2'].write()
mem = dict()
# mem[0] = 0 # reset
mem[0] = 2
mem[14] = 4
mem[16] = 0
mem[17] = 0
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VINJECT'] = 0x01
self.dut['TEST2'].write()
mem[16] = 0x08
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VINJECT'] = 0x02
self.dut['TEST2'].write()
mem[16] = 0x10
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VINJECT'] = 0x04
self.dut['TEST2'].write()
mem[16] = 0x04
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VINJECT'] = 0x08
self.dut['TEST2'].write()
mem[16] = 0x20
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VINJECT'] = 0
self.dut['TEST2']['VINJECT'][0] = 1
self.dut['TEST2'].write()
mem[16] = 0x04
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_repeat(self):
self.dut['dummy_tl'].mem = dict()
# self.dut['TEST2'] = 0
self.dut['TEST2']['VINJECT'] = 0
self.dut['TEST2']['VPULSE'] = 0
self.dut['TEST2'].write()
mem = dict()
# mem[0] = 1
mem[16] = 0
mem[17] = 0
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['COLUMN'][0]['EnR'] = 1
self.dut['TEST2'].write()
mem[17] = 0x02
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['COLUMN'][1]['DACR'] = 1
self.dut['TEST2'].write()
mem[18] = 0x10
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_default(self):
self.cnfg['registers'][1]['fields'][0]['default'] = 0x01 # VINJECT
self.dut = Dut(self.cnfg)
self.dut['spi_module']._require_version = "==2"
self.dut.init()
self.dut['spi_module']._mem_bytes = 32
mem = dict()
# mem[0] = 0 # reset
mem[0] = 2
mem[14] = 4
mem[16] = 0x08
mem[17] = 0
mem[18] = 0
self.dut['TEST2'].write()
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_fields(self):
self.dut['dummy_tl'].mem = dict()
self.dut['TEST2']['VINJECT'] = 0
self.dut['TEST2']['VPULSE'] = 0
self.dut['TEST2'].write()
mem = dict()
# mem[0] = 1
mem[16] = 0
mem[17] = 0
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VPULSE'] = 0x5
self.dut['TEST2'].write()
mem = dict()
mem[16] = 0
mem[17] = 0x50
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VPULSE'] = bitarray('100001')
self.dut['TEST2'].write()
mem = dict()
mem[16] = 0x02
mem[17] = 0x10
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VPULSE'] = '100001'
self.dut['TEST2'].write()
mem = dict()
mem[16] = 0x02
mem[17] = 0x10
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
self.dut['TEST2']['VPULSE'] = 0b100011
self.dut['TEST2'].write()
mem = dict()
mem[16] = 0x02
mem[17] = 0x30
mem[18] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestClass)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = 'os-hosts'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('host_name')
elem.set('service')
elem.set('zone')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute.HostAPI()
super(HostController, self).__init__()
@extensions.expected_errors(())
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
"""
:returns: A dict in the format:
{'hosts': [{'host_name': 'some.host.name',
'service': 'cells',
'zone': 'internal'},
{'host_name': 'some.other.host.name',
'service': 'cells',
'zone': 'internal'},
{'host_name': 'some.celly.host.name',
'service': 'cells',
'zone': 'internal'},
{'host_name': 'console1.host.com',
'service': 'consoleauth',
'zone': 'internal'},
{'host_name': 'network1.host.com',
'service': 'network',
'zone': 'internal'},
                    {'host_name': 'network2.host.com',
'service': 'network',
'zone': 'internal'},
{'host_name': 'compute1.host.com',
'service': 'compute',
'zone': 'nova'},
{'host_name': 'compute2.host.com',
'service': 'compute',
'zone': 'nova'},
{'host_name': 'sched1.host.com',
'service': 'scheduler',
'zone': 'internal'},
{'host_name': 'sched2.host.com',
'service': 'scheduler',
'zone': 'internal'},
{'host_name': 'vol1.host.com',
                     'service': 'volume',
                     'zone': 'internal'}]}
"""
context = req.environ['nova.context']
authorize(context)
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
services = self.api.service_get_all(context, filters=filters,
set_zones=True)
hosts = []
for service in services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
@extensions.expected_errors((400, 404, 501))
def update(self, req, id, body):
"""
:param body: example format {'host': {'status': 'enable',
'maintenance_mode': 'enable'}}
:returns:
"""
def read_enabled(orig_val, msg):
"""
:param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
:param msg: The message to be passed to HTTPBadRequest. A single
%s will be replaced with orig_val.
:returns: True for 'enabled' and False for 'disabled'
"""
val = orig_val.strip().lower()
if val == "enable":
return True
elif val == "disable":
return False
else:
raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
context = req.environ['nova.context']
authorize(context)
# See what the user wants to 'update'
if not self.is_valid_body(body, 'host'):
raise webob.exc.HTTPBadRequest(
explanation=_("The request body invalid"))
params = dict([(k.strip().lower(), v)
for k, v in body['host'].iteritems()])
orig_status = status = params.pop('status', None)
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
# Validate the request
if len(params) > 0:
# Some extra param was passed. Fail.
explanation = _("Invalid update setting: '%s'") % params.keys()[0]
raise webob.exc.HTTPBadRequest(explanation=explanation)
if orig_status is not None:
status = read_enabled(orig_status, _("Invalid status: '%s'"))
if orig_maint_mode is not None:
maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
if status is None and maint_mode is None:
explanation = _("'status' or 'maintenance_mode' needed for "
"host update")
raise webob.exc.HTTPBadRequest(explanation=explanation)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id,
maint_mode)
return {'host': result}
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
LOG.audit(_("Putting host %(host_name)s in maintenance mode "
"%(mode)s."),
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host.
"""
if enabled:
LOG.audit(_("Enabling host %s.") % host_name)
else:
LOG.audit(_("Disabling host %s.") % host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": {"host": host_name,
"power_action": result}}
@extensions.expected_errors((400, 404, 501))
def startup(self, req, id):
return self._host_power_action(req, host_name=id, action="startup")
@extensions.expected_errors((400, 404, 501))
def shutdown(self, req, id):
return self._host_power_action(req, host_name=id, action="shutdown")
@extensions.expected_errors((400, 404, 501))
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
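    # The three power actions above are exposed as GET member actions on the
    # os-hosts resource (see Hosts.get_resources() at the bottom of this
    # module).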
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Getting usage resource per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
@extensions.expected_errors((403, 404))
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
authorize(context)
host_name = id
try:
service = self.api.service_get_by_compute_host(context, host_name)
except exception.ComputeHostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.AdminRequired:
# TODO(Alex Xu): The authorization is done by policy,
# db layer checking is needless. The db layer checking should
# be removed
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
compute_node = service['compute_node']
instances = self.api.instance_get_all_by_host(context, host_name)
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in by_proj_resources.itervalues():
resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.V3APIExtensionBase):
"""Admin-only host administration."""
name = "Hosts"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/hosts/api/v3"
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
def get_controller_extensions(self):
return []
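# Illustrative note (not part of the original module; path prefix omitted):
# the ResourceExtension above exposes the controller at os-hosts, with the
# member actions mapped to GET requests such as
#     GET .../os-hosts/{host_name}/startup
#     GET .../os-hosts/{host_name}/shutdown
#     GET .../os-hosts/{host_name}/reboot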
|
|
"""Sensors flow for Withings."""
import typing as types
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
from . import const
from .common import _LOGGER, WithingsDataManager, get_data_manager
# There are only 3 calls (per profile) made to the Withings API every 5
# minutes (see throttle values), so this component wouldn't benefit
# much from parallel updates.
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: types.Callable[[types.List[Entity], bool], None],
):
"""Set up the sensor config entry."""
data_manager = get_data_manager(hass, entry)
entities = create_sensor_entities(data_manager)
async_add_entities(entities, True)
def get_measures():
"""Get all the measures.
This function exists to be easily mockable so we can test
one measure at a time. This becomes necessary when integration
testing throttle functionality in the data manager.
"""
return list(WITHINGS_MEASUREMENTS_MAP)
def create_sensor_entities(data_manager: WithingsDataManager):
"""Create sensor entities."""
entities = []
measures = get_measures()
for attribute in WITHINGS_ATTRIBUTES:
if attribute.measurement not in measures:
_LOGGER.debug(
"Skipping measurement %s as it is not in the"
"list of measurements to use",
attribute.measurement,
)
continue
_LOGGER.debug(
"Creating entity for measurement: %s, measure_type: %s,"
"friendly_name: %s, unit_of_measurement: %s",
attribute.measurement,
attribute.measure_type,
attribute.friendly_name,
attribute.unit_of_measurement,
)
entity = WithingsHealthSensor(data_manager, attribute)
entities.append(entity)
return entities
class WithingsAttribute:
"""Base class for modeling withing data."""
def __init__(
self,
measurement: str,
measure_type,
friendly_name: str,
unit_of_measurement: str,
icon: str,
) -> None:
"""Constructor."""
self.measurement = measurement
self.measure_type = measure_type
self.friendly_name = friendly_name
self.unit_of_measurement = unit_of_measurement
self.icon = icon
class WithingsMeasureAttribute(WithingsAttribute):
"""Model measure attributes."""
class WithingsSleepStateAttribute(WithingsAttribute):
"""Model sleep data attributes."""
def __init__(
self, measurement: str, friendly_name: str, unit_of_measurement: str, icon: str
) -> None:
"""Constructor."""
super().__init__(measurement, None, friendly_name, unit_of_measurement, icon)
class WithingsSleepSummaryAttribute(WithingsAttribute):
"""Models sleep summary attributes."""
WITHINGS_ATTRIBUTES = [
WithingsMeasureAttribute(
const.MEAS_WEIGHT_KG,
const.MEASURE_TYPE_WEIGHT,
"Weight",
const.UOM_MASS_KG,
"mdi:weight-kilogram",
),
WithingsMeasureAttribute(
const.MEAS_FAT_MASS_KG,
const.MEASURE_TYPE_FAT_MASS,
"Fat Mass",
const.UOM_MASS_KG,
"mdi:weight-kilogram",
),
WithingsMeasureAttribute(
const.MEAS_FAT_FREE_MASS_KG,
const.MEASURE_TYPE_FAT_MASS_FREE,
"Fat Free Mass",
const.UOM_MASS_KG,
"mdi:weight-kilogram",
),
WithingsMeasureAttribute(
const.MEAS_MUSCLE_MASS_KG,
const.MEASURE_TYPE_MUSCLE_MASS,
"Muscle Mass",
const.UOM_MASS_KG,
"mdi:weight-kilogram",
),
WithingsMeasureAttribute(
const.MEAS_BONE_MASS_KG,
const.MEASURE_TYPE_BONE_MASS,
"Bone Mass",
const.UOM_MASS_KG,
"mdi:weight-kilogram",
),
WithingsMeasureAttribute(
const.MEAS_HEIGHT_M,
const.MEASURE_TYPE_HEIGHT,
"Height",
const.UOM_LENGTH_M,
"mdi:ruler",
),
WithingsMeasureAttribute(
const.MEAS_TEMP_C,
const.MEASURE_TYPE_TEMP,
"Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
),
WithingsMeasureAttribute(
const.MEAS_BODY_TEMP_C,
const.MEASURE_TYPE_BODY_TEMP,
"Body Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
),
WithingsMeasureAttribute(
const.MEAS_SKIN_TEMP_C,
const.MEASURE_TYPE_SKIN_TEMP,
"Skin Temperature",
const.UOM_TEMP_C,
"mdi:thermometer",
),
WithingsMeasureAttribute(
const.MEAS_FAT_RATIO_PCT,
const.MEASURE_TYPE_FAT_RATIO,
"Fat Ratio",
const.UOM_PERCENT,
None,
),
WithingsMeasureAttribute(
const.MEAS_DIASTOLIC_MMHG,
const.MEASURE_TYPE_DIASTOLIC_BP,
"Diastolic Blood Pressure",
const.UOM_MMHG,
None,
),
WithingsMeasureAttribute(
const.MEAS_SYSTOLIC_MMGH,
const.MEASURE_TYPE_SYSTOLIC_BP,
"Systolic Blood Pressure",
const.UOM_MMHG,
None,
),
WithingsMeasureAttribute(
const.MEAS_HEART_PULSE_BPM,
const.MEASURE_TYPE_HEART_PULSE,
"Heart Pulse",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
),
WithingsMeasureAttribute(
const.MEAS_SPO2_PCT, const.MEASURE_TYPE_SPO2, "SPO2", const.UOM_PERCENT, None
),
WithingsMeasureAttribute(
const.MEAS_HYDRATION, const.MEASURE_TYPE_HYDRATION, "Hydration", "", "mdi:water"
),
WithingsMeasureAttribute(
const.MEAS_PWV,
const.MEASURE_TYPE_PWV,
"Pulse Wave Velocity",
const.UOM_METERS_PER_SECOND,
None,
),
WithingsSleepStateAttribute(
const.MEAS_SLEEP_STATE, "Sleep state", None, "mdi:sleep"
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_WAKEUP_DURATION_SECONDS,
const.MEASURE_TYPE_SLEEP_WAKEUP_DURATION,
"Wakeup time",
const.UOM_SECONDS,
"mdi:sleep-off",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_LIGHT_DURATION_SECONDS,
const.MEASURE_TYPE_SLEEP_LIGHT_DURATION,
"Light sleep",
const.UOM_SECONDS,
"mdi:sleep",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_DEEP_DURATION_SECONDS,
const.MEASURE_TYPE_SLEEP_DEEP_DURATION,
"Deep sleep",
const.UOM_SECONDS,
"mdi:sleep",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_REM_DURATION_SECONDS,
const.MEASURE_TYPE_SLEEP_REM_DURATION,
"REM sleep",
const.UOM_SECONDS,
"mdi:sleep",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_WAKEUP_COUNT,
const.MEASURE_TYPE_SLEEP_WAKUP_COUNT,
"Wakeup count",
const.UOM_FREQUENCY,
"mdi:sleep-off",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_TOSLEEP_DURATION_SECONDS,
const.MEASURE_TYPE_SLEEP_TOSLEEP_DURATION,
"Time to sleep",
const.UOM_SECONDS,
"mdi:sleep",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_TOWAKEUP_DURATION_SECONDS,
const.MEASURE_TYPE_SLEEP_TOWAKEUP_DURATION,
"Time to wakeup",
const.UOM_SECONDS,
"mdi:sleep-off",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_HEART_RATE_AVERAGE,
const.MEASURE_TYPE_SLEEP_HEART_RATE_AVERAGE,
"Average heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_HEART_RATE_MIN,
const.MEASURE_TYPE_SLEEP_HEART_RATE_MIN,
"Minimum heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_HEART_RATE_MAX,
const.MEASURE_TYPE_SLEEP_HEART_RATE_MAX,
"Maximum heart rate",
const.UOM_BEATS_PER_MINUTE,
"mdi:heart-pulse",
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_RESPIRATORY_RATE_AVERAGE,
const.MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_AVERAGE,
"Average respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_RESPIRATORY_RATE_MIN,
const.MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_MIN,
"Minimum respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
),
WithingsSleepSummaryAttribute(
const.MEAS_SLEEP_RESPIRATORY_RATE_MAX,
const.MEASURE_TYPE_SLEEP_RESPIRATORY_RATE_MAX,
"Maximum respiratory rate",
const.UOM_BREATHS_PER_MINUTE,
None,
),
]
WITHINGS_MEASUREMENTS_MAP = {attr.measurement: attr for attr in WITHINGS_ATTRIBUTES}
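# Illustrative note (not part of the original module): the map above lets the
# rest of this file look attributes up by measurement name, e.g.
#     WITHINGS_MEASUREMENTS_MAP[const.MEAS_WEIGHT_KG].friendly_name  # "Weight"
# which is what get_measures() and create_sensor_entities() rely on when
# deciding which sensors to build.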
class WithingsHealthSensor(Entity):
"""Implementation of a Withings sensor."""
def __init__(
self, data_manager: WithingsDataManager, attribute: WithingsAttribute
) -> None:
"""Initialize the Withings sensor."""
self._data_manager = data_manager
self._attribute = attribute
self._state = None
self._slug = self._data_manager.slug
self._user_id = self._data_manager.api.get_credentials().user_id
@property
def name(self) -> str:
"""Return the name of the sensor."""
return f"Withings {self._attribute.measurement} {self._slug}"
@property
def unique_id(self) -> str:
"""Return a unique, HASS-friendly identifier for this entity."""
return "withings_{}_{}_{}".format(
self._slug, self._user_id, slugify(self._attribute.measurement)
)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity, if any."""
return self._attribute.unit_of_measurement
@property
def icon(self) -> str:
"""Icon to use in the frontend, if any."""
return self._attribute.icon
@property
def device_state_attributes(self):
"""Get withings attributes."""
return self._attribute.__dict__
async def async_update(self) -> None:
"""Update the data."""
_LOGGER.debug(
"Async update slug: %s, measurement: %s, user_id: %s",
self._slug,
self._attribute.measurement,
self._user_id,
)
if isinstance(self._attribute, WithingsMeasureAttribute):
_LOGGER.debug("Updating measures state")
await self._data_manager.update_measures()
await self.async_update_measure(self._data_manager.measures)
elif isinstance(self._attribute, WithingsSleepStateAttribute):
_LOGGER.debug("Updating sleep state")
await self._data_manager.update_sleep()
await self.async_update_sleep_state(self._data_manager.sleep)
elif isinstance(self._attribute, WithingsSleepSummaryAttribute):
_LOGGER.debug("Updating sleep summary state")
await self._data_manager.update_sleep_summary()
await self.async_update_sleep_summary(self._data_manager.sleep_summary)
async def async_update_measure(self, data) -> None:
"""Update the measures data."""
if data is None:
_LOGGER.error("Provided data is None. Setting state to %s", None)
self._state = None
return
measure_type = self._attribute.measure_type
_LOGGER.debug(
"Finding the unambiguous measure group with measure_type: %s", measure_type
)
measure_groups = [
g
for g in data
if (not g.is_ambiguous() and g.get_measure(measure_type) is not None)
]
if not measure_groups:
_LOGGER.warning("No measure groups found, setting state to %s", None)
self._state = None
return
_LOGGER.debug(
"Sorting list of %s measure groups by date created (DESC)",
len(measure_groups),
)
measure_groups.sort(key=(lambda g: g.created), reverse=True)
self._state = round(measure_groups[0].get_measure(measure_type), 4)
async def async_update_sleep_state(self, data) -> None:
"""Update the sleep state data."""
if data is None:
_LOGGER.error("Provided data is None. Setting state to %s", None)
self._state = None
return
if not data.series:
_LOGGER.warning("No sleep data, setting state to %s", None)
self._state = None
return
series = sorted(data.series, key=lambda o: o.enddate, reverse=True)
serie = series[0]
if serie.state == const.MEASURE_TYPE_SLEEP_STATE_AWAKE:
self._state = const.STATE_AWAKE
elif serie.state == const.MEASURE_TYPE_SLEEP_STATE_LIGHT:
self._state = const.STATE_LIGHT
elif serie.state == const.MEASURE_TYPE_SLEEP_STATE_DEEP:
self._state = const.STATE_DEEP
elif serie.state == const.MEASURE_TYPE_SLEEP_STATE_REM:
self._state = const.STATE_REM
else:
self._state = None
async def async_update_sleep_summary(self, data) -> None:
"""Update the sleep summary data."""
if data is None:
_LOGGER.error("Provided data is None. Setting state to %s", None)
self._state = None
return
if not data.series:
_LOGGER.warning("Sleep data has no series, setting state to %s", None)
self._state = None
return
measurement = self._attribute.measurement
measure_type = self._attribute.measure_type
_LOGGER.debug("Determining total value for: %s", measurement)
total = 0
for serie in data.series:
if hasattr(serie, measure_type):
total += getattr(serie, measure_type)
self._state = round(total, 4)
|
|
#!/usr/bin/env python3
import importlib
import inspect
import logging
import sys
import synapseclient
try:
from synapseclient.core.exceptions import SynapseHTTPError
except ModuleNotFoundError:
from synapseclient.exceptions import SynapseHTTPError
from . import config
from . import example_filetype_format
from . import process_functions
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ValidationHelper(object):
# Used for the kwargs in validate_single_file
# Overload this per class
_validate_kwargs = []
def __init__(self, syn, project_id, center, entitylist,
format_registry=config.PROCESS_FILES,
file_type=None):
"""A validator helper class for a center's files.
Args:
syn: a synapseclient.Synapse object
project_id: Synapse Project ID where files are stored and configured.
center: The participating center name.
entitylist: A list of Synapse File entities to validate.
format_registry: A dictionary mapping file format name to the
format class.
file_type: Specify file type to skip filename validation
"""
self._synapse_client = syn
self._project = syn.get(project_id)
self.entitylist = entitylist
self.center = center
self._format_registry = format_registry
self.file_type = (self.determine_filetype()
if file_type is None else file_type)
def determine_filetype(self):
"""Gets the file type of the file by validating its filename
Args:
syn: Synapse object
filepathlist: list of filepaths to center files
Returns:
str: File type of input files. None if no filetype found
"""
filetype = None
# Loop through file formats
for file_format in self._format_registry:
validator = self._format_registry[file_format](self._synapse_client, self.center)
try:
filenames = [entity.name for entity in self.entitylist]
filetype = validator.validateFilename(filenames)
except AssertionError:
continue
# If valid filename, return file type.
if filetype is not None:
break
return filetype
def validate_single_file(self, **kwargs):
"""Validate a submitted file unit.
Returns:
valid: Boolean value of validation status
message: errors and warnings
"""
if self.file_type not in self._format_registry:
valid = False
errors = "Your filename is incorrect! Please change your filename before you run the validator or specify --filetype if you are running the validator locally"
warnings = ""
else:
mykwargs = {}
for required_parameter in self._validate_kwargs:
assert required_parameter in kwargs.keys(), \
"%s not in parameter list" % required_parameter
mykwargs[required_parameter] = kwargs[required_parameter]
validator_cls = self._format_registry[self.file_type]
validator = validator_cls(self._synapse_client, self.center)
filepathlist = [entity.path for entity in self.entitylist]
valid, errors, warnings = validator.validate(filePathList=filepathlist,
**mykwargs)
# Complete error message
message = collect_errors_and_warnings(errors, warnings)
return (valid, message)
class GenieValidationHelper(ValidationHelper):
"""A validator helper class for AACR Project Genie.
"""
_validate_kwargs = ['oncotree_link', 'nosymbol_check']
def collect_errors_and_warnings(errors, warnings):
'''Aggregates errors and warnings into a string.
Args:
errors: string of file errors, separated by new lines.
warnings: string of file warnings, separated by new lines.
Returns:
message - errors + warnings
'''
# Complete error message
message = "----------------ERRORS----------------\n"
if errors == "":
message = "YOUR FILE IS VALIDATED!\n"
logger.info(message)
else:
for error in errors.split("\n"):
if error != '':
logger.error(error)
message += errors
if warnings != "":
for warning in warnings.split("\n"):
if warning != '':
logger.warning(warning)
message += "-------------WARNINGS-------------\n" + warnings
return message
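# Illustrative example (not part of the original module; the error text is
# hypothetical): for errors = "chromosome column is missing\n" and
# warnings = "", the helper above logs the error and returns
#     "----------------ERRORS----------------\nchromosome column is missing\n"
# whereas empty errors produce the "YOUR FILE IS VALIDATED!" message instead.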
def get_config(syn, synid):
"""Gets Synapse database to Table mapping in dict
Args:
syn: Synapse connection
synid: Synapse id of database mapping table
Returns:
dict: {'databasename': 'synid'}
"""
config = syn.tableQuery('SELECT * FROM {}'.format(synid))
configdf = config.asDataFrame()
configdf.index = configdf['Database']
config_dict = configdf.to_dict()
return config_dict['Id']
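# Minimal usage sketch (hypothetical Synapse ids): given a mapping table with
# "Database" and "Id" columns, the helper above returns a plain dict keyed by
# database name, e.g.
#     get_config(syn, "syn000")  # -> {'centerMapping': 'syn111', ...}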
def _check_parentid_permission_container(syn, parentid):
"""Checks permission / container
# TODO: Currently only checks if a user has READ permissions
"""
if parentid is not None:
try:
syn_ent = syn.get(parentid, downloadFile=False)
# If not container, throw an assertion
assert synapseclient.entity.is_container(syn_ent)
except (SynapseHTTPError, AssertionError):
raise ValueError(
"Provided Synapse id must be your input folder Synapse id "
"or a Synapse Id of a folder inside your input directory")
def _check_center_input(center, center_list):
"""Checks center input
Args:
center: Center name
center_list: List of allowed centers
Raises:
ValueError: If the specified center is not in the center list
"""
if center not in center_list:
raise ValueError("Must specify one of these "
f"centers: {', '.join(center_list)}")
def _get_oncotreelink(syn, databasetosynid_mappingdf, oncotree_link=None):
"""
Gets oncotree link unless a link is specified by the user
Args:
syn: Synapse object
databasetosynid_mappingdf: database to synid mapping
oncotree_link: link to oncotree. Default is None
Returns:
oncotree link
"""
if oncotree_link is None:
oncolink = databasetosynid_mappingdf.query(
'Database == "oncotreeLink"').Id
oncolink_ent = syn.get(oncolink.iloc[0])
oncotree_link = oncolink_ent.externalURL
return oncotree_link
def _upload_to_synapse(syn, filepaths, valid, parentid=None):
"""
Upload to synapse if parentid is specified and valid
Args:
syn: Synapse object
filepaths: List of file paths
valid: Boolean value for validity of file
parentid: Synapse id of container. Default is None
"""
if parentid is not None and valid:
logger.info("Uploading file to {}".format(parentid))
for path in filepaths:
file_ent = synapseclient.File(path, parent=parentid)
ent = syn.store(file_ent)
logger.info("Stored to {}".format(ent.id))
def collect_format_types(package_names):
"""Finds subclasses of the example_filetype_format.FileTypeFormat
from a list of package names.
Args:
package_names: A list of Python package names as strings.
Returns:
A format registry dict mapping file format name to class, for classes in
the named packages that subclass example_filetype_format.FileTypeFormat
"""
file_format_list = []
for package_name in package_names:
importlib.import_module(package_name)
for cls in config.get_subclasses(example_filetype_format.FileTypeFormat):
logger.debug("checking {}.".format(cls))
cls_module_name = cls.__module__
cls_pkg = cls_module_name.split('.')[0]
if cls_pkg in package_names:
file_format_list.append(cls)
file_format_dict = config.make_format_registry_dict(file_format_list)
return file_format_dict
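# Minimal usage sketch (hypothetical package name): importing a package that
# defines FileTypeFormat subclasses and collecting them into a registry dict:
#     registry = collect_format_types(["genie"])
#     # -> {'<format name>': <FileTypeFormat subclass>, ...}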
def _perform_validate(syn, args):
"""This is the main entry point to the genie command line tool."""
# Check parentid argparse
_check_parentid_permission_container(syn, args.parentid)
databasetosynid_mappingdf = process_functions.get_synid_database_mappingdf(
syn, project_id=args.project_id)
synid = databasetosynid_mappingdf.query('Database == "centerMapping"').Id
center_mapping = syn.tableQuery('select * from {}'.format(synid.iloc[0]))
center_mapping_df = center_mapping.asDataFrame()
# Check center argparse
_check_center_input(args.center, center_mapping_df.center.tolist())
args.oncotree_link = _get_oncotreelink(syn, databasetosynid_mappingdf,
oncotree_link=args.oncotree_link)
format_registry = collect_format_types(args.format_registry_packages)
logger.debug("Using {} file formats.".format(format_registry))
entity_list = [synapseclient.File(name=filepath, path=filepath,
parentId=None)
for filepath in args.filepath]
validator = GenieValidationHelper(syn=syn, project_id=args.project_id,
center=args.center,
entitylist=entity_list,
format_registry=format_registry,
file_type=args.filetype)
mykwargs = dict(oncotree_link=args.oncotree_link,
nosymbol_check=args.nosymbol_check,
project_id=args.project_id)
valid, message = validator.validate_single_file(**mykwargs)
# Upload to synapse if parentid is specified and valid
_upload_to_synapse(syn, args.filepath, valid, parentid=args.parentid)
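# Minimal usage sketch (illustrative only; the real CLI builds these args with
# argparse elsewhere, and the values shown are hypothetical). _perform_validate
# only needs an object exposing the attributes it reads above:
#     import argparse
#     args = argparse.Namespace(
#         parentid=None, project_id="syn000", center="SAGE",
#         filepath=["data_clinical_supp.txt"], filetype=None,
#         oncotree_link=None, nosymbol_check=False,
#         format_registry_packages=["genie"])
#     # _perform_validate(syn, args)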
|
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Sebastien Blot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import subprocess
import time
import libqtile
import libqtile.layout
import libqtile.bar
import libqtile.command
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.hook
import libqtile.confreader
from .conftest import whereis, BareConfig, no_xinerama
class ManagerConfig(object):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2),
libqtile.layout.tile.Tile(ratio=0.5),
libqtile.layout.max.Max()
]
floating_layout = libqtile.layout.floating.Floating(
float_rules=[dict(wmclass="xclock")])
keys = [
libqtile.config.Key(
["control"],
"k",
libqtile.command._Call([("layout", None)], "up")
),
libqtile.config.Key(
["control"],
"j",
libqtile.command._Call([("layout", None)], "down")
),
]
mouse = []
screens = [libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
],
20
),
)]
main = None
follow_mouse_focus = True
manager_config = pytest.mark.parametrize("qtile", [ManagerConfig], indirect=True)
@manager_config
def test_screen_dim(qtile):
# self.c.restart()
qtile.testXclock()
assert qtile.c.screen.info()["index"] == 0
assert qtile.c.screen.info()["x"] == 0
assert qtile.c.screen.info()["width"] == 800
assert qtile.c.group.info()["name"] == 'a'
assert qtile.c.group.info()["focus"] == 'xclock'
qtile.c.to_screen(1)
qtile.testXeyes()
assert qtile.c.screen.info()["index"] == 1
assert qtile.c.screen.info()["x"] == 800
assert qtile.c.screen.info()["width"] == 640
assert qtile.c.group.info()["name"] == 'b'
assert qtile.c.group.info()["focus"] == 'xeyes'
qtile.c.to_screen(0)
assert qtile.c.screen.info()["index"] == 0
assert qtile.c.screen.info()["x"] == 0
assert qtile.c.screen.info()["width"] == 800
assert qtile.c.group.info()["name"] == 'a'
assert qtile.c.group.info()["focus"] == 'xclock'
@pytest.mark.parametrize("xephyr", [{"xoffset": 0}], indirect=True)
@manager_config
def test_clone_dim(qtile):
self = qtile
self.testXclock()
assert self.c.screen.info()["index"] == 0
assert self.c.screen.info()["x"] == 0
assert self.c.screen.info()["width"] == 800
assert self.c.group.info()["name"] == 'a'
assert self.c.group.info()["focus"] == 'xclock'
assert len(self.c.screens()) == 1
@manager_config
def test_to_screen(qtile):
self = qtile
assert self.c.screen.info()["index"] == 0
self.c.to_screen(1)
assert self.c.screen.info()["index"] == 1
self.testWindow("one")
self.c.to_screen(0)
self.testWindow("two")
ga = self.c.groups()["a"]
assert ga["windows"] == ["two"]
gb = self.c.groups()["b"]
assert gb["windows"] == ["one"]
assert self.c.window.info()["name"] == "two"
self.c.next_screen()
assert self.c.window.info()["name"] == "one"
self.c.next_screen()
assert self.c.window.info()["name"] == "two"
self.c.prev_screen()
assert self.c.window.info()["name"] == "one"
@manager_config
def test_togroup(qtile):
self = qtile
self.testWindow("one")
with pytest.raises(libqtile.command.CommandError):
self.c.window.togroup("nonexistent")
assert self.c.groups()["a"]["focus"] == "one"
self.c.window.togroup("a")
assert self.c.groups()["a"]["focus"] == "one"
self.c.window.togroup("b")
assert self.c.groups()["b"]["focus"] == "one"
assert self.c.groups()["a"]["focus"] is None
self.c.to_screen(1)
self.c.window.togroup("c")
assert self.c.groups()["c"]["focus"] == "one"
@manager_config
def test_resize(qtile):
self = qtile
self.c.screen[0].resize(x=10, y=10, w=100, h=100)
for _ in range(10):
time.sleep(0.1)
d = self.c.screen[0].info()
if d["width"] == d["height"] == 100:
break
else:
raise AssertionError("Screen didn't resize")
assert d["x"] == d["y"] == 10
@no_xinerama
def test_minimal(qtile):
assert qtile.c.status() == "OK"
@manager_config
@no_xinerama
def test_events(qtile):
assert qtile.c.status() == "OK"
# FIXME: failing test disabled. For some reason we don't seem
# to have a keymap in Xnest or Xephyr 99% of the time.
@manager_config
@no_xinerama
def test_keypress(qtile):
self = qtile
self.testWindow("one")
self.testWindow("two")
v = self.c.simulate_keypress(["unknown"], "j")
assert v.startswith("Unknown modifier")
assert self.c.groups()["a"]["focus"] == "two"
self.c.simulate_keypress(["control"], "j")
assert self.c.groups()["a"]["focus"] == "one"
@manager_config
@no_xinerama
def test_spawn(qtile):
# Spawn something with a pid greater than init's
assert int(qtile.c.spawn("true")) > 1
@manager_config
@no_xinerama
def test_spawn_list(qtile):
# Spawn something with a pid greater than init's
assert int(qtile.c.spawn(["echo", "true"])) > 1
@manager_config
@no_xinerama
def test_kill_window(qtile):
qtile.testWindow("one")
qtile.testwindows = []
qtile.c.window[qtile.c.window.info()["id"]].kill()
qtile.c.sync()
for _ in range(20):
time.sleep(0.1)
if not qtile.c.windows():
break
else:
raise AssertionError("Window did not die...")
@manager_config
@no_xinerama
def test_kill_other(qtile):
self = qtile
self.c.group.setlayout("tile")
one = self.testWindow("one")
assert self.c.window.info()["width"] == 798
assert self.c.window.info()["height"] == 578
two = self.testWindow("two")
assert self.c.window.info()["name"] == "two"
assert self.c.window.info()["width"] == 398
assert self.c.window.info()["height"] == 578
assert len(self.c.windows()) == 2
self.kill_window(one)
for _ in range(10):
time.sleep(0.1)
if len(self.c.windows()) == 1:
break
else:
raise AssertionError("window did not die")
assert self.c.window.info()["name"] == "two"
assert self.c.window.info()["width"] == 798
assert self.c.window.info()["height"] == 578
@manager_config
@no_xinerama
def test_regression_groupswitch(qtile):
self = qtile
self.c.group["c"].toscreen()
self.c.group["d"].toscreen()
assert self.c.groups()["c"]["screen"] is None
@manager_config
@no_xinerama
def test_next_layout(qtile):
self = qtile
self.testWindow("one")
self.testWindow("two")
assert len(self.c.layout.info()["stacks"]) == 1
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.c.next_layout()
self.c.next_layout()
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 1
@manager_config
@no_xinerama
def test_setlayout(qtile):
self = qtile
assert not self.c.layout.info()["name"] == "max"
self.c.group.setlayout("max")
assert self.c.layout.info()["name"] == "max"
@manager_config
@no_xinerama
def test_adddelgroup(qtile):
self = qtile
self.testWindow("one")
self.c.addgroup("dummygroup")
self.c.addgroup("testgroup")
assert "testgroup" in self.c.groups().keys()
self.c.window.togroup("testgroup")
self.c.delgroup("testgroup")
assert "testgroup" not in self.c.groups().keys()
# Assert that the test window is still a member of some group.
assert sum(len(i["windows"]) for i in self.c.groups().values())
for i in list(self.c.groups().keys())[:-1]:
self.c.delgroup(i)
with pytest.raises(libqtile.command.CommandException):
self.c.delgroup(list(self.c.groups().keys())[0])
@manager_config
@no_xinerama
def test_delgroup(qtile):
self = qtile
self.testWindow("one")
for i in ['a', 'd', 'c']:
self.c.delgroup(i)
with pytest.raises(libqtile.command.CommandException):
self.c.delgroup('b')
@manager_config
@no_xinerama
def test_nextprevgroup(qtile):
self = qtile
start = self.c.group.info()["name"]
ret = self.c.screen.next_group()
assert self.c.group.info()["name"] != start
assert self.c.group.info()["name"] == ret
ret = self.c.screen.prev_group()
assert self.c.group.info()["name"] == start
@manager_config
@no_xinerama
def test_toggle_group(qtile):
self = qtile
self.c.group["a"].toscreen()
self.c.group["b"].toscreen()
self.c.screen.toggle_group("c")
assert self.c.group.info()["name"] == "c"
self.c.screen.toggle_group("c")
assert self.c.group.info()["name"] == "b"
self.c.screen.toggle_group()
assert self.c.group.info()["name"] == "c"
@manager_config
@no_xinerama
def test_inspect_xeyes(qtile):
self = qtile
self.testXeyes()
assert self.c.window.inspect()
@manager_config
@no_xinerama
def test_inspect_xterm(qtile):
self = qtile
self.testXterm()
assert self.c.window.inspect()["wm_class"]
@manager_config
@no_xinerama
def test_static(qtile):
self = qtile
self.testXeyes()
self.testWindow("one")
self.c.window[self.c.window.info()["id"]].static(0, 0, 0, 100, 100)
@manager_config
@no_xinerama
def test_match(qtile):
self = qtile
self.testXeyes()
assert self.c.window.match(wname="xeyes")
assert not self.c.window.match(wname="nonexistent")
@manager_config
@no_xinerama
def test_default_float(qtile):
self = qtile
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXclock()
assert self.c.group.info()['focus'] == 'xclock'
assert self.c.window.info()['width'] == 164
assert self.c.window.info()['height'] == 164
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
assert self.c.window.info()['floating'] is True
self.c.window.move_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 164
assert self.c.window.info()['height'] == 164
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
assert self.c.window.info()['floating'] is True
@manager_config
@no_xinerama
def test_last_float_size(qtile):
"""
When you re-float something it would be preferable to have it use the previous float size
"""
self = qtile
self.testXeyes()
assert self.c.window.info()['name'] == 'xeyes'
assert self.c.window.info()['width'] == 798
assert self.c.window.info()['height'] == 578
# float and it moves
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 150
assert self.c.window.info()['height'] == 100
# resize
self.c.window.set_size_floating(50, 90, 42, 42)
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
# back to not floating
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 798
assert self.c.window.info()['height'] == 578
# float again, should use last float size
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
# make sure it works through min and max
self.c.window.toggle_maximize()
self.c.window.toggle_minimize()
self.c.window.toggle_minimize()
self.c.window.toggle_floating()
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
@manager_config
@no_xinerama
def test_float_max_min_combo(qtile):
self = qtile
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
assert self.c.window.info()['floating'] is False
self.c.window.toggle_maximize()
assert self.c.window.info()['floating'] is True
assert self.c.window.info()['maximized'] is True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 580
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_minimize()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] is True
assert self.c.window.info()['minimized'] is True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 580
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_floating()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] is False
assert self.c.window.info()['minimized'] is False
assert self.c.window.info()['maximized'] is False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_fullscreen(qtile):
self = qtile
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['float_info'] == {
'y': 0, 'x': 400, 'width': 150, 'height': 100}
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_fullscreen()
assert self.c.window.info()['floating'] is True
assert self.c.window.info()['maximized'] is False
assert self.c.window.info()['fullscreen'] is True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 600
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_fullscreen()
assert self.c.window.info()['floating'] is False
assert self.c.window.info()['maximized'] is False
assert self.c.window.info()['fullscreen'] is False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_max(qtile):
self = qtile
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['float_info'] == {
'y': 0, 'x': 400, 'width': 150, 'height': 100}
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_maximize()
assert self.c.window.info()['floating'] is True
assert self.c.window.info()['maximized'] is True
assert self.c.window.info()['width'] == 800
assert self.c.window.info()['height'] == 580
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_maximize()
assert self.c.window.info()['floating'] is False
assert self.c.window.info()['maximized'] is False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_min(qtile):
self = qtile
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['float_info'] == {
'y': 0, 'x': 400, 'width': 150, 'height': 100}
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_minimize()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] is True
assert self.c.window.info()['minimized'] is True
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
self.c.window.toggle_minimize()
assert self.c.group.info()['focus'] == 'xeyes'
assert self.c.window.info()['floating'] is False
assert self.c.window.info()['minimized'] is False
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 400
assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_floating(qtile):
self = qtile
self.testXeyes()
assert self.c.window.info()['floating'] is False
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] is True
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] is False
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] is True
# change layout (should still be floating)
self.c.next_layout()
assert self.c.window.info()['floating'] is True
@manager_config
@no_xinerama
def test_floating_focus(qtile):
self = qtile
# change to 2 col stack
self.c.next_layout()
assert len(self.c.layout.info()["stacks"]) == 2
self.testXterm()
self.testXeyes()
# self.testWindow("one")
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
self.c.window.toggle_floating()
self.c.window.move_floating(10, 20, 42, 42)
assert self.c.window.info()['name'] == 'xeyes'
assert self.c.group.info()['focus'] == 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# change focus to xterm
self.c.group.next_window()
assert self.c.window.info()['width'] == 398
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['name'] != 'xeyes'
assert self.c.group.info()['focus'] != 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# focus back to xeyes
self.c.group.next_window()
assert self.c.window.info()['name'] == 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# now focusing via layout is borked (won't go to float)
self.c.layout.up()
assert self.c.window.info()['name'] != 'xeyes'
self.c.layout.up()
assert self.c.window.info()['name'] != 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
# focus back to xeyes
self.c.group.next_window()
assert self.c.window.info()['name'] == 'xeyes'
# check what stack thinks is focus
assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
@manager_config
@no_xinerama
def test_move_floating(qtile):
self = qtile
self.testXeyes()
# self.testWindow("one")
assert self.c.window.info()['width'] == 798
assert self.c.window.info()['height'] == 578
assert self.c.window.info()['x'] == 0
assert self.c.window.info()['y'] == 0
self.c.window.toggle_floating()
assert self.c.window.info()['floating'] is True
self.c.window.move_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 150
assert self.c.window.info()['height'] == 100
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
self.c.window.set_size_floating(50, 90, 42, 42)
assert self.c.window.info()['width'] == 50
assert self.c.window.info()['height'] == 90
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
self.c.window.resize_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 60
assert self.c.window.info()['height'] == 110
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
self.c.window.set_size_floating(10, 20, 42, 42)
assert self.c.window.info()['width'] == 10
assert self.c.window.info()['height'] == 20
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
# change layout (x, y should be same)
self.c.next_layout()
assert self.c.window.info()['width'] == 10
assert self.c.window.info()['height'] == 20
assert self.c.window.info()['x'] == 10
assert self.c.window.info()['y'] == 20
@manager_config
@no_xinerama
def test_screens(qtile):
self = qtile
assert len(self.c.screens())
@manager_config
@no_xinerama
def test_rotate(qtile):
self = qtile
self.testWindow("one")
s = self.c.screens()[0]
height, width = s["height"], s["width"]
subprocess.call(
[
"xrandr",
"--output", "default",
"-display", self.display,
"--rotate", "left"
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
for _ in range(10):
time.sleep(0.1)
s = self.c.screens()[0]
if s["width"] == height and s["height"] == width:
break
else:
raise AssertionError("Screen did not rotate")
# TODO: see note on test_resize
@manager_config
@no_xinerama
def test_resize_(qtile):
self = qtile
self.testWindow("one")
subprocess.call(
[
"xrandr",
"-s", "480x640",
"-display", self.display
]
)
for _ in range(10):
time.sleep(0.1)
d = self.c.screen.info()
if d["width"] == 480 and d["height"] == 640:
break
else:
raise AssertionError("Screen did not resize")
@manager_config
@no_xinerama
def test_focus_stays_on_layout_switch(qtile):
qtile.testWindow("one")
qtile.testWindow("two")
# switch to a double stack layout
qtile.c.next_layout()
# focus on a different window than the default
qtile.c.layout.next()
# toggle the layout
qtile.c.next_layout()
qtile.c.prev_layout()
assert qtile.c.window.info()['name'] == 'one'
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xeyes(qtile):
qtile.testXeyes()
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xterm(qtile):
qtile.testXterm()
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xterm_kill_window(qtile):
self = qtile
self.testXterm()
self.c.window.kill()
self.c.sync()
for _ in range(10):
time.sleep(0.1)
if not self.c.windows():
break
else:
raise AssertionError("xterm did not die")
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_map_request(qtile):
self = qtile
self.testWindow("one")
info = self.c.groups()["a"]
assert "one" in info["windows"]
assert info["focus"] == "one"
self.testWindow("two")
info = self.c.groups()["a"]
assert "two" in info["windows"]
assert info["focus"] == "two"
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_unmap(qtile):
self = qtile
one = self.testWindow("one")
two = self.testWindow("two")
three = self.testWindow("three")
info = self.c.groups()["a"]
assert info["focus"] == "three"
assert len(self.c.windows()) == 3
self.kill_window(three)
assert len(self.c.windows()) == 2
info = self.c.groups()["a"]
assert info["focus"] == "two"
self.kill_window(two)
assert len(self.c.windows()) == 1
info = self.c.groups()["a"]
assert info["focus"] == "one"
self.kill_window(one)
assert len(self.c.windows()) == 0
info = self.c.groups()["a"]
assert info["focus"] is None
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_setgroup(qtile):
self = qtile
self.testWindow("one")
self.c.group["b"].toscreen()
self.groupconsistency()
if len(self.c.screens()) == 1:
assert self.c.groups()["a"]["screen"] is None
else:
assert self.c.groups()["a"]["screen"] == 1
assert self.c.groups()["b"]["screen"] == 0
self.c.group["c"].toscreen()
self.groupconsistency()
assert self.c.groups()["c"]["screen"] == 0
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_unmap_noscreen(qtile):
self = qtile
self.testWindow("one")
pid = self.testWindow("two")
assert len(self.c.windows()) == 2
self.c.group["c"].toscreen()
self.groupconsistency()
self.c.status()
assert len(self.c.windows()) == 2
self.kill_window(pid)
assert len(self.c.windows()) == 1
assert self.c.groups()["a"]["focus"] == "one"
def test_init():
with pytest.raises(libqtile.manager.QtileError):
libqtile.config.Key([], "unknown", libqtile.command._Call("base", None, "foo"))
with pytest.raises(libqtile.manager.QtileError):
libqtile.config.Key(["unknown"], "x", libqtile.command._Call("base", None, "foo"))
class TScreen(libqtile.config.Screen):
def setGroup(self, x, save_prev=True):
pass
def test_dx():
s = TScreen(left=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dx == 10
def test_dwidth():
s = TScreen(left=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dwidth == 90
s.right = libqtile.bar.Gap(10)
assert s.dwidth == 80
def test_dy():
s = TScreen(top=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dy == 10
def test_dheight():
s = TScreen(top=libqtile.bar.Gap(10))
s._configure(None, 0, 0, 0, 100, 100, None)
assert s.dheight == 90
s.bottom = libqtile.bar.Gap(10)
assert s.dheight == 80
class _Config(object):
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2)
]
floating_layout = libqtile.layout.floating.Floating()
keys = [
libqtile.config.Key(
["control"],
"k",
libqtile.command._Call([("layout", None)], "up")
),
libqtile.config.Key(
["control"],
"j",
libqtile.command._Call([("layout", None)], "down")
),
]
mouse = []
screens = [libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
],
20
),
)]
auto_fullscreen = True
class ClientNewStaticConfig(_Config):
@staticmethod
def main(c):
def client_new(c):
c.static(0)
libqtile.hook.subscribe.client_new(client_new)
clientnew_config = pytest.mark.parametrize("qtile", [ClientNewStaticConfig], indirect=True)
@clientnew_config
def test_minimal_(qtile):
self = qtile
a = self.testWindow("one")
self.kill_window(a)
@pytest.mark.skipif(whereis("gkrellm") is None, reason="gkrellm not found")
@clientnew_config
def test_gkrellm(qtile):
qtile.testGkrellm()
time.sleep(0.1)
class ToGroupConfig(_Config):
@staticmethod
def main(c):
def client_new(c):
c.togroup("d")
libqtile.hook.subscribe.client_new(client_new)
togroup_config = pytest.mark.parametrize("qtile", [ToGroupConfig], indirect=True)
@togroup_config
def test_minimal__(qtile):
qtile.c.group["d"].toscreen()
qtile.c.group["a"].toscreen()
a = qtile.testWindow("one")
assert len(qtile.c.group["d"].info()["windows"]) == 1
qtile.kill_window(a)
@manager_config
def test_colorPixel(qtile):
# test for #394
qtile.c.eval("self.colorPixel(\"ffffff\")")
|
|
import random
import string
import typing # noqa
import pyparsing as pp
import mitmproxy.net.websockets
from mitmproxy.utils import strutils
from . import base, generators, actions, message
NESTED_LEADER = b"pathod!"
class WF(base.CaselessLiteral):
TOK = "wf"
class OpCode(base.IntField):
names = {
"continue": mitmproxy.net.websockets.OPCODE.CONTINUE,
"text": mitmproxy.net.websockets.OPCODE.TEXT,
"binary": mitmproxy.net.websockets.OPCODE.BINARY,
"close": mitmproxy.net.websockets.OPCODE.CLOSE,
"ping": mitmproxy.net.websockets.OPCODE.PING,
"pong": mitmproxy.net.websockets.OPCODE.PONG,
} # type: typing.Dict[str, int]
max = 15
preamble = "c"
class Body(base.Value):
preamble = "b"
class RawBody(base.Value):
unique_name = "body"
preamble = "r"
class Fin(base.Boolean):
name = "fin"
class RSV1(base.Boolean):
name = "rsv1"
class RSV2(base.Boolean):
name = "rsv2"
class RSV3(base.Boolean):
name = "rsv3"
class Mask(base.Boolean):
name = "mask"
class Key(base.FixedLengthValue):
preamble = "k"
length = 4
class KeyNone(base.CaselessLiteral):
unique_name = "key"
TOK = "knone"
class Length(base.Integer):
bounds = (0, 1 << 64)
preamble = "l"
class Times(base.Integer):
preamble = "x"
COMPONENTS = [
OpCode,
Length,
# Bit flags
Fin,
RSV1,
RSV2,
RSV3,
Mask,
actions.PauseAt,
actions.DisconnectAt,
actions.InjectAt,
KeyNone,
Key,
Times,
Body,
RawBody,
]
class WebsocketFrame(message.Message):
components = COMPONENTS # type: typing.List[typing.Type[base._Component]]
logattrs = ["body"]
# Used for nested frames
unique_name = "body"
@property
def actions(self):
return self.toks(actions._Action)
@property
def body(self):
return self.tok(Body)
@property
def rawbody(self):
return self.tok(RawBody)
@property
def opcode(self):
return self.tok(OpCode)
@property
def fin(self):
return self.tok(Fin)
@property
def rsv1(self):
return self.tok(RSV1)
@property
def rsv2(self):
return self.tok(RSV2)
@property
def rsv3(self):
return self.tok(RSV3)
@property
def mask(self):
return self.tok(Mask)
@property
def key(self):
return self.tok(Key)
@property
def knone(self):
return self.tok(KeyNone)
@property
def times(self):
return self.tok(Times)
@property
def toklength(self):
return self.tok(Length)
@classmethod
def expr(cls):
parts = [i.expr() for i in cls.components]
atom = pp.MatchFirst(parts)
resp = pp.And(
[
WF.expr(),
base.Sep,
pp.ZeroOrMore(base.Sep + atom)
]
)
resp = resp.setParseAction(cls)
return resp
@property
def nested_frame(self):
return self.tok(NestedFrame)
def resolve(self, settings, msg=None):
tokens = self.tokens[:]
if not self.mask and settings.is_client:
tokens.append(
Mask(True)
)
if not self.knone and self.mask and self.mask.value and not self.key:
allowed_chars = string.ascii_letters + string.digits
k = ''.join([allowed_chars[random.randrange(0, len(allowed_chars))] for i in range(4)])
tokens.append(
Key(base.TokValueLiteral(k))
)
return self.__class__(
[i.resolve(settings, self) for i in tokens]
)
def values(self, settings):
if self.body:
bodygen = self.body.value.get_generator(settings)
length = len(self.body.value.get_generator(settings))
elif self.rawbody:
bodygen = self.rawbody.value.get_generator(settings)
length = len(self.rawbody.value.get_generator(settings))
elif self.nested_frame:
bodygen = NESTED_LEADER + strutils.always_bytes(self.nested_frame.parsed.spec())
length = len(bodygen)
else:
bodygen = None
length = 0
if self.toklength:
length = int(self.toklength.value)
frameparts = dict(
payload_length=length
)
if self.mask and self.mask.value:
frameparts["mask"] = True
if self.knone:
frameparts["masking_key"] = None
elif self.key:
key = self.key.values(settings)[0][:]
frameparts["masking_key"] = key
for i in ["opcode", "fin", "rsv1", "rsv2", "rsv3", "mask"]:
v = getattr(self, i, None)
if v is not None:
frameparts[i] = v.value
frame = mitmproxy.net.websockets.FrameHeader(**frameparts)
vals = [bytes(frame)]
if bodygen:
if frame.masking_key and not self.rawbody:
masker = mitmproxy.net.websockets.Masker(frame.masking_key)
vals.append(
generators.TransformGenerator(
bodygen,
masker.mask
)
)
else:
vals.append(bodygen)
return vals
def spec(self):
return ":".join([i.spec() for i in self.tokens])
class NestedFrame(message.NestedMessage):
preamble = "f"
nest_type = WebsocketFrame
class WebsocketClientFrame(WebsocketFrame):
components = COMPONENTS + [NestedFrame]
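# Illustrative note (not part of the original module): reading the grammar
# above, a websocket frame spec is the "wf" token followed by ":"-separated
# components whose preambles come from the classes above ("c" opcode,
# "l" length, "b" body, "r" raw body, "k"/"knone" masking key, "x" times,
# "f" nested frame). For example, something like
#     wf:c1:b'hello'
# should select opcode 1 (TEXT) and a literal body; WebsocketFrame.spec()
# re-serialises a parsed frame back into this colon-joined form.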
|
|
#
# Copyright (c) 2012 Atis Elsts
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .structures import *
# Arithmetic functions -- "sum", "plus" (add), "minus" (subtract), "times" (multiply), "divide", "modulo", "difference", "abs", "neg", "invert", "square", "sqrt", "power"
# Data aggregation -- "min", "max", "average" (avg), "stdev" (std), "ewma", "changed"
# Signal processing -- "sharpen" (contrast), "smoothen" (blur), "map"
# Filtering -- "match", "filterRange", "filterEqual", "filterNotEqual", "filterLess", "filterLessOrEqual", "filterMore", "filterMoreOrEqual", "invertfilter"
# Subset selection & special purpose -- "take", "tuple", "sync", "if"
# this global dictionary holds all functions by name
functions = {}
class SealFunction(object):
def __init__(self, name):
global functions
functions[name.lower()] = self
self.name = name
# parameters
self.aggregate = False # can have subset-selection or "tuple" function as argument?
self.special = False # special-purpose?
self.alias = None # if set, can be referred by this name in SEAL code
self.group = "" # function class (documentation only)
self.repeatedArguments = False # does this function take a variable number of arguments?
self.arguments = []
def getArgumentByName(self, name):
for arg in self.arguments:
if arg.name.lower() == name:
return arg
return None
def getArgumentByPosition(self, pos):
if pos < len(self.arguments):
return self.arguments[pos]
return None
def declaration(self):
result = self.name
result += "("
result += ", ".join(map(str, self.arguments))
result += ")"
return result
class SealArgument(object):
def __init__(self, name, constantOnly = False, defaultValue = None, repeated = False):
self.name = name
# must the value be known at compile time (i.e. not read from a sensor)?
self.constantOnly = constantOnly
# optional arguments can be skipped; they must have default values
self.defaultValue = defaultValue
# some functions (e.g. "sum(1, 2, 3)") can have arguments repatead zero to many times
self.repeated = repeated
def __str__(self):
return self.name
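# Illustrative sketch (not part of the original module): once the
# registrations below have run, the global registry can be queried by
# lower-cased function name, e.g.
#     functions["plus"].declaration()              # -> "plus(arg1, arg2)"
#     functions["plus"].getArgumentByName("arg2")  # -> the "arg2" SealArgument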
# --------------------------------------------------
f = SealFunction("sum")
f.group = "arithmetic" # or aggregate
f.aggregate = True
f.repeatedArguments = True
f.arguments.append(SealArgument("value", repeated = True))
f = SealFunction("plus")
f.group = "arithmetic"
f.alias = "add"
f.arguments.append(SealArgument("arg1"))
f.arguments.append(SealArgument("arg2"))
f = SealFunction("minus")
f.group = "arithmetic"
f.alias = "subtract"
f.arguments.append(SealArgument("arg1"))
f.arguments.append(SealArgument("arg2"))
f = SealFunction("times")
f.group = "arithmetic"
f.alias = "multiply"
f.arguments.append(SealArgument("arg1"))
f.arguments.append(SealArgument("arg2"))
f = SealFunction("divide")
f.group = "arithmetic"
f.arguments.append(SealArgument("arg1"))
f.arguments.append(SealArgument("arg2"))
f = SealFunction("modulo")
f.group = "arithmetic"
f.arguments.append(SealArgument("arg1"))
f.arguments.append(SealArgument("arg2"))
f = SealFunction("difference")
f.group = "arithmetic"
f.arguments.append(SealArgument("arg1"))
f.arguments.append(SealArgument("arg2"))
f = SealFunction("abs")
f.group = "arithmetic"
f.arguments.append(SealArgument("value"))
f = SealFunction("neg")
f.group = "arithmetic"
f.arguments.append(SealArgument("value"))
f = SealFunction("invert")
f.group = "arithmetic"
f.arguments.append(SealArgument("value"))
f = SealFunction("square")
f.group = "arithmetic"
f.arguments.append(SealArgument("value"))
f = SealFunction("sqrt")
f.group = "arithmetic"
f.arguments.append(SealArgument("value"))
f = SealFunction("power")
f.group = "arithmetic"
f.arguments.append(SealArgument("base"))
f.arguments.append(SealArgument("exponent"))
# --------------------------------------------------
f = SealFunction("min")
f.group = "aggregation"
f.aggregate = True
f.repeatedArguments = True
f.arguments.append(SealArgument("value", repeated = True))
f = SealFunction("max")
f.group = "aggregation"
f.aggregate = True
f.repeatedArguments = True
f.arguments.append(SealArgument("value", repeated = True))
f = SealFunction("average")
f.group = "aggregation"
f.aggregate = True
f.alias = "avg"
f.arguments.append(SealArgument("value"))
f = SealFunction("stdev")
f.group = "aggregation"
f.aggregate = True
f.alias = "std"
f.arguments.append(SealArgument("value"))
f = SealFunction("variance")
f.group = "aggregation"
f.aggregate = True
f.arguments.append(SealArgument("value"))
f = SealFunction("ewma") # exponentially weighted moving average
f.group = "aggregation"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("alpha", constantOnly = True, defaultValue = Value(0.1)))
f = SealFunction("changed")
f.group = "aggregation"
f.aggregate = True
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("milliseconds", constantOnly = True, defaultValue = Value(10000)))
# --------------------------------------------------
f = SealFunction("map")
f.group = "signal processing"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("fromRangeLow", constantOnly = True))
f.arguments.append(SealArgument("fromRangeHigh", constantOnly = True))
f.arguments.append(SealArgument("toRangeLow", constantOnly = True))
f.arguments.append(SealArgument("toRangeHigh", constantOnly = True))
f = SealFunction("sharpen")
f.group = "signal processing"
f.alias = "contrast"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("numSamples", constantOnly = True, defaultValue = Value(3)))
f.arguments.append(SealArgument("weight", constantOnly = True, defaultValue = Value(1)))
f = SealFunction("smoothen")
f.group = "signal processing"
f.alias = "blur"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("numSamples", constantOnly = True, defaultValue = Value(3)))
f.arguments.append(SealArgument("weight", constantOnly = True, defaultValue = Value(1)))
# --------------------------------------------------
f = SealFunction("match")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
# XXX: only a string (pattern name) is allowed. this is not validated ATM
f.arguments.append(SealArgument("pattern"))
f = SealFunction("filterRange")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("thresholdMin", constantOnly = True))
f.arguments.append(SealArgument("thresholdMax", constantOnly = True))
f = SealFunction("filterEqual")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("threshold", constantOnly = True))
f = SealFunction("filterNotEqual")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("threshold", constantOnly = True))
f = SealFunction("filterLess")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("threshold", constantOnly = True))
f = SealFunction("filterLessOrEqual")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("threshold", constantOnly = True))
f = SealFunction("filterMore")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("threshold", constantOnly = True))
f = SealFunction("filterMoreOrEqual")
f.group = "filtering"
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("threshold", constantOnly = True))
f = SealFunction("invertFilter")
f.group = "filtering"
f.arguments.append(SealArgument("filteredValue"))
# --------------------------------------------------
# Take a number of single sensor values
f = SealFunction("take")
f.group = "special"
f.special = True
f.arguments.append(SealArgument("value"))
f.arguments.append(SealArgument("numberToTake", constantOnly = True))
# time in milliseconds; if not set, time is not taken into account
f.arguments.append(SealArgument("timeToTake", constantOnly = True, defaultValue = Value(0)))
# Create a tuple with a number of different sensor values
f = SealFunction("tuple")
f.group = "special"
f.special = True
f.repeatedArguments = True
f.arguments.append(SealArgument("value", repeated = True))
# Synchronize sensor reading (must be at the top level)
f = SealFunction("sync")
f.group = "special"
f.special = True
f.repeatedArguments = True
f.arguments.append(SealArgument("value", repeated = True))
# Logical IF (like in Excel)
f = SealFunction("if")
f.group = "special"
f.special = True
f.arguments.append(SealArgument("condition"))
f.arguments.append(SealArgument("ifPart"))
f.arguments.append(SealArgument("elsePart", defaultValue = Value(0)))
# --------------------------------------------------
def resolveAlias(funName):
funName = funName.lower()
if funName == "add": return "plus"
if funName == "subtract": return "minus"
if funName == "multiply": return "times"
if funName == "avg": return "average"
if funName == "std": return "stdev"
if funName == "contrast": return "sharpen"
if funName == "blur": return "smoothen"
return funName
def validateFunction(functionTree):
funName = functionTree.function
fun = functions.get(resolveAlias(funName))
if fun is None:
return (False, "Unhandled function {}()\n".format(funName))
givenArgs = {}
for i in range(len(functionTree.arguments)):
givenArg = functionTree.arguments[i]
if givenArg.parameterName:
formalArg = fun.getArgumentByName(givenArg.parameterName)
if formalArg is None:
return (False, "Unknown named argument {} for function {}\n".format(
givenArg.parameterName, fun.declaration()))
else:
# TODO: do not allow mixing positional and named args (named args should always come last)
formalArg = fun.getArgumentByPosition(i)
if formalArg is None:
if fun.repeatedArguments:
continue
return (False, "Too many arguments for function {}\n".format(
fun.declaration()))
if formalArg.name not in givenArgs:
givenArgs[formalArg.name] = [givenArg]
else:
givenArgs[formalArg.name].append(givenArg)
i = 0
for f in fun.arguments:
i += 1
g = givenArgs.get(f.name)
if g is None:
if not f.defaultValue:
return (False, "{} argument ('{}') of function {} is not optional\n".format(
toTitleCase(orderNumToString(i)), f.name, fun.declaration()))
# append the default value to real arguments
functionTree.arguments.append(FunctionTree(f.defaultValue, []))
continue
if len(g) > 1:
if not f.repeated:
return (False, "{} argument ('{}') of function {} repeated more than once\n".format(
toTitleCase(orderNumToString(i)), f.name, fun.declaration()))
# OK, argument expected and found. check its value.
if f.constantOnly:
for v in g:
if v.asConstant() is None:
return (False, "{} argument ('{}') of function {} is expected to be a constant!\n".format(
toTitleCase(orderNumToString(i)), f.name, fun.declaration()))
return (True, None)
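# --------------------------------------------------
# Minimal usage sketch (illustration only, not part of the SEAL compiler):
# it only exercises the registry and helper methods defined above;
# validateFunction() is not called because it needs a parsed FunctionTree.
if __name__ == "__main__":
ewma = functions[resolveAlias("ewma")]
print(ewma.declaration()) # "ewma(value, alpha)"
print(ewma.getArgumentByName("alpha").constantOnly) # True
print(resolveAlias("avg")) # "average"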
|
|
class BSTree:
def __init__(self, value, comp):
"""
Creates a new binary search tree (BST) data structure
Args:
value: The data of the root node of the BST
comp(new_element, root): The comparison function for maintaining order
Operations:
insert: Insert a new element into the BST
search: Find an element in the BST
remove: Remove an element from the BST
find_leftmost: Find the minimum value in the BST
find_rightmost: Find the maximum value in the BST
pre_order: Gets the pre-order traversal
in_order: Gets the in-order traversal
post_order: Gets the post-order traversal
"""
self.comp = comp
self.data = value
self.parent = None
self.left = None
self.right = None
def insert(self, element, visual = False, visual_string = "Insertion:\t"):
"""
Inserts an element into the Binary Search Tree with n elements
Args:
element: The new value to be inserted
visual: Set to True for additional screen output
Time:
O(log n) [Average Case]
O(n) [Worst Case]
Explanation:
Recursively attempt to insert into left subtree or right subtree
"""
if visual:
visual_string += " {}".format(self.data)
if self.comp(element, self.data):
if self.left == None:
if visual:
print(visual_string + "L --> {}".format(element))
self.left = BSTree(element, self.comp)
self.left.parent = self
else:
if visual:
visual_string += "L -->"
self.left.insert(element, visual, visual_string)
else:
if self.right == None:
if visual:
print(visual_string + "R --> {}".format(element))
self.right = BSTree(element, self.comp)
self.right.parent = self
else:
if visual:
visual_string += "R -->"
self.right.insert(element, visual, visual_string)
def search(self, element, visual = False, visual_string = "Searching:\t"):
"""
Finds an element in the Binary Search Tree with n elements
Args:
element: The value to search for
visual: Set to True for additional screen output
Time:
O(log n) [Average Case]
O(n) [Worst Case]
Returns:
The BST subtree with root containing the element if found,
None otherwise
Explanation:
Recursively attempt to find element in left subtree or right subtree
"""
visual_string += " {}".format(self.data)
if self.data == element:
if visual:
print(visual_string + " [Found]")
return self
elif self.comp(element, self.data):
if self.left != None:
if visual:
visual_string += "L -->"
return self.left.search(element, visual, visual_string)
else:
if visual:
print(visual_string + " [Not Found]")
return None
else:
if self.right != None:
if visual:
visual_string += "R -->"
return self.right.search(element, visual, visual_string)
else:
if visual:
print(visual_string + " [Not Found]")
return None
def find_leftmost(self, visual = False):
"""
Finds the "left-most" (minimum) element in the Binary Search Tree with n elements
Args:
visual: Set to True for additional screen output
Time:
O(log n) [Average Case]
O(n) [Worst Case]
Returns:
The left-most element
Explanation:
Recursively traverse down the left subtree
"""
if self.left == None:
if visual:
print("Minimum value {} found".format(self.data))
return self.data
else:
return self.left.find_leftmost(visual)
def find_rightmost(self, visual = False):
"""
Finds the "right-most" (maximum) element in the Binary Search Tree with n elements
Args:
visual: Set to True for additional screen output
Time:
O(log n) [Average Case]
O(n) [Worst Case]
Returns:
The right-most element
Explanation:
Recursively traverse down the right subtree
"""
if self.right == None:
if visual:
print("Maximum value {} found".format(self.data))
return self.data
else:
return self.right.find_rightmost(visual)
def remove(self, element, visual = False, visual_string = "Removal:\t"):
"""
Removes an element from a Binary Search Tree with n elements
Args:
element: The target element to remove
visual: Set to True for additional screen output
Time:
O(log n) [Average Case]
O(n) [Worst Case]
Explanation:
1) Find the element to be removed in the BST
2) If node has no children, remove the node
3) If node has only 1 child, replace the node with its child
4) If node has 2 children, replace node value with in-order successor,
then recursively remove the node containing the in-order successor
"""
if visual:
visual_string += " {}".format(self.data)
if self.data == element:
if self.left == None or self.right == None:
# At most 1 child: splice that child (possibly None) into the parent's slot.
# Note: this only works for non-root nodes, since the node relies on
# self.parent to unlink itself.
child = self.left if self.right == None else self.right
if child != None:
child.parent = self.parent
if self.parent.left == self:
self.parent.left = child
else:
self.parent.right = child
if visual:
print(visual_string + " [Removed]")
else:
# 2 Child -> replace with in-order successor
self.data = self.right.find_leftmost()
if visual:
visual_string += "R [Replaced with {}] -->".format(self.data)
self.right.remove(self.data, visual, visual_string)
else:
if self.comp(element, self.data):
if self.left != None:
if visual:
visual_string += "L -->"
self.left.remove(element, visual, visual_string)
elif visual:
print("{} not found in BST".format(element))
else:
if self.right != None:
if visual:
visual_string += "R -->"
self.right.remove(element, visual, visual_string)
elif visual:
print("{} not found in BST".format(element))
def pre_order(self, result = None):
"""
Generates the pre-order traversal sequence for a given BST
Time:
O(n)
Returns:
A list containing the pre-order traversal of the BST
Explanation:
1) Append data of root
2) Repeat on left subtree
3) Repeat on right subtree
"""
if result is None:
result = []
result.append(self.data)
if self.left != None:
result = self.left.pre_order(result)
if self.right != None:
result = self.right.pre_order(result)
return result
def in_order(self, result = None):
"""
Generates the in-order traversal sequence for a given BST.
Time:
O(n)
Returns:
A list containing the in-order traversal of the BST
Explanation:
1) Repeat on left subtree
2) Append data of root
3) Repeat on right subtree
Note: Will be sorted based on comparison function of BST
"""
if result is None:
result = []
if self.left != None:
result = self.left.in_order(result)
result.append(self.data)
if self.right != None:
result = self.right.in_order(result)
return result
def post_order(self, result = None):
"""
Generates the post-order traversal sequence for a given BST.
Time:
O(n)
Returns:
A list containing the post-order traversal of the BST
Explanation:
1) Repeat on left subtree
2) Repeat on right subtree
3) Append data of root
"""
if result is None:
result = []
if self.left != None:
result = self.left.post_order(result)
if self.right != None:
result = self.right.post_order(result)
result.append(self.data)
return result
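# Minimal usage sketch (illustration only): an ascending-order BST built with
# a "less than" comparator, exercising insert, search, traversal and removal.
if __name__ == "__main__":
tree = BSTree(8, lambda new, root: new < root)
for v in (3, 10, 1, 6, 14):
tree.insert(v)
print(tree.in_order([])) # [1, 3, 6, 8, 10, 14]
print(tree.find_leftmost()) # 1
print(tree.search(6) is not None) # True (returns the matching subtree)
tree.remove(14)
print(tree.in_order([])) # [1, 3, 6, 8, 10]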
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
from contextlib import contextmanager
from pants.cache.artifact import TarballArtifact
from pants.cache.artifact_cache import ArtifactCache, UnreadableArtifact
from pants.util.contextutil import temporary_file
from pants.util.dirutil import (
safe_delete,
safe_mkdir,
safe_mkdir_for,
safe_rm_oldest_items_in_dir,
safe_rmtree,
)
logger = logging.getLogger(__name__)
class BaseLocalArtifactCache(ArtifactCache):
def __init__(
self,
artifact_root,
artifact_extraction_root,
compression,
permissions=None,
dereference=True,
):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
:param str artifact_extraction_root: The path to where we should extract artifacts. Usually a reified artifact_path.
:param int compression: The gzip compression level for created artifacts.
Valid values are 0-9.
:param str permissions: File permissions to use when creating artifact files.
:param bool dereference: Dereference symlinks when creating the cache tarball.
"""
super().__init__(artifact_root, artifact_extraction_root)
self._compression = compression
self._cache_root = None
self._permissions = permissions
self._dereference = dereference
def _artifact(self, path):
return TarballArtifact(
self.artifact_root,
self.artifact_extraction_root,
path,
self._compression,
dereference=self._dereference,
)
@contextmanager
def _tmpfile(self, cache_key, use):
"""Allocate tempfile on same device as cache with a suffix chosen to prevent collisions."""
with temporary_file(
suffix=cache_key.id + use, root_dir=self._cache_root, permissions=self._permissions
) as tmpfile:
yield tmpfile
@contextmanager
def insert_paths(self, cache_key, paths):
"""Gather paths into artifact, store it, and yield the path to stored artifact tarball."""
with self._tmpfile(cache_key, "write") as tmp:
self._artifact(tmp.name).collect(paths)
yield self._store_tarball(cache_key, tmp.name)
def store_and_use_artifact(self, cache_key, src, results_dir=None):
"""Store and then extract the artifact from the given `src` iterator for the given
cache_key.
:param cache_key: Cache key for the artifact.
:param src: Iterator over binary data to store for the artifact.
:param str results_dir: The path to the expected destination of the artifact extraction: will
be cleared both before extraction, and after a failure to extract.
"""
with self._tmpfile(cache_key, "read") as tmp:
for chunk in src:
tmp.write(chunk)
tmp.close()
tarball = self._store_tarball(cache_key, tmp.name)
artifact = self._artifact(tarball)
# NOTE(mateo): The two clean=True args passed in this method are likely safe, since the cache will by
# definition be dealing with unique results_dir, as opposed to the stable vt.results_dir (aka 'current').
# But if by chance it's passed the stable results_dir, safe_mkdir(clean=True) will silently convert it
# from a symlink to a real dir and cause mysterious 'Operation not permitted' errors until the workdir is cleaned.
if results_dir is not None:
safe_mkdir(results_dir, clean=True)
try:
artifact.extract()
except Exception:
# Do our best to clean up after a failed artifact extraction. If a results_dir has been
# specified, it is "expected" to represent the output destination of the extracted
# artifact, and so removing it should clear any partially extracted state.
if results_dir is not None:
safe_mkdir(results_dir, clean=True)
safe_delete(tarball)
raise
return True
def _store_tarball(self, cache_key, src):
"""Given a src path to an artifact tarball, store it and return stored artifact's path."""
pass
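# (Both subclasses below override _store_tarball; the base implementation
# intentionally stores nothing.)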
class LocalArtifactCache(BaseLocalArtifactCache):
"""An artifact cache that stores the artifacts in local files."""
def __init__(
self,
artifact_root,
artifact_extraction_root,
cache_root,
compression,
max_entries_per_target=None,
permissions=None,
dereference=True,
):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
:param str artifact_extraction_root: The path to where we should extract artifacts. Usually a reified artifact_path.
:param str cache_root: The locally cached files are stored under this directory.
:param int compression: The gzip compression level for created artifacts (1-9 or false-y).
:param int max_entries_per_target: The maximum number of old cache files to leave behind on a cache miss.
:param str permissions: File permissions to use when creating artifact files.
:param bool dereference: Dereference symlinks when creating the cache tarball.
"""
super().__init__(
artifact_root,
artifact_extraction_root,
compression,
permissions=int(permissions.strip(), base=8) if permissions else None,
dereference=dereference,
)
self._cache_root = os.path.realpath(os.path.expanduser(cache_root))
self._max_entries_per_target = max_entries_per_target
safe_mkdir(self._cache_root)
def prune(self, root):
"""Prune stale cache files.
If the option --cache-target-max-entry is greater than zero, then prune will remove all but n
old cache files for each target/task.
:param str root: The path under which cacheable artifacts will be cleaned
"""
max_entries_per_target = self._max_entries_per_target
if os.path.isdir(root) and max_entries_per_target:
safe_rm_oldest_items_in_dir(root, max_entries_per_target)
def has(self, cache_key):
return self._artifact_for(cache_key).exists()
def _artifact_for(self, cache_key):
return self._artifact(self._cache_file_for_key(cache_key))
def use_cached_files(self, cache_key, results_dir=None):
tarfile = self._cache_file_for_key(cache_key)
try:
artifact = self._artifact_for(cache_key)
if artifact.exists():
if results_dir is not None:
safe_rmtree(results_dir)
artifact.extract()
return True
except Exception as e:
# TODO(davidt): Consider being more granular in what is caught.
logger.warning(
"Error while reading {0} from local artifact cache: {1}".format(tarfile, e)
)
safe_delete(tarfile)
return UnreadableArtifact(cache_key, e)
return False
def try_insert(self, cache_key, paths):
with self.insert_paths(cache_key, paths):
pass
def delete(self, cache_key):
safe_delete(self._cache_file_for_key(cache_key))
def _store_tarball(self, cache_key, src):
dest = self._cache_file_for_key(cache_key)
safe_mkdir_for(dest)
os.rename(src, dest)
if self._permissions:
os.chmod(dest, self._permissions)
self.prune(os.path.dirname(dest)) # Remove old cache files.
return dest
def _cache_file_for_key(self, cache_key):
# Note: it's important to use the id as well as the hash, because two different targets
# may have the same hash if both have no sources, but we may still want to differentiate them.
return os.path.join(self._cache_root, cache_key.id, cache_key.hash) + ".tgz"
class TempLocalArtifactCache(BaseLocalArtifactCache):
"""A local cache that does not actually store any files between calls.
This implementation does not have a backing _cache_root, and never actually stores files between
calls, but is useful for handling file IO for a remote cache.
"""
def __init__(self, artifact_root, artifact_extraction_root, compression, permissions=None):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
"""
super().__init__(
artifact_root,
artifact_extraction_root,
compression=compression,
permissions=permissions,
)
def _store_tarball(self, cache_key, src):
return src
def has(self, cache_key):
return False
def use_cached_files(self, cache_key, results_dir=None):
return False
def delete(self, cache_key):
pass
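# Usage sketch (illustration only): assumes pants is importable and uses a
# hypothetical namedtuple as a stand-in for pants' CacheKey, which only needs
# `id` and `hash` here; the paths and compression level are arbitrary.
if __name__ == "__main__":
from collections import namedtuple
FakeCacheKey = namedtuple("FakeCacheKey", ["id", "hash"])
cache = LocalArtifactCache(
artifact_root="/tmp/pants-artifacts",
artifact_extraction_root="/tmp/pants-artifacts",
cache_root="/tmp/pants-local-cache",
compression=5,
)
key = FakeCacheKey(id="scala.compile", hash="deadbeef")
# Entries live at <cache_root>/<key.id>/<key.hash>.tgz
print(cache._cache_file_for_key(key))
print(cache.has(key)) # False until an artifact is inserted for this key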
|
|
from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
reduce, itervalues, zip, string_types,
range)
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
import datetime
import textwrap
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
def __init__(self):
self.tables = []
self.settings = []
self.extra_txt = []
self.title = None
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
'''Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header: bool
Reproduce the DataFrame column labels in summary table
index: bool
Reproduce the DataFrame row labels in summary table
float_format: string
Float formatting applied to the data columns
align : string
Data alignment (l/c/r)
'''
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings)
def add_array(self, array, align='r', float_format="%.4f"):
'''Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format: string
Float formatting applied to the array if its values are floats
align : string
Data alignment (l/c/r)
'''
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align)
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
'''Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols: int
Number of columns of the output table
align : string
Data alignment (l/c/r)
'''
keys = [_formatter(x, float_format) for x in iterkeys(d)]
vals = [_formatter(x, float_format) for x in itervalues(d)]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
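# e.g. with ncols=2, four key/value rows (shape (4, 2)) have now been folded
# into a (2, 4) table holding two key/value pairs per displayed row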
self.add_array(data, align=align)
def add_text(self, string):
'''Append a note to the bottom of the summary table. In ASCII tables,
the note will be wrapped to table width. Notes are not indented.
'''
self.extra_txt.append(string)
def add_title(self, title=None, results=None):
'''Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
'''
if isinstance(title, string_types):
self.title = title
else:
try:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
except Exception:
self.title = ''
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
'''Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
float_format : string
Float formatting for summary of parameters (optional)
title : string
Title of the summary table (optional)
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
'''
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results)
def as_text(self):
'''Generate ASCII Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
#TODO: this isn't used anywhere?
rule_dash = widest * '-'
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
if title is not None:
title = title
if len(title) < widest:
title = ' ' * int(widest/2 - len(title)/2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out
def as_html(self):
'''Generate HTML Summary Table
'''
tables = self.tables
settings = self.settings
#TODO: this isn't used anywhere
title = self.title
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
return tab
def as_latex(self):
'''Generate LaTeX Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '} \\\\'
else:
title = '\\caption{}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\\hline\n'.join(tab)
out = '\\begin{table}', title, tab, '\\end{table}'
out = '\n'.join(out)
return out
def _measure_tables(tables, settings):
'''Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
'''
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = tables[i].shape[1] - 1
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length)
# Useful stuff
_model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM' : 'Generalized linear model'
}
def summary_model(results):
'''Create a dict with information about the model
'''
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = OrderedDict()
info['Model:'] = lambda x: x.model.__class__.__name__
info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
info['R-squared:'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared:'] = lambda x: "%#8.3f" % x.rsquared_adj
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = OrderedDict()
for key, func in iteritems(info):
try:
out[key] = func(results)
# NOTE: some models don't have loglike defined (RLM), so that raises NotImplementedError
except (AttributeError, KeyError, NotImplementedError):
pass
return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
'''create a summary table of parameters from results instance
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
'''
if isinstance(results, tuple):
results, params, bse, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if not xname:
data.index = results.model.exog_names
else:
data.index = xname
return data
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True, stat='std_err'):
'''Stack coefficients and selected statistic in single column
stat : 'p-value', 't-stat', or None
Name of the value to be displayed in the summary column along with coefficients.
Defaults to std_err.
'p-value' displays p value
't-stat' displays t statistic
'''
'''Note: speed is being traded for fragility here.
If any more modifications are made to this code, consider rewriting it so that this function
is not as tightly coupled to summary_params (it should not rely on blind indexing to retrieve data)
index 0 = Coefs,
1 = std_err,
2 = t-stat,
3 = p-value
'''
# Decide which statistic to use and get that statistic's index
stat_index = 1
if stat == 't-stat':
stat_index = 2
elif stat == 'p-value':
stat_index = 3
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[[0,stat_index]]:
res[col] = res[col].apply(lambda x: float_format % x)
# Add parens around selected statistic
res.ix[:, stat_index] = '(' + res.ix[:, stat_index] + ')'
# Significance stars
if stars:
idx = res.ix[:, 3] < .1
res.ix[idx, 0] = res.ix[idx, 0] + '*'
idx = res.ix[:, 3] < .05
res.ix[idx, 0] = res.ix[idx, 0] + '*'
idx = res.ix[:, 3] < .01
res.ix[idx, 0] = res.ix[idx, 0] + '*'
res = res.ix[:, [0,stat_index]]
res = res.stack()
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except Exception:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
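# e.g. ['y', 'y', 'y'] -> ['y I', 'y II', 'y III']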
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
def summary_col(results, float_format='%.4f', model_names=[], stars=False,
info_dict=None, regressor_order=[], stat='std_err'):
"""
Summarize multiple results instances side-by-side (coefs and SEs)
Parameters
----------
results : statsmodels results instance or list of result instances
float_format : string
float format for coefficients and standard errors
Default : '%.4f'
model_names : list of strings
Model names of length len(results); if the names are not unique,
a Roman numeral is appended to each model name
stars : bool
print significance stars
info_dict : dict
dict of lambda functions to be applied to results instances to retrieve
model info. To use specific information for different models, add a
(nested) info_dict with model name as the key.
Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
only show `R2` for OLS regression models, but additionally `N` for
all other results.
Default : None (use the info_dict specified in
result.default_model_infos, if this property exists)
regressor_order : list of strings
list of names of the regressors in the desired order. All regressors
not specified will be appended to the end of the list.
stat : string ('std_err', 't-stat', 'p-value') or None
Name of the statistic to be displayed in parentheses alongside the coefficients.
Defaults to 'std_err'.
'p-value' displays p-values
't-stat' displays t-statistics
"""
if not isinstance(results, list):
results = [results]
cols = [_col_params(x, stars=stars, float_format=float_format, stat=stat) for x in
results]
# Unique column names (pandas has problems merging otherwise)
if model_names:
colnames = _make_unique(model_names)
else:
colnames = _make_unique([x.columns[0] for x in cols])
for i in range(len(cols)):
cols[i].columns = [colnames[i]]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
summ = reduce(merg, cols)
if regressor_order:
varnames = summ.index.get_level_values(0).tolist()
ordered = [x for x in regressor_order if x in varnames]
unordered = [x for x in varnames if x not in regressor_order + ['']]
order = ordered + list(np.unique(unordered))
f = lambda idx: sum([[x + 'coef', x + 'stde'] for x in idx], [])
summ.index = f(np.unique(varnames))
summ = summ.reindex(f(order))
summ.index = [x[:-4] for x in summ.index]
idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
summ.index = np.where(idx, '', summ.index.get_level_values(0))
# add infos about the models.
if info_dict:
cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
info_dict)) for x in results]
else:
cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
results]
# use unique column names, otherwise the merge will not succeed
for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):
df.columns = [name]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
info = reduce(merg, cols)
dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error
dat.columns = summ.columns
dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
summ = dat
summ = summ.fillna('')
smry = Summary()
smry.add_df(summ, header=True, align='l')
stat_name = 'Standard errors'
if stat == 't-stat':
stat_name = 't-statistics'
elif stat == 'p-value':
stat_name = 'p-values'
smry.add_text(stat_name + ' in parentheses.')
if stars:
smry.add_text('* p<.1, ** p<.05, *** p<.01')
return smry
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
except Exception:
out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
index=True, table_dec_above='-', table_dec_below=None,
header_dec_below='-', pad_col=0, pad_index=0):
dat = df.copy()
dat = dat.applymap(lambda x: _formatter(x, float_format))
if header:
headers = [str(x) for x in dat.columns.tolist()]
else:
headers = None
if index:
stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
else:
dat.ix[:, 0] = [str(x) + int(pad_index) * ' ' for x in dat.ix[:, 0]]
stubs = None
st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
st.output_formats['latex']['data_aligns'] = align
st.output_formats['txt']['data_aligns'] = align
st.output_formats['txt']['table_dec_above'] = table_dec_above
st.output_formats['txt']['table_dec_below'] = table_dec_below
st.output_formats['txt']['header_dec_below'] = header_dec_below
st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
simple_tables = []
float_format = '%.4f'
if pad_col is None:
pad_col = [0] * len(tables)
if pad_index is None:
pad_index = [0] * len(tables)
for i, v in enumerate(tables):
index = settings[i]['index']
header = settings[i]['header']
align = settings[i]['align']
simple_tables.append(_df_to_simpletable(v, align=align,
float_format=float_format,
header=header, index=index,
pad_col=pad_col[i],
pad_index=pad_index[i]))
return simple_tables
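# Usage sketch (illustration only, assuming the pandas/statsmodels versions
# this module targets): builds a small summary from a plain DataFrame,
# without requiring a fitted results instance.
if __name__ == "__main__":
example = pd.DataFrame({'Coef.': [1.0, 2.0], 'Std.Err.': [0.25, 0.5]},
index=['x1', 'x2'])
smry = Summary()
smry.add_df(example)
smry.add_text('Illustrative numbers only.')
smry.add_title('Results: Ordinary least squares')
print(smry.as_text())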
|
|
__author__ = 'yuxiang' # derived from honda.py by fyang
import datasets
import datasets.kitti
import os
import PIL
import datasets.imdb
import numpy as np
import scipy.sparse
from utils.cython_bbox import bbox_overlaps
from utils.boxes_grid import get_boxes_grid
import subprocess
import cPickle
from fast_rcnn.config import cfg
import math
from rpn_msr.generate_anchors import generate_anchors
class kitti(datasets.imdb):
def __init__(self, image_set, kitti_path=None):
datasets.imdb.__init__(self, 'kitti_' + image_set)
self._image_set = image_set
self._kitti_path = self._get_default_path() if kitti_path is None \
else kitti_path
self._data_path = os.path.join(self._kitti_path, 'data_object_image_2')
self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index()
# Default to roidb handler
if cfg.IS_RPN:
self._roidb_handler = self.gt_roidb
else:
self._roidb_handler = self.region_proposal_roidb
# num of subclasses
if image_set == 'train' or image_set == 'val':
self._num_subclasses = 125 + 24 + 24 + 1
prefix = 'validation'
else:
self._num_subclasses = 227 + 36 + 36 + 1
prefix = 'test'
# load the mapping from subclass to class
filename = os.path.join(self._kitti_path, cfg.SUBCLS_NAME, prefix, 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.int)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = self._class_to_ind[words[1]]
self._subclass_mapping = mapping
self.config = {'top_k': 100000}
# statistics for computing recall
self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_proposal = 0
assert os.path.exists(self._kitti_path), \
'KITTI path does not exist: {}'.format(self._kitti_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# set the prefix
if self._image_set == 'test':
prefix = 'testing/image_2'
else:
prefix = 'training/image_2'
image_path = os.path.join(self._data_path, prefix, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
image_set_file = os.path.join(self._kitti_path, self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.rstrip('\n') for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where KITTI is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'KITTI')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_kitti_voxel_exemplar_annotation(index)
for index in self.image_index]
if cfg.IS_RPN:
# print out recall
for i in xrange(1, self.num_classes):
print '{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i])
print '{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i])
print '{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_kitti_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI format.
"""
if self._image_set == 'test':
lines = []
else:
filename = os.path.join(self._data_path, 'training', 'label_2', index + '.txt')
lines = []
with open(filename) as f:
for line in f:
line = line.replace('Van', 'Car')
words = line.split()
cls = words[0]
truncation = float(words[1])
occlusion = int(words[2])
height = float(words[7]) - float(words[5])
if cls in self._class_to_ind and truncation < 0.5 and occlusion < 3 and height > 25:
lines.append(line)
num_objs = len(lines)
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
boxes[ix, :] = [float(n) for n in words[4:8]]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
anchors = generate_anchors()
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
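# (this round/floor chain appears to reproduce the backbone's layer-by-layer
# output-size arithmetic: one /4 stage followed by two /2 stages, i.e. an
# overall stride of 16, matching feat_stride above)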
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2 + 1 + 0.5)
height = np.floor((height - 1) / 2 + 1 + 0.5)
width = np.round((image_width * scale - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
# gt boxes
gt_boxes = boxes * scale
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
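# Worked shape example (illustration only): with A = 9 anchors and a 2x3
# feature map (K = 6 shift positions), (1, A, 4) + (K, 1, 4) broadcasts to
# (K, A, 4) = (6, 9, 4) and reshapes to (54, 4) candidate boxes in image
# coordinates.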
# compute overlap
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
# check how many gt boxes are covered by anchors
if num_objs != 0:
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps' : overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
def _load_kitti_voxel_exemplar_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI voxel exemplar format.
"""
if self._image_set == 'train':
prefix = 'validation'
elif self._image_set == 'trainval':
prefix = 'test'
else:
return self._load_kitti_annotation(index)
filename = os.path.join(self._kitti_path, cfg.SUBCLS_NAME, prefix, index + '.txt')
assert os.path.exists(filename), \
'Path does not exist: {}'.format(filename)
# the annotation file contains flipped objects
lines = []
lines_flipped = []
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[1])
is_flip = int(words[2])
if subcls != -1:
if is_flip == 0:
lines.append(line)
else:
lines_flipped.append(line)
num_objs = len(lines)
# store information of flipped objects
assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
for ix, line in enumerate(lines_flipped):
words = line.split()
subcls = int(words[1])
gt_subclasses_flipped[ix] = subcls
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
subcls = int(words[1])
boxes[ix, :] = [float(n) for n in words[3:7]]
gt_classes[ix] = cls
gt_subclasses[ix] = subcls
overlaps[ix, cls] = 1.0
subindexes[ix, cls] = subcls
subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]
overlaps = scipy.sparse.csr_matrix(overlaps)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
base_size = 16
ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
scales = 2**np.arange(1, 6, 0.5)
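# i.e. 10 anchor scales from 2**1 up to 2**5.5 (roughly 2x to 45x base_size)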
anchors = generate_anchors(base_size, ratios, scales)
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2 + 1 + 0.5)
height = np.floor((height - 1) / 2 + 1 + 0.5)
width = np.round((image_width * scale - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
# gt boxes
gt_boxes = boxes * scale
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
# compute overlap
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
# check how many gt boxes are covered by anchors
if num_objs != 0:
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps': overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
def region_proposal_roidb(self):
"""
Return the database of regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_' + cfg.SUBCLS_NAME + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} roidb loaded from {}'.format(self.name, cache_file)
return roidb
if self._image_set != 'test':
gt_roidb = self.gt_roidb()
print 'Loading region proposal network boxes...'
if self._image_set == 'trainval':
model = cfg.REGION_PROPOSAL + '_227/'
else:
model = cfg.REGION_PROPOSAL + '_125/'
rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
print 'Region proposal network boxes loaded'
roidb = datasets.imdb.merge_roidbs(rpn_roidb, gt_roidb)
# print 'Loading voxel pattern boxes...'
# if self._image_set == 'trainval':
# model = '3DVP_227'
# else:
# model = '3DVP_125/'
# vp_roidb = self._load_voxel_pattern_roidb(gt_roidb, model)
# print 'Voxel pattern boxes loaded'
# roidb = datasets.imdb.merge_roidbs(vp_roidb, gt_roidb)
# print 'Loading selective search boxes...'
# ss_roidb = self._load_selective_search_roidb(gt_roidb)
# print 'Selective search boxes loaded'
# print 'Loading ACF boxes...'
# acf_roidb = self._load_acf_roidb(gt_roidb)
# print 'ACF boxes loaded'
# roidb = datasets.imdb.merge_roidbs(ss_roidb, gt_roidb)
# roidb = datasets.imdb.merge_roidbs(roidb, acf_roidb)
else:
print 'Loading region proposal network boxes...'
model = cfg.REGION_PROPOSAL + '_227/'
roidb = self._load_rpn_roidb(None, model)
print 'Region proposal network boxes loaded'
# print 'Loading voxel pattern boxes...'
# model = '3DVP_227/'
# roidb = self._load_voxel_pattern_roidb(None, model)
# print 'Voxel pattern boxes loaded'
# print 'Loading selective search boxes...'
# roidb = self._load_selective_search_roidb(None)
# print 'Selective search boxes loaded'
# print 'Loading ACF boxes...'
# acf_roidb = self._load_acf_roidb(None)
# print 'ACF boxes loaded'
# roidb = datasets.imdb.merge_roidbs(roidb, acf_roidb)
print '{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index))
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote roidb to {}'.format(cache_file)
return roidb
def _load_rpn_roidb(self, gt_roidb, model):
# set the prefix
if self._image_set == 'test':
prefix = model + 'testing'
else:
prefix = model + 'training'
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_path, 'region_proposals', prefix, index + '.txt')
assert os.path.exists(filename), \
'RPN data not found at: {}'.format(filename)
raw_data = np.loadtxt(filename, dtype=float)
if len(raw_data.shape) == 1:
if raw_data.size == 0:
raw_data = raw_data.reshape((0, 5))
else:
raw_data = raw_data.reshape((1, 5))
x1 = raw_data[:, 0]
y1 = raw_data[:, 1]
x2 = raw_data[:, 2]
y2 = raw_data[:, 3]
score = raw_data[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
raw_data = raw_data[inds,:4]
self._num_boxes_proposal += raw_data.shape[0]
box_list.append(raw_data)
print 'load {}: {}'.format(model, index)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_voxel_pattern_roidb(self, gt_roidb, model):
# set the prefix
if self._image_set == 'test':
prefix = model + 'testing'
else:
prefix = model + 'training'
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_path, 'region_proposals', prefix, index + '.txt')
assert os.path.exists(filename), \
'Voxel pattern data not found at: {}'.format(filename)
raw_data = np.loadtxt(filename, dtype=float)
if len(raw_data.shape) == 1:
if raw_data.size == 0:
raw_data = raw_data.reshape((0, 4))
else:
raw_data = raw_data.reshape((1, 4))
self._num_boxes_proposal += raw_data.shape[0]
box_list.append(raw_data)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_box_list.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
box_list = cPickle.load(fid)
print '{} boxes loaded from {}'.format(self.name, cache_file)
else:
# set the prefix
model = 'selective_search/'
if self._image_set == 'test':
prefix = model + 'testing'
else:
prefix = model + 'training'
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_path, 'region_proposals', prefix, index + '.txt')
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = np.loadtxt(filename, dtype=float)
box_list.append(raw_data[:min(self.config['top_k'], raw_data.shape[0]), 1:])
with open(cache_file, 'wb') as fid:
cPickle.dump(box_list, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote selective search boxes to {}'.format(cache_file)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_acf_roidb(self, gt_roidb):
cache_file = os.path.join(self.cache_path,
self.name + '_acf_box_list.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
box_list = cPickle.load(fid)
print '{} boxes loaded from {}'.format(self.name, cache_file)
else:
# set the prefix
model = 'ACF/'
if self._image_set == 'test':
prefix = model + 'testing'
else:
prefix = model + 'training'
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_path, 'region_proposals', prefix, index + '.txt')
assert os.path.exists(filename), \
'ACF data not found at: {}'.format(filename)
raw_data = np.loadtxt(filename, usecols=(2,3,4,5), dtype=float)
box_list.append(raw_data[:min(self.config['top_k'], raw_data.shape[0]), :])
with open(cache_file, 'wb') as fid:
cPickle.dump(box_list, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ACF boxes to {}'.format(cache_file)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def evaluate_detections(self, all_boxes, output_dir):
# load the mapping from subclass to alpha (viewpoint)
if self._image_set == 'val':
prefix = 'validation'
elif self._image_set == 'test':
prefix = 'test'
else:
prefix = ''
filename = os.path.join(self._kitti_path, cfg.SUBCLS_NAME, prefix, 'mapping.txt')
assert os.path.exists(filename), \
'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print 'Writing KITTI results to file ' + filename
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
if cfg.TEST.SUBCLS:
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
else:
alpha = -10
f.write('{:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1 -1 -1 -1 {:.32f}\n'.format(\
cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
# write detection results into one file
def evaluate_detections_one_file(self, all_boxes, output_dir):
# open results file
filename = os.path.join(output_dir, 'detections.txt')
print 'Writing all KITTI results to file ' + filename
with open(filename, 'wt') as f:
# for each image
for im_ind, index in enumerate(self.image_index):
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
if cfg.TEST.SUBCLS:
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
else:
subcls = -1
f.write('{:s} {:s} {:f} {:f} {:f} {:f} {:d} {:f}\n'.format(\
index, cls, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], subcls, dets[k, 4]))
def evaluate_proposals(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print 'Writing KITTI results to file ' + filename
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(\
dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals_msr(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print 'Writing KITTI results to file ' + filename
with open(filename, 'wt') as f:
dets = all_boxes[im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
if __name__ == '__main__':
d = datasets.kitti('train')
res = d.roidb
from IPython import embed; embed()
|
|
#!/usr/bin/env python
"""
Class that constructs a MultiplexMarkovchain given the counts. Also
has methods to get the null model and the parameters of the
probability distribution associated with the Markov chain. Currently
handles only 4-state Markov chains (i.e. two layer networks).
"""
from __future__ import division
import numpy as np
import networkx as nx
from warnings import warn
class MarkovChain(object):
"""
A class that computes properties of a Markov chain, such as the transition
parameters and their standard deviations, assuming a uniform prior for the
distribution of transition parameters. Each instance wraps a NetworkX DiGraph
(self.MC) whose edges carry the attributes "count", "mu" and "sigma": the
observed transition counts and the inferred mean and standard deviation of
each transition probability.
Each state of the Markov chain has a Dirichlet (multivariate beta)
distribution whose variates are the transition parameters. The
transition parameters are estimated as the mean of the
corresponding Dirichlet distribution.
Parameters
----------
counts : Counts for each transition of the Markov chain. Given as a dictionary
in the form counts = {(from_state,to_state):count}. For a Multiplex Markov Chain,
states themselves will be n-tuples (n = number of layers) whose entries
are the states in the individual layers
num_transitions : The number of transitions in the Markov chain. The number
of states of the Markov chain is sqrt(num_transitions). len(counts) =
num_transitions
state_totals : The total of counts leaving from the particular
state of the Markov chain. len(state_totals) is equal to the
number of states of the Markov chain. Can be accessed using the
method `get_state_totals`.
params : The average value of the probability of the transitions
assuming a uniform prior. Stored as a dictionary whose keys are
tuples (from_state,to_state). Can be accessed using method 'get_parameters'
std : The standard deviations of the variates of the Dirichlet
distributions associated with the Markov chain. Stored as a dictionary
whose keys are tuples (from_state,to_state). Can be accessed using
method 'get_std_dev'
"""
def __init__(self, counts):
if type(counts) is not dict:
raise AssertionError("counts must be presented as a dictionary: {transition: count}")
elif any([type(k) is not tuple for k in counts.keys()]):
raise AssertionError("keys of 'counts' must be tuples: (from_state, to_state)")
elif any([len(k) != 2 for k in counts.keys()]):
raise AssertionError("keys of 'counts' must be length-2 tuples: (from_state, to_state)")
elif any([type(v) is not int for v in counts.values()]):
raise AssertionError("values of 'counts' must be integers")
self.counts = counts
self.MC = nx.DiGraph()
for (from_state,to_state) in counts.keys():
self.MC.add_edge(from_state,to_state, count = counts[(from_state,to_state)])
#pad 'counts' with zeros and add all missing edges to self.MC
for from_state in self.MC.nodes():
for to_state in self.MC.nodes():
if ((from_state,to_state) not in counts.keys()):
self.MC.add_edge(from_state,to_state,count=0)
counts[(from_state,to_state)] = 0
self.params = None # probability of transitions
self.std = None # std. associated with the transitions
self.state_totals = None #total number of transitions leaving a state
self.num_transitions = (self.MC.number_of_nodes())**2
def compute_prob_params(self,counts):
"""
Given counts returns the mean, std. dev. for every transition
and normalization constant for each state, and attaches these
values to the edges of the MC.
It also packs the values into a dictionary (self.params, self.std)
whose keys are edges (ordered pairs of states)
"""
num_transitions = self.num_transitions
l = self.MC.number_of_nodes()
totals = dict()
for from_state in self.MC.nodes():
tot = sum([counts[(from_state,to_state)] if ((from_state,to_state) in counts.keys()) else 0 for to_state in self.MC.nodes()])
totals[from_state] = tot
if tot > 0:
for to_state in self.MC.nodes():
# mean and std. dev of the corresponding beta distribution
p = (counts[(from_state,to_state)]+1)/(tot+l) if ((from_state,to_state) in counts.keys()) else 1/(tot+l)
self.MC[from_state][to_state]['mu'] = p
self.MC[from_state][to_state]['sigma'] = np.sqrt(p*(1-p)/(tot+ (l+1)))
else:
for to_state in self.MC.nodes():
p = 1/(tot+l)
self.MC[from_state][to_state]['mu'] = p
self.MC[from_state][to_state]['sigma'] = np.sqrt(p*(1-p)/(tot+ (l+1)))
self.params = {(fs,ts):self.MC[fs][ts]['mu'] for (fs,ts) in self.MC.edges()}
self.std = {(fs,ts):self.MC[fs][ts]['sigma'] for (fs,ts) in self.MC.edges()}
self.state_totals = totals
def get_parameters(self):
if self.params is None:
self.compute_prob_params(self.counts)
return self.params
def get_std_dev(self):
if self.std is None:
self.compute_prob_params(self.counts)
return self.std
def get_state_totals(self):
if self.state_totals is None:
self.compute_prob_params(self.counts)
return self.state_totals
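# Illustrative usage sketch (not part of the original module): a two-state chain
# built from hypothetical transition counts. Keys are (from_state, to_state)
# tuples and values are integer counts, as required by MarkovChain.__init__.
# Defined as a function so that importing this module has no side effects.
def _markov_chain_example():
    counts = {(0, 0): 8, (0, 1): 2, (1, 0): 3, (1, 1): 7}
    mc = MarkovChain(counts)
    params = mc.get_parameters()    # posterior-mean transition probabilities
    std = mc.get_std_dev()          # posterior standard deviations
    totals = mc.get_state_totals()  # total counts leaving each state
    return params, std, totals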
def _is_power_of_2(x):
return (x & (x-1) == 0 and x != 0)
class MultiplexMarkovChain(MarkovChain):
"""
Class inherits from MarkovChain. In addition, the class builds a
null model for detecting `dynamical spillover` (Insert ref. to
paper).
To build an instance of this class, you must provide a dictionary of
counts, formatted as for the MarkovChain class. Moreover, states must
have the form (layer1,layer2,...,layer_n), specifying the states of
the edge in the n layers of the multiplex.
In particular, keys of the dictionary 'counts' must have the form:
((layer1, layer2, ..., layer_n),(layer1', layer2', ..., layer_n'))
Parameters
----------
null_components : a list with a dictionary for each layer of the
multiplex network. The dictionary has two items:
counts : the total counts associated with a Markov chain
describing the edge dynamics on a particular layer
MC : the MarkovChain initialized with the above counts.
null_prob : transition parameters for the null model.
null_std : standard deviation associated with the transition
parameters of the null model.
See Also
---------
MarkovChain
"""
def __init__(self, counts):
'''
num_transitions = len(counts)
#check if the num_transitions is a power of 2.
if not _is_power_of_2(num_transitions):
raise AssertionError("Length of counts is not a power of 2.")
'''
MarkovChain.__init__(self, counts)
self.counts = counts
self.num_layers = len(list(counts.keys())[0][0])
self.null_components = None
self.null_prob = None
self.null_std = None
def _compute_null_counts(self, counts):
"""
This function computes counts for the null model.
"""
num_layers = self.num_layers
null_counts = [dict() for i in range(num_layers)]
for (joint_fs,joint_ts) in counts.keys():
for layer in range(num_layers):
if (joint_fs[layer],joint_ts[layer]) in null_counts[layer].keys():
null_counts[layer][(joint_fs[layer],joint_ts[layer])] += counts[(joint_fs,joint_ts)]
else:
null_counts[layer][(joint_fs[layer],joint_ts[layer])] = counts[(joint_fs,joint_ts)]
self.null_components = [{'counts':null_counts[i]} for i in range(num_layers)]
def compute_prob_null_components(self):
"""
Initializes a MarkovChain for each layer of the multiplex that
describes the evolution of edges on that layer independent of
the other layers.
"""
if self.null_components is None:
self._compute_null_counts(self.counts)
for component in self.null_components:
component["MC"] = MarkovChain(component["counts"])
def compute_null_components(self):
"""
Computes the components of the null model. For a 4-state MC,
they are two 2-state MCs.
"""
self._compute_null_counts(self.counts)
self.compute_prob_null_components()
def compute_null_prob_std(self):
"""
Computes the null probability using the null components. When
computing the standard deviation the method approximates the
beta distributions as a Gaussian distributions.
"""
num_transitions = self.num_transitions
num_layers = self.num_layers
pnull = dict()
std_null = dict()
#If the Gaussian approximation is not justified warn the user
state_totals = self.get_state_totals()
if np.any(np.array(list(state_totals.values())) < 100):
warn("Some of the state totals are less than 100. Gaussian approximation may not be justified.")
component_params = [self.null_components[k]['MC'].get_parameters() for k in range(num_layers)]
component_std_dev = [self.null_components[k]['MC'].get_std_dev() for k in range(num_layers)]
pnull = {(fs,ts):np.prod([component_params[k][(fs[k],ts[k])] for k in range(num_layers)]) for (fs,ts) in self.MC.edges()}
std_null = {(fs,ts):
pnull[(fs,ts)]*np.sqrt(
np.sum(
[(component_std_dev[k][(fs[k],ts[k])]/component_params[k][(fs[k],ts[k])])**2 for k in range(num_layers)]
)
) for (fs,ts) in self.MC.edges()}
self.null_prob = pnull
self.null_std = std_null
def get_null_prob(self):
"""
Return the probability for each transition
of the null model. Computes the null model the first time this
function is called.
"""
if self.null_prob is None:
if self.null_components is None:
self.compute_null_components()
self.compute_null_prob_std()
return self.null_prob
def get_null_std_dev(self):
"""
Returns the std dev. associated with the probability
distribution of the transition parameters of the null model.
"""
if self.null_std is None:
if self.null_components is None:
self.compute_null_components()
self.compute_null_prob_std()
return self.null_std
def differs_from_null(self):
"""
Returns a dictionary whose keys are transitions
and whose values are (prob - null_prob)/(std + null_std)
"""
if self.null_prob is None:
if self.null_components is None:
self.compute_null_components()
self.compute_null_prob_std()
if self.params is None:
self.compute_prob_params(self.counts)
return {k:(self.params[k]-self.null_prob[k])/(self.std[k]+self.null_std[k]) for k in self.params.keys()}
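# Illustrative usage sketch (not part of the original module): a two-layer
# multiplex, so each state is a (layer1, layer2) tuple and each key of 'counts'
# is a ((from), (to)) pair. The counts below are hypothetical. Defined as a
# function so that importing this module has no side effects.
def _multiplex_markov_chain_example():
    counts = {((0, 0), (0, 0)): 50, ((0, 0), (0, 1)): 5,
              ((0, 1), (1, 1)): 4,  ((1, 1), (1, 1)): 60,
              ((1, 0), (0, 0)): 3,  ((1, 1), (0, 1)): 2}
    mmc = MultiplexMarkovChain(counts)
    null = mmc.get_null_prob()   # product of the per-layer transition parameters
    z = mmc.differs_from_null()  # (prob - null_prob) / (std + null_std)
    return null, z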
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for storing campaign-related entities derived from raw CAL-ACCESS data.
"""
from datetime import date
from calaccess_processed_elections import corrections, get_expected_election_date
# Managers
from calaccess_processed_filings.managers import Form501FilingManager
# Models
from django.db import models
from opencivicdata.elections.models import CandidateContest
from calaccess_processed_filings.models.base import FilingBaseModel
class Form501FilingBase(FilingBaseModel):
"""
Base and abstract model for Form 501 filings.
"""
date_filed = models.DateField(
verbose_name='date filed',
null=True,
db_index=True,
help_text="Date when the Form 501 filing was filed (from F501_502_CD.RPT_DATE)",
)
statement_type = models.CharField(
max_length=62,
verbose_name='statement type',
help_text='Describes the type of statement, e.g. "ORIGINAL", "AMENDMENT" '
'(from LOOKUP_CODES.CODE_DESC)',
)
filer_id = models.CharField(
verbose_name="filer identifier",
max_length=9,
help_text="Filer's unique identifier (from F501_502_CD.FILER_ID)",
)
committee_id = models.CharField(
verbose_name='committee identifier',
max_length=9,
help_text="Candidate's committee's unique filer idenitifier (from "
"F501_502_CD.COMMITTEE_ID)",
)
title = models.CharField(
verbose_name="candidate name title",
max_length=100,
blank=True,
help_text="Name title of the candidate (from F501_502_CD.CAND_NAMT)",
)
last_name = models.CharField(
verbose_name="candidate last name",
max_length=200,
# just a few don't even have a last name
blank=True,
help_text="Last name of the candidate (from F501_502_CD.CAND_NAML)",
)
first_name = models.CharField(
verbose_name="candidate first name",
max_length=45,
blank=True,
help_text="First name of the candidate (from F501_502_CD.CAND_NAMF)",
)
middle_name = models.CharField(
verbose_name="candidate middle name",
max_length=20,
blank=True,
help_text="Middle name of the candidate (from F501_502_CD.CAND_NAMM)",
)
name_suffix = models.CharField(
verbose_name="candidate name suffix",
max_length=10,
blank=True,
help_text="Name suffix of the candidate (from F501_502_CD.CAND_NAMS)",
)
name_moniker = models.CharField(
verbose_name="candidate name moniker",
max_length=20,
blank=True,
help_text="Moniker (aka, nickname) of the candidate (from F501_502_CD"
".MONIKER)",
)
phone = models.CharField(
max_length=20,
verbose_name='candidate phone number',
blank=True,
help_text="Phone number of the candidate (from F501_502_CD.CAND_PHON)",
)
fax = models.CharField(
max_length=20,
verbose_name='fax number',
blank=True,
help_text="Phone number of the candidate (from F501_502_CD.CAND_FAX)",
)
email = models.CharField(
max_length=200,
verbose_name='email address',
blank=True,
help_text="Email address of the candidate (from F501_502_CD.CAND_EMAIL)",
)
city = models.CharField(
max_length=200,
verbose_name="candidate city",
blank=True,
help_text="City of the candidate (from F501_502_CD.CAND_CITY)",
)
state = models.CharField(
max_length=200,
verbose_name='candidate state',
blank=True,
help_text="State of the candidate (from F501_502_CD.CAND_ST)",
)
zip_code = models.CharField(
max_length=10,
verbose_name='zip code',
blank=True,
help_text='Zip code (usually zip5, sometimes zip9) of the '
'candidate (from F501_502_CD.CAND_ZIP4)',
)
office = models.CharField(
verbose_name='office sought',
max_length=80,
blank=True,
help_text='Position title of the office sought by the candidate (from '
'LOOKUP_CODES_CD.CODE_DESC, unless NULL or 0, then F501_502_CD.'
'OFFICE_DSCR)',
)
agency = models.CharField(
verbose_name='agency name',
max_length=200,
blank=True,
help_text='Name of the agency with the office sought (from '
'F501_502_CD.AGENCY_NAM)',
)
district = models.IntegerField(
verbose_name='district',
null=True,
help_text='District of office sought, if applicable (from LOOKUP_CODES_CD'
'.CODE_DESC, unless NULL or 0, then F501_502_CD.DIST_NO)',
)
party = models.CharField(
max_length=30,
verbose_name='political party',
blank=True,
help_text='Political party of the candidate (from LOOKUP_CODES_CD.'
'CODE_DESC, unless NULL or 0, then F501_502_CD.PARTY)',
)
jurisdiction = models.CharField(
max_length=30,
verbose_name='jurisdiction',
blank=True,
help_text='Jurisdiction of the office sought, e.g., "LOCAL", "STATE" '
'(from LOOKUP_CODES_CD.CODE_DESC)',
)
election_type = models.CharField(
verbose_name='election type',
max_length=16,
null=True,
help_text='Type of election in which the candidate is declaring intention'
' to run, e.g. "PRIMARY", "GENERAL" (from LOOKUP_CODES_CD.'
'CODE_DESC)',
)
election_year = models.IntegerField(
verbose_name='election year',
null=True,
help_text='Year in which the election is held (from F501_502_CD.YR_OF_ELEC)',
)
accepted_limit = models.BooleanField(
null=True,
help_text='Indicates if either the "I accept the voluntary expenditure '
'ceiling" or "I do not accept the voluntary expenditure" '
'box is checked (from F501_502_CD.ACCEPT_LIMIT_YN)',
)
limit_not_exceeded_election_date = models.DateField(
verbose_name='limit not exceeded election date',
null=True,
help_text='Date of the primary or special election in which the candidate '
'did not accept the voluntary expenditure ceiling but also did not '
'exceed the ceiling. Candidates may amend their Form 501 to accept '
'the limits for the general election or special election runoff '
'and receive all the benefits of accepting the ceiling (from '
'F501_502_CD.DID_EXCEED_DT)'
)
personal_funds_contrib_date = models.DateField(
verbose_name='personal funds contribution date',
null=True,
help_text='Date on which the candidate contributed personal funds in excess '
'of the voluntary expenditure ceiling for the election (from F501_502_CD'
'.CNTRB_PRSNL_FNDS_DT)',
)
executed_on = models.DateField(
verbose_name='executed on date',
null=True,
help_text='Date on which the candidate intention statement was signed '
'(from F501_502_CD.EXECUTE_DT)'
)
class Meta:
"""
Model options.
"""
abstract = True
app_label = 'calaccess_processed_filings'
class Form501Filing(Form501FilingBase):
"""
The most recent version of each Form 501 filing by a candidate.
Includes information from the most recent version of each Form 501 filing.
All versions of the filings can be found in Form501FilingVersion.
"""
filing_id = models.IntegerField(
verbose_name='filing id',
primary_key=True,
null=False,
help_text='Unique identification number for the Form 501 filing ('
'from F501_502_CD.FILING_ID)',
)
amendment_count = models.IntegerField(
verbose_name='Count amendments',
db_index=True,
null=False,
help_text='Number of amendments to the Form 501 filing (from '
'maximum value of F501_502_CD.AMEND_ID)',
)
objects = Form501FilingManager()
class Meta:
"""
Model options.
"""
app_label = 'calaccess_processed_filings'
index_together = ((
'filing_id',
'amendment_count',
),)
verbose_name = "Form 501 (Candidate Intention) filing"
def __str__(self):
return str(self.filing_id)
@property
def name(self):
"""
Return the 'name' of the candidate to match the format we typically put in the OCD Person model.
"""
split_name = self.sort_name.split(',')
split_name.reverse()
return ' '.join(split_name).strip()
@property
def sort_name(self):
"""
Return the 'sort_name' of the candidate to match the format we typically scrape from the CAL-ACCESS website.
This is useful when trying to consolidate these forms with scraped data in our OCD models.
"""
return '{0.last_name}, {0.first_name} {0.middle_name}'.format(self).strip()
@property
def parsed_name(self):
"""
The parsed name of the candidate ready to be converted into an OCD Person.
"""
return dict(
name=self.name,
sort_name=self.name
)
@property
def office_name(self):
"""
Return the 'office_name' of the candidate to match the format we typically scrape from the CAL-ACCESS website.
This is useful when trying to consolidate these forms with scraped data in our OCD models.
"""
if self.district:
return '{0.office} {0.district}'.format(self).strip()
else:
return self.office
@property
def ocd_election(self):
"""
Return the Election occurring in election_year whose name includes election_type.
Return None if none found.
"""
from calaccess_processed_elections.proxies import OCDElectionProxy
if not self.election_year or not self.election_type:
return None
try:
return OCDElectionProxy.objects.get(
date__year=self.election_year,
name__contains=self.election_type,
)
except (OCDElectionProxy.DoesNotExist, OCDElectionProxy.MultipleObjectsReturned):
# if it's a future primary, try to calculate the date
if self.election_year >= date.today().year and self.election_type == 'PRIMARY':
try:
dt_obj = get_expected_election_date(
self.election_year, self.election_type
)
except ValueError:
return None
return OCDElectionProxy.objects.create_from_calaccess(
'{0} {1}'.format(self.election_year, self.election_type),
dt_obj,
election_type=self.election_type,
)
else:
return None
def get_party(self):
"""
Get the Party from Form501Filing.
Return Party object or None.
"""
from calaccess_processed_elections.proxies import OCDPartyProxy
# first try the corrections
party = corrections.candidate_party(
'{0.last_name}, {0.first_name} {0.middle_name}'.format(self).strip(),
self.election_year,
self.election_type,
'{0.office} {0.district}'.format(self).strip().upper(),
)
if party:
return party
# then try using the party on the form501
party = OCDPartyProxy.objects.get_by_name(self.party)
if not party.is_unknown():
return party
# finally, try looking in FilerToFilerTypes
ocd_election = self.ocd_election
if not ocd_election:
return OCDPartyProxy.objects.unknown()
return OCDPartyProxy.objects.get_by_filer_id(int(self.filer_id), ocd_election.date)
def get_or_create_contest(self):
"""
Get or create a CandidateContest by extracting info form Form501Filing.
Return a CandidateContest or None, if extracted info is insufficient.
"""
from calaccess_processed_elections.proxies import OCDPostProxy
# Get or create an election
ocd_election = self.ocd_election
if not ocd_election:
return None
# Get or create a post
post = OCDPostProxy.objects.get_by_form501(self)
# Don't bother trying to get contest unless we have a post
if not post:
return None
# Seed contest data
contest_data = {
'posts__post': post,
'division': post.division,
'election': ocd_election,
}
# if looking for a pre-2012 primary, include party
if ocd_election.is_partisan_primary:
contest_data['party'] = self.get_party()
# Try to get it from the database
try:
return CandidateContest.objects.get(**contest_data)
except CandidateContest.DoesNotExist:
# if the election date is later than today, but no contest
if ocd_election.date > date.today():
# make the contest (the CAL-ACCESS website might be behind)
contest = CandidateContest.objects.create(
name=post.label.upper(),
division=contest_data['division'],
election=contest_data['election'],
)
contest.posts.create(
contest=contest,
post=contest_data['posts__post'],
)
return contest
# Otherwise give up
else:
return None
except CandidateContest.MultipleObjectsReturned:
# In this case, there is likely a primary and a runoff on the same day
# and the code is unable to distinguish between them.
# I don't yet have a solution to this problem so we are going to give up
return None
class Form501FilingVersion(Form501FilingBase):
"""
Every version of each Form 501 (Candidate Intention Statement) filing by candidates.
Includes information found on each version of each Form 501 filing. For the
most recent version of each filing, see Form501Filing.
"""
filing = models.ForeignKey(
'Form501Filing',
related_name='versions',
db_constraint=False,
null=True,
on_delete=models.SET_NULL,
help_text='Unique identification number for the Form 501 filing ('
'from F501_502_CD.FILING_ID)',
)
amend_id = models.IntegerField(
verbose_name='amendment id',
null=False,
help_text='Identifies the version of the Form 501 filing, with 0 '
'representing the initial filing (from F501_502_CD.AMEND_ID)',
)
objects = Form501FilingManager()
class Meta:
"""
Model options.
"""
app_label = 'calaccess_processed_filings'
unique_together = ((
'filing',
'amend_id',
),)
index_together = ((
'filing',
'amend_id',
),)
verbose_name = "Form 501 (Candidate Intention) filing version"
def __str__(self):
return '{}-{}'.format(self.filing, self.amend_id)
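# Illustrative sketch (not part of the original module): how the name-formatting
# properties behave on an unsaved Form501Filing instance. Assumes a configured
# Django environment; the field values below are hypothetical.
def _name_formatting_example():
    filing = Form501Filing(last_name='DOE', first_name='JANE', middle_name='')
    assert filing.sort_name == 'DOE, JANE'   # "last, first middle", then stripped
    assert filing.name == 'JANE DOE'         # sort_name reversed around the comma
    return filing.parsed_name                # dict with 'name' and 'sort_name'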
|
|
# Copied from pytorch repository since my installation does not have this yet.
import math
import random
import torch
from torch.autograd import Variable
def calculate_gain(nonlinearity, param=None):
"""Return the recommended gain value for the given nonlinearity function. The values are as follows:
============ ==========================================
nonlinearity gain
============ ==========================================
linear :math:`1`
conv{1,2,3}d :math:`1`
sigmoid :math:`1`
tanh :math:`5 / 3`
relu :math:`\sqrt{2}`
leaky_relu :math:`\sqrt{2 / (1 + negative\_slope^2)}`
============ ==========================================
Args:
nonlinearity: the nonlinear function (`nn.functional` name)
param: optional parameter for the nonlinear function
Examples:
>>> gain = nn.init.calculate_gain('leaky_relu')
"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def uniform(tensor, a=0, b=1):
"""Fills the input Tensor or Variable with values drawn from the uniform distribution :math:`U(a, b)`.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.uniform(w)
"""
if isinstance(tensor, Variable):
uniform(tensor.data, a=a, b=b)
return tensor
return tensor.uniform_(a, b)
def normal(tensor, mean=0, std=1):
"""Fills the input Tensor or Variable with values drawn from the normal distribution :math:`N(mean, std)`.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.normal(w)
"""
if isinstance(tensor, Variable):
normal(tensor.data, mean=mean, std=std)
return tensor
return tensor.normal_(mean, std)
def constant(tensor, val):
"""Fills the input Tensor or Variable with the value `val`.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
val: the value to fill the tensor with
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.constant(w, 0.3)
"""
if isinstance(tensor, Variable):
constant(tensor.data, val)
return tensor
return tensor.fill_(val)
def eye(tensor):
"""Fills the 2-dimensional input Tensor or Variable with the identity matrix. Preserves the identity of the inputs in
Linear layers, where as many inputs are preserved as possible.
Args:
tensor: a 2-dimensional torch.Tensor or autograd.Variable
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.eye(w)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
if isinstance(tensor, Variable):
eye(tensor.data)
return tensor
return tensor.copy_(torch.eye(tensor.size(0), tensor.size(1)))
def dirac(tensor):
"""Fills the {3, 4, 5}-dimensional input Tensor or Variable with the Dirac delta function. Preserves the identity of
the inputs in Convolutional layers, where as many input channels are preserved as possible.
Args:
tensor: a {3, 4, 5}-dimensional torch.Tensor or autograd.Variable
Examples:
>>> w = torch.Tensor(3, 16, 5, 5)
>>> nn.init.dirac(w)
"""
dimensions = tensor.ndimension()
if dimensions not in [3, 4, 5]:
raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")
if isinstance(tensor, Variable):
dirac(tensor.data)
return tensor
sizes = tensor.size()
min_dim = min(sizes[0], sizes[1])
tensor.zero_()
for d in range(min_dim):
if dimensions == 3: # Temporal convolution
tensor[d, d, tensor.size(2) // 2] = 1
elif dimensions == 4: # Spatial convolution
tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2] = 1
else: # Volumetric convolution
tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2, tensor.size(4) // 2] = 1
return tensor
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
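# Illustrative sketch (not part of the original file): fan computation for a
# hypothetical conv2d weight of shape (out_channels, in_channels, kH, kW) =
# (16, 3, 5, 5). The receptive field size is 5 * 5 = 25, so fan_in = 3 * 25 = 75
# and fan_out = 16 * 25 = 400. Defined as a function so nothing runs on import.
def _fan_example():
    w = torch.Tensor(16, 3, 5, 5)
    fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
    assert (fan_in, fan_out) == (75, 400)
    return fan_in, fan_out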
def xavier_uniform(tensor, gain=1):
"""Fills the input Tensor or Variable with values according to the method described in "Understanding the
difficulty of training deep feedforward neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
distribution. The resulting tensor will have values sampled from :math:`U(-a, a)` where
:math:`a = gain \\times \sqrt{2 / (fan\_in + fan\_out)} \\times \sqrt{3}`. Also known as Glorot initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
gain: an optional scaling factor
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.xavier_uniform(w, gain=nn.init.calculate_gain('relu'))
"""
if isinstance(tensor, Variable):
xavier_uniform(tensor.data, gain=gain)
return tensor
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return tensor.uniform_(-a, a)
def xavier_normal(tensor, gain=1):
"""Fills the input Tensor or Variable with values according to the method described in "Understanding the
difficulty of training deep feedforward neural networks" - Glorot, X. & Bengio, Y. (2010), using a normal
distribution. The resulting tensor will have values sampled from :math:`N(0, std)` where
:math:`std = gain \\times \sqrt{2 / (fan\_in + fan\_out)}`. Also known as Glorot initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
gain: an optional scaling factor
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.xavier_normal(w)
"""
if isinstance(tensor, Variable):
xavier_normal(tensor.data, gain=gain)
return tensor
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
return tensor.normal_(0, std)
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def kaiming_uniform(tensor, a=0, mode='fan_in'):
"""Fills the input Tensor or Variable with values according to the method described in "Delving deep into
rectifiers: Surpassing human-level performance on ImageNet classification" - He, K. et al. (2015), using a uniform
distribution. The resulting tensor will have values sampled from :math:`U(-bound, bound)` where
:math:`bound = \sqrt{2 / ((1 + a^2) \\times fan\_in)} \\times \sqrt{3}`. Also known as He initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
a: the negative slope of the rectifier used after this layer (0 for ReLU by default)
mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the
weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass.
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.kaiming_uniform(w, mode='fan_in')
"""
if isinstance(tensor, Variable):
kaiming_uniform(tensor.data, a=a, mode=mode)
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain('leaky_relu', a)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return tensor.uniform_(-bound, bound)
def kaiming_normal(tensor, a=0, mode='fan_in'):
"""Fills the input Tensor or Variable with values according to the method described in "Delving deep into
rectifiers: Surpassing human-level performance on ImageNet classification" - He, K. et al. (2015), using a normal
distribution. The resulting tensor will have values sampled from :math:`N(0, std)` where
:math:`std = \sqrt{2 / ((1 + a^2) \\times fan\_in)}`. Also known as He initialisation.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
a: the negative slope of the rectifier used after this layer (0 for ReLU by default)
mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the
weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass.
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.kaiming_normal(w, mode='fan_out')
"""
if isinstance(tensor, Variable):
kaiming_normal(tensor.data, a=a, mode=mode)
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain('leaky_relu', a)
std = gain / math.sqrt(fan)
return tensor.normal_(0, std)
def orthogonal(tensor, gain=1):
"""Fills the input Tensor or Variable with a (semi) orthogonal matrix, as described in "Exact solutions to the
nonlinear dynamics of learning in deep linear neural networks" - Saxe, A. et al. (2013). The input tensor must have
at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened.
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable, where n >= 2
gain: optional scaling factor
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.orthogonal(w)
"""
if isinstance(tensor, Variable):
orthogonal(tensor.data, gain=gain)
return tensor
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor[0].numel()
flattened = torch.Tensor(rows, cols).normal_(0, 1)
# Compute the qr factorization
q, r = torch.qr(flattened)
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
d = torch.diag(r, 0)
ph = d.sign()
q *= ph.expand_as(q)
# Pad zeros to Q (if rows smaller than cols)
if rows < cols:
padding = torch.zeros(rows, cols - rows)
if q.is_cuda:
q = torch.cat([q, padding.cuda()], 1)
else:
q = torch.cat([q, padding], 1)
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
def sparse(tensor, sparsity, std=0.01):
"""Fills the 2D input Tensor or Variable as a sparse matrix, where the non-zero elements will be drawn from
the normal distribution :math:`N(0, 0.01)`, as described in "Deep learning via
Hessian-free optimization" - Martens, J. (2010).
Args:
tensor: an n-dimensional torch.Tensor or autograd.Variable
sparsity: The fraction of elements in each column to be set to zero
std: the standard deviation of the normal distribution used to generate the non-zero values
Examples:
>>> w = torch.Tensor(3, 5)
>>> nn.init.sparse(w, sparsity=0.1)
"""
if isinstance(tensor, Variable):
sparse(tensor.data, sparsity, std=std)
return tensor
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
tensor.normal_(0, std)
rows, cols = tensor.size(0), tensor.size(1)
num_zeros = int(math.ceil(cols * sparsity))
for col_idx in range(tensor.size(1)):
row_indices = list(range(rows))
random.shuffle(row_indices)
zero_indices = row_indices[:num_zeros]
for row_idx in zero_indices:
tensor[row_idx, col_idx] = 0
return tensor
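# Illustrative sketch (not part of the original file): applying the helpers
# defined above to freshly allocated weight tensors, mirroring the docstring
# examples. Defined as a function so nothing runs on import.
def _init_example():
    w = torch.Tensor(3, 5)
    xavier_uniform(w, gain=calculate_gain('relu'))
    v = torch.Tensor(64, 3, 7, 7)
    kaiming_normal(v, mode='fan_out')
    return w, v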
|
|
# coding: utf-8
# imports
import os
import re
import datetime
from PIL import Image
# filebrowser imports
from filebrowser.settings import SAVE_FULL_URL, ADMIN_THUMBNAIL
from filebrowser.conf import fb_settings
from filebrowser.functions import get_file_type, url_join, get_version_path
class FileObject(object):
"""
The FileObject represents a File on the Server.
PATH has to be relative to MEDIA_ROOT.
"""
def __init__(self, path):
"""
`os.path.split` Split the pathname path into a pair, (head, tail)
where tail is the last pathname component and head is everything
leading up to that. The tail part will never contain a slash;
if path ends in a slash, tail will be empty.
If there is no slash in path, head will be empty.
If path is empty, both head and tail are empty.
"""
self.path = path
self.url_rel = path.replace("\\", "/")
self.head, self.filename = os.path.split(path)
# important for sorting
self.filename_lower = self.filename.lower()
# note: if the filename has no extension, get_file_type classifies it as a folder
self.filetype = get_file_type(self.filename)
def _filesize(self):
"""
Filesize.
"""
path = self.path
if os.path.isfile(os.path.join(fb_settings.MEDIA_ROOT, path)) or \
os.path.isdir(os.path.join(fb_settings.MEDIA_ROOT, path)):
return os.path.getsize(os.path.join(fb_settings.MEDIA_ROOT, path))
return ""
filesize = property(_filesize)
def _date(self):
"""
Date.
"""
if os.path.isfile(os.path.join(fb_settings.MEDIA_ROOT, self.path)) or \
os.path.isdir(os.path.join(fb_settings.MEDIA_ROOT, self.path)):
return os.path.getmtime(
os.path.join(fb_settings.MEDIA_ROOT, self.path)
)
return ""
date = property(_date)
def _datetime(self):
"""
Datetime Object.
"""
return datetime.datetime.fromtimestamp(self.date)
datetime = property(_datetime)
def _extension(self):
"""
Extension.
"""
return u"{0}".format(os.path.splitext(self.filename)[1])
extension = property(_extension)
def _filetype_checked(self):
if self.filetype == "Folder" and os.path.isdir(self.path_full):
return self.filetype
elif self.filetype != "Folder" and os.path.isfile(self.path_full):
return self.filetype
else:
return ""
filetype_checked = property(_filetype_checked)
def _path_full(self):
"""
Full server PATH including MEDIA_ROOT.
"""
return os.path.join(fb_settings.MEDIA_ROOT, self.path)
path_full = property(_path_full)
def _path_relative(self):
return self.path
path_relative = property(_path_relative)
def _path_relative_directory(self):
"""
Path relative to initial directory.
"""
directory_re = re.compile(r'^({0})'.format(fb_settings.DIRECTORY))
value = directory_re.sub('', self.path)
return value
path_relative_directory = property(_path_relative_directory)
def _url_relative(self):
return self.url_rel
url_relative = property(_url_relative)
def _url_full(self):
"""
Full URL including MEDIA_URL.
"""
return url_join(fb_settings.MEDIA_URL, self.url_rel)
url_full = property(_url_full)
def _url_save(self):
"""
URL used for the FileBrowseField.
"""
if SAVE_FULL_URL:
return self.url_full
else:
return self.url_rel
url_save = property(_url_save)
def _url_thumbnail(self):
"""
Thumbnail URL.
"""
if self.filetype == "Image":
return "{0}".format(
url_join(
fb_settings.MEDIA_URL,
get_version_path(self.path, ADMIN_THUMBNAIL)
)
)
else:
return ""
url_thumbnail = property(_url_thumbnail)
def url_admin(self):
if self.filetype_checked == "Folder":
directory_re = re.compile(r'^({0})'.format(fb_settings.DIRECTORY))
value = directory_re.sub('', self.path)
return "{0}".format(value)
else:
return "{0}".format(url_join(fb_settings.MEDIA_URL, self.path))
def _dimensions(self):
"""
Image Dimensions.
"""
if self.filetype == 'Image':
try:
im = Image.open(
os.path.join(fb_settings.MEDIA_ROOT, self.path)
)
return im.size
except IOError:
pass
else:
return False
dimensions = property(_dimensions)
def _width(self):
"""
Image Width.
"""
return self.dimensions[0]
width = property(_width)
def _height(self):
"""
Image Height.
"""
return self.dimensions[1]
height = property(_height)
def _orientation(self):
"""
Image Orientation.
"""
if self.dimensions:
if self.dimensions[0] >= self.dimensions[1]:
return "Landscape"
else:
return "Portrait"
else:
return None
orientation = property(_orientation)
def _is_empty(self):
"""
True if Folder is empty, False if not.
"""
if os.path.isdir(self.path_full):
if not os.listdir(self.path_full):
return True
else:
return False
else:
return None
is_empty = property(_is_empty)
def __repr__(self):
return self.url_save
def __str__(self):
return self.url_save
def __unicode__(self):
return self.url_save
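# Illustrative sketch (not part of the original module): the path-derived
# attributes for a hypothetical relative path. These attributes are computed
# in __init__ and from os.path, without touching the filesystem.
def _file_object_example():
    f = FileObject('uploads/images/photo.jpg')
    assert f.head == 'uploads/images'   # everything before the last component
    assert f.filename == 'photo.jpg'    # the last path component
    assert f.extension == '.jpg'        # from os.path.splitext
    return f.url_relative               # backslashes normalised to slashes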
|
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import (
truncated_discrete_set, strict_discrete_set,
truncated_range
)
from time import sleep
import numpy as np
import re
class Yokogawa7651(Instrument):
""" Represents the Yokogawa 7651 Programmable DC Source
and provides a high-level interface for interacting with the instrument.
.. code-block:: python
yoko = Yokogawa7651("GPIB::1")
yoko.apply_current() # Sets up to source current
yoko.source_current_range = 10e-3 # Sets the current range to 10 mA
yoko.compliance_voltage = 10 # Sets the compliance voltage to 10 V
yoko.source_current = 0 # Sets the source current to 0 mA
yoko.enable_source() # Enables the current output
yoko.ramp_to_current(5e-3) # Ramps the current to 5 mA
yoko.shutdown() # Ramps the current to 0 mA and disables output
"""
@staticmethod
def _find(v, key):
""" Returns a value by parsing a current panel setting output
string array, which is returned with a call to "OS;E". This
is used for Instrument.control methods, and should not be
called directly by the user.
"""
status = ''.join(v.split("\r\n\n")[1:-1])
keys = re.findall('[^\dE+.-]+', status)
values = re.findall('[\dE+.-]+', status)
if key not in keys:
raise ValueError("Invalid key used to search for status of Yokogawa 7561")
else:
return values[keys.index(key)]
source_voltage = Instrument.control(
"OD;E", "S%g;E",
""" A floating point property that controls the source voltage
in Volts, if that mode is active. """
)
source_current = Instrument.control(
"OD;E", "S%g;E",
""" A floating point property that controls the source current
in Amps, if that mode is active. """
)
source_voltage_range = Instrument.control(
"OS;E", "R%d;E",
""" A floating point property that sets the source voltage range
in Volts, which can take values: 10 mV, 100 mV, 1 V, 10 V, and 30 V.
Voltages are truncated to an appropriate value if needed. """,
validator=truncated_discrete_set,
values={10e-3:2, 100e-3:3, 1:4, 10:5, 30:6},
map_values=True,
get_process=lambda v: int(Yokogawa7651._find(v, 'R'))
)
source_current_range = Instrument.control(
"OS;E", "R%d;E",
""" A floating point property that sets the current voltage range
in Amps, which can take values: 1 mA, 10 mA, and 100 mA.
Currents are truncted to an appropriate value if needed. """,
validator=truncated_discrete_set,
values={1e-3:4, 10e-3:5, 100e-3:6},
map_values=True,
get_process=lambda v: int(Yokogawa7651._find(v, 'R'))
)
source_mode = Instrument.control(
"OS;E", "F%d;E",
""" A string property that controls the source mode, which can
take the values 'current' or 'voltage'. The convenience methods
:meth:`~.Yokogawa7651.apply_current` and :meth:`~.Yokogawa7651.apply_voltage`
can also be used. """,
validator=strict_discrete_set,
values={'current':5, 'voltage':1},
map_values=True,
get_process=lambda v: int(Yokogawa7651._find(v, 'F'))
)
compliance_voltage = Instrument.control(
"OS;E", "LV%g;E",
""" A floating point property that sets the compliance voltage
in Volts, which can take values between 1 and 30 V. """,
validator=truncated_range,
values=[1, 30],
get_process=lambda v: int(Yokogawa7651._find(v, 'LV'))
)
compliance_current = Instrument.control(
"OS;E", "LA%g;E",
""" A floating point property that sets the compliance current
in Amps, which can take values from 5 to 120 mA. """,
validator=truncated_range,
values=[5e-3, 120e-3],
get_process=lambda v: float(Yokogawa7651._find(v, 'LA'))*1e-3, # converts A to mA
set_process=lambda v: v*1e3, # converts mA to A
)
def __init__(self, adapter, **kwargs):
super(Yokogawa7651, self).__init__(
adapter, "Yokogawa 7651 Programmable DC Source", **kwargs
)
self.write("H0;E") # Set no header in output data
@property
def id(self):
""" Returns the identification of the instrument """
return self.ask("OS;E").split('\r\n\n')[0]
@property
def source_enabled(self):
""" Reads a boolean value that is True if the source is enabled,
determined by checking if the 5th bit of the OC flag is a binary 1.
"""
oc = int(self.ask("OC;E")[5:])
return oc & 0b10000
def enable_source(self):
""" Enables the source of current or voltage depending on the
configuration of the instrument. """
self.write("O1;E")
def disable_source(self):
""" Disables the source of current or voltage depending on the
configuration of the instrument. """
self.write("O0;E")
def apply_current(self, max_current=1e-3, compliance_voltage=1):
""" Configures the instrument to apply a source current, which can
take optional parameters that defer to the :attr:`~.Yokogawa7651.source_current_range`
and :attr:`~.Yokogawa7651.compliance_voltage` properties. """
self.source_mode = 'current'
self.source_current_range = max_current
self.compliance_voltage = compliance_voltage
def apply_voltage(self, max_voltage=1, compliance_current=10e-3):
""" Configures the instrument to apply a source voltage, which can
take optional parameters that defer to the :attr:`~.Yokogawa7651.source_voltage_range`
and :attr:`~.Yokogawa7651.compliance_current` properties. """
self.source_mode = 'voltage'
self.source_voltage_range = max_voltage
self.compliance_current = compliance_current
def ramp_to_current(self, current, steps=25, duration=0.5):
""" Ramps the current to a value in Amps by traversing a linear spacing
of current steps over a duration, defined in seconds.
:param steps: A number of linear steps to traverse
:param duration: A time in seconds over which to ramp
"""
start_current = self.source_current
stop_current = current
pause = duration/steps
if (start_current != stop_current):
currents = np.linspace(start_current, stop_current, steps)
for current in currents:
self.source_current = current
sleep(pause)
def ramp_to_voltage(self, voltage, steps=25, duration=0.5):
""" Ramps the voltage to a value in Volts by traversing a linear spacing
of voltage steps over a duration, defined in seconds.
:param steps: A number of linear steps to traverse
:param duration: A time in seconds over which to ramp
"""
start_voltage = self.source_voltage
stop_voltage = voltage
pause = duration/steps
if (start_voltage != stop_voltage):
voltages = np.linspace(start_voltage, stop_voltage, steps)
for voltage in voltages:
self.source_voltage = voltage
sleep(pause)
def shutdown(self):
""" Shuts down the instrument, and ramps the current or voltage to zero
before disabling the source. """
# Since voltage and current are set the same way, this
# ramps either the current or voltage to zero
self.ramp_to_current(0.0, steps=25)
self.source_current = 0.0
self.disable_source()
super(Yokogawa7651, self).shutdown()
|
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
slimmed down version of OVSBridge in quantum agent
"""
import functools
from ryu import cfg
import logging
import ryu.exception as ryu_exc
import ryu.lib.dpid as dpid_lib
import ryu.lib.ovs.vsctl as ovs_vsctl
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts([
cfg.IntOpt('ovsdb-timeout', default=2, help='ovsdb timeout')
])
class OVSBridgeNotFound(ryu_exc.RyuException):
message = 'no bridge for datapath_id %(datapath_id)s'
class VifPort(object):
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
super(VifPort, self).__init__()
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ('iface-id=%s, '
'vif_mac=%s, '
'port_name=%s, '
'ofport=%d, '
'bridge_name=%s' % (self.vif_id,
self.vif_mac,
self.port_name,
self.ofport,
self.switch.br_name))
class TunnelPort(object):
def __init__(self, port_name, ofport, tunnel_type, local_ip, remote_ip):
super(TunnelPort, self).__init__()
self.port_name = port_name
self.ofport = ofport
self.tunnel_type = tunnel_type
self.local_ip = local_ip
self.remote_ip = remote_ip
def __eq__(self, other):
return (self.port_name == other.port_name and
self.ofport == other.ofport and
self.tunnel_type == other.tunnel_type and
self.local_ip == other.local_ip and
self.remote_ip == other.remote_ip)
def __str__(self):
return ('port_name=%s, '
'ofport=%s, '
'type=%s, '
'local_ip=%s, '
'remote_ip=%s' % (self.port_name,
self.ofport,
self.tunnel_type,
self.local_ip,
self.remote_ip))
class OVSBridge(object):
def __init__(self, CONF, datapath_id, ovsdb_addr, timeout=None,
exception=None):
super(OVSBridge, self).__init__()
self.datapath_id = datapath_id
self.vsctl = ovs_vsctl.VSCtl(ovsdb_addr)
self.timeout = timeout or CONF.ovsdb_timeout
self.exception = exception
self.br_name = None
def run_command(self, commands):
self.vsctl.run_command(commands, self.timeout, self.exception)
def init(self):
if self.br_name is None:
self.br_name = self._get_bridge_name()
def _get_bridge_name(self):
""" get Bridge name of a given 'datapath_id' """
command = ovs_vsctl.VSCtlCommand(
'find',
('Bridge',
'datapath_id=%s' % dpid_lib.dpid_to_str(self.datapath_id)))
self.run_command([command])
result = command.result
if len(result) == 0 or len(result) > 1:
raise OVSBridgeNotFound(
datapath_id=dpid_lib.dpid_to_str(self.datapath_id))
return result[0].name
def get_controller(self):
command = ovs_vsctl.VSCtlCommand('get-controller', [self.br_name])
self.run_command([command])
return command.result[0]
def set_controller(self, controllers):
command = ovs_vsctl.VSCtlCommand('set-controller', [self.br_name])
command.args.extend(controllers)
self.run_command([command])
def del_controller(self):
command = ovs_vsctl.VSCtlCommand('del-controller', [self.br_name])
self.run_command([command])
def set_db_attribute(self, table_name, record, column, value):
command = ovs_vsctl.VSCtlCommand(
'set', (table_name, record, '%s=%s' % (column, value)))
self.run_command([command])
def clear_db_attribute(self, table_name, record, column):
command = ovs_vsctl.VSCtlCommand('clear', (table_name, record, column))
self.run_command([command])
def db_get_val(self, table, record, column):
command = ovs_vsctl.VSCtlCommand('get', (table, record, column))
self.run_command([command])
assert len(command.result) == 1
return command.result[0]
def db_get_map(self, table, record, column):
val = self.db_get_val(table, record, column)
assert type(val) == dict
return val
def get_datapath_id(self):
return self.db_get_val('Bridge', self.br_name, 'datapath_id')
def delete_port(self, port_name):
command = ovs_vsctl.VSCtlCommand(
'del-port', (self.br_name, port_name), ('--if-exists', ))
self.run_command([command])
def get_ofport(self, port_name):
ofport_list = self.db_get_val('Interface', port_name, 'ofport')
assert len(ofport_list) == 1
return int(ofport_list[0])
def get_port_name_list(self):
command = ovs_vsctl.VSCtlCommand('list-ports', (self.br_name, ))
self.run_command([command])
return command.result
def add_tunnel_port(self, name, tunnel_type, local_ip, remote_ip,
key=None):
options = 'local_ip=%(local_ip)s,remote_ip=%(remote_ip)s' % locals()
if key:
options += ',key=%(key)s' % locals()
command_add = ovs_vsctl.VSCtlCommand('add-port', (self.br_name, name))
command_set = ovs_vsctl.VSCtlCommand(
'set', ('Interface', name,
'type=%s' % tunnel_type, 'options=%s' % options))
self.run_command([command_add, command_set])
def add_gre_port(self, name, local_ip, remote_ip, key=None):
self.add_tunnel_port(name, 'gre', local_ip, remote_ip, key=key)
def del_port(self, port_name):
command = ovs_vsctl.VSCtlCommand('del-port', (self.br_name, port_name))
self.run_command([command])
def _get_ports(self, get_port):
ports = []
port_names = self.get_port_name_list()
for name in port_names:
if self.get_ofport(name) < 0:
continue
port = get_port(name)
if port:
ports.append(port)
return ports
def _vifport(self, name, external_ids):
ofport = self.get_ofport(name)
return VifPort(name, ofport, external_ids['iface-id'],
external_ids['attached-mac'], self)
def _get_vif_port(self, name):
external_ids = self.db_get_map('Interface', name, 'external_ids')
if 'iface-id' in external_ids and 'attached-mac' in external_ids:
return self._vifport(name, external_ids)
def get_vif_ports(self):
'returns a VIF object for each VIF port'
return self._get_ports(self._get_vif_port)
def _get_external_port(self, name):
# exclude vif ports
external_ids = self.db_get_map('Interface', name, 'external_ids')
if external_ids:
return
# exclude tunnel ports
options = self.db_get_map('Interface', name, 'options')
if 'remote_ip' in options:
return
ofport = self.get_ofport(name)
return VifPort(name, ofport, None, None, self)
def get_external_ports(self):
return self._get_ports(self._get_external_port)
def get_tunnel_port(self, name, tunnel_type='gre'):
type_ = self.db_get_val('Interface', name, 'type')
if type_ != tunnel_type:
return
options = self.db_get_map('Interface', name, 'options')
if 'local_ip' in options and 'remote_ip' in options:
ofport = self.get_ofport(name)
return TunnelPort(name, ofport, tunnel_type,
options['local_ip'], options['remote_ip'])
def get_tunnel_ports(self, tunnel_type='gre'):
get_tunnel_port = functools.partial(self.get_tunnel_port,
tunnel_type=tunnel_type)
return self._get_ports(get_tunnel_port)
def get_quantum_ports(self, port_name):
LOG.debug('port_name %s', port_name)
command = ovs_vsctl.VSCtlCommand(
'list-ifaces-verbose',
[dpid_lib.dpid_to_str(self.datapath_id), port_name])
self.run_command([command])
if command.result:
return command.result[0]
return None
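    # Hedged usage sketch (illustrative only; assumes an instance of this
    # bridge wrapper is available as `bridge` and that ovs-vsctl is reachable):
    #
    #   bridge.add_gre_port('gre-1', '192.168.0.1', '192.168.0.2', key=100)
    #   # runs: add-port <bridge> gre-1, then set Interface gre-1 type=gre
    #   #       options=local_ip=192.168.0.1,remote_ip=192.168.0.2,key=100
    #   for tp in bridge.get_tunnel_ports('gre'):
    #       print(tp)  # TunnelPort(name, ofport, 'gre', local_ip, remote_ip)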
|
|
###############################################################
# MPySIR: MPI python script for SIR
#
# SCRIPT: sirutils.py
###############################################################
"""
# Author: [email protected]
# Date: 09.06.2015
# Version: 1.4
"""
"""
Some functions useful for MPI parallel programming
"""
from mpi4py import MPI
from pySir.sirtools import lmodel8
#=============================================================================
def sirexe(fila, columna, myHeight, rank, sirfile, modeloFin, resultadoSir, sirmode, chi2map = True):
finalProfile = 'hsraB_3.per'
if sirmode == 'gammaV' or sirmode == 'gammVaddFullProfile':
gammaV()
if sirmode == 'medianFilter':
medianFilter(fila, columna)
import os
os.system('echo sir.trol | '+sirfile+' > pylog.txt')
    # Now store the results in memory
tau, magnitudes = lmodel8(modeloFin,verbose=False)
ERRtau, ERRmagnitudes = lmodel8(modeloFin[0:-4]+'.err',verbose=False)
magnitudes.insert(0,tau)
ERRmagnitudes.insert(0,ERRtau)
    if chi2map:
        # Read the last line of sir.chi, which holds the final chi2 value
        chifile = open('sir.chi', 'r')
        for line in chifile:
            pass
        chifile.close()
        chi2 = float(line.split()[1])
        lenmag = len(magnitudes)
        magnitudes.insert(lenmag, chi2)
        ERRmagnitudes.insert(lenmag, chi2)
if sirmode == 'addFullProfile' or sirmode == 'gammVaddFullProfile':
addFullProfile(sirfile)
finalProfile = 'dataFull.per'
from pySir.sirtools import lperfil
xFull, stokesFull, [nL,posi,nN] = lperfil(finalProfile,verbose=False)
perfiles = [xFull,stokesFull]
modelos = [magnitudes, ERRmagnitudes]
punto = [fila + myHeight*rank, columna]
resultadoSir.append([punto,modelos,perfiles])
if sirmode == 'beforePixel':
os.system('rm hsraB.mod'); os.system('cp hsraB_3.mod hsraB.mod')
#=============================================================================
def pncore():
import platform; _platform = platform.system() # We execute SIR according to the OS:
from subprocess import PIPE, Popen
if _platform == "Linux": # Linux OS
proceso = Popen(['nproc'], stdout=PIPE, stderr=PIPE)
ncores = proceso.stdout.read().split('\n')[0]
print('Available cores = '+ncores)
elif _platform == "Darwin": # MAC OS X
proceso = Popen(['sysctl','hw.ncpu'], stdout=PIPE, stderr=PIPE)
ncores = proceso.stdout.read().split('\n')[0]
print('Available cores = '+ncores.split(':')[-1])
#=============================================================================
def pprint(ini='', end='', comm=MPI.COMM_WORLD):
"""Print for MPI parallel programs: Only rank 0 prints *str*."""
if comm.rank == 0:
print(str(ini)+end)
#=============================================================================
def getTerminalSize():
import os
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
# import os
# cr[0],cr[1] = os.popen('stty size', 'r').read().split()
except:
pass
# print("Default")
# cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
#=============================================================================
def plotper():
import matplotlib.pyplot as plt
import os
import numpy as np
# import seaborn as sns
# #sns.set(font="serif")
# sns.set_style("ticks")
filename='hsraB_3.per'
#LineName=['SiI 10827.1']
LineName = ['FeI 6301.5']#,'FeI 6302.5','SiI 10827.1','FeI 15648.5','FeI 15652.9']
NumeroLineas = len(LineName)
MainFile = 'data.per'
Color2 = 'm'
Color1='k'
# XRANGEMAX = array([0.68,0.6,1.6,1.,1.])
# XRANGEMIN = -XRANGEMAX; XRANGEMIN[0] = -3
YRANGEMAX = np.array([1.1,3.,3.,3.,3.])
YRANGEMIN = -YRANGEMAX; YRANGEMIN[0] = 0
from pySir.sirtools import lperfil
    # Open the files:
x0, stokes0, [nL,posi,nN] = lperfil(MainFile)
    x0 = np.array(x0)
StokeI0=stokes0[0]
StokeQ0=stokes0[1]
StokeU0=stokes0[2]
StokeV0=stokes0[3]
x, stokes, [nL,posi,nN] = lperfil(filename)
x = np.array(x)
StokeI=stokes[0]
StokeQ=stokes[1]
StokeU=stokes[2]
StokeV=stokes[3]
lennN = len(nN)
NumeroLineas = nL
PosiNn0T = list(posi); PosiNn0T.append(lennN-1)
PosiNn1T = list(posi); PosiNn1T.append(lennN-1)
x0A=x0/1000.
xA=x/1000.
sParam = 4
plt.figure(figsize=(15,5))
    # Example of a title:
# title(r' '+LineName[Index].split()[0]+' $'+LineName[Index].split()[1]+'\AA$',fontsize=10)
for Index in range(0,NumeroLineas):
plt.subplot(NumeroLineas,sParam,Index+1)
plt.plot(x0A[PosiNn0T[Index]:PosiNn0T[Index+1]-1],StokeI0[PosiNn0T[Index]:PosiNn0T[Index+1]-1],Color1,lw=1.0)
# xticks(fontsize = 7)
# yticks(fontsize = 9)
plt.tick_params(axis='y', direction='in')
plt.tick_params(axis='x', direction='in')
plt.xlabel(r'$\Delta\lambda$ [$\AA$]', fontsize=15)
plt.ylabel(r'$I/I_c$', fontsize=15)
plt.xlim(x0A[PosiNn0T[Index]],x0A[PosiNn0T[Index+1]-1])
plt.ylim(YRANGEMIN[0],YRANGEMAX[0])
plt.grid(alpha=0.2,linestyle='-')
plt.locator_params(axis = 'x', nbins = 4)
plt.locator_params(axis = 'y', nbins = 6)
plt.subplot(NumeroLineas,sParam,Index+1+NumeroLineas)
plt.plot(x0A[PosiNn0T[Index]:PosiNn0T[Index+1]-1],100*StokeQ0[PosiNn0T[Index]:PosiNn0T[Index+1]-1],Color1,lw=1.0)
# xticks(fontsize = 7)
# yticks(fontsize = 9)
plt.tick_params(axis='y', direction='in')
plt.tick_params(axis='x', direction='in')
plt.xlabel(r'$\Delta\lambda$ [$\AA$]', fontsize=15)
plt.ylabel(r'$Q/I_c$ $[\%]$', fontsize=15)
plt.ylim(YRANGEMIN[1],YRANGEMAX[1])
plt.xlim(x0A[PosiNn0T[Index]],x0A[PosiNn0T[Index+1]-1])
plt.grid(alpha=0.2,linestyle='-')
plt.locator_params(axis = 'x', nbins = 4)
plt.locator_params(axis = 'y', nbins = 6)
plt.subplot(NumeroLineas,sParam,Index+2+NumeroLineas)
plt.plot(x0A[PosiNn0T[Index]:PosiNn0T[Index+1]-1],100*StokeU0[PosiNn0T[Index]:PosiNn0T[Index+1]-1],Color1,lw=1.0)
# xticks(fontsize = 7)
# yticks(fontsize = 9)
plt.tick_params(axis='y', direction='in')
plt.tick_params(axis='x', direction='in')
plt.xlabel(r'$\Delta\lambda$ [$\AA$]', fontsize=15)
plt.ylabel(r'$U/I_c$ $[\%]$', fontsize=15)
plt.grid(alpha=0.2,linestyle='-')
plt.locator_params(axis = 'x', nbins = 4)
plt.xlim(x0A[PosiNn0T[Index]],x0A[PosiNn0T[Index+1]-1])
plt.ylim(YRANGEMIN[2],YRANGEMAX[2])
plt.locator_params(axis = 'y', nbins = 6)
plt.subplot(NumeroLineas,sParam,Index+3+NumeroLineas)
plt.plot(x0A[PosiNn0T[Index]:PosiNn0T[Index+1]-1],100*StokeV0[PosiNn0T[Index]:PosiNn0T[Index+1]-1],Color1,lw=1.0)
# xticks(fontsize = 7)
# yticks(fontsize = 9)
plt.tick_params(axis='y', direction='in')
plt.tick_params(axis='x', direction='in')
plt.xlabel(r'$\Delta\lambda$ [$\AA$]', fontsize=15)
plt.ylabel(r'$V/I_c$ $[\%]$', fontsize=15)
plt.ylim(YRANGEMIN[3],YRANGEMAX[3])
plt.xlim(x0A[PosiNn0T[Index]],x0A[PosiNn0T[Index+1]-1])
plt.grid(alpha=0.2,linestyle='-')
plt.locator_params(axis = 'x', nbins = 4)
plt.locator_params(axis = 'y', nbins = 6)
# ========================================================================================================
plt.subplot(NumeroLineas,sParam,Index+1)
plt.plot(xA[PosiNn1T[Index]:PosiNn1T[Index+1]-1],StokeI[PosiNn1T[Index]:PosiNn1T[Index+1]-1],Color2,lw=1.0)
plt.subplot(NumeroLineas,sParam,Index+1+NumeroLineas)
plt.plot(xA[PosiNn1T[Index]:PosiNn1T[Index+1]-1],100*StokeQ[PosiNn1T[Index]:PosiNn1T[Index+1]-1],Color2,lw=1.0)
plt.subplot(NumeroLineas,sParam,Index+2+NumeroLineas)
plt.plot(xA[PosiNn1T[Index]:PosiNn1T[Index+1]-1],100*StokeU[PosiNn1T[Index]:PosiNn1T[Index+1]-1],Color2,lw=1.0)
plt.subplot(NumeroLineas,sParam,Index+3+NumeroLineas)
plt.plot(xA[PosiNn1T[Index]:PosiNn1T[Index+1]-1],100*StokeV[PosiNn1T[Index]:PosiNn1T[Index+1]-1],Color2,lw=1.0)
# ========================================================================================================
plt.tight_layout()
plt.savefig('P'+filename[0:-4]+'.pdf', bbox_inches='tight')#, pad_inches=0)
    print('P'+filename[0:-4]+'.pdf'+':: SAVED')
return
#=============================================================================
def plotmfit():
# def plotmfit(initModel,outModel,errorModel=False,whichMag,verbose=False):
import matplotlib.pyplot as plt
import os
import numpy as np
# import seaborn as sns
# sns.set_style("ticks")
filename = 'hsraB_3.mod'
errorModel = True
#====================================================================
    # 0 := temp, 1 := pres, 2 := vmic, 3 := B, 4 := vlos, 5 := gamma
PosiPlot = [0,3,4,5]
LabelPlot= ['$T$ $[kK]$',r'$P_e$'+' [dyn cm^-3]',r'$v_{mic}$'+' [km/s]','$B$ $[kG]$',r'$v_{LOS}$'+' $[km/s]$',r'$\gamma$ $[deg]$']
TEXTAU = r'$\tau$'
NumPlots = len(PosiPlot)
MainFile = 'hsraB.mod'
Color1='k'
Color2 = 'm'
from pySir.sirtools import lmodel8
tau, TodoPlot = lmodel8(MainFile,verbose=False)
tau2, TodoPlot2 = lmodel8(filename,verbose=False)
if errorModel:
ERRtau2, TodoPlot3 = lmodel8(filename[0:-4]+'.err')
MMargen=[0.2,0.3,0.3,0.3]
plt.figure(figsize=(4*NumPlots,5))
for i in range(0,len(PosiPlot)):
plt.subplot(1,len(PosiPlot),i+1)
Cantidad0=TodoPlot[PosiPlot[i]]
Cantidad1=TodoPlot2[PosiPlot[i]]
plt.plot(tau,Cantidad0,Color1,lw=1.0)
plt.plot(tau2,Cantidad1,Color2,lw=1.0)
if errorModel:
ERRCantidad1=TodoPlot3[PosiPlot[i]]
plt.fill_between(tau2, Cantidad1-ERRCantidad1, Cantidad1+ERRCantidad1,facecolor='m', alpha=0.2)
#ylim(YRANGEMIN[Index],YRANGEMAX[Index])
plt.xlim(min(tau2),max(tau2))
Min0 = min(Cantidad0)
Min1 = min(Cantidad1)
Max0 = max(Cantidad0)
Max1 = max(Cantidad1)
if Min0 == Min1 : MinAbs = Min0
if Min0 != Min1 : MinAbs = min([Min0,Min1])
if Max0 == Max1 : MaxAbs = Max0
if Max0 != Max1 : MaxAbs = max([Max0,Max1])
NuevoMargen= abs(MinAbs-MaxAbs)*MMargen[i]
plt.ylim(MinAbs-NuevoMargen,MaxAbs+NuevoMargen)
#ylim(0.,2.5)
plt.tick_params(axis='y', direction='in')#,labelsize=20)
plt.tick_params(axis='x', direction='in')#,labelsize=20)
plt.xlabel(r'$log(\tau)$', fontsize=20)
plt.ylabel(LabelPlot[PosiPlot[i]], fontsize=20)
plt.locator_params(axis = 'y', nbins = 6)
plt.locator_params(axis = 'x', nbins = 4)
plt.grid(alpha=0.2,linestyle='-')
plt.tight_layout()
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.4, hspace=0.3)
plt.savefig('M'+filename[0:-4]+'.pdf', bbox_inches='tight')#, pad_inches=0)
    print('M'+filename[0:-4]+'.pdf'+':: SAVED')
return
#=============================================================================
def cerca(number, array):
from numpy import argmin, abs
indice = argmin(abs(number-array))
return indice
#=============================================================================
def gammaV():
from scipy import integrate
from scipy.interpolate import interp1d
MainFile = 'data.per'
from pySir.sirtools import lperfil
x0, stokes0, [nL,posi,nN] = lperfil(MainFile)
indice = cerca(0.,x0)
distmin = min([abs(indice),abs(len(x0)-indice)])
centro = indice
x = x0
y = stokes0[3]
xlobuloazul = x[centro+1-distmin:centro+1]
ylobuloazul = y[centro+1-distmin:centro+1]
xlobulorojo = x[centro:centro+distmin]
ylobulorojo = y[centro:centro+distmin]
int_roja = integrate.simps(ylobulorojo,xlobulorojo)
int_azul = integrate.simps(ylobuloazul,xlobuloazul)
if int_azul > int_roja: gamma = 45.0
if int_azul < int_roja: gamma = 135.0
from pySir.sirtools import lmodel8, wmodel8
from numpy import ones
tau, magnitudes = lmodel8('hsraB.mod',verbose=False)
modelo = [tau, magnitudes]
magnitudes[5] = gamma*ones(len(magnitudes[5]))
wmodel8(modelo,'hsraB.mod',verbose=False)
#=============================================================================
def medianFilter(fila, columna):
import numpy as np
medianGAMMA = np.load('medianGAMMA.npy')
gamma_median = medianGAMMA[columna, fila]
from pySir.sirtools import lmodel8, wmodel8
from numpy import ones
tau, magnitudes = lmodel8('hsraB.mod',verbose=False)
modelo = [tau, magnitudes]
magnitudes[5] = gamma_median*ones(len(magnitudes[5]))
wmodel8(modelo,'hsraB.mod',verbose=False)
#=============================================================================
def addFullProfile(sirfile):
import os
os.system('echo sirFull.trol | '+sirfile+' > pylogFull.txt')
|
|
import re
import urllib
import os
from datetime import datetime
from django.conf import settings
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.functional import curry
from django.core.files.images import ImageFile
from django.contrib.staticfiles import finders
from sorl.thumbnail import get_thumbnail
class FilePath(unicode):
def __new__(cls, str, instance=None, field=None, settings={}):
self = super(FilePath, cls).__new__(cls, str.strip())
self._instance = instance
self._field = field
self._exists = None
self._size = None
self._accessed_time = None
self._created_time = None
self._modified_time = None
self._thumbnails = {}
self.settings = {
'img_attrs': {},
'thumbnail_size': None,
'thumbnail_attrs': {},
}
self.settings.update(settings)
return self
def _html_attrs(self, **kwargs):
attrs = {}
attrs.update(kwargs)
if 'css_class' in attrs:
attrs['class'] = attrs['css_class']
del attrs['css_class']
return attrs
@property
def unescaped(self):
return urllib.unquote(self)
@property
def escaped(self):
return urllib.quote(self.unescaped)
@property
def url(self):
if not self.startswith('/') and self.find('//') == -1:
return os.path.join(settings.MEDIA_URL, self.escaped)
return self.escaped
@property
def local_path(self):
if not self.startswith('/') and self.find('//') == -1:
return os.path.join(settings.MEDIA_ROOT, urllib.unquote(self))
return self
def _get_local_path_or_file(self):
# if file is in static instead of media directory, sorl raises
# a suspicious operation error. So we open it safely without errors.
if self.startswith('/'):
if self.startswith('/static/'):
path = self.replace('/static/', '')
elif self.startswith(settings.STATIC_URL):
path = self.replace(settings.STATIC_URL, '')
else:
return self.local_path
else:
return self.local_path
path = finders.find(urllib.unquote(path))
        image = ImageFile(open(path, 'rb'))
return image
@property
def filename(self):
return urllib.unquote(re.sub(r'^.+\/', '', self))
@property
def display_name(self):
without_extension = re.sub(r'\.[\w\d]+$', '', self.filename)
with_spaces = re.sub(r'_', ' ', without_extension)
return with_spaces
@property
def ext(self):
return re.sub(r'^.+\.', '', self.filename)
def exists(self):
        if self._exists is None:
self._exists = os.path.exists(self.local_path)
return self._exists
def get_size(self):
        if self._size is None:
self._size = os.path.getsize(self.local_path)
return self._size
def get_accessed_time(self):
if self._accessed_time is None:
self._accessed_time = datetime.fromtimestamp(os.path.getatime(self.local_path))
return self._accessed_time
def get_created_time(self):
if self._created_time is None:
self._created_time = datetime.fromtimestamp(os.path.getctime(self.local_path))
return self._created_time
def get_modified_time(self):
if self._modified_time is None:
self._modified_time = datetime.fromtimestamp(os.path.getmtime(self.local_path))
return self._modified_time
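# Hedged usage sketch (illustrative values): FilePath is a unicode subclass,
# so ordinary string operations still work, while the extra properties derive
# display values from the stored (URL-quoted, MEDIA_ROOT-relative) path:
#
#   p = FilePath(u'docs/Annual_Report.pdf')
#   p.filename       # u'Annual_Report.pdf'
#   p.display_name   # u'Annual Report'  (extension stripped, '_' -> ' ')
#   p.ext            # u'pdf'
#   p.url            # settings.MEDIA_URL joined with the escaped relative path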
class ImagePath(FilePath):
def img_tag(self, **kwargs):
attrs = {}
attrs.update(self.settings['img_attrs'])
attrs.update(kwargs)
attrs = self._html_attrs(**attrs)
attrs_str = ''.join([
u'%s="%s" ' % (key, value)
for key, value in attrs.items()
])
return mark_safe(u'<img src="%s" %s/>' % (self.url, attrs_str))
def _thumbnail_file_format(self):
if self.ext.lower() in ['gif', 'png']:
return 'PNG'
return 'JPEG'
def thumbnail(self, size=None, **kwargs):
size = size or self.settings['thumbnail_size']
if not size:
raise Exception('No thumbnail size supplied')
attrs = {
'format': self._thumbnail_file_format(),
'upscale': False,
}
attrs.update(self.settings['thumbnail_attrs'])
attrs.update(kwargs)
all_attrs = {'size': size}
all_attrs.update(attrs)
key = hash(frozenset(all_attrs))
if not key in self._thumbnails:
#self._thumbnails[key] = get_thumbnail(self._get_local_path_or_file(), size, **attrs)
self._thumbnails[key] = get_thumbnail(self.local_path, size, **attrs)
return self._thumbnails[key]
def thumbnail_tag(self, size, opts={}, **kwargs):
try:
thumbnail = self.thumbnail(size, **opts)
except EnvironmentError, e:
if settings.THUMBNAIL_DEBUG:
raise e
return ''
src = ImagePath(thumbnail.url, self._instance, self._field)
attrs = { 'width': thumbnail.width, 'height': thumbnail.height }
attrs.update(self.settings['img_attrs'])
attrs.update(kwargs)
return src.img_tag(**attrs)
def __getattr__(self, attr):
thumbnail_mxn = re.match(r'^thumbnail_(tag_)?(\d*x?\d+)$', attr)
if thumbnail_mxn:
tag = thumbnail_mxn.group(1) == 'tag_'
size = thumbnail_mxn.group(2)
if tag:
return curry(self.thumbnail_tag, size)
else:
return curry(self.thumbnail, size)
raise AttributeError
class FilePaths(unicode):
item_class = FilePath
def __new__(cls, str, instance=None, field=None, settings={}):
self = super(FilePaths, cls).__new__(cls, str)
self._instance = instance
self._field = field
self._all = None
self._length = None
self._current = 0
self.settings = {
'img_attrs': {},
'thumbnail_size': None,
'thumbnail_attrs': {},
}
self.settings.update(settings)
return self
def all(self):
        if self._all is None:
self._all = []
for f in self.splitlines():
self._all.append(self._field.attr_class.item_class(f, self._instance, self._field, self.settings))
self._length = len(self._all)
return self._all
def count(self):
self.all()
return self._length
def first(self):
return self.all() and self.all()[0] or None
def last(self):
return self.all() and self.all()[-1] or None
def next(self):
f = self.all()[self._current]
self._current += 1
return f
def next_n(self, n):
files = self.all()[self._current:self._current+n]
self._current += n
return files
def next_all(self):
files = self.all()[self._current:]
self._current = self._length - 1
return files
def has_next(self):
self.all()
return max(0, self._length - self._current - 1)
def reset(self):
self._current = 0
def __getattr__(self, attr):
next_n = re.match(r'^next_(\d+)$', attr)
if next_n:
n = int(next_n.group(1))
return curry(self.next_n, n)
raise AttributeError
class ImagePaths(FilePaths):
item_class = ImagePath
def as_gallery(self):
raise NotImplementedError
def as_carousel(self):
raise NotImplementedError
class FilesDescriptor(object):
"""
    Modelled on django.db.models.fields.files.FileDescriptor.
    This descriptor returns a unicode subclass (FilePath/FilePaths) with
    special properties and methods for formatting, such as filename,
    display_name, url, local_path and img_tag().
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
files = instance.__dict__[self.field.name]
if isinstance(files, six.string_types) and not isinstance(files, (FilePath, FilePaths)):
attr = self.field.attr_class(files, instance, self.field)
instance.__dict__[self.field.name] = attr
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
|
|
#! /usr/bin/env python
#
# oravg_s2.py - An engine, adapted from the original oravg.py, to
# calculate orientation averaged cross sections and
# orientation-dependent spectral information starting from several
# fixed orientation spectra of a nanoparticle with axial
# symmetry. Uses interpolation on the sphere S^2 with a linear
# combination of translates of a single strictly positive definite
# basis function. The interpolation scheme is based on the "generating
# function kernel" in:
#
# Jetter K, Stoeckler J, Ward JD, Math. Computation 1999, 68, 733-747.
#
# Works with FDTD runs using x-propagating, z-polarized plane
# waves. It is assumed that the particle's axis of rotational symmetry
# is the z-axis when the quaternion is (0,0,0,1).
#
# To use: need a system with Python 2.5.2 and NumPy 1.1.0.
# Command line on Unix-like system:
#
# chmod +x ./oravg_s2.py
# ./oravg_s2.py <control_file >output_file
#
#
# CONTROL FILE: text file with a header, choice of interpolation
# parameters, and body.
#
# HEADER: define the job type and related parameters. Choices
# currently are orientation-averaged spectrum ("o"), fixed orientation
# spectrum ("f"), or polar view of cross section as particle is
# rotated ("p"). Put one of the following at the top of the control
# file:
#
# Orientation-averaged spectrum:
# o <- Job type
#
# Fixed orientation spectrum:
# f <- Job type
# 0.0 0.38268 0.0 0.92388 <- Unit quaternion (x,y,z,w) defining desired
# orientation
# Polar view:
# p <- Job type
# 0.0 0.0 0.0 1.0 <- Unit quaternion defining desired orientation
# 1.0 0.0 0.0 <- Unit vector defining axis about which particle
# is to be rotated
# 101 <- Number of angular samples from 0 to 2pi
# (samples include 0 and 2pi)
# 667, 1.0 <- Wavelength/frequency to sample and tolerance
# between given value and value in data file
# (in same units as in data file)
#
# CHOICE OF INTERPOLATION PARAMETERS: Put the following next in the
# control file:
#
# 0.3 <- Peakedness z of the kernel (0 < z < 1)
# dinfh <- Point group of nanoparticle
#
# BODY: Put the following at the end of the control file:
#
# 0.0 0.0 0.0 1.0 30.dat F F
# 0.707106781187 0.0 0.0 0.707106781187 30a.dat F F
# 0.0 0.707106781187 0.0 0.707106781187 30b.dat F F
# 0.325057583672 0.325057583672 0.0 0.888073833977 30c.dat T T
#
# where first 4 fields represent the unit quaternion (x,y,z,w) by
# which the particle is rotated; the fifth is the data file name (by
# default in same subdirectory as this program) of the corresponding
# spectrum, and the last two are whether reflection in the xy and xz
# planes, respectively, give a different but valid orientation with an
# identical spectrum.
#
#
# DATA FILES:
# The data files must be lists of wavelength or frequency followed by the
# cross section, one entry per line, with no blank lines:
#
# 810.2498 1.6144834E-12
#
# The list of wavelengths sampled must be identical between files.
#
#
# Raman Shah, April-August 2010
# IMPORTED MODULES
from math import *
import numpy as np
from itertools import imap
from operator import mul
# MISCELLANEOUS UTILITIES
# Convert Ts and Fs in file to boolean values in Python
# Note: Text must be trimmed of any leading whitespace!
def parsebool(string):
return string[0].upper() == 'T'
# Read standard input until a nonblank line is found, and return the nonblank
# line as a list of words. Recognize comma as a word separator.
def readlist():
while True:
line = raw_input().replace(',', ' ').split()
if line != []:
return line
# QUATERNION CLASS AND RELATED METHODS
class quaternion:
"""A self-normalized rotation quaternion (x,y,z,w)."""
def __init__(self, xpart, ypart, zpart, wpart):
norm = sqrt(xpart * xpart + ypart * ypart + zpart * zpart \
+ wpart * wpart)
if norm == 0:
raise ValueError
self.x = xpart / norm
self.y = ypart / norm
self.z = zpart / norm
self.w = wpart / norm
def invert(self):
# Quaternion for the inverse rotation has same axis
# but opposite angle, so the vector components flip sign
return quaternion(-1 * self.x, -1 * self.y, -1 * self.z, self.w)
def xyreflect(self):
return quaternion(-1 * self.x, -1 * self.y, self.z, self.w)
def xzreflect(self):
return quaternion(-1 * self.x, self.y, -1 * self.z, self.w)
def show(self):
print self.x, self.y, self.z, self.w
def multiply(quat1, quat2):
prodw = quat1.w*quat2.w - quat1.x*quat2.x \
- quat1.y*quat2.y - quat1.z*quat2.z
prodx = quat1.w*quat2.x + quat1.x*quat2.w \
+ quat1.y*quat2.z - quat1.z*quat2.y
prody = quat1.w*quat2.y + quat1.y*quat2.w \
+ quat1.z*quat2.x - quat1.x*quat2.z
prodz = quat1.w*quat2.z + quat1.z*quat2.w \
+ quat1.x*quat2.y - quat1.y*quat2.x
return quaternion(prodx,prody,prodz,prodw)
# FUNCTIONS FOR WORKING IN S^2
def point(quat):
# Gives the unit vector (x, y, z) in R^3 resulting when (0, 0, 1)
# is rotated by the given quaternion. The third column of the
# quaternion rotation matrix.
return (2 * (quat.w * quat.y + quat.x * quat.z),
2 * (quat.y * quat.z - quat.x * quat.w),
(quat.w * quat.w - quat.x * quat.x - \
quat.y * quat.y + quat.z * quat.z))
def dot(x, y):
# Dot product of two vectors in R^3, written as tuples
return sum(imap(mul, x, y))
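# Hedged illustration (added; not part of the original algorithm): quick
# self-check of the quaternion-to-point mapping.  The identity quaternion
# leaves the reference direction (0, 0, 1) unchanged, and a right-handed
# rotation by pi/2 about the x axis sends it to (0, -1, 0).
_example_identity = point(quaternion(0.0, 0.0, 0.0, 1.0))
_example_rot_x90 = point(quaternion(sin(pi / 4), 0.0, 0.0, cos(pi / 4)))
assert max(abs(a - b) for a, b in zip(_example_identity, (0.0, 0.0, 1.0))) < 1e-12
assert max(abs(a - b) for a, b in zip(_example_rot_x90, (0.0, -1.0, 0.0))) < 1e-12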
# FUNCTIONS FOR SYMMETRY ADAPTATION OF DATA SET
def cinfv_symadapt(input_tuple):
return [input_tuple]
def dinfh_symadapt(input_tuple):
# Takes a 2-tuple (quaternion + filename) and returns a
# list of the 2 tuples equivalent to the given tuple via the discrete
# subgroup of D_infh.
# Right-handed rotation by pi about the x axis
perpc2 = quaternion(1, 0, 0, 0)
quat = input_tuple[0]
text = input_tuple[1]
return [(quat, text), (multiply(quat, perpc2), text)]
def choose_point_group(point_group):
if point_group == 'cinfv':
return cinfv_symadapt
if point_group == 'dinfh':
return dinfh_symadapt
# DEFINITION OF INTERPOLATOR AND ITS INTEGRAL
def choose_interpolator(z):
# This is the generating function kernel, normalized to 1 for
# cos_theta = 1, and its integral over S^2.
interpolator = (lambda cos_theta: \
((1 - z) ** 3.0 / (1 + z ** 2 - 2 * z * cos_theta) ** 1.5))
integral = (1 - z) ** 2 / (1 + z)
return (interpolator, integral)
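# Added note (worked form of the kernel): for peakedness 0 < z < 1,
#
#   Phi_z(cos t) = (1 - z)**3 / (1 + z**2 - 2*z*cos t)**1.5
#
# so Phi_z(1) = (1 - z)**3 / ((1 - z)**2)**1.5 = 1: the kernel is normalized
# to 1 when the two directions coincide.  Its mean value over the sphere,
#
#   (1/(4*pi)) * Integral_{S^2} Phi_z dOmega
#       = 0.5 * Integral_{-1}^{1} Phi_z(t) dt = (1 - z)**2 / (1 + z),
#
# is exactly the `integral` factor returned above, used later to turn a sum
# of interpolation coefficients into an orientation average.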
# EXECUTION BEGINS HERE: READ CONTROL FILE TO INITIALIZE
# Initialize job using header of control file piped to standard input
job_type = readlist()[0]
if job_type == 'o':
pass
elif job_type == 'f':
components = map(float, readlist())
orientation = quaternion(components[0], components[1], \
components[2], components[3])
elif job_type == 'p':
components = map(float, readlist())
orientation = quaternion(components[0], components[1], \
components[2], components[3])
axis = map(float, readlist())
stepcount = int(readlist()[0])
wavelength, wavelength_tol = map(float, readlist())
else:
raise IOError('Job type read as ' + job_type + ' is not valid.')
# Define interpolator, integral, rot_adapt from control file
z = float(readlist()[0])
if not ((z > 0) and (z < 1)):
raise ValueError('Need 0 < z < 1 to give a valid kernel.')
point_group = readlist()[0]
interpolator, integral = choose_interpolator(z)
rot_adapt = choose_point_group(point_group)
# Parse input from body of control file into a control array with
# entries [quaternion, string, boolean, boolean]. Also create a
# dictionary of open data files to read later in program.
control_list = []
datafile_dict = {}
while True:
try:
words = readlist()
curr_quat = quaternion(float(words[0]), float(words[1]), \
float(words[2]), float(words[3]))
xybool = parsebool(words[5])
xzbool = parsebool(words[6])
control_list.append([curr_quat, words[4], xybool, xzbool])
datafile_dict[words[4]] = open('./' + words[4], 'r')
except EOFError:
break
# ADAPT DATA SET TO SYMMETRY OF PROBLEM
# Add reflected quaternions as directed in the control list
# giving an array of tuples (quaternion, string)
reflected_list = []
for item in control_list:
reflected_list.append((item[0], item[1]))
if (item[2] and item[3]):
#xy and xz both give new equivalent configurations
reflected_list.append((item[0].xyreflect(), item[1]))
reflected_list.append((item[0].xzreflect(), item[1]))
reflected_list.append((item[0].xyreflect().xzreflect(), item[1]))
continue
if item[2]:
#only xy gives a new equivalent configuration
reflected_list.append((item[0].xyreflect(), item[1]))
continue
if item[3]:
#only xz gives a new equivalent configuration
reflected_list.append((item[0].xzreflect(), item[1]))
continue
# For each quaternion, add the configurations equivalent
# under the rotational point group, giving a master array
# of tuples (quaternion, string) ready for processing
master_list = []
for item in reflected_list:
master_list += rot_adapt(item)
# Give tuples of the quaternions and associated filenames
main_quat_list, main_filename_list = zip(*master_list)
# NUMERICAL SECTION: FUNCTION EVALUATION AND MATRIX INVERSION
# Instantiate a matrix of the dot products between corresponding points in
# R^3 as well as a matrix of the interpolator evaluations. The former is useful
# for measuring the separation distance of the data set.
# a_ij = x_i dot x_j
# A_ij = Phi_z(a_ij)
dimension = len(main_quat_list)
a = np.matrix(np.zeros((dimension, dimension)))
A = np.matrix(np.zeros((dimension, dimension)))
for i in range(dimension):
for j in range(dimension):
a[i, j] = dot(point(main_quat_list[i]), point(main_quat_list[j]))
A[i, j] = interpolator(a[i, j])
# # Some verbose info
#
# # Find separation distance of the set of quaternions
# biggest_costheta = 0
# for i in range(dimension):
# for j in range(dimension):
# if (i != j) and a[i,j] > biggest_costheta:
# biggest_costheta = a[i,j]
# print 'Smallest separation angle (in rad) is ' + \
# str(acos(biggest_costheta)) + '.'
#
# print 'Dimension is ' + str(dimension) + '.'
#
#print 'Points sampled are:'
#for quat in main_quat_list:
# print point(quat)
#
# for i in range(dimension):
# for j in range(dimension):
# if ((i != j) and A[i,j] == 1):
# print 'Off-diagonal element ' + str(i+1) + ', ' + str(j+1) + \
# ' is 1.'
# if A[i,j] == 0:
# print 'Element ' + str(i+1) + ', ' + str(j+1) + ' is 0.'
# Invert the matrix A to get the matrix that converts data
# to coefficients
Ainv = np.linalg.inv(A)
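# Added summary of the scheme (hedged restatement of the code above/below):
# the cross section as a function of orientation x on S^2 is modelled as
#
#   sigma(x) ~ sum_j c_j * Phi_z(x . x_j)
#
# with x_j the sampled orientations.  Forcing equality at the sample points
# gives A*c = s, where s is the vector of sampled cross sections, so
# c = Ainv * s.  The orientation average is then sum(c) times the spherical
# mean of the kernel, and a fixed orientation is evaluated with a row vector
# of kernel values at that orientation.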
# CALCULATION AND OUTPUT OF RESULTS
# Spectrum-like jobs: main loop increments line number for data files
if job_type in ['o', 'f']:
if job_type == 'f':
eval_row = np.matrix(np.zeros((1, dimension)))
for j in range(dimension):
eval_row[0,j] = interpolator(dot(point(orientation), \
point(main_quat_list[j])))
while True: # Loop runs until a blank line is seen in data file
# Initialize the iteration
cs_vector = np.matrix(np.zeros((dimension, 1)))
coef_vector = np.matrix(np.zeros((dimension, 1)))
curr_line_dict = {}
# Read a new line, parsed into floats, to give a dictionary
for filename in datafile_dict:
curr_line_dict[filename] = \
map(float, datafile_dict[filename].readline().split())
if curr_line_dict.values()[0] == []:
break
# Look up the wavelength and cross sections for this iteration
curr_wavelength = curr_line_dict.values()[0][0]
for i in range(dimension):
cs_vector[i] = curr_line_dict[main_filename_list[i]][1]
# Compute the vector of coefficients and the sum of
# coefficients
coef_vector = Ainv * cs_vector
if job_type == 'o':
print curr_wavelength, np.sum(coef_vector) * integral
if job_type == 'f':
print curr_wavelength, (eval_row * coef_vector)[0,0]
# Monochromatic jobs: main loop increments parameter; one line per file used
if job_type == 'p':
# Look up data for the desired wavelength
while True:
curr_line_dict = {}
# Read a new line, parsed into floats, to give a dictionary
for filename in datafile_dict:
curr_line_dict[filename] = \
map(float, datafile_dict[filename].readline().split())
if curr_line_dict.values()[0] == []:
raise IOError('Wavelength/frequency ' + \
str(wavelength) + ' not found!')
curr_wavelength = float(curr_line_dict.values()[0][0])
if abs(curr_wavelength - wavelength) < wavelength_tol:
# Comment out following line to get pure stream of numbers
print '# Wavelength/frequency sampled is: ' + str(curr_wavelength)
break
# Find vector of coefficients just once for the chosen wavelength
cs_vector = np.matrix(np.zeros((dimension, 1)))
for i in range(dimension):
cs_vector[i] = curr_line_dict[main_filename_list[i]][1]
coef_vector = Ainv * cs_vector
angle_list = np.linspace(0, 2*pi, stepcount, True)
# Main loop
for angle in angle_list:
curr_quat = multiply(quaternion(axis[0] * sin(angle / 2), \
axis[1] * sin(angle / 2), \
axis[2] * sin(angle / 2), \
cos (angle / 2)), \
orientation)
eval_row = np.matrix(np.zeros((1, dimension)))
for j in range(dimension):
eval_row[0,j] = interpolator(dot(point(curr_quat), \
point(main_quat_list[j])))
print angle, (eval_row * coef_vector)[0,0]
# FINAL I/O CLEANUP
for filename in datafile_dict:
datafile_dict[filename].close()
|
|
# -*- coding: utf-8 -*-
#
# This file is part of sensim
# License: BSD 3 clause
# 2016, 2017
"""Helpers for sentence semantic similarity model.
.. Author:: Hussein AL-NATSHEH <[email protected]>
"""
from polyglot.text import Text
from polyglot.mapping import Embedding
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
vocab = Embedding.load("../polyglot_data/embeddings2/en/embeddings_pkl.tar.bz2")
def polyglot_words(s):
text = Text(s)
text.language = 'en'
return text.words
def polyglot_name_entities(s):
text = Text(s)
text.language = 'en'
entities = text.entities
org = list(' ')
loc = list(' ')
per = list(' ')
for entity in entities:
if entity.tag == 'I-ORG':
org = list(entity)
elif entity.tag == 'I-PER':
per = list(entity)
else:
loc = list(entity)
return org, per, loc
def polyglot_organizations(s):
org, _, _ = polyglot_name_entities(s)
return org
def polyglot_persons(s):
_, per, _ = polyglot_name_entities(s)
return per
def polyglot_locations(s):
_, _, loc = polyglot_name_entities(s)
return loc
def polyglot_pos(s):
"""Get dictionary of list POS_tags words from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
    :returns: dict
        Dictionary mapping POS tags to lists of words
"""
    def polyglot_pos(pos, pos_code):
for w in text.words:
if w.pos_tag == pos_code:
pos.append(w)
if len(pos) > 0:
return pos
else:
return ([' '])
text = Text(s)
text.language = 'en'
text.pos_tags
POS = {}
pos_lst = []
adjectives = []
pos_lst.append((adjectives, 'ADJ'))
adpositions = []
pos_lst.append((adpositions, 'ADP'))
adverbs = []
pos_lst.append((adverbs, 'ADV'))
auxiliary_verbs = []
pos_lst.append((auxiliary_verbs, 'AUX'))
coordinating_conjunctions = []
pos_lst.append((coordinating_conjunctions, 'CONJ'))
determiners = []
pos_lst.append((determiners, 'DET'))
interjections = []
pos_lst.append((interjections, 'INTJ'))
nouns = []
pos_lst.append((nouns, 'NOUN'))
numerals = []
pos_lst.append((numerals, 'NUM'))
particles = []
pos_lst.append((particles, 'PART'))
pronouns = []
pos_lst.append((pronouns, 'PRON'))
proper_nouns = []
pos_lst.append((proper_nouns, 'PROPN'))
punctuations = []
pos_lst.append((punctuations, 'PUNCT'))
subordinating_conjunctions = []
pos_lst.append((subordinating_conjunctions, 'SCONJ'))
symbols = []
pos_lst.append((symbols, 'SYM'))
verbs = []
pos_lst.append((verbs, 'VERB'))
others = []
pos_lst.append((others, 'X'))
for pos, pos_code in pos_lst:
POS[pos_code] = polyglot_pos(pos, pos_code)
return POS
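# Hedged usage sketch (requires the polyglot English models to be installed;
# actual tagger output may differ):
#
#   tags = polyglot_pos(u'The quick brown fox jumps over the lazy dog')
#   tags['NOUN']   # e.g. [u'fox', u'dog']
#   tags['VERB']   # e.g. [u'jumps']
#   tags['ADJ']    # e.g. [u'quick', u'brown', u'lazy']
#
# Tags with no matching words map to [' '], so downstream feature code can
# rely on getting a non-empty list.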
def polyglot_adpositions(s):
"""Get the set of th adpositions tags from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
adpositions
"""
adpositions = polyglot_pos(s)['ADP']
return (adpositions)
def polyglot_others(s):
"""Get the set of other PoS tags from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
other PoS tags
"""
others = polyglot_pos(s)['X']
return (others)
def polyglot_subordinating_conjunctions(s):
"""Get the subordinating_conjunctions from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
subordinating_conjunctions
"""
subordinating_conjunctions = polyglot_pos(s)['SCONJ']
return (subordinating_conjunctions)
def polyglot_punctuation(s):
"""Get the punctuation from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
punctuation
"""
punctuations = polyglot_pos(s)['PUNCT']
return (punctuations)
def polyglot_particle(s):
"""Get the particle from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
particle
"""
particles = polyglot_pos(s)['PART']
return (particles)
def polyglot_determiner(s):
"""Get the determiner from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
determiner
"""
determiners = polyglot_pos(s)['DET']
return (determiners)
def polyglot_interjection(s):
"""Get the interjection from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
interjection
"""
interjections = polyglot_pos(s)['INTJ']
return (interjections)
def polyglot_coordinating_conjunction(s):
"""Get the coordinating_conjunction from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
coordinating_conjunction
"""
coordinating_conjunctions = polyglot_pos(s)['CONJ']
return (coordinating_conjunctions)
def polyglot_symbol(s):
"""Get the symbol from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: array of string
symbol
"""
symbols = polyglot_pos(s)['SYM']
return (symbols)
def polyglot_nouns(s):
"""Get list of nouns from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of nouns
"""
return (polyglot_pos(s)['NOUN'])
def polyglot_1st_noun(s):
"""Get first noun from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first noun
"""
nouns = polyglot_nouns(s)
return nouns[0]
def polyglot_2nd_noun(s):
"""Get second noun from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second noun
"""
nouns = polyglot_nouns(s)
if len(nouns) > 1:
return nouns[1]
else:
return (' ')
def polyglot_proper_nouns(s):
"""Get list of proper_nouns from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of proper_nouns
"""
return (polyglot_pos(s)['PROPN'])
def polyglot_1st_proper_noun(s):
"""Get first proper_noun from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first proper_noun
"""
proper_nouns = polyglot_proper_nouns(s)
return proper_nouns[0]
def polyglot_2nd_proper_noun(s):
"""Get second proper_noun from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second proper_noun
"""
proper_nouns = polyglot_proper_nouns(s)
if len(proper_nouns) > 1:
return proper_nouns[1]
else:
return (' ')
def polyglot_pronouns(s):
"""Get list of pronouns from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of pronouns
"""
return (polyglot_pos(s)['PRON'])
def polyglot_1st_pronoun(s):
"""Get first pronoun from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first pronoun
"""
pronouns = polyglot_pronouns(s)
return pronouns[0]
def polyglot_2nd_pronoun(s):
"""Get second pronoun from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second pronoun
"""
pronouns = polyglot_pronouns(s)
if len(pronouns) > 1:
return pronouns[1]
else:
return (' ')
def polyglot_verbs(s):
"""Get list of verbs from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of verbs
"""
return (polyglot_pos(s)['VERB'])
def polyglot_1st_verb(s):
"""Get first verb from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first verb
"""
verbs = polyglot_verbs(s)
return verbs[0]
def polyglot_2nd_verb(s):
"""Get second verb from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second verb
"""
verbs = polyglot_verbs(s)
if len(verbs) > 1:
return verbs[1]
else:
return (' ')
def polyglot_auxiliary_verbs(s):
"""Get list of auxiliary_verbs from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of auxiliary_verbs
"""
return (polyglot_pos(s)['AUX'])
def polyglot_1st_auxiliary_verb(s):
"""Get first auxiliary_verb from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first auxiliary_verb
"""
auxiliary_verbs = polyglot_auxiliary_verbs(s)
return auxiliary_verbs[0]
def polyglot_2nd_auxiliary_verb(s):
"""Get second auxiliary_verb from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second auxiliary_verb
"""
auxiliary_verbs = polyglot_auxiliary_verbs(s)
if len(auxiliary_verbs) > 1:
return auxiliary_verbs[1]
else:
return (' ')
def polyglot_adjectives(s):
"""Get list of adjectives from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of adjectives
"""
return (polyglot_pos(s)['ADJ'])
def polyglot_1st_adjective(s):
"""Get first adjective from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first adjective
"""
adjectives = polyglot_adjectives(s)
return adjectives[0]
def polyglot_2nd_adjective(s):
"""Get second adjective from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second adjective
"""
adjectives = polyglot_adjectives(s)
if len(adjectives) > 1:
return adjectives[1]
else:
return (' ')
def polyglot_adverbs(s):
"""Get list of adverbs from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of adverbs
"""
return (polyglot_pos(s)['ADV'])
def polyglot_1st_adverb(s):
"""Get first adverb from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first adverb
"""
adverbs = polyglot_adverbs(s)
return adverbs[0]
def polyglot_2nd_adverb(s):
"""Get second adverb from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second adverb
"""
adverbs = polyglot_adverbs(s)
if len(adverbs) > 1:
return adverbs[1]
else:
return (' ')
def polyglot_numbers(s):
"""Get list of numbers from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: list of strings
list of numbers
"""
return (polyglot_pos(s)['NUM'])
def polyglot_1st_number(s):
"""Get first number from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
first number
"""
numbers = polyglot_numbers(s)
return numbers[0]
def polyglot_2nd_number(s):
"""Get second number from the sentence.
Parameters
----------
:param s: string
Sentence
Returns
-------
:returns: string
second number
"""
numbers = polyglot_numbers(s)
if len(numbers) > 1:
return numbers[1]
else:
return (' ')
# Get the vector representation of the word using polyglot
def _polyglot_vec(word):
word = word.encode('ascii','ignore').decode('ascii')
if word not in vocab:
return np.zeros(64, dtype=float, order='C').reshape(1, -1)
else:
w = vocab[word]
return w.reshape(1, -1)
class PairPolyglotVecTransformer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
n_samples = len(X)
Xt = np.zeros(n_samples, dtype=object)
s_id = 0
for sample in X:
lst = []
for tup in sample:
w1, w2 = tup
w1_id, w1_text = w1
w2_id, w2_text = w2
w1_vec = _polyglot_vec(w1_text)
w2_vec = _polyglot_vec(w2_text)
lst.append(((w1_id, w1_vec), (w2_id, w2_vec)))
Xt[s_id] = lst
s_id += 1
return Xt
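# Hedged usage sketch: each sample in X is expected to be a list of word-pair
# tuples of the form ((w1_id, w1_text), (w2_id, w2_text)); the transformer
# replaces each word text by its 1x64 polyglot embedding (zeros for OOV words):
#
#   pairs = [((0, u'cat'), (1, u'dog'))]
#   Xt = PairPolyglotVecTransformer().fit_transform([pairs])
#   Xt[0][0][0][1].shape   # (1, 64)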
|
|
"""
The :mod:`model_selection.split<surprise.model_selection.split>` module
contains various cross-validation iterators. Design and tools are inspired by
the mighty scikit-learn.
The available iterators are:
.. autosummary::
:nosignatures:
KFold
RepeatedKFold
ShuffleSplit
LeaveOneOut
PredefinedKFold
This module also contains a function for splitting datasets into trainset and
testset:
.. autosummary::
:nosignatures:
train_test_split
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from itertools import chain
from math import ceil, floor
import numbers
from collections import defaultdict
from six import iteritems
from six import string_types
import numpy as np
from ..utils import get_rng
def get_cv(cv):
"""Return a 'validated' CV iterator."""
if cv is None:
return KFold(n_splits=5)
if isinstance(cv, numbers.Integral):
return KFold(n_splits=cv)
if hasattr(cv, 'split') and not isinstance(cv, string_types):
        return cv  # strings also have a split() method, hence the explicit exclusion
raise ValueError('Wrong CV object. Expecting None, an int or CV iterator, '
'got a {}'.format(type(cv)))
class KFold():
"""A basic cross-validation iterator.
Each fold is used once as a testset while the k - 1 remaining folds are
used for training.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Default
is ``True``.
"""
def __init__(self, n_splits=5, random_state=None, shuffle=True):
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
if self.n_splits > len(data.raw_ratings) or self.n_splits < 2:
            raise ValueError('Incorrect value for n_splits={0}. '
                             'Must be >=2 and no greater than the number '
                             'of ratings.'.format(self.n_splits))
# We use indices to avoid shuffling the original data.raw_ratings list.
indices = np.arange(len(data.raw_ratings))
if self.shuffle:
get_rng(self.random_state).shuffle(indices)
start, stop = 0, 0
for fold_i in range(self.n_splits):
start = stop
stop += len(indices) // self.n_splits
if fold_i < len(indices) % self.n_splits:
stop += 1
raw_trainset = [data.raw_ratings[i] for i in chain(indices[:start],
indices[stop:])]
raw_testset = [data.raw_ratings[i] for i in indices[start:stop]]
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
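# Hedged usage sketch (assumes a surprise Dataset instance `data`, e.g. from
# Dataset.load_builtin('ml-100k'), and a prediction algorithm `algo`):
#
#   kf = KFold(n_splits=3, random_state=0)
#   for trainset, testset in kf.split(data):
#       algo.fit(trainset)
#       predictions = algo.test(testset)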
class RepeatedKFold():
"""
Repeated :class:`KFold` cross validator.
Repeats :class:`KFold` n times with different randomization in each
repetition.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
n_repeats(int): The number of repetitions.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Default
is ``True``.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
self.n_repeats = n_repeats
self.random_state = random_state
self.n_splits = n_splits
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
rng = get_rng(self.random_state)
for _ in range(self.n_repeats):
cv = KFold(n_splits=self.n_splits, random_state=rng, shuffle=True)
for trainset, testset in cv.split(data):
yield trainset, testset
def get_n_folds(self):
return self.n_repeats * self.n_splits
class ShuffleSplit():
"""A basic cross-validation iterator with random trainsets and testsets.
Contrary to other cross-validation strategies, random splits do not
guarantee that all folds will be different, although this is still very
likely for sizeable datasets.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
        test_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the testset. If int,
represents the absolute number of ratings in the testset. If
``None``, the value is set to the complement of the trainset size.
Default is ``.2``.
train_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the trainset. If int,
represents the absolute number of ratings in the trainset. If
``None``, the value is set to the complement of the testset size.
Default is ``None``.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Setting
this to `False` defeats the purpose of this iterator, but it's
useful for the implementation of :func:`train_test_split`. Default
is ``True``.
"""
def __init__(self, n_splits=5, test_size=.2, train_size=None,
random_state=None, shuffle=True):
if n_splits <= 0:
raise ValueError('n_splits = {0} should be strictly greater than '
'0.'.format(n_splits))
if test_size is not None and test_size <= 0:
raise ValueError('test_size={0} should be strictly greater than '
'0'.format(test_size))
if train_size is not None and train_size <= 0:
raise ValueError('train_size={0} should be strictly greater than '
'0'.format(train_size))
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.shuffle = shuffle
def validate_train_test_sizes(self, test_size, train_size, n_ratings):
if test_size is not None and test_size >= n_ratings:
raise ValueError('test_size={0} should be less than the number of '
'ratings {1}'.format(test_size, n_ratings))
if train_size is not None and train_size >= n_ratings:
raise ValueError('train_size={0} should be less than the number of'
' ratings {1}'.format(train_size, n_ratings))
if np.asarray(test_size).dtype.kind == 'f':
test_size = ceil(test_size * n_ratings)
if train_size is None:
train_size = n_ratings - test_size
elif np.asarray(train_size).dtype.kind == 'f':
train_size = floor(train_size * n_ratings)
if test_size is None:
test_size = n_ratings - train_size
if train_size + test_size > n_ratings:
raise ValueError('The sum of train_size and test_size ({0}) '
'should be smaller than the number of '
'ratings {1}.'.format(train_size + test_size,
n_ratings))
return int(train_size), int(test_size)
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
        train_size, test_size = self.validate_train_test_sizes(
            self.test_size, self.train_size, len(data.raw_ratings))
        rng = get_rng(self.random_state)
        for _ in range(self.n_splits):
            if self.shuffle:
                permutation = rng.permutation(len(data.raw_ratings))
            else:
                permutation = np.arange(len(data.raw_ratings))
            raw_trainset = [data.raw_ratings[i] for i in
                            permutation[:train_size]]
            raw_testset = [data.raw_ratings[i] for i in
                           permutation[train_size:(train_size + test_size)]]
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
def train_test_split(data, test_size=.2, train_size=None, random_state=None,
shuffle=True):
"""Split a dataset into trainset and testset.
See an example in the :ref:`User Guide <train_test_split_example>`.
Note: this function cannot be used as a cross-validation iterator.
Args:
data(:obj:`Dataset <surprise.dataset.Dataset>`): The dataset to split
into trainset and testset.
        test_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the testset. If int,
represents the absolute number of ratings in the testset. If
``None``, the value is set to the complement of the trainset size.
Default is ``.2``.
train_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the trainset. If int,
represents the absolute number of ratings in the trainset. If
``None``, the value is set to the complement of the testset size.
Default is ``None``.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data``
parameter. Shuffling is not done in-place. Default is ``True``.
"""
ss = ShuffleSplit(n_splits=1, test_size=test_size, train_size=train_size,
random_state=random_state, shuffle=shuffle)
return next(ss.split(data))
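# Hedged usage sketch: unlike the iterators in this module, train_test_split
# returns a single (trainset, testset) pair directly:
#
#   trainset, testset = train_test_split(data, test_size=.25, random_state=0)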
class LeaveOneOut():
"""Cross-validation iterator where each user has exactly one rating in the
testset.
Contrary to other cross-validation strategies, ``LeaveOneOut`` does not
guarantee that all folds will be different, although this is still very
likely for sizeable datasets.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
min_n_ratings(int): Minimum number of ratings for each user in the
trainset. E.g. if ``min_n_ratings`` is ``2``, we are sure each user
has at least ``2`` ratings in the trainset (and ``1`` in the
testset). Other users are discarded. Default is ``0``, so some
users (having only one rating) may be in the testset and not in the
trainset.
"""
def __init__(self, n_splits=5, random_state=None, min_n_ratings=0):
self.n_splits = n_splits
self.random_state = random_state
self.min_n_ratings = min_n_ratings
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
# map ratings to the users ids
user_ratings = defaultdict(list)
for uid, iid, r_ui, _ in data.raw_ratings:
user_ratings[uid].append((uid, iid, r_ui, None))
rng = get_rng(self.random_state)
for _ in range(self.n_splits):
# for each user, randomly choose a rating and put it in the
# testset.
raw_trainset, raw_testset = [], []
for uid, ratings in iteritems(user_ratings):
if len(ratings) > self.min_n_ratings:
i = rng.randint(0, len(ratings))
raw_testset.append(ratings[i])
raw_trainset += [rating for (j, rating)
in enumerate(ratings) if j != i]
if not raw_trainset:
raise ValueError('Could not build any trainset. Maybe '
'min_n_ratings is too high?')
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
class PredefinedKFold():
"""A cross-validation iterator to when a dataset has been loaded with the
:meth:`load_from_folds <surprise.dataset.Dataset.load_from_folds>`
method.
See an example in the :ref:`User Guide <load_from_folds_example>`.
"""
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
self.n_splits = len(data.folds_files)
for train_file, test_file in data.folds_files:
raw_trainset = data.read_ratings(train_file)
raw_testset = data.read_ratings(test_file)
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
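# Illustrative usage sketch (not part of the library): assumes ``data`` was
# loaded with ``Dataset.load_from_folds`` (as the class docstring describes),
# so that ``data.folds_files`` is populated.
def _example_predefined_kfold(data):
    """Return the number of test ratings in each predefined fold."""
    pkf = PredefinedKFold()
    return [len(testset) for _, testset in pkf.split(data)]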
|
|
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from datetime import datetime
import logging
import json
from urllib.request import urlopen
from urllib.parse import urlparse
from functools import partial
from flask_babel import gettext
from owslib.wms import WebMapService
from owslib.wmts import WebMapTileService
from owslib.tms import TileMapService
from owslib.wfs import WebFeatureService
from owslib.wcs import WebCoverageService
from owslib.wps import WebProcessingService
from owslib.csw import CatalogueServiceWeb
from owslib.sos import SensorObservationService
from init import App
from enums import RESOURCE_TYPES
from models import Resource, Run
from probe import Probe
from result import ResourceResult
from notifications import notify
LOGGER = logging.getLogger(__name__)
APP = App.get_app()
DB = App.get_db()
# commit or rollback shorthand
def db_commit():
err = None
try:
DB.session.commit()
except Exception as err:
LOGGER.warning('Cannot commit to database {}'.format(err))
DB.session.rollback()
# finally:
# DB.session.close()
return err
def run_resources():
for resource in Resource.query.all(): # run all tests
LOGGER.info('Testing %s %s' %
(resource.resource_type, resource.url))
run_resource(resource.identifier)
# complete handling of a single resource test
def run_resource(resourceid):
resource = Resource.query.filter_by(identifier=resourceid).first()
if not resource.active:
# Exit test of resource if it's not active
return
# Get the status of the last run,
# assume success if there is none
last_run_success = True
last_run = resource.last_run
if last_run:
last_run_success = last_run.success
# Run test
result = run_test_resource(resource)
run1 = Run(resource, result, datetime.utcnow())
DB.session.add(run1)
# commit or rollback each run to avoid long-lived transactions
# see https://github.com/geopython/GeoHealthCheck/issues/14
db_commit()
if APP.config['GHC_NOTIFICATIONS']:
# Attempt notification
try:
notify(APP.config, resource, run1, last_run_success)
except Exception as err:
# Don't bail out on failure in order to commit the Run
msg = str(err)
logging.warn('error notifying: %s' % msg)
if not __name__ == '__main__':
DB.session.remove()
def run_test_resource(resource):
"""tests a service and provides run metrics"""
result = ResourceResult(resource)
if not resource.active:
result.message = 'Skipped'
return result
result.start()
probes = resource.probe_vars
for probe in probes:
result.add_result(Probe.run(resource, probe))
result.stop()
return result
def sniff_test_resource(config, resource_type, url):
"""tests a Resource endpoint for general compliance"""
out = []
tag_list = []
if resource_type not in RESOURCE_TYPES.keys():
msg = gettext('Invalid resource type')
msg2 = '%s: %s' % (msg, resource_type)
LOGGER.error(msg2)
raise RuntimeError(msg2)
title = None
start_time = datetime.utcnow()
message = None
resource_type_map = {'OGC:WMS': [partial(WebMapService, version='1.3.0'),
partial(WebMapService, version='1.1.1')],
'OGC:WMTS': [WebMapTileService],
'OSGeo:TMS': [TileMapService],
'OGC:WFS': [WebFeatureService],
'OGC:WCS': [WebCoverageService],
'OGC:WPS': [WebProcessingService],
'OGC:CSW': [CatalogueServiceWeb],
'OGC:SOS': [SensorObservationService],
'OGC:WFS3': [urlopen],
'OGC:3DTiles': [urlopen],
'ESRI:FS': [urlopen],
'OGC:STA': [urlopen],
'WWW:LINK': [urlopen],
'FTP': [urlopen],
'GHC:Report': [urlopen],
'OSGeo:GeoNode': [geonode_get_ows],
'Mapbox:TileJSON': [urlopen]
}
try:
ows = None
try:
ows_handlers = resource_type_map[resource_type]
except KeyError:
LOGGER.error("No handler for %s type", resource_type)
raise
for ows_handler in ows_handlers:
try:
ows = ows_handler(url)
break
except Exception as err:
LOGGER.warning("Cannot use %s on %s: %s",
ows_handler, url, err, exc_info=err)
if ows is None:
message = ("Cannot get {} service instance "
"for {}".format(resource_type, url))
raise ValueError(message)
if resource_type == 'WWW:LINK':
content_type = ows.info().get('Content-Type')
            # When the response is not an image, try to parse out the title
            # and any OGC exceptions reported in the body
if 'image/' not in content_type:
content = ows.read()
import re
try:
title_re = re.compile('<title>(.+?)</title>'.encode())
title = title_re.search(content).group(1).decode()
except Exception:
title = url
# Optional check for any OGC-Exceptions in Response
if config and config['GHC_WWW_LINK_EXCEPTION_CHECK']:
exception_text = None
try:
except_re = re.compile(
'ServiceException>|ExceptionReport>'.encode())
exception_text = except_re.search(content).\
group(0).decode()
except Exception:
# No Exception in Response text
pass
if exception_text:
# Found OGC-Exception in Response text
raise Exception(
"Exception in response: %s" % exception_text)
del content
elif resource_type == 'urn:geoss:waf':
title = 'WAF %s %s' % (gettext('for'), urlparse(url).hostname)
elif resource_type == 'FTP':
title = urlparse(url).hostname
elif resource_type == 'OSGeo:GeoNode':
endpoints = ows
end_time = datetime.utcnow()
delta = end_time - start_time
response_time = '%s.%s' % (delta.seconds, delta.microseconds)
base_tags = geonode_make_tags(url)
for epoint in endpoints:
row = sniff_test_resource(config,
epoint['type'],
epoint['url'])
if row:
_tags = row[0][-1]
_tags.extend(base_tags)
row[0][-1] = _tags
out.append(row[0])
elif resource_type.startswith(('OGC:', 'OSGeo', 'ESRI')):
if resource_type == 'OGC:STA':
title = 'OGC STA'
elif resource_type == 'OGC:WFS3':
title = 'OGC API Features (OAFeat)'
elif resource_type == 'ESRI:FS':
title = 'ESRI ArcGIS FS'
elif resource_type == 'OGC:3DTiles':
title = 'OGC 3D Tiles'
else:
title = ows.identification.title
if title is None:
title = '%s %s %s' % (resource_type, gettext('for'), url)
success = True
except Exception as err:
title = 'Untitled'
msg = 'Getting metadata failed: %s' % str(err)
LOGGER.error(msg, exc_info=err)
message = msg
success = False
end_time = datetime.utcnow()
delta = end_time - start_time
response_time = '%s.%s' % (delta.seconds, delta.microseconds)
    # if the GeoNode branch above did not populate out, add a single result row now
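    # For reference, each result row uses the positional layout of the append
    # below:
    #   [resource_type, url, title, success, response_time, message,
    #    start_time, tag_list]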
if not out:
out.append([resource_type,
url,
title,
success,
response_time,
message,
start_time,
tag_list])
return out
GEONODE_OWS_API = '/api/ows_endpoints/'
def geonode_get_ows(base_url):
r = urlopen('{}{}'.format(base_url.rstrip('/'), GEONODE_OWS_API))
url = urlparse(base_url)
base_name = 'GeoNode {}: {{}}'.format(url.hostname)
status_code = r.getcode()
if status_code != 200:
msg = "Error response from GeoNode at {}: {}".format(
base_url, r.text)
raise ValueError(msg)
try:
data = json.load(r)
except (TypeError, ValueError,) as err:
msg = "Cannot decode response from GeoNode at {}: {}".format(base_url,
err)
raise ValueError(msg)
def update(val):
val['title'] = base_name.format(val['type'])
return val
return [update(d) for d in data['data']]
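# Judging from how the response is consumed above and in sniff_test_resource,
# the GeoNode endpoint is assumed to return JSON shaped roughly like
#   {"data": [{"type": "OGC:WMS", "url": "https://example.org/geoserver/ows"}, ...]}
# (the URL is only an example); update() then adds a human-readable 'title'
# to each entry.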
def geonode_make_tags(base_url):
url = urlparse(base_url)
tag_name = 'GeoNode: {}'.format(url.hostname)
return [tag_name]
if __name__ == '__main__':
print('START - Running health check tests on %s'
% datetime.utcnow().isoformat())
run_resources()
print('END - Running health check tests on %s'
% datetime.utcnow().isoformat())
# from init import App
# if len(sys.argv) < 3:
# print('Usage: %s <resource_type> <url>' % sys.argv[0])
# sys.exit(1)
#
# # TODO: need APP.config here, None for now
# pprint(sniff_test_resource(App.get_config(), sys.argv[1], sys.argv[2]))
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.comptool import wait_until
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 400 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxuploadtarget=400", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourselves after accounting
            # for a fee, and then insert the 128 generated transaction outs in the middle.
            # rawtx[92] is where the number of txouts is stored and is the only thing we
            # overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.wallet.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].wallet.generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 400*1024*1024
daily_buffer = 144 * 1000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~290 tries.
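        # Rough arithmetic behind that estimate (the exact count depends on
        # old_block_size; a full block is assumed to be roughly 950,000 bytes):
        #   400 * 1024 * 1024 = 419,430,400 bytes allowed per day
        #   144 * 1,000,000   = 144,000,000 bytes reserved for recent blocks
        #   419,430,400 - 144,000,000 = 275,430,400 bytes available
        #   275,430,400 / ~950,000    ~= 290 full-block requests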
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import errno
import logging
import os
import six
import subprocess
import warnings
import shlex
import sys
from future import standard_library
standard_library.install_aliases()
from builtins import str
from collections import OrderedDict
from six.moves import configparser
from airflow.exceptions import AirflowConfigException
# show Airflow's deprecation warnings
warnings.filterwarnings(
action='default', category=DeprecationWarning, module='airflow')
warnings.filterwarnings(
action='default', category=PendingDeprecationWarning, module='airflow')
ConfigParser = configparser.ConfigParser
def generate_fernet_key():
try:
from cryptography.fernet import Fernet
except ImportError:
pass
try:
key = Fernet.generate_key().decode()
except NameError:
key = "cryptography_not_found_storing_passwords_in_plain_text"
return key
def expand_env_var(env_var):
"""
Expands (potentially nested) env vars by repeatedly applying
`expandvars` and `expanduser` until interpolation stops having
any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
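# Illustrative sketch (not part of Airflow): shows the repeated expansion that
# expand_env_var performs. The DEMO_* variable names are hypothetical.
def _demo_expand_env_var():
    os.environ['DEMO_INNER'] = '~/airflow_home'
    os.environ['DEMO_OUTER'] = '$DEMO_INNER/logs'
    # Pass 1 turns '$DEMO_OUTER' into '$DEMO_INNER/logs'; pass 2 expands
    # '$DEMO_INNER' and '~'; pass 3 sees no further change and returns.
    return expand_env_var('$DEMO_OUTER')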
def run_command(command):
"""
Runs command and returns stdout
"""
process = subprocess.Popen(
shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore')
for stream in process.communicate()]
if process.returncode != 0:
raise AirflowConfigException(
"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
.format(command, process.returncode, output, stderr)
)
return output
_templates_dir = os.path.join(os.path.dirname(__file__), 'config_templates')
with open(os.path.join(_templates_dir, 'default_airflow.cfg')) as f:
DEFAULT_CONFIG = f.read()
with open(os.path.join(_templates_dir, 'default_test.cfg')) as f:
TEST_CONFIG = f.read()
class AirflowConfigParser(ConfigParser):
    # These configuration elements can be fetched as the stdout of commands:
    # if the plain "{name}" key is absent but a "{name}_cmd" key exists in the
    # same section, that command is run and its output used. The idea is to
    # avoid storing passwords on boxes in plain text files.
as_command_stdout = {
('core', 'sql_alchemy_conn'),
('core', 'fernet_key'),
('celery', 'broker_url'),
('celery', 'celery_result_backend')
}
def __init__(self, *args, **kwargs):
ConfigParser.__init__(self, *args, **kwargs)
self.read_string(parameterized_config(DEFAULT_CONFIG))
self.is_validated = False
def read_string(self, string, source='<string>'):
"""
Read configuration from a string.
A backwards-compatible version of the ConfigParser.read_string()
method that was introduced in Python 3.
"""
# Python 3 added read_string() method
if six.PY3:
ConfigParser.read_string(self, string, source=source)
# Python 2 requires StringIO buffer
else:
import StringIO
self.readfp(StringIO.StringIO(string))
def _validate(self):
if (
self.get("core", "executor") != 'SequentialExecutor' and
"sqlite" in self.get('core', 'sql_alchemy_conn')):
raise AirflowConfigException(
"error: cannot use sqlite with the {}".format(
self.get('core', 'executor')))
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode") not in ['user', 'ldapgroup']
):
raise AirflowConfigException(
"error: owner_mode option should be either "
"'user' or 'ldapgroup' when filtering by owner is set")
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode").lower() == 'ldapgroup' and
self.get("webserver", "auth_backend") != (
'airflow.contrib.auth.backends.ldap_auth')
):
raise AirflowConfigException(
"error: attempt at using ldapgroup "
"filtering without using the Ldap backend")
self.is_validated = True
def _get_env_var_option(self, section, key):
# must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
if env_var in os.environ:
return expand_env_var(os.environ[env_var])
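    # Example: setting AIRFLOW__CORE__SQL_ALCHEMY_CONN in the environment
    # overrides the sql_alchemy_conn option of the [core] section.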
def _get_cmd_option(self, section, key):
fallback_key = key + '_cmd'
# if this is a valid command key...
if (section, key) in AirflowConfigParser.as_command_stdout:
# if the original key is present, return it no matter what
if self.has_option(section, key):
return ConfigParser.get(self, section, key)
# otherwise, execute the fallback key
elif self.has_option(section, fallback_key):
command = self.get(section, fallback_key)
return run_command(command)
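    # Example (hypothetical values): with the snippet below in airflow.cfg,
    # get('core', 'sql_alchemy_conn') runs the command and returns its stdout,
    # because the plain key is absent while the '_cmd' fallback is present:
    #
    #   [core]
    #   sql_alchemy_conn_cmd = cat /run/secrets/sql_alchemy_conn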
def get(self, section, key, **kwargs):
section = str(section).lower()
key = str(key).lower()
# first check environment variables
option = self._get_env_var_option(section, key)
if option is not None:
return option
# ...then the config file
if self.has_option(section, key):
return expand_env_var(
ConfigParser.get(self, section, key, **kwargs))
# ...then commands
option = self._get_cmd_option(section, key)
if option:
return option
else:
logging.warning("section/key [{section}/{key}] not found "
"in config".format(**locals()))
raise AirflowConfigException(
"section/key [{section}/{key}] not found "
"in config".format(**locals()))
def getboolean(self, section, key):
val = str(self.get(section, key)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val.lower() in ('t', 'true', '1'):
return True
elif val.lower() in ('f', 'false', '0'):
return False
else:
raise AirflowConfigException(
'The value for configuration option "{}:{}" is not a '
'boolean (received "{}").'.format(section, key, val))
def getint(self, section, key):
return int(self.get(section, key))
def getfloat(self, section, key):
return float(self.get(section, key))
def read(self, filenames):
ConfigParser.read(self, filenames)
self._validate()
def as_dict(self, display_source=False, display_sensitive=False):
"""
Returns the current configuration as an OrderedDict of OrderedDicts.
:param display_source: If False, the option value is returned. If True,
a tuple of (option_value, source) is returned. Source is either
'airflow.cfg' or 'default'.
:type display_source: bool
:param display_sensitive: If True, the values of options set by env
vars and bash commands will be displayed. If False, those options
are shown as '< hidden >'
:type display_sensitive: bool
"""
cfg = copy.deepcopy(self._sections)
# remove __name__ (affects Python 2 only)
for options in cfg.values():
options.pop('__name__', None)
# add source
if display_source:
for section in cfg:
for k, v in cfg[section].items():
cfg[section][k] = (v, 'airflow config')
# add env vars and overwrite because they have priority
for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]:
try:
_, section, key = ev.split('__')
opt = self._get_env_var_option(section, key)
except ValueError:
opt = None
if opt:
if (
not display_sensitive
and ev != 'AIRFLOW__CORE__UNIT_TEST_MODE'):
opt = '< hidden >'
if display_source:
opt = (opt, 'env var')
cfg.setdefault(section.lower(), OrderedDict()).update(
{key.lower(): opt})
# add bash commands
for (section, key) in AirflowConfigParser.as_command_stdout:
opt = self._get_cmd_option(section, key)
if opt:
if not display_sensitive:
opt = '< hidden >'
if display_source:
opt = (opt, 'bash cmd')
cfg.setdefault(section, OrderedDict()).update({key: opt})
return cfg
def load_test_config(self):
"""
Load the unit test configuration.
Note: this is not reversible.
"""
# override any custom settings with defaults
self.read_string(parameterized_config(DEFAULT_CONFIG))
# then read test config
self.read_string(parameterized_config(TEST_CONFIG))
# then read any "custom" test settings
self.read(TEST_CONFIG_FILE)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise AirflowConfigException('Had trouble creating a directory')
# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
# "~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
if 'AIRFLOW_HOME' not in os.environ:
AIRFLOW_HOME = expand_env_var('~/airflow')
else:
AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
if 'AIRFLOW_CONFIG' not in os.environ:
if os.path.isfile(expand_env_var('~/airflow.cfg')):
AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
else:
AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'tests',
'dags')
if os.path.exists(_TEST_DAGS_FOLDER):
TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER
else:
TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')
# Set up plugins folder for unit tests
_TEST_PLUGINS_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'tests',
'plugins')
if os.path.exists(_TEST_PLUGINS_FOLDER):
TEST_PLUGINS_FOLDER = _TEST_PLUGINS_FOLDER
else:
TEST_PLUGINS_FOLDER = os.path.join(AIRFLOW_HOME, 'plugins')
def parameterized_config(template):
"""
Generates a configuration from the provided template + variables defined in
current scope
:param template: a config content templated with {{variables}}
"""
all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
return template.format(**all_vars)
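# Illustrative example: if the template contains a line such as
#   fernet_key = {FERNET_KEY}
# then parameterized_config() fills in the module-level FERNET_KEY defined
# below, because globals() is merged into all_vars.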
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
# only generate a Fernet key if we need to create a new config file
if not os.path.isfile(TEST_CONFIG_FILE) or not os.path.isfile(AIRFLOW_CONFIG):
FERNET_KEY = generate_fernet_key()
else:
FERNET_KEY = ''
TEMPLATE_START = (
'# ----------------------- TEMPLATE BEGINS HERE -----------------------')
if not os.path.isfile(TEST_CONFIG_FILE):
logging.info(
'Creating new Airflow config file for unit tests in: {}'.format(
TEST_CONFIG_FILE))
with open(TEST_CONFIG_FILE, 'w') as f:
cfg = parameterized_config(TEST_CONFIG)
f.write(cfg.split(TEMPLATE_START)[-1].strip())
if not os.path.isfile(AIRFLOW_CONFIG):
logging.info('Creating new Airflow config file in: {}'.format(
AIRFLOW_CONFIG))
with open(AIRFLOW_CONFIG, 'w') as f:
cfg = parameterized_config(DEFAULT_CONFIG)
f.write(cfg.split(TEMPLATE_START)[-1].strip())
logging.info("Reading the config from " + AIRFLOW_CONFIG)
conf = AirflowConfigParser()
conf.read(AIRFLOW_CONFIG)
def load_test_config():
"""
Load the unit test configuration.
Note: this is not reversible.
"""
conf.load_test_config()
if conf.getboolean('core', 'unit_test_mode'):
load_test_config()
def get(section, key, **kwargs):
return conf.get(section, key, **kwargs)
def getboolean(section, key):
return conf.getboolean(section, key)
def getfloat(section, key):
return conf.getfloat(section, key)
def getint(section, key):
return conf.getint(section, key)
def has_option(section, key):
return conf.has_option(section, key)
def remove_option(section, option):
return conf.remove_option(section, option)
def as_dict(display_source=False, display_sensitive=False):
return conf.as_dict(
display_source=display_source, display_sensitive=display_sensitive)
as_dict.__doc__ = conf.as_dict.__doc__
def set(section, option, value): # noqa
return conf.set(section, option, value)
|
|
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import do_change_stream_invite_only
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Message, UserMessage, get_client, get_realm, get_stream
class TopicHistoryTest(ZulipTestCase):
def test_topics_history_zephyr_mirror(self) -> None:
user_profile = self.mit_user("sipbtest")
stream_name = "new_stream"
# Send a message to this new stream from another user
self.subscribe(self.mit_user("starnine"), stream_name)
stream = get_stream(stream_name, user_profile.realm)
self.send_stream_message(self.mit_user("starnine"), stream_name, topic_name="secret topic")
# Now subscribe this MIT user to the new stream and verify
# that the new topic is not accessible
self.login_user(user_profile)
self.subscribe(user_profile, stream_name)
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint, {}, subdomain="zephyr")
self.assert_json_success(result)
history = result.json()["topics"]
self.assertEqual(history, [])
def test_topics_history(self) -> None:
# verified: int(UserMessage.flags.read) == 1
user_profile = self.example_user("iago")
self.login_user(user_profile)
stream_name = "Verona"
stream = get_stream(stream_name, user_profile.realm)
recipient = stream.recipient
def create_test_message(topic: str) -> int:
# TODO: Clean this up to send messages the normal way.
hamlet = self.example_user("hamlet")
message = Message(
sender=hamlet,
recipient=recipient,
content="whatever",
date_sent=timezone_now(),
sending_client=get_client("whatever"),
)
message.set_topic_name(topic)
message.save()
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=0,
)
return message.id
# our most recent topics are topic0, topic1, topic2
# Create old messages with strange spellings.
create_test_message("topic2")
create_test_message("toPIc1")
create_test_message("toPIc0")
create_test_message("topic2")
create_test_message("topic2")
create_test_message("Topic2")
# Create new messages
topic2_msg_id = create_test_message("topic2")
create_test_message("topic1")
create_test_message("topic1")
topic1_msg_id = create_test_message("topic1")
topic0_msg_id = create_test_message("topic0")
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint, {})
self.assert_json_success(result)
history = result.json()["topics"]
# We only look at the most recent three topics, because
# the prior fixture data may be unreliable.
history = history[:3]
self.assertEqual(
[topic["name"] for topic in history],
[
"topic0",
"topic1",
"topic2",
],
)
self.assertEqual(
[topic["max_id"] for topic in history],
[
topic0_msg_id,
topic1_msg_id,
topic2_msg_id,
],
)
# Now try as cordelia, who we imagine as a totally new user in
# that she doesn't have UserMessage rows. We should see the
# same results for a public stream.
self.login("cordelia")
result = self.client_get(endpoint, {})
self.assert_json_success(result)
history = result.json()["topics"]
# We only look at the most recent three topics, because
# the prior fixture data may be unreliable.
history = history[:3]
self.assertEqual(
[topic["name"] for topic in history],
[
"topic0",
"topic1",
"topic2",
],
)
self.assertIn("topic0", [topic["name"] for topic in history])
self.assertEqual(
[topic["max_id"] for topic in history],
[
topic0_msg_id,
topic1_msg_id,
topic2_msg_id,
],
)
# Now make stream private, but subscribe cordelia
do_change_stream_invite_only(stream, True)
self.subscribe(self.example_user("cordelia"), stream.name)
result = self.client_get(endpoint, {})
self.assert_json_success(result)
history = result.json()["topics"]
history = history[:3]
        # Cordelia doesn't see these recent topics in her results: the stream
        # is now private, and its history from before she subscribed is not
        # visible to her.
self.assertNotIn("topic0", [topic["name"] for topic in history])
self.assertNotIn("topic1", [topic["name"] for topic in history])
self.assertNotIn("topic2", [topic["name"] for topic in history])
def test_bad_stream_id(self) -> None:
self.login("iago")
# non-sensible stream id
endpoint = "/json/users/me/9999999999/topics"
result = self.client_get(endpoint, {})
self.assert_json_error(result, "Invalid stream id")
# out of realm
bad_stream = self.make_stream(
"mit_stream",
realm=get_realm("zephyr"),
)
endpoint = f"/json/users/me/{bad_stream.id}/topics"
result = self.client_get(endpoint, {})
self.assert_json_error(result, "Invalid stream id")
# private stream to which I am not subscribed
private_stream = self.make_stream(
"private_stream",
invite_only=True,
)
endpoint = f"/json/users/me/{private_stream.id}/topics"
result = self.client_get(endpoint, {})
self.assert_json_error(result, "Invalid stream id")
def test_get_topics_web_public_stream_web_public_request(self) -> None:
stream = self.make_stream("web-public-steram", is_web_public=True)
for i in range(3):
self.send_stream_message(
self.example_user("iago"), stream.name, topic_name="topic" + str(i)
)
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint)
self.assert_json_success(result)
history = result.json()["topics"]
self.assertEqual(
[topic["name"] for topic in history],
[
"topic2",
"topic1",
"topic0",
],
)
def test_get_topics_non_web_public_stream_web_public_request(self) -> None:
stream = get_stream("Verona", self.example_user("iago").realm)
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint)
self.assert_json_error(result, "Invalid stream id", 400)
    def test_get_topics_non_existent_stream_web_public_request(self) -> None:
        non_existent_stream_id = 10000000000000000000000
        endpoint = f"/json/users/me/{non_existent_stream_id}/topics"
result = self.client_get(endpoint)
self.assert_json_error(result, "Invalid stream id", 400)
class TopicDeleteTest(ZulipTestCase):
def test_topic_delete(self) -> None:
initial_last_msg_id = self.get_last_message().id
stream_name = "new_stream"
topic_name = "new topic 2"
# NON-ADMIN USER
user_profile = self.example_user("hamlet")
self.subscribe(user_profile, stream_name)
# Send message
stream = get_stream(stream_name, user_profile.realm)
self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
# Deleting the topic
self.login_user(user_profile)
endpoint = "/json/streams/" + str(stream.id) + "/delete_topic"
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_error(result, "Must be an organization administrator")
self.assertEqual(self.get_last_message().id, last_msg_id)
# Make stream private with limited history
do_change_stream_invite_only(stream, invite_only=True, history_public_to_subscribers=False)
# ADMIN USER subscribed now
user_profile = self.example_user("iago")
self.subscribe(user_profile, stream_name)
self.login_user(user_profile)
new_last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
# Now admin deletes all messages in topic -- which should only
# delete new_last_msg_id, i.e. the one sent since they joined.
self.assertEqual(self.get_last_message().id, new_last_msg_id)
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, last_msg_id)
# Try to delete all messages in the topic again. There are no messages accessible
# to the administrator, so this should do nothing.
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, last_msg_id)
# Make the stream's history public to subscribers
do_change_stream_invite_only(stream, invite_only=True, history_public_to_subscribers=True)
        # Deleting the topic should now remove all messages
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, initial_last_msg_id)
# Delete again, to test the edge case of deleting an empty topic.
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, initial_last_msg_id)
|
|
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import contextlib
import cPickle as pickle
import math
import time
import urlparse
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_opts = [
cfg.StrOpt('xenapi_connection_url',
help='URL for connection to XenServer/Xen Cloud Platform. '
'A special value of unix://local can be used to connect '
'to the local unix socket. '
'Required if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_connection_username',
default='root',
help='Username for connection to XenServer/Xen Cloud Platform. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_connection_password',
help='Password for connection to XenServer/Xen Cloud Platform. '
'Used only if compute_driver=xenapi.XenAPIDriver',
secret=True),
cfg.IntOpt('xenapi_connection_concurrent',
default=5,
help='Maximum number of concurrent XenAPI connections. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
default=5.0,
help='The interval used for polling of coalescing vhds. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.BoolOpt('xenapi_check_host',
default=True,
help='Ensure compute service is running on host XenAPI '
'connects to.'),
cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
default=5,
help='Max number of times to poll for VHD to coalesce. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_sr_base_path',
default='/var/run/sr-mount',
help='Base path to the storage repository'),
cfg.StrOpt('target_host',
help='iSCSI Target Host'),
cfg.StrOpt('target_port',
default='3260',
help='iSCSI Target Port, 3260 Default'),
cfg.StrOpt('iqn_prefix',
default='iqn.2010-10.org.openstack',
help='IQN Prefix'),
# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
# when we pull support for it, we should remove this
cfg.BoolOpt('xenapi_remap_vbd_dev',
default=False,
help='Used to enable the remapping of VBD dev '
'(Works around an issue in Ubuntu Maverick)'),
cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
default='sd',
help='Specify prefix to remap VBD dev to '
'(ex. /dev/xvdb -> /dev/sdb)'),
cfg.IntOpt('xenapi_login_timeout',
default=10,
help='Timeout in seconds for XenAPI login.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.import_opt('host', 'nova.netconf')
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
if not url or password is None:
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
self._session = XenAPISession(url, username, password, self.virtapi)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session)
return self._host_state
def init_host(self, host):
if CONF.xenapi_check_host:
vm_utils.ensure_correct_host(self._session)
try:
vm_utils.cleanup_attached_vdis(self._session)
except Exception:
LOG.exception(_('Failure while cleaning up attached VDIs'))
def instance_exists(self, instance_name):
"""Checks existence of an instance on the host.
:param instance_name: The name of the instance to lookup
Returns True if an instance with the supplied name exists on
the host, False otherwise.
NOTE(belliott): This is an override of the base method for
efficiency.
"""
return self._vmops.instance_exists(instance_name)
def estimate_instance_overhead(self, instance_info):
"""Get virtualization overhead required to build an instance of the
given flavor.
:param instance_info: Instance/flavor to calculate overhead for.
:returns: Overhead memory in MB.
"""
# XenServer memory overhead is proportional to the size of the
# VM. Larger flavor VMs become more efficient with respect to
# overhead.
# interpolated formula to predict overhead required per vm.
# based on data from:
# https://wiki.openstack.org/wiki/XenServer/Overhead
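        # Worked example (illustrative): a flavor with memory_mb = 2048 gives
        # 2048 * 0.0081 + 3 = 19.59 MB, which rounds up to 20 MB of overhead.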
base = 3 # MB
per_mb = 0.0081 # MB
memory_mb = instance_info['memory_mb']
overhead = memory_mb * per_mb + base
overhead = math.ceil(overhead)
return {'memory_mb': overhead}
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
return self._vmops.list_instance_uuids()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance, block_device_info,
power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, reboot_type,
bad_volumes_callback=bad_volumes_callback)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
def change_instance_metadata(self, context, instance, diff):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True, context=None):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""Transfers the VHD of a running instance to another host, then shuts
        off the instance and copies over the COW disk
"""
# NOTE(vish): Xen currently does not use network info.
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type, block_device_info)
def suspend(self, instance):
"""suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
"""Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
"""reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance_ref, network_info)
def unplug_vifs(self, instance_ref, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
"""Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
# of mac addresses with values that are the bw counters:
        # e.g. {'instance-001': {'12:34:56:78:90:12': {'bw_in': 0, ...}}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in all_counters.iteritems():
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warn(_('Could not determine key: %s') % err,
instance=instance)
self._initiator = None
return {
'ip': self.get_host_ip_addr(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
@staticmethod
def get_host_ip_addr():
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
def attach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage from VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
def get_console_pool_info(self, console_type):
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return {'address': xs_url.netloc,
'username': CONF.xenapi_connection_username,
'password': CONF.xenapi_connection_password}
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: ignored in this driver
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
hyper_ver = utils.convert_version_to_int(self._session.product_version)
dic = {'vcpus': 0,
'memory_mb': total_ram_mb,
'local_gb': total_disk_gb,
'vcpus_used': 0,
'memory_mb_used': total_ram_mb - free_ram_mb,
'local_gb_used': used_disk_gb,
'hypervisor_type': 'xen',
'hypervisor_version': hyper_ver,
'hypervisor_hostname': host_stats['host_hostname'],
'cpu_info': host_stats['host_cpu_info']['cpu_count'],
'supported_instances': jsonutils.dumps(
host_stats['supported_instances'])}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
# NOTE(salvatore-orlando): it enforces security groups on
# host initialization and live migration.
        # In XenAPI we do not assume any instances are running upon host initialization
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False, disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
return self._vmops.check_can_live_migrate_destination(ctxt,
instance_ref,
block_migration,
disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param disk_over_commit: if true, allow disk over commit
"""
pass
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
includes the block_migration flag
"""
return self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
dest_check_data)
def get_instance_disk_info(self, instance_name):
"""Used by libvirt for live migration. We rely on xenapi
checks to do this for us.
"""
pass
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Performs the live migration of the specified instance.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, migrate VM disk.
:params migrate_data: implementation specific params
"""
self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data)
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, data, migrate_data=None):
"""Preparation live migration.
:params block_device_info:
It must be the result of _get_instance_volume_bdms()
at compute manager.
"""
# TODO(JohnGarbutt) look again when boot-from-volume hits trunk
pre_live_migration_result = {}
pre_live_migration_result['sr_uuid_map'] = \
self._vmops.attach_block_device_volumes(block_device_info)
return pre_live_migration_result
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params network_info: instance network information
        :params block_migration: if true, this is the post operation of a block migration.
"""
self._vmops.post_live_migration_at_destination(ctxt, instance_ref,
network_info, block_device_info, block_device_info)
def unfilter_instance(self, instance_ref, network_info):
"""Removes security groups configured for an instance."""
return self._vmops.unfilter_instance(instance_ref, network_info)
def refresh_security_group_rules(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when security group rules are updated.
"""
return self._vmops.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when instances are added/removed to a security group.
"""
return self._vmops.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""Updates security group rules for specified instance.
Invoked when instances are added/removed to a security group
or when a rule is added/removed to a security group.
"""
return self._vmops.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
return self._vmops.refresh_provider_fw_rules()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run the update first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""The only valid values for 'action' on XenServer are 'reboot' or
'shutdown', even though the API also accepts 'startup'. As this is
not technically possible on XenServer, since the host is the same
physical machine as the hypervisor, if this is requested, we need to
raise an exception.
"""
if action in ("reboot", "shutdown"):
return self._host.host_power_action(host, action)
else:
msg = _("Host startup on XenServer is not supported.")
raise NotImplementedError(msg)
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._host.set_host_enabled(host, enabled)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
return self._host.get_host_uptime(host)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self._host.host_maintenance_mode(host, mode)
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
return self._pool.remove_from_aggregate(context,
aggregate, host, **kwargs)
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
self._vmops.power_on(instance)
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage
info
"""
return self._vmops.get_per_instance_usage()
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
# This is not a config option as it should only ever be
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
    # MINOR VERSION: Compatible changes, new plugins, etc
PLUGIN_REQUIRED_VERSION = '1.0'
def __init__(self, url, user, pw, virtapi):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._virtapi = virtapi
self._verify_plugin_version()
def _verify_plugin_version(self):
# Verify that we're using the right version of the plugins
returned_version = self.call_plugin_serialized(
'nova_plugin_version', 'get_version')
# Can't use vmops.cmp_version because that tolerates differences in
# major version
req_maj, req_min = self.PLUGIN_REQUIRED_VERSION.split('.')
got_maj, got_min = returned_version.split('.')
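        # The plugin is considered compatible when its major version matches
        # exactly and its minor version is at least the required minor. Note
        # that the comparison below works on the split string parts, which is
        # assumed to be sufficient while version components stay single digit.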
if req_maj != got_maj or req_min > got_min:
raise self.XenAPI.Failure(
_("Plugin version mismatch (Expected %(exp)s, got %(got)s)") %
{'exp': self.PLUGIN_REQUIRED_VERSION, 'got': returned_version})
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure as e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw)
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in xrange(CONF.xenapi_connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = self._virtapi.aggregate_get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand.
"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
# Product version is only set in some cases (e.g. XCP, XenServer) and
# not in others (e.g. xenserver-core, XAPI-XCP).
# In these cases, the platform version is the best number to use.
if product_version_str is None:
product_version_str = software_version.get('platform_version',
'0.0.0')
product_brand = software_version.get('product_brand')
product_version = tuple(int(part) for part in
product_version_str.split('.'))
return product_version, product_brand
def _get_software_version(self):
host = self.get_xenapi_host()
return self.call_xenapi('host.get_software_version', host)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def get_xenapi_host(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(johannes): Fetch host before we acquire a session. Since
# get_xenapi_host() acquires a session too, it can result in a
# deadlock if multiple greenthreads race with each other. See
# bug 924918
host = self.get_xenapi_host()
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
host, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
"""Allows a plugin to raise RetryableError so we can try again."""
attempts = num_retries + 1
sleep_time = 0.5
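        # Retry with exponential backoff: the delay starts at 0.5s and is
        # doubled after every retryable failure, capped at 15s below.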
for attempt in xrange(1, attempts + 1):
LOG.info(_('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d'),
{'plugin': plugin, 'fn': fn, 'attempt': attempt,
'attempts': attempts})
try:
if callback:
callback(kwargs)
return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
except self.XenAPI.Failure as exc:
if self._is_retryable_exception(exc):
LOG.warn(_('%(plugin)s.%(fn)s failed. Retrying call.')
% {'plugin': plugin, 'fn': fn})
else:
raise
time.sleep(sleep_time)
sleep_time = min(2 * sleep_time, 15)
raise exception.PluginRetriesExceeded(num_retries=num_retries)
def _is_retryable_exception(self, exc):
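        # Plugin failures surface as XenAPI.Failure whose details list looks
        # roughly like (failure type, failing method, error class, ...); only
        # the first three entries are inspected here.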
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug(_("RetryableError, so retrying upload_vhd"),
exc_info=True)
return True
elif "signal" in method:
LOG.debug(_("Error due to a signal, retrying upload_vhd"),
exc_info=True)
return True
else:
return False
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
self.is_local_connection = url == "unix://local"
if self.is_local_connection:
return self.XenAPI.xapi_local()
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure as exc:
LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
# FIXME(comstud): eval is evil.
params = eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError as exc:
LOG.debug(_("Got exception: %s"), exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
for ref in self.call_xenapi('%s.get_all' % record_type):
rec = self.get_rec(record_type, ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_record call
if rec:
yield ref, rec
|
|
"""Test the MySensors config flow."""
from __future__ import annotations
from typing import Any
from unittest.mock import patch
import pytest
from homeassistant import config_entries, setup
from homeassistant.components.mysensors.const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_MQTT,
CONF_GATEWAY_TYPE_SERIAL,
CONF_GATEWAY_TYPE_TCP,
CONF_PERSISTENCE,
CONF_PERSISTENCE_FILE,
CONF_RETAIN,
CONF_TCP_PORT,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
CONF_VERSION,
DOMAIN,
ConfGatewayType,
)
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResult
from tests.common import MockConfigEntry
async def get_form(
    hass: HomeAssistant, gateway_type: ConfGatewayType, expected_step_id: str
) -> FlowResult:
"""Get a form for the given gateway type."""
await setup.async_setup_component(hass, "persistent_notification", {})
stepuser = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert stepuser["type"] == "form"
assert not stepuser["errors"]
result = await hass.config_entries.flow.async_configure(
stepuser["flow_id"],
        {CONF_GATEWAY_TYPE: gateway_type},
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == expected_step_id
return result
async def test_config_mqtt(hass: HomeAssistant, mqtt: None) -> None:
"""Test configuring a mqtt gateway."""
step = await get_form(hass, CONF_GATEWAY_TYPE_MQTT, "gw_mqtt")
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "bla",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
if "errors" in result2:
assert not result2["errors"]
assert result2["type"] == "create_entry"
assert result2["title"] == "mqtt"
assert result2["data"] == {
CONF_DEVICE: "mqtt",
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "bla",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_VERSION: "2.4",
CONF_GATEWAY_TYPE: "MQTT",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_missing_mqtt(hass: HomeAssistant) -> None:
"""Test configuring a mqtt gateway without mqtt integration setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert not result["errors"]
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_MQTT},
)
assert result["step_id"] == "user"
assert result["type"] == "form"
assert result["errors"] == {"base": "mqtt_required"}
async def test_config_serial(hass: HomeAssistant) -> None:
"""Test configuring a gateway via serial."""
step = await get_form(hass, CONF_GATEWAY_TYPE_SERIAL, "gw_serial")
flow_id = step["flow_id"]
with patch( # mock is_serial_port because otherwise the test will be platform dependent (/dev/ttyACMx vs COMx)
"homeassistant.components.mysensors.config_flow.is_serial_port",
return_value=True,
), patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_BAUD_RATE: 115200,
CONF_DEVICE: "/dev/ttyACM0",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
if "errors" in result2:
assert not result2["errors"]
assert result2["type"] == "create_entry"
assert result2["title"] == "/dev/ttyACM0"
assert result2["data"] == {
CONF_DEVICE: "/dev/ttyACM0",
CONF_BAUD_RATE: 115200,
CONF_VERSION: "2.4",
CONF_GATEWAY_TYPE: "Serial",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_config_tcp(hass: HomeAssistant) -> None:
"""Test configuring a gateway via tcp."""
step = await get_form(hass, CONF_GATEWAY_TYPE_TCP, "gw_tcp")
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
if "errors" in result2:
assert not result2["errors"]
assert result2["type"] == "create_entry"
assert result2["title"] == "127.0.0.1"
assert result2["data"] == {
CONF_DEVICE: "127.0.0.1",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.4",
CONF_GATEWAY_TYPE: "TCP",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_fail_to_connect(hass: HomeAssistant) -> None:
"""Test configuring a gateway via tcp."""
step = await get_form(hass, CONF_GATEWAY_TYPE_TCP, "gw_tcp")
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=False
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert "errors" in result2
errors = result2["errors"]
assert errors
assert errors.get("base") == "cannot_connect"
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
@pytest.mark.parametrize(
"gateway_type, expected_step_id, user_input, err_field, err_string",
[
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 600_000,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
CONF_TCP_PORT,
"port_out_of_range",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 0,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "2.4",
},
CONF_TCP_PORT,
"port_out_of_range",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "a",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "a.b",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "4",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.1",
CONF_VERSION: "v3",
},
CONF_VERSION,
"invalid_version",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "127.0.0.",
CONF_VERSION: "2.4",
},
CONF_DEVICE,
"invalid_ip",
),
(
CONF_GATEWAY_TYPE_TCP,
"gw_tcp",
{
CONF_TCP_PORT: 5003,
CONF_DEVICE: "abcd",
CONF_VERSION: "2.4",
},
CONF_DEVICE,
"invalid_ip",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "bla",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_PERSISTENCE_FILE: "asdf.zip",
CONF_VERSION: "2.4",
},
CONF_PERSISTENCE_FILE,
"invalid_persistence_file",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "/#/#",
CONF_TOPIC_OUT_PREFIX: "blub",
CONF_VERSION: "2.4",
},
CONF_TOPIC_IN_PREFIX,
"invalid_subscribe_topic",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "asdf",
CONF_TOPIC_OUT_PREFIX: "/#/#",
CONF_VERSION: "2.4",
},
CONF_TOPIC_OUT_PREFIX,
"invalid_publish_topic",
),
(
CONF_GATEWAY_TYPE_MQTT,
"gw_mqtt",
{
CONF_RETAIN: True,
CONF_TOPIC_IN_PREFIX: "asdf",
CONF_TOPIC_OUT_PREFIX: "asdf",
CONF_VERSION: "2.4",
},
CONF_TOPIC_OUT_PREFIX,
"same_topic",
),
],
)
async def test_config_invalid(
hass: HomeAssistant,
mqtt: None,
gateway_type: ConfGatewayType,
expected_step_id: str,
user_input: dict[str, Any],
err_field: str,
err_string: str,
) -> None:
"""Perform a test that is expected to generate an error."""
step = await get_form(hass, gateway_type, expected_step_id)
flow_id = step["flow_id"]
with patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.gateway.socket.getaddrinfo",
side_effect=OSError,
), patch(
"homeassistant.components.mysensors.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
flow_id,
user_input,
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert "errors" in result2
errors = result2["errors"]
assert errors
assert err_field in errors
assert errors[err_field] == err_string
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
@pytest.mark.parametrize(
"user_input",
[
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 57600,
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "bla.json",
},
{
CONF_DEVICE: "COM5",
CONF_PERSISTENCE_FILE: "bla.json",
CONF_BAUD_RATE: 57600,
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: True,
},
{
CONF_DEVICE: "mqtt",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 5003,
CONF_TOPIC_IN_PREFIX: "intopic",
CONF_TOPIC_OUT_PREFIX: "outtopic",
CONF_VERSION: "2.4",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "127.0.0.1",
CONF_PERSISTENCE_FILE: "blub.pickle",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 343,
CONF_VERSION: "2.4",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
],
)
async def test_import(hass: HomeAssistant, mqtt: None, user_input: dict) -> None:
"""Test importing a gateway."""
await setup.async_setup_component(hass, "persistent_notification", {})
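    # sys.platform is patched to "win32" so that Windows style serial devices
    # such as "COM5" in the import data pass validation regardless of the
    # platform the test suite runs on (an assumption about the serial check).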
with patch("sys.platform", "win32"), patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, data=user_input, context={"source": config_entries.SOURCE_IMPORT}
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
@pytest.mark.parametrize(
"first_input, second_input, expected_result",
[
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "same2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "same2",
},
(CONF_TOPIC_IN_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "different1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "different3",
CONF_TOPIC_OUT_PREFIX: "different4",
},
None,
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different4",
},
(CONF_TOPIC_IN_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "different1",
CONF_TOPIC_OUT_PREFIX: "same1",
},
(CONF_TOPIC_OUT_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different2",
},
{
CONF_DEVICE: "mqtt",
CONF_VERSION: "2.3",
CONF_TOPIC_IN_PREFIX: "same1",
CONF_TOPIC_OUT_PREFIX: "different1",
},
(CONF_TOPIC_IN_PREFIX, "duplicate_topic"),
),
(
{
CONF_DEVICE: "127.0.0.1",
CONF_PERSISTENCE_FILE: "same.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "same.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
("persistence_file", "duplicate_persistence_file"),
),
(
{
CONF_DEVICE: "127.0.0.1",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "same.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "127.0.0.1",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different1.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different2.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
("base", "already_configured"),
),
(
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different1.json",
CONF_TCP_PORT: 343,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.2",
CONF_PERSISTENCE_FILE: "different2.json",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "192.168.1.2",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
{
CONF_DEVICE: "192.168.1.3",
CONF_TCP_PORT: 5003,
CONF_VERSION: "2.3",
CONF_PERSISTENCE: False,
CONF_RETAIN: False,
},
None,
),
(
{
CONF_DEVICE: "COM5",
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different1.json",
},
{
CONF_DEVICE: "COM5",
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different2.json",
},
("base", "already_configured"),
),
(
{
CONF_DEVICE: "COM6",
CONF_BAUD_RATE: 57600,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
},
{
CONF_DEVICE: "COM5",
CONF_TCP_PORT: 5003,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
},
None,
),
(
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 115200,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different1.json",
},
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 57600,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "different2.json",
},
("base", "already_configured"),
),
(
{
CONF_DEVICE: "COM5",
CONF_BAUD_RATE: 115200,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "same.json",
},
{
CONF_DEVICE: "COM6",
CONF_BAUD_RATE: 57600,
CONF_RETAIN: True,
CONF_VERSION: "2.3",
CONF_PERSISTENCE_FILE: "same.json",
},
("persistence_file", "duplicate_persistence_file"),
),
(
{
CONF_DEVICE: "mqtt",
CONF_PERSISTENCE_FILE: "bla.json",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 5003,
CONF_VERSION: "1.4",
},
{
CONF_DEVICE: "COM6",
CONF_PERSISTENCE_FILE: "bla2.json",
CONF_BAUD_RATE: 115200,
CONF_TCP_PORT: 5003,
CONF_VERSION: "1.4",
},
None,
),
],
)
async def test_duplicate(
hass: HomeAssistant,
mqtt: None,
first_input: dict,
second_input: dict,
expected_result: tuple[str, str] | None,
) -> None:
"""Test duplicate detection."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch("sys.platform", "win32"), patch(
"homeassistant.components.mysensors.config_flow.try_connect", return_value=True
), patch(
"homeassistant.components.mysensors.async_setup_entry",
return_value=True,
):
MockConfigEntry(domain=DOMAIN, data=first_input).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, data=second_input, context={"source": config_entries.SOURCE_IMPORT}
)
await hass.async_block_till_done()
if expected_result is None:
assert result["type"] == "create_entry"
else:
assert result["type"] == "abort"
assert result["reason"] == expected_result[1]
|
|
"""Responses.
Responses serve both testing purposes as well as dynamic docstring replacement.
"""
responses = {
"_v3_accounts": {
"url": "/v3/accounts",
"response": {
"accounts": [
{
"id": "101-004-1435156-002",
"tags": []
},
{
"id": "101-004-1435156-001",
"tags": []
}
]
},
},
"_v3_account_by_accountID": {
"url": "/v3/accounts/{}",
"response": {
"account": {
"trades": [
{
"instrument": "DE30_EUR",
"financing": "0.0000",
"openTime": "2016-07-12T09:32:18.062823776Z",
"initialUnits": "-10",
"currentUnits": "-10",
"price": "9984.7",
"unrealizedPL": "341.0000",
"realizedPL": "0.0000",
"state": "OPEN",
"id": "821"
},
{
"instrument": "DE30_EUR",
"financing": "0.0000",
"openTime": "2016-07-12T09:32:18.206929733Z",
"initialUnits": "-10",
"currentUnits": "-10",
"price": "9984.7",
"unrealizedPL": "341.0000",
"realizedPL": "0.0000",
"state": "OPEN",
"id": "823"
}
],
"marginCloseoutNAV": "49393.6580",
"marginUsed": "9948.9000",
"currency": "EUR",
"resettablePL": "-1301.0046",
"NAV": "49377.6580",
"marginCloseoutMarginUsed": "9949.8000",
"id": "101-004-1435156-001",
"marginCloseoutPositionValue": "198996.0000",
"openTradeCount": 2,
"orders": [
{
"partialFill": "DEFAULT_FILL",
"price": "0.87000",
"stopLossOnFill": {
"timeInForce": "GTC",
"price": "0.88000"
},
"timeInForce": "GTC",
"clientExtensions": {
"comment": "myComment",
"id": "myID"
},
"id": "204",
"triggerCondition": "TRIGGER_DEFAULT",
"replacesOrderID": "200",
"positionFill": "POSITION_DEFAULT",
"createTime": "2016-07-08T07:18:47.623211321Z",
"instrument": "EUR_GBP",
"state": "PENDING",
"units": "-50000",
"type": "LIMIT"
}
],
"hedgingEnabled": False,
"marginCloseoutPercent": "0.10072",
"marginCallMarginUsed": "9949.8000",
"openPositionCount": 1,
"positionValue": "198978.0000",
"pl": "-1301.0046",
"lastTransactionID": "833",
"marginAvailable": "39428.7580",
"marginCloseoutUnrealizedPL": "698.0000",
"marginRate": "0.05",
"marginCallPercent": "0.20144",
"pendingOrderCount": 1,
"withdrawalLimit": "39428.7580",
"unrealizedPL": "682.0000",
"alias": "hootnotv20",
"createdByUserID": 1435156,
"positions": [
{
"short": {
"units": "0",
"resettablePL": "0.0000",
"unrealizedPL": "0.0000",
"pl": "0.0000"
},
"unrealizedPL": "0.0000",
"long": {
"units": "0",
"resettablePL": "-3.8046",
"unrealizedPL": "0.0000",
"pl": "-3.8046"
},
"instrument": "EUR_USD",
"resettablePL": "-3.8046",
"pl": "-3.8046"
},
{
"short": {
"unrealizedPL": "682.0000",
"tradeIDs": [
"821",
"823"
],
"resettablePL": "-1744.8000",
"units": "-20",
"averagePrice": "9984.7",
"pl": "-1744.8000"
},
"unrealizedPL": "682.0000",
"long": {
"units": "0",
"resettablePL": "447.6000",
"unrealizedPL": "0.0000",
"pl": "447.6000"
},
"instrument": "DE30_EUR",
"resettablePL": "-1297.2000",
"pl": "-1297.2000"
}
],
"createdTime": "2016-06-24T21:03:50.914647476Z",
"balance": "48695.6580"
},
"lastTransactionID": "833"
}
},
"_v3_account_by_accountID_summary": {
"url": "v3/accounts/{accountID}/summary",
"response": {
"account": {
"marginCloseoutNAV": "35454.4740",
"marginUsed": "10581.5000",
"currency": "EUR",
"resettablePL": "-13840.3525",
"NAV": "35454.4740",
"marginCloseoutMarginUsed": "10581.5000",
"marginCloseoutPositionValue": "211630.0000",
"openTradeCount": 2,
"id": "101-004-1435156-001",
"openPositionCount": 1,
"marginCloseoutPercent": "0.14923",
"marginCallMarginUsed": "10581.5000",
"hedgingEnabled": False,
"positionValue": "211630.0000",
"pl": "-13840.3525",
"lastTransactionID": "2123",
"marginAvailable": "24872.9740",
"marginRate": "0.05",
"marginCallPercent": "0.29845",
"pendingOrderCount": 0,
"withdrawalLimit": "24872.9740",
"unrealizedPL": "0.0000",
"alias": "hootnotv20",
"createdByUserID": 1435156,
"marginCloseoutUnrealizedPL": "0.0000",
"createdTime": "2016-06-24T21:03:50.914647476Z",
"balance": "35454.4740"
},
"lastTransactionID": "2123"
}
},
"_v3_account_by_accountID_instruments": {
"url": "/v3/accounts/{accountID}/instuments",
"params": {
"instruments": "EU50_EUR,EUR_USD,US30_USD,"
"FR40_EUR,EUR_CHF,DE30_EUR"
},
"response": {
"instruments": [
{
"minimumTradeSize": "1",
"displayName": "Europe 50",
"name": "EU50_EUR",
"displayPrecision": 1,
"type": "CFD",
"minimumTrailingStopDistance": "5.0",
"marginRate": "0.05",
"maximumOrderUnits": "3000",
"tradeUnitsPrecision": 0,
"pipLocation": 0,
"maximumPositionSize": "0",
"maximumTrailingStopDistance": "10000.0"
},
{
"minimumTradeSize": "1",
"displayName": "EUR/USD",
"name": "EUR_USD",
"displayPrecision": 5,
"type": "CURRENCY",
"minimumTrailingStopDistance": "0.00050",
"marginRate": "0.05",
"maximumOrderUnits": "100000000",
"tradeUnitsPrecision": 0,
"pipLocation": -4,
"maximumPositionSize": "0",
"maximumTrailingStopDistance": "1.00000"
},
{
"minimumTradeSize": "1",
"displayName": "US Wall St 30",
"name": "US30_USD",
"displayPrecision": 1,
"type": "CFD",
"minimumTrailingStopDistance": "5.0",
"marginRate": "0.05",
"maximumOrderUnits": "1000",
"tradeUnitsPrecision": 0,
"pipLocation": 0,
"maximumPositionSize": "0",
"maximumTrailingStopDistance": "10000.0"
},
{
"minimumTradeSize": "1",
"displayName": "France 40",
"name": "FR40_EUR",
"displayPrecision": 1,
"type": "CFD",
"minimumTrailingStopDistance": "5.0",
"marginRate": "0.05",
"maximumOrderUnits": "2000",
"tradeUnitsPrecision": 0,
"pipLocation": 0,
"maximumPositionSize": "0",
"maximumTrailingStopDistance": "10000.0"
},
{
"minimumTradeSize": "1",
"displayName": "EUR/CHF",
"name": "EUR_CHF",
"displayPrecision": 5,
"type": "CURRENCY",
"minimumTrailingStopDistance": "0.00050",
"marginRate": "0.05",
"maximumOrderUnits": "100000000",
"tradeUnitsPrecision": 0,
"pipLocation": -4,
"maximumPositionSize": "0",
"maximumTrailingStopDistance": "1.00000"
},
{
"minimumTradeSize": "1",
"displayName": "Germany 30",
"name": "DE30_EUR",
"displayPrecision": 1,
"type": "CFD",
"minimumTrailingStopDistance": "5.0",
"marginRate": "0.05",
"maximumOrderUnits": "2500",
"tradeUnitsPrecision": 0,
"pipLocation": 0,
"maximumPositionSize": "0",
"maximumTrailingStopDistance": "10000.0"
},
],
"lastTransactionID": "2124"
},
},
"_v3_accounts_accountID_account_config": {
"url": "/v3/accounts/{accountID}/configuration",
"body": {
"marginRate": "0.05"
},
"response": {
"lastTransactionID": "830",
"clientConfigureTransaction": {
"userID": 1435156,
"marginRate": "0.05",
"batchID": "830",
"time": "2016-07-12T19:48:11.657494168Z",
"type": "CLIENT_CONFIGURE",
"id": "830",
"accountID": "101-004-1435156-001"
}
},
},
"_v3_accounts_accountID_account_changes": {
"url": "/v3/accounts/{accountID}/changes",
"params": {
"sinceTransactionID": 2308
},
"response": {
"state": {
"trades": [],
"marginCloseoutNAV": "33848.2663",
"unrealizedPL": "0.0000",
"marginUsed": "0.0000",
"marginAvailable": "33848.2663",
"positions": [],
"marginCloseoutUnrealizedPL": "0.0000",
"marginCallMarginUsed": "0.0000",
"marginCallPercent": "0.00000",
"marginCloseoutPercent": "0.00000",
"NAV": "33848.2663",
"marginCloseoutMarginUsed": "0.0000",
"positionValue": "0.0000",
"orders": [],
"withdrawalLimit": "33848.2663"
},
"changes": {
"tradesReduced": [],
"tradesOpened": [],
"ordersFilled": [],
"transactions": [
{
"timeInForce": "GTC",
"triggerCondition": "TRIGGER_DEFAULT",
"positionFill": "DEFAULT",
"stopLossOnFill": {
"timeInForce": "GTC",
"price": "1.22000"
},
"userID": 1435156,
"id": "2309",
"batchID": "2309",
"instrument": "EUR_USD",
"reason": "CLIENT_ORDER",
"time": "2016-10-25T21:07:21.065554321Z",
"units": "-100",
"type": "LIMIT_ORDER",
"price": "1.20000",
"accountID": "101-004-1435156-001"
}
],
"ordersCreated": [
{
"triggerCondition": "TRIGGER_DEFAULT",
"partialFill": "DEFAULT_FILL",
"price": "1.20000",
"stopLossOnFill": {
"timeInForce": "GTC",
"price": "1.22000"
},
"createTime": "2016-10-25T21:07:21.065554321Z",
"timeInForce": "GTC",
"instrument": "EUR_USD",
"state": "PENDING",
"units": "-100",
"id": "2309",
"type": "LIMIT",
"positionFill": "POSITION_DEFAULT"
}
],
"positions": [],
"ordersTriggered": [],
"ordersCancelled": [],
"tradesClosed": []
},
"lastTransactionID": "2309"
}
}
}
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mox
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.openstack.common import jsonutils
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
from nova.scheduler import driver
from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests import fake_instance_actions
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.scheduler import fakes
from nova import utils
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'nova.scheduler.driver.Scheduler'
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
fake_instance_actions.stub_out_action_events(self.stubs)
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertTrue(isinstance(manager.driver, self.driver_cls))
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
self.mox.StubOutWithMock(self.manager.driver,
'update_service_capabilities')
# Test no capabilities passes empty dictionary
self.manager.driver.update_service_capabilities(service_name,
host, {})
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host, capabilities={})
self.mox.VerifyAll()
self.mox.ResetAll()
# Test capabilities passes correctly
capabilities = {'fake_capability': 'fake_value'}
self.manager.driver.update_service_capabilities(
service_name, host, capabilities)
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host,
capabilities=capabilities)
def test_update_service_multiple_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
self.mox.StubOutWithMock(self.manager.driver,
'update_service_capabilities')
capab1 = {'fake_capability': 'fake_value1'},
capab2 = {'fake_capability': 'fake_value2'},
capab3 = None
self.manager.driver.update_service_capabilities(
service_name, host, capab1)
self.manager.driver.update_service_capabilities(
service_name, host, capab2)
# None is converted to {}
self.manager.driver.update_service_capabilities(
service_name, host, {})
self.mox.ReplayAll()
self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host,
capabilities=[capab1, capab2, capab3])
def test_show_host_resources(self):
host = 'fake_host'
compute_node = {'host': host,
'compute_node': [{'vcpus': 4,
'vcpus_used': 2,
'memory_mb': 1024,
'memory_mb_used': 512,
'local_gb': 1024,
'local_gb_used': 512}]}
instances = [{'project_id': 'project1',
'vcpus': 1,
'memory_mb': 128,
'root_gb': 128,
'ephemeral_gb': 0},
{'project_id': 'project1',
'vcpus': 2,
'memory_mb': 256,
'root_gb': 384,
'ephemeral_gb': 0},
{'project_id': 'project2',
'vcpus': 2,
'memory_mb': 256,
'root_gb': 256,
'ephemeral_gb': 0}]
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.service_get_by_compute_host(self.context, host).AndReturn(
compute_node)
db.instance_get_all_by_host(self.context, host).AndReturn(instances)
self.mox.ReplayAll()
result = self.manager.show_host_resources(self.context, host)
expected = {'usage': {'project1': {'memory_mb': 384,
'vcpus': 3,
'root_gb': 512,
'ephemeral_gb': 0},
'project2': {'memory_mb': 256,
'vcpus': 2,
'root_gb': 256,
'ephemeral_gb': 0}},
'resource': {'vcpus': 4,
'vcpus_used': 2,
'local_gb': 1024,
'local_gb_used': 512,
'memory_mb': 1024,
'memory_mb_used': 512}}
self.assertThat(result, matchers.DictMatches(expected))
def _mox_schedule_method_helper(self, method_name):
        # Make sure the method we are going to call exists on the driver.
def stub_method(*args, **kwargs):
pass
setattr(self.manager.driver, method_name, stub_method)
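        # mox.StubOutWithMock can only replace an attribute that already
        # exists, so a no-op stub is attached to the driver first.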
self.mox.StubOutWithMock(self.manager.driver,
method_name)
def test_run_instance_exception_puts_instance_in_error_state(self):
fake_instance_uuid = 'fake-instance-id'
inst = {"vm_state": "", "task_state": ""}
self._mox_schedule_method_helper('schedule_run_instance')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
request_spec = {'instance_properties': inst,
'instance_uuids': [fake_instance_uuid]}
self.manager.driver.schedule_run_instance(self.context,
request_spec, None, None, None, None, {}).AndRaise(
exception.NoValidHost(reason=""))
old, new_ref = db.instance_update_and_get_original(self.context,
fake_instance_uuid,
{"vm_state": vm_states.ERROR,
"task_state": None}).AndReturn((inst, inst))
compute_utils.add_instance_fault_from_exc(self.context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
self.manager.run_instance(self.context, request_spec,
None, None, None, None, {})
def test_live_migration_schedule_novalidhost(self):
inst = {"uuid": "fake-instance-id",
"vm_state": vm_states.ACTIVE,
"task_state": task_states.MIGRATING, }
dest = None
block_migration = False
disk_over_commit = False
self._mox_schedule_method_helper('schedule_live_migration')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.manager.driver.schedule_live_migration(self.context,
inst, dest, block_migration, disk_over_commit).AndRaise(
exception.NoValidHost(reason=""))
db.instance_update_and_get_original(self.context, inst["uuid"],
{"vm_state": inst['vm_state'],
"task_state": None,
"expected_task_state": task_states.MIGRATING,
}).AndReturn((inst, inst))
compute_utils.add_instance_fault_from_exc(self.context,
mox.IsA(conductor_api.LocalAPI), inst,
mox.IsA(exception.NoValidHost),
mox.IgnoreArg())
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost,
self.manager.live_migration,
self.context, inst, dest, block_migration,
disk_over_commit)
def test_live_migration_compute_service_notavailable(self):
inst = {"uuid": "fake-instance-id",
"vm_state": vm_states.ACTIVE,
"task_state": task_states.MIGRATING, }
dest = 'fake_host'
block_migration = False
disk_over_commit = False
self._mox_schedule_method_helper('schedule_live_migration')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.manager.driver.schedule_live_migration(self.context,
inst, dest, block_migration, disk_over_commit).AndRaise(
exception.ComputeServiceUnavailable(host="src"))
db.instance_update_and_get_original(self.context, inst["uuid"],
{"vm_state": inst['vm_state'],
"task_state": None,
"expected_task_state": task_states.MIGRATING,
}).AndReturn((inst, inst))
compute_utils.add_instance_fault_from_exc(self.context,
mox.IsA(conductor_api.LocalAPI), inst,
mox.IsA(exception.ComputeServiceUnavailable),
mox.IgnoreArg())
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.manager.live_migration,
self.context, inst, dest, block_migration,
disk_over_commit)
def test_prep_resize_no_valid_host_back_in_active_state(self):
fake_instance_uuid = 'fake-instance-id'
fake_instance = {'uuid': fake_instance_uuid}
inst = {"vm_state": "", "task_state": ""}
self._mox_schedule_method_helper('schedule_prep_resize')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
request_spec = {'instance_type': 'fake_type',
'instance_uuids': [fake_instance_uuid],
'instance_properties': {'uuid': fake_instance_uuid}}
kwargs = {
'context': self.context,
'image': 'fake_image',
'request_spec': request_spec,
'filter_properties': 'fake_props',
'instance': fake_instance,
'instance_type': 'fake_type',
'reservations': list('fake_res'),
}
self.manager.driver.schedule_prep_resize(**kwargs).AndRaise(
exception.NoValidHost(reason=""))
old_ref, new_ref = db.instance_update_and_get_original(self.context,
fake_instance_uuid,
{"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
(inst, inst))
compute_utils.add_instance_fault_from_exc(self.context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
self.manager.prep_resize(**kwargs)
def test_prep_resize_exception_host_in_error_state_and_raise(self):
fake_instance_uuid = 'fake-instance-id'
fake_instance = {'uuid': fake_instance_uuid}
self._mox_schedule_method_helper('schedule_prep_resize')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
request_spec = {'instance_properties':
{'uuid': fake_instance_uuid}}
kwargs = {
'context': self.context,
'image': 'fake_image',
'request_spec': request_spec,
'filter_properties': 'fake_props',
'instance': fake_instance,
'instance_type': 'fake_type',
'reservations': list('fake_res'),
}
self.manager.driver.schedule_prep_resize(**kwargs).AndRaise(
test.TestingException('something happened'))
inst = {
"vm_state": "",
"task_state": "",
}
old_ref, new_ref = db.instance_update_and_get_original(self.context,
fake_instance_uuid,
{"vm_state": vm_states.ERROR,
"task_state": None}).AndReturn((inst, inst))
compute_utils.add_instance_fault_from_exc(self.context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(test.TestingException), mox.IgnoreArg())
self.mox.ReplayAll()
self.assertRaises(test.TestingException, self.manager.prep_resize,
**kwargs)
def test_set_vm_state_and_notify_adds_instance_fault(self):
request = {'instance_properties': {'uuid': 'fake-uuid'}}
updates = {'vm_state': 'foo'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'instance_fault_create')
self.mox.StubOutWithMock(notifier, 'notify')
db.instance_update_and_get_original(self.context, 'fake-uuid',
updates).AndReturn((None,
fake_inst))
db.instance_fault_create(self.context, mox.IgnoreArg())
notifier.notify(self.context, mox.IgnoreArg(), 'scheduler.foo',
notifier.ERROR, mox.IgnoreArg())
self.mox.ReplayAll()
self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
self.context, None, request)
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
def fake_show(meh, context, id):
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
else:
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.image_service = glance.get_default_image_service()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.servicegroup_api = servicegroup.API()
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
self.mox.StubOutWithMock(self.driver.host_manager,
'update_service_capabilities')
capabilities = {'fake_capability': 'fake_value'}
self.driver.host_manager.update_service_capabilities(
service_name, host, capabilities)
self.mox.ReplayAll()
result = self.driver.update_service_capabilities(service_name,
host, capabilities)
def test_hosts_up(self):
service1 = {'host': 'host1'}
service2 = {'host': 'host2'}
services = [service1, service2]
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
db.service_get_all_by_topic(self.context,
self.topic).AndReturn(services)
self.servicegroup_api.service_is_up(service1).AndReturn(False)
self.servicegroup_api.service_is_up(service2).AndReturn(True)
self.mox.ReplayAll()
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
def _live_migration_instance(self):
inst_type = instance_types.get_instance_type(1)
# NOTE(danms): we have _got_ to stop doing this!
inst_type['memory_mb'] = 1024
sys_meta = utils.dict_to_metadata(
instance_types.save_instance_type_info({}, inst_type))
return {'id': 31337,
'uuid': 'fake_uuid',
'name': 'fake-instance',
'host': 'fake_host1',
'power_state': power_state.RUNNING,
'memory_mb': 1024,
'root_gb': 1024,
'ephemeral_gb': 0,
'vm_state': '',
'task_state': '',
'instance_type_id': inst_type['id'],
'image_ref': 'fake-image-ref',
'system_metadata': sys_meta}
def test_live_migration_basic(self):
# Test basic schedule_live_migration functionality.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
self.mox.StubOutWithMock(self.driver.compute_rpcapi,
'check_can_live_migrate_destination')
self.mox.StubOutWithMock(self.driver.compute_rpcapi,
'live_migration')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = jsonutils.to_primitive(self._live_migration_instance())
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance,
dest).AndReturn(dest)
self.driver._live_migration_common_check(self.context, instance,
dest)
self.driver.compute_rpcapi.check_can_live_migrate_destination(
self.context, instance, dest, block_migration,
disk_over_commit).AndReturn({})
self.driver.compute_rpcapi.live_migration(self.context,
host=instance['host'], instance=instance, dest=dest,
block_migration=block_migration, migrate_data={})
self.mox.ReplayAll()
self.driver.schedule_live_migration(self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_all_checks_pass(self):
# Test live migration when all checks pass.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(self.driver.compute_rpcapi,
'live_migration')
dest = 'fake_host2'
block_migration = True
disk_over_commit = True
instance = jsonutils.to_primitive(self._live_migration_instance())
# Source checks
db.service_get_by_compute_host(self.context,
instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)
# Destination checks (compute is up, enough memory, disk)
db.service_get_by_compute_host(self.context,
dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
# assert_compute_node_has_enough_memory()
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'memory_mb': 2048,
'free_disk_gb': 512,
'local_gb_used': 512,
'free_ram_mb': 1280,
'local_gb': 1024,
'vcpus': 4,
'vcpus_used': 2,
'updated_at': None,
'hypervisor_version': 1}]})
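        # free_ram_mb (1280) exceeds the instance's memory_mb (1024), so the
        # memory check passes.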
# Common checks (same hypervisor, etc)
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]})
db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1,
'cpu_info': 'fake_cpu_info'}]})
rpc.call(self.context, "compute.fake_host2",
{"method": 'check_can_live_migrate_destination',
"args": {'instance': instance,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
None).AndReturn({})
self.driver.compute_rpcapi.live_migration(self.context,
host=instance['host'], instance=instance, dest=dest,
block_migration=block_migration, migrate_data={})
self.mox.ReplayAll()
result = self.driver.schedule_live_migration(self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
self.assertEqual(result, None)
def test_live_migration_instance_not_running(self):
# The instance given by instance_id is not running.
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
instance['power_state'] = power_state.NOSTATE
self.assertRaises(exception.InstanceNotRunning,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_exist(self):
        # Raise exception when src compute node does not exist.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
# Compute down
db.service_get_by_compute_host(self.context,
instance['host']).AndRaise(
exception.ComputeHostNotFound(host='fake'))
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
# Raise exception when src compute node is not alive.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
# Compute down
db.service_get_by_compute_host(self.context,
instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_dest_not_exist(self):
# Raise exception when dest compute node does not exist.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
# Compute down
db.service_get_by_compute_host(self.context,
dest).AndRaise(exception.NotFound())
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_compute_dest_not_alive(self):
# Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
db.service_get_by_compute_host(self.context,
dest).AndReturn('fake_service3')
# Compute is down
self.servicegroup_api.service_is_up('fake_service3').AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
# Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
block_migration = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
self.driver._live_migration_src_check(self.context, instance)
self.mox.ReplayAll()
self.assertRaises(exception.UnableToMigrateToSelf,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=False)
def test_live_migration_dest_check_service_lack_memory(self):
# Confirms exception raises when dest doesn't have enough memory.
# Flag needed to make FilterScheduler test hit memory limit since the
# default for it is to allow memory overcommit by a factor of 1.5.
self.flags(ram_allocation_ratio=1.0)
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
db.service_get_by_compute_host(self.context,
dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
{'memory_mb': 2048,
'free_disk_gb': 512,
'local_gb_used': 512,
'free_ram_mb': 512,
'local_gb': 1024,
'vcpus': 4,
'vcpus_used': 2,
'updated_at': None})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_different_hypervisor_type_raises(self):
        # Confirm live_migration to a hypervisor of a different type raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance,
dest).AndReturn(dest)
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]})
db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
{'compute_node': [{'hypervisor_type': 'not-xen',
'hypervisor_version': 1}]})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_dest_hypervisor_version_older_raises(self):
        # Confirm live migration to an older hypervisor raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
disk_over_commit = False
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance,
dest).AndReturn(dest)
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 1}]})
db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
'hypervisor_version': 2}]})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.driver.schedule_live_migration, self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_auto_set_host(self):
instance = self._live_migration_instance()
# Confirm dest is picked by scheduler if not set.
self.mox.StubOutWithMock(self.driver, 'select_hosts')
self.mox.StubOutWithMock(instance_types, 'extract_instance_type')
request_spec = {'instance_properties': instance,
'instance_type': {},
'instance_uuids': [instance['uuid']],
'image': self.image_service.show(self.context,
instance['image_ref'])
}
ignore_hosts = [instance['host']]
filter_properties = {'ignore_hosts': ignore_hosts}
instance_types.extract_instance_type(instance).AndReturn({})
self.driver.select_hosts(self.context, request_spec,
filter_properties).AndReturn(['fake_host2'])
self.mox.ReplayAll()
result = self.driver._live_migration_dest_check(self.context, instance,
None, ignore_hosts)
self.assertEqual('fake_host2', result)
def test_live_migration_auto_set_dest(self):
instance = self._live_migration_instance()
# Confirm scheduler picks target host if none given.
self.mox.StubOutWithMock(instance_types, 'extract_instance_type')
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, 'select_hosts')
self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')
dest = None
block_migration = False
disk_over_commit = False
request_spec = {'instance_properties': instance,
'instance_type': {},
'instance_uuids': [instance['uuid']],
'image': self.image_service.show(self.context,
instance['image_ref'])
}
self.driver._live_migration_src_check(self.context, instance)
instance_types.extract_instance_type(
instance).MultipleTimes().AndReturn({})
# First selected host raises exception.InvalidHypervisorType
self.driver.select_hosts(self.context, request_spec,
{'ignore_hosts': [instance['host']]}).AndReturn(['fake_host2'])
self.driver._live_migration_common_check(self.context, instance,
'fake_host2').AndRaise(exception.InvalidHypervisorType())
# Second selected host raises exception.InvalidCPUInfo
self.driver.select_hosts(self.context, request_spec,
{'ignore_hosts': [instance['host'],
'fake_host2']}).AndReturn(['fake_host3'])
self.driver._live_migration_common_check(self.context, instance,
'fake_host3')
rpc.call(self.context, "compute.fake_host3",
{"method": 'check_can_live_migrate_destination',
"args": {'instance': instance,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
None).AndRaise(exception.InvalidCPUInfo(reason=""))
# Third selected host pass all checks
self.driver.select_hosts(self.context, request_spec,
{'ignore_hosts': [instance['host'],
'fake_host2',
'fake_host3']}).AndReturn(['fake_host4'])
self.driver._live_migration_common_check(self.context, instance,
'fake_host4')
rpc.call(self.context, "compute.fake_host4",
{"method": 'check_can_live_migrate_destination',
"args": {'instance': instance,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
None).AndReturn({})
self.driver.compute_rpcapi.live_migration(self.context,
host=instance['host'], instance=instance, dest='fake_host4',
block_migration=block_migration, migrate_data={})
self.mox.ReplayAll()
result = self.driver.schedule_live_migration(self.context,
instance=instance, dest=dest,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
self.assertEqual(result, None)
def test_handle_schedule_error_adds_instance_fault(self):
instance = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'instance_fault_create')
self.mox.StubOutWithMock(notifier, 'notify')
db.instance_update_and_get_original(self.context, instance['uuid'],
mox.IgnoreArg()).AndReturn(
(None, instance))
db.instance_fault_create(self.context, mox.IgnoreArg())
notifier.notify(self.context, mox.IgnoreArg(),
'scheduler.run_instance',
notifier.ERROR, mox.IgnoreArg())
self.mox.ReplayAll()
driver.handle_schedule_error(self.context,
exception.NoValidHost('test'),
instance['uuid'], {})
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods
    that will fail if the driver is changed."""
def test_unimplemented_schedule_run_instance(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
fake_request_spec = {'instance_properties':
{'uuid': 'uuid'}}
self.assertRaises(NotImplementedError,
self.driver.schedule_run_instance,
self.context, fake_request_spec, None, None, None,
None, None)
def test_unimplemented_schedule_prep_resize(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
fake_request_spec = {'instance_properties':
{'uuid': 'uuid'}}
self.assertRaises(NotImplementedError,
self.driver.schedule_prep_resize,
self.context, {},
fake_request_spec, {}, {}, {}, None)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
def test_encode_instance(self):
instance = {'id': 31337,
'test_arg': 'meow'}
result = driver.encode_instance(instance, True)
expected = {'id': instance['id'], '_is_precooked': False}
self.assertThat(result, matchers.DictMatches(expected))
# Orig dict not changed
self.assertNotEqual(result, instance)
result = driver.encode_instance(instance, False)
expected = {}
expected.update(instance)
expected['_is_precooked'] = True
self.assertThat(result, matchers.DictMatches(expected))
# Orig dict not changed
self.assertNotEqual(result, instance)
|
|
#!/usr/bin/python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# Topology with two switches and two hosts. Uses SAI thrift APIs to configure
# the switches. Set 'DOCKER_IMAGE=bm-switchsai' when creating the docker image.
#
#          172.16.101.0/24            172.16.10.0/24          172.16.102.0/24
# h1 ------------------- sw1 ------------------ sw2------- -------------h2
# .5 .1 .1 .2 .1 .5
##############################################################################
from mininet.net import Mininet, VERSION
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from distutils.version import StrictVersion
from p4_mininet import P4DockerSwitch
import os
import sys
import time
lib_path = os.path.abspath(os.path.join('..', 'targets', 'switch', 'tests',
'pd_thrift'))
sys.path.append(lib_path)
import switch_sai_thrift.switch_sai_rpc as switch_sai_rpc
from switch_sai_thrift.ttypes import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
def open_connection(port):
transport = TSocket.TSocket('localhost', port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = switch_sai_rpc.Client(protocol)
transport.open()
return transport, client
def close_connection(transport):
transport.close()
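# Hedged illustration (not part of the original script): a context-manager
# wrapper around open_connection()/close_connection() so the thrift transport
# cannot be leaked. It is not used by the configuration functions below.
from contextlib import contextmanager

@contextmanager
def sai_client(port):
    transport, client = open_connection(port)
    try:
        yield client
    finally:
        close_connection(transport)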
def create_virtual_router(client, v4_enabled, v6_enabled):
#v4 enabled
vr_attribute1_value = sai_thrift_attribute_value_t(booldata=v4_enabled)
vr_attribute1 = sai_thrift_attribute_t(id=0, value=vr_attribute1_value)
#v6 enabled
vr_attribute2_value = sai_thrift_attribute_value_t(booldata=v6_enabled)
    vr_attribute2 = sai_thrift_attribute_t(id=1, value=vr_attribute2_value)
vr_attr_list = [vr_attribute1, vr_attribute2]
vr_id = client.sai_thrift_create_virtual_router(thrift_attr_list=vr_attr_list)
return vr_id
def create_router_interface(client, vr_id, is_port, port_id, vlan_id,
v4_enabled, v6_enabled):
#vrf attribute
rif_attribute1_value = sai_thrift_attribute_value_t(oid=vr_id)
rif_attribute1 = sai_thrift_attribute_t(id=0, value=rif_attribute1_value)
if is_port:
#port type and port id
rif_attribute2_value = sai_thrift_attribute_value_t(u8=0)
rif_attribute2 = sai_thrift_attribute_t(id=1,
value=rif_attribute2_value)
rif_attribute3_value = sai_thrift_attribute_value_t(oid=port_id)
rif_attribute3 = sai_thrift_attribute_t(id=2,
value=rif_attribute3_value)
else:
#vlan type and vlan id
rif_attribute2_value = sai_thrift_attribute_value_t(u8=1)
rif_attribute2 = sai_thrift_attribute_t(id=1,
value=rif_attribute2_value)
rif_attribute3_value = sai_thrift_attribute_value_t(u16=vlan_id)
rif_attribute3 = sai_thrift_attribute_t(id=3,
value=rif_attribute3_value)
#v4_enabled
rif_attribute4_value = sai_thrift_attribute_value_t(booldata=v4_enabled)
rif_attribute4 = sai_thrift_attribute_t(id=5, value=rif_attribute4_value)
#v6_enabled
rif_attribute5_value = sai_thrift_attribute_value_t(booldata=v6_enabled)
rif_attribute5 = sai_thrift_attribute_t(id=6, value=rif_attribute5_value)
rif_attr_list = [rif_attribute1, rif_attribute2, rif_attribute3,
rif_attribute4, rif_attribute5]
rif_id = client.sai_thrift_create_router_interface(rif_attr_list)
return rif_id
def create_route(client, vr_id, addr_family, ip_addr, ip_mask, nhop):
if addr_family == 0:
addr = sai_thrift_ip_t(ip4=ip_addr)
mask = sai_thrift_ip_t(ip4=ip_mask)
ip_prefix = sai_thrift_ip_prefix_t(addr_family=0, addr=addr, mask=mask)
else:
addr = sai_thrift_ip_t(ip6=ip_addr)
mask = sai_thrift_ip_t(ip6=ip_mask)
ip_prefix = sai_thrift_ip_prefix_t(addr_family=1, addr=addr, mask=mask)
route_attribute1_value = sai_thrift_attribute_value_t(oid=nhop)
route_attribute1 = sai_thrift_attribute_t(id=2,
value=route_attribute1_value)
route = sai_thrift_unicast_route_entry_t(vr_id, ip_prefix)
route_attr_list = [route_attribute1]
client.sai_thrift_create_route(thrift_unicast_route_entry=route,
thrift_attr_list=route_attr_list)
def create_nhop(client, addr_family, ip_addr, rif_id):
if addr_family == 0:
addr = sai_thrift_ip_t(ip4=ip_addr)
ipaddr = sai_thrift_ip_address_t(addr_family=0, addr=addr)
else:
addr = sai_thrift_ip_t(ip6=ip_addr)
ipaddr = sai_thrift_ip_address_t(addr_family=1, addr=addr)
nhop_attribute1_value = sai_thrift_attribute_value_t(ipaddr=ipaddr)
nhop_attribute1 = sai_thrift_attribute_t(id=1, value=nhop_attribute1_value)
nhop_attribute2_value = sai_thrift_attribute_value_t(oid=rif_id)
nhop_attribute2 = sai_thrift_attribute_t(id=2, value=nhop_attribute2_value)
nhop_attr_list = [nhop_attribute1, nhop_attribute2]
nhop = client.sai_thrift_create_next_hop(thrift_attr_list=nhop_attr_list)
return nhop
def create_neighbor(client, addr_family, rif_id, ip_addr, dmac):
if addr_family == 0:
addr = sai_thrift_ip_t(ip4=ip_addr)
ipaddr = sai_thrift_ip_address_t(addr_family=0, addr=addr)
else:
addr = sai_thrift_ip_t(ip6=ip_addr)
ipaddr = sai_thrift_ip_address_t(addr_family=1, addr=addr)
neighbor_attribute1_value = sai_thrift_attribute_value_t(mac=dmac)
neighbor_attribute1 = sai_thrift_attribute_t(id=0,
value=neighbor_attribute1_value)
neighbor_attr_list = [neighbor_attribute1]
neighbor_entry = sai_thrift_neighbor_entry_t(rif_id=rif_id,
ip_address=ipaddr)
client.sai_thrift_create_neighbor_entry(neighbor_entry, neighbor_attr_list)
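# Hedged note (illustration only): the helpers above are meant to be chained
# per next hop -- create_nhop() allocates the next-hop object, create_neighbor()
# binds its MAC, and create_route() installs prefixes that point at the returned
# nhop id -- exactly as cfg_switch1() and cfg_switch2() below do.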
def cfg_switch1():
port_list = []
transport, client = open_connection(25000)
switch_attr_list = client.sai_thrift_get_switch_attribute()
attr_list = switch_attr_list.attr_list
for attr in attr_list:
if attr.id == 0:
print 'max ports: ', attr.value.u32
elif attr.id == 1:
for x in attr.value.objlist.object_id_list:
port_list.append(x)
else:
print 'unknown switch attribute'
port1 = port_list[0]
port2 = port_list[1]
vr = create_virtual_router(client, v4_enabled=1, v6_enabled=1)
attr_value = sai_thrift_attribute_value_t(mac='00:01:00:00:00:03')
attr = sai_thrift_attribute_t(id=17, value=attr_value)
client.sai_thrift_set_switch_attribute(attr)
rif1 = create_router_interface(client, vr, 1, port1, 0, v4_enabled=1,
v6_enabled=1)
attr_value = sai_thrift_attribute_value_t(mac='00:01:00:00:00:04')
attr = sai_thrift_attribute_t(id=17, value=attr_value)
client.sai_thrift_set_switch_attribute(attr)
rif2 = create_router_interface(client, vr, 1, port2, 0, v4_enabled=1,
v6_enabled=1)
nhop1 = create_nhop(client, 0, '172.16.101.5', rif1)
create_neighbor(client, 0, rif1, '172.16.101.5', '00:03:00:00:00:01')
nhop2 = create_nhop(client, 0, '172.16.10.2', rif2)
create_neighbor(client, 0, rif2, '172.16.10.2', '00:02:00:00:00:04')
create_route(client, vr, 0, '172.16.101.5', '255.255.255.255', nhop1)
create_route(client, vr, 0, '172.16.10.2', '255.255.255.255', nhop2)
create_route(client, vr, 0, '172.16.102.0', '255.255.255.0', nhop2)
close_connection(transport)
def cfg_switch2():
port_list = []
transport, client = open_connection(25001)
switch_attr_list = client.sai_thrift_get_switch_attribute()
attr_list = switch_attr_list.attr_list
for attr in attr_list:
if attr.id == 0:
print 'max ports: ', attr.value.u32
elif attr.id == 1:
for x in attr.value.objlist.object_id_list:
port_list.append(x)
else:
print 'unknown switch attribute'
port1 = port_list[0]
port2 = port_list[1]
vr = create_virtual_router(client, v4_enabled=1, v6_enabled=1)
attr_value = sai_thrift_attribute_value_t(mac='00:02:00:00:00:03')
attr = sai_thrift_attribute_t(id=17, value=attr_value)
client.sai_thrift_set_switch_attribute(attr)
rif1 = create_router_interface(client, vr, 1, port1, 0, v4_enabled=1,
v6_enabled=1)
attr_value = sai_thrift_attribute_value_t(mac='00:02:00:00:00:04')
attr = sai_thrift_attribute_t(id=17, value=attr_value)
client.sai_thrift_set_switch_attribute(attr)
rif2 = create_router_interface(client, vr, 1, port2, 0, v4_enabled=1,
v6_enabled=1)
nhop1 = create_nhop(client, 0, '172.16.102.5', rif1)
create_neighbor(client, 0, rif1, '172.16.102.5', '00:04:00:00:00:01')
nhop2 = create_nhop(client, 0, '172.16.10.1', rif2)
create_neighbor(client, 0, rif2, '172.16.10.1', '00:01:00:00:00:04')
create_route(client, vr, 0, '172.16.102.5', '255.255.255.255', nhop1)
create_route(client, vr, 0, '172.16.10.1', '255.255.255.255', nhop2)
create_route(client, vr, 0, '172.16.101.0', '255.255.255.0', nhop2)
close_connection(transport)
def main():
net = Mininet( controller = None )
# add hosts
h1 = net.addHost( 'h1', ip = '172.16.101.5/24', mac = '00:03:00:00:00:01' )
h2 = net.addHost( 'h2', ip = '172.16.102.5/24', mac = '00:04:00:00:00:01' )
# add switch 1
sw1 = net.addSwitch( 'sw1', target_name = "p4dockerswitch",
cls = P4DockerSwitch, sai_port = 25000, pcap_dump = True )
# add switch 2
sw2 = net.addSwitch( 'sw2', target_name = "p4dockerswitch",
cls = P4DockerSwitch, sai_port = 25001, pcap_dump = True )
# add links
if StrictVersion(VERSION) <= StrictVersion('2.2.0') :
net.addLink( sw1, h1, port1 = 1 )
net.addLink( sw1, sw2, port1 = 2, port2 = 2 )
net.addLink( sw2, h2, port1 = 1 )
else:
net.addLink( sw1, h1, port1 = 1, fast=False )
net.addLink( sw1, sw2, port1 = 2, port2 = 2, fast=False )
net.addLink( sw2, h2, port1 = 1, fast=False )
net.start()
    print 'Waiting 10 seconds for switches to initialize...'
time.sleep(10)
# configure hosts
h1.setARP( ip = '172.16.101.1', mac = '00:01:00:00:00:03' )
h2.setARP( ip = '172.16.102.1', mac = '00:02:00:00:00:03' )
h1.setDefaultRoute( 'via 172.16.101.1' )
h2.setDefaultRoute( 'via 172.16.102.1' )
# configure switches
cfg_switch1()
cfg_switch2()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extensions supporting Federation."""
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib.federation import utils
from keystone import exception
CONF = config.CONF
@dependency.requires('federation_api')
class IdentityProvider(controller.V3Controller):
"""Identity Provider representation.
Two new class parameters:
- _mutable_parameters - set of parameters that can be changed by users.
Usually used by cls.check_immutable_params()
- _public_parameters - set of parameters that are exposed to the user.
Usually used by cls.filter_params()
"""
collection_name = 'identity_providers'
member_name = 'identity_provider'
_mutable_parameters = frozenset(['description', 'enabled'])
_public_parameters = frozenset(['id', 'enabled', 'description', 'links'])
@classmethod
def check_immutable_params(cls, ref, keys=None):
"""Raise exception when disallowed parameter is stored in the keys.
Check whether the ref dictionary representing a request has only
mutable parameters included. If not, raise an exception. This method
checks only root-level keys from a ref dictionary.
:param ref: a dictionary representing deserialized request to be
stored
:param keys: a set with mutable parameters. If None, use default class
attribute - _mutable_parameters
:raises exception.ImmutableAttributeError
"""
if keys is None:
keys = cls._mutable_parameters
ref_keys = set(ref.keys())
blocked_keys = ref_keys.difference(keys)
if not blocked_keys:
#No immutable parameters changed
return
exception_args = {'target': cls.__name__,
'attribute': blocked_keys.pop()}
raise exception.ImmutableAttributeError(**exception_args)
@classmethod
def filter_params(cls, ref, keys=None):
"""Remove unspecified parameters from the dictionary.
        This function removes unspecified parameters from the dictionary. See
        check_immutable_params for the corresponding function that raises
        exceptions. This method checks only root-level keys from a ref
dictionary.
:param ref: a dictionary representing deserialized response to be
serialized
:param keys: a set of attribute names, that are allowed in the request.
If None, use the class attribute _public_parameters
"""
if keys is None:
keys = cls._public_parameters
ref_keys = set(ref.keys())
blocked_keys = ref_keys - keys
for blocked_param in blocked_keys:
del ref[blocked_param]
return ref
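    # Hedged illustration (not used by the controller): both helpers above are
    # plain set arithmetic over the dictionary keys, e.g. for made-up refs:
    #   check_immutable_params({'description': 'x', 'id': 'idp1'})
    #       -> raises ImmutableAttributeError ('id' is not a mutable parameter)
    #   filter_params({'id': 'idp1', 'internal_state': 'y'})
    #       -> {'id': 'idp1'}   ('internal_state' is not a public parameter)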
@classmethod
def base_url(cls, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
path = '/OS-FEDERATION/' + cls.collection_name
return controller.V3Controller.base_url(path=path)
@classmethod
def _add_related_links(cls, ref):
"""Add URLs for entities related with Identity Provider.
Add URLs pointing to:
- protocols tied to the Identity Provider
"""
ref.setdefault('links', {})
base_path = ref['links'].get('self')
if base_path is None:
base_path = '/'.join([IdentityProvider.base_url(), ref['id']])
for name in ['protocols']:
ref['links'][name] = '/'.join([base_path, name])
@classmethod
def _add_self_referential_link(cls, ref):
id = ref.get('id')
self_path = '/'.join([cls.base_url(), id])
ref.setdefault('links', {})
ref['links']['self'] = self_path
@classmethod
def wrap_member(cls, context, ref):
cls._add_self_referential_link(ref)
cls._add_related_links(ref)
return {cls.member_name: ref}
#TODO(marek-denis): Implement, when mapping engine is ready
def _delete_tokens_issued_by_idp(self, idp_id):
"""Delete tokens created upon authentication from an IdP
After the IdP is deregistered, users authenticating via such IdP should
no longer be allowed to use federated services. Thus, delete all the
tokens issued upon authentication from IdP with idp_id id
:param idp_id: id of Identity Provider for which related tokens should
be removed.
"""
raise exception.NotImplemented()
@controller.protected()
def create_identity_provider(self, context, idp_id, identity_provider):
mutable_params = set(['description', 'enabled'])
public_params = set(['id', 'description', 'enabled'])
identity_provider = self._normalize_dict(identity_provider)
identity_provider.setdefault('enabled', False)
IdentityProvider.check_immutable_params(identity_provider,
keys=mutable_params)
idp_ref = self.federation_api.create_idp(idp_id, identity_provider)
idp_ref = IdentityProvider.filter_params(idp_ref, keys=public_params)
response = IdentityProvider.wrap_member(context, idp_ref)
return wsgi.render_response(body=response, status=('201', 'Created'))
@controller.protected()
def list_identity_providers(self, context):
ref = self.federation_api.list_idps()
ref = [self.filter_params(x) for x in ref]
return IdentityProvider.wrap_collection(context, ref)
@controller.protected()
def get_identity_provider(self, context, idp_id):
ref = self.federation_api.get_idp(idp_id)
ref = self.filter_params(ref)
return IdentityProvider.wrap_member(context, ref)
@controller.protected()
def delete_identity_provider(self, context, idp_id):
self.federation_api.delete_idp(idp_id)
@controller.protected()
def update_identity_provider(self, context, idp_id, identity_provider):
identity_provider = self._normalize_dict(identity_provider)
IdentityProvider.check_immutable_params(identity_provider)
idp_ref = self.federation_api.update_idp(idp_id, identity_provider)
return IdentityProvider.wrap_member(context, idp_ref)
@dependency.requires('federation_api')
class FederationProtocol(IdentityProvider):
"""A federation protocol representation.
See IdentityProvider docstring for explanation on _mutable_parameters
and _public_parameters class attributes.
"""
collection_name = 'protocols'
member_name = 'protocol'
_public_parameters = frozenset(['id', 'mapping_id', 'links'])
_mutable_parameters = set(['mapping_id'])
@classmethod
def _add_self_referential_link(cls, ref):
"""Add 'links' entry to the response dictionary.
        Calls the IdentityProvider.base_url() class method, as it constructs the
        proper URL with the 'identity_providers' part included.
:param ref: response dictionary
"""
ref.setdefault('links', {})
base_path = ref['links'].get('identity_provider')
if base_path is None:
base_path = [IdentityProvider.base_url(), ref['idp_id']]
base_path = '/'.join(base_path)
self_path = [base_path, 'protocols', ref['id']]
self_path = '/'.join(self_path)
ref['links']['self'] = self_path
@classmethod
def _add_related_links(cls, ref):
"""Add new entries to the 'links' subdictionary in the response.
Adds 'identity_provider' key with URL pointing to related identity
provider as a value.
:param ref: response dictionary
"""
ref.setdefault('links', {})
base_path = '/'.join([IdentityProvider.base_url(), ref['idp_id']])
ref['links']['identity_provider'] = base_path
@classmethod
def wrap_member(cls, context, ref):
cls._add_related_links(ref)
cls._add_self_referential_link(ref)
ref = cls.filter_params(ref)
return {cls.member_name: ref}
@controller.protected()
def create_protocol(self, context, idp_id, protocol_id, protocol):
ref = self._normalize_dict(protocol)
keys = self._mutable_parameters.copy()
FederationProtocol.check_immutable_params(ref, keys=keys)
ref = self.federation_api.create_protocol(idp_id, protocol_id, ref)
response = FederationProtocol.wrap_member(context, ref)
return wsgi.render_response(body=response, status=('201', 'Created'))
@controller.protected()
def update_protocol(self, context, idp_id, protocol_id, protocol):
ref = self._normalize_dict(protocol)
FederationProtocol.check_immutable_params(ref)
ref = self.federation_api.update_protocol(idp_id, protocol_id,
protocol)
return FederationProtocol.wrap_member(context, ref)
@controller.protected()
def get_protocol(self, context, idp_id, protocol_id):
ref = self.federation_api.get_protocol(idp_id, protocol_id)
return FederationProtocol.wrap_member(context, ref)
@controller.protected()
def list_protocols(self, context, idp_id):
protocols_ref = self.federation_api.list_protocols(idp_id)
protocols = list(protocols_ref)
return FederationProtocol.wrap_collection(context, protocols)
@controller.protected()
def delete_protocol(self, context, idp_id, protocol_id):
self.federation_api.delete_protocol(idp_id, protocol_id)
@dependency.requires('federation_api')
class MappingController(controller.V3Controller):
collection_name = 'mappings'
member_name = 'mapping'
@classmethod
def base_url(cls, path=None):
path = '/OS-FEDERATION/' + cls.collection_name
return controller.V3Controller.base_url(path)
@controller.protected()
def create_mapping(self, context, mapping_id, mapping):
ref = self._normalize_dict(mapping)
utils.validate_mapping_structure(ref)
mapping_ref = self.federation_api.create_mapping(mapping_id, ref)
response = MappingController.wrap_member(context, mapping_ref)
return wsgi.render_response(body=response, status=('201', 'Created'))
@controller.protected()
def list_mappings(self, context):
ref = self.federation_api.list_mappings()
return MappingController.wrap_collection(context, ref)
@controller.protected()
def get_mapping(self, context, mapping_id):
ref = self.federation_api.get_mapping(mapping_id)
return MappingController.wrap_member(context, ref)
@controller.protected()
def delete_mapping(self, context, mapping_id):
self.federation_api.delete_mapping(mapping_id)
@controller.protected()
def update_mapping(self, context, mapping_id, mapping):
mapping = self._normalize_dict(mapping)
utils.validate_mapping_structure(mapping)
mapping_ref = self.federation_api.update_mapping(mapping_id, mapping)
return MappingController.wrap_member(context, mapping_ref)
|
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# modify the runtime template for prebuilt engine
#
# Copyright 2014 (C) zhangbin
#
# License: MIT
# ----------------------------------------------------------------------------
'''
modify the runtime template for prebuilt engine
'''
import os
import sys
LUA_TEMPLATE_PATH = "templates/lua-template-runtime"
#
# WIN32_LINK_CPP_LIBS = [
# "libbox2d", "libcocos2d", "libSpine"
# ]
#
# WIN32_LINK_LUA_LIBS = [ "libluacocos2d", "libsimulator" ]
#
# WIN32_LINK_JS_LIBS = [ "libjscocos2d" ]
XCODE_LINK_CPP_LIBS = [
"libcocos2d"
]
XCODE_LINK_LUA_LIBS = [ "libluacocos2d", "libsimulator" ]
XCODE_LINK_JS_LIBS = [ "libjscocos2d", "libsimulator" ]
class TemplateModifier(object):
def __init__(self, engine_path, libs_path, version, is_for_package):
if os.path.isabs(engine_path):
self.engine_path = engine_path
else:
self.engine_path = os.path.abspath(engine_path)
if os.path.isabs(libs_path):
self.libs_path = libs_path
else:
self.libs_path = os.path.abspath(libs_path)
self.version = version
self.is_for_package = is_for_package
def modify_xcode_proj(self, proj_file_path, language):
proj_modifier_path = os.path.join(os.path.dirname(__file__), 'proj_modifier')
sys.path.append(proj_modifier_path)
import modify_pbxproj
pbx_proj = modify_pbxproj.XcodeProject.Load(proj_file_path)
replace_engine_strs = []
if language == "cpp":
targetName = "HelloCpp"
link_libs = XCODE_LINK_CPP_LIBS
replace_engine_strs.append("$(SRCROOT)/../cocos2d")
elif language == "lua":
targetName = "HelloLua"
link_libs = XCODE_LINK_CPP_LIBS + XCODE_LINK_LUA_LIBS
replace_engine_strs.append("$(SRCROOT)/../../cocos2d-x")
else:
targetName = "HelloJavascript"
link_libs = XCODE_LINK_CPP_LIBS + XCODE_LINK_JS_LIBS
replace_engine_strs.append("$(SRCROOT)/../../cocos2d-x")
replace_engine_strs.append("../../cocos2d-x")
ios_target_name = "%s-mobile" % targetName
mac_target_name = "%s-desktop" % targetName
# remove the target dependencies
pbx_proj.remove_proj_reference("cocos2d_libs.xcodeproj")
if language == "lua":
pbx_proj.remove_proj_reference("cocos2d_lua_bindings.xcodeproj")
pbx_proj.remove_proj_reference("libsimulator.xcodeproj")
if language == "js":
pbx_proj.remove_proj_reference("cocos2d_js_bindings.xcodeproj")
pbx_proj.remove_proj_reference("libsimulator.xcodeproj")
pbx_proj.remove_file_by_path("../../cocos2d-x/cocos/scripting/js-bindings/script")
common_group = pbx_proj.get_or_create_group("JS Common")
pbx_proj.add_file_if_doesnt_exist("../../../script", common_group, tree="<group>")
# pbx_proj.remove_group_by_name("JS Common")
# add libraries search path
libs_path = self.libs_path
if self.is_for_package:
libs_path = "/Applications/Cocos/frameworks/%s/prebuilt" % self.version
ios_template_prebuilt_path = os.path.join(libs_path, "ios")
pbx_proj.add_library_search_paths(ios_template_prebuilt_path, target_name=ios_target_name, recursive=False)
mac_template_prebuilt_path = os.path.join(libs_path, "mac")
pbx_proj.add_library_search_paths(mac_template_prebuilt_path, target_name=mac_target_name, recursive=False)
# add libraries for targets
ios_lib_group = pbx_proj.get_or_create_group("ios-libs")
mac_lib_group = pbx_proj.get_or_create_group("mac-libs")
for lib in link_libs:
ios_lib_name = "%s iOS.a" % lib
mac_lib_name = "%s Mac.a" % lib
ios_lib_path = os.path.join(ios_template_prebuilt_path, ios_lib_name)
pbx_proj.add_file_if_doesnt_exist(ios_lib_path, ios_lib_group, tree="<group>", target=ios_target_name)
mac_lib_path = os.path.join(mac_template_prebuilt_path, mac_lib_name)
pbx_proj.add_file_if_doesnt_exist(mac_lib_path, mac_lib_group, tree="<group>", target=mac_target_name)
# add studio resources to the xcode project of cpp template
if language == "cpp":
pbx_proj.remove_file_by_path("CloseNormal.png")
pbx_proj.remove_file_by_path("CloseSelected.png")
pbx_proj.remove_file_by_path("HelloWorld.png")
pbx_proj.remove_file_by_path("Marker Felt.ttf")
pbx_proj.remove_file_by_path("fonts")
pbx_proj.remove_file_by_path("res")
res_group = pbx_proj.get_or_create_group("Resources")
pbx_proj.add_file_if_doesnt_exist("../Resources/res", res_group, tree="<group>")
if pbx_proj.modified:
print("Save xcode project")
pbx_proj.save()
# modify the engine path
f = open(proj_file_path)
file_content = f.read()
f.close()
install_path = self.engine_path
if self.is_for_package:
install_path = "/Applications/Cocos/frameworks/%s" % self.version
for old_engine_path in replace_engine_strs:
file_content = file_content.replace(old_engine_path, install_path)
f = open(proj_file_path, "w")
f.write(file_content)
f.close()
def modify_vs_proj(self, proj_file_path, language):
proj_modifier_path = os.path.join(os.path.dirname(__file__), "proj_modifier")
sys.path.append(proj_modifier_path)
import modify_vcxproj
vcx_proj = modify_vcxproj.VCXProject(proj_file_path)
# remove the project references
vcx_proj.remove_proj_reference()
install_path = self.engine_path
if self.is_for_package:
install_path = "$(COCOS_FRAMEWORKS)\\%s\\" % self.version
copy_libs_cmd = "if not exist \"$(OutDir)\" mkdir \"$(OutDir)\"\n" \
"xcopy /Y /Q \"$(EngineRoot)\\prebuilt\\win32\\*.*\" \"$(OutDir)\"\n"
vcx_proj.set_event_command('PreLinkEvent', copy_libs_cmd, 'debug')
vcx_proj.set_event_command('PreLinkEvent', copy_libs_cmd, 'release')
if language == "js":
custom_step_event = vcx_proj.get_event_command('CustomBuildStep')
custom_step_event.replace("$(ProjectDir)..\\..\\cocos2d-x\\cocos\\scripting\\js-bindings\\script",
"$(ProjectDir)..\\..\\..\\script")
vcx_proj.set_event_command("CustomBuildStep", custom_step_event, create_new=False)
vcx_proj.remove_predefine_macro("_DEBUG", 'debug')
#
# copy_libs_cmd = "if not exist \"$(OutDir)\" mkdir \"$(OutDir)\"\n" \
# "xcopy /Y /Q \"$(EngineRoot)tools\\framework-compile\\libs\\windows\\*.*\" \"$(OutDir)\"\n"
# if self.is_for_package:
# copy_libs_cmd = "if not exist \"$(OutDir)\" mkdir \"$(OutDir)\"\n" \
# "xcopy /Y /Q \"%s\\prebuilt\\win32\\*.*\" \"$(OutDir)\"\n" % install_path
# if language == "cpp":
# copy_libs_cmd = copy_libs_cmd + "xcopy \"$(ProjectDir)..\\Resources\" \"$(OutDir)\" /D /E /I /F /Y\n"
#
# vcx_proj.set_event_command("PreLinkEvent", copy_libs_cmd)
#
# if language == "lua":
# link_cmd = "libcmt.lib;%(IgnoreSpecificDefaultLibraries)"
# vcx_proj.set_item("Link", "IgnoreSpecificDefaultLibraries", link_cmd)
#
# debug_prebuild = vcx_proj.get_event_command("PreBuildEvent", "debug")
# debug_prebuild = debug_prebuild.replace("$(ProjectDir)..\\..\\cocos2d-x\\cocos\\scripting\\js-bindings\\script",
# "$(ProjectDir)..\\..\\..\\script")
# vcx_proj.set_event_command("PreBuildEvent", debug_prebuild, "debug")
#
# release_prebuild = vcx_proj.get_event_command("PreBuildEvent", "release")
# release_prebuild = release_prebuild.replace("$(ProjectDir)..\\..\\cocos2d-x\\cocos\\scripting\\js-bindings\\script",
# "$(ProjectDir)..\\..\\..\\script")
# vcx_proj.set_event_command("PreBuildEvent", release_prebuild, "release")
vcx_proj.save()
replace_strs = []
if self.is_for_package:
replace_strs.append("$(EngineRoot)")
if language == "cpp":
# link_libs = WIN32_LINK_CPP_LIBS
replace_strs.append("$(ProjectDir)..\\cocos2d")
replace_strs.append("..\\cocos2d")
elif language == "lua":
# link_libs = WIN32_LINK_CPP_LIBS + WIN32_LINK_LUA_LIBS
replace_strs.append("$(ProjectDir)..\\..\\cocos2d-x")
replace_strs.append("..\\..\\cocos2d-x")
else:
# link_libs = WIN32_LINK_CPP_LIBS + WIN32_LINK_JS_LIBS
replace_strs.append("$(ProjectDir)..\\..\\cocos2d-x")
replace_strs.append("..\\..\\cocos2d-x")
# modify the Runtime.cpp
vcx_proj_path = os.path.dirname(proj_file_path)
cpp_path = os.path.join(vcx_proj_path, os.path.pardir, "Classes/runtime/Runtime.cpp")
if os.path.exists(cpp_path):
f = open(cpp_path)
file_content = f.read()
f.close()
file_content = file_content.replace("#ifndef _DEBUG", "#ifndef COCOS2D_DEBUG")
f = open(cpp_path, "w")
f.write(file_content)
f.close()
f = open(proj_file_path)
file_content = f.read()
f.close()
if language == "lua":
# replace the "lua\lua;" to "lua\luajit;"
file_content = file_content.replace("lua\\lua;", "lua\\luajit\\include;")
file_content = file_content.replace("MultiThreadedDebugDLL", "MultiThreadedDLL")
        for replace_str in replace_strs:
            file_content = file_content.replace(replace_str, install_path)
file_content = file_content.replace('%s\\' % install_path, install_path)
f = open(proj_file_path, "w")
f.write(file_content)
f.close()
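# Hedged usage sketch (not part of the original tool; the paths and version
# below are made up for illustration):
#   modifier = TemplateModifier('/opt/cocos/engine', '/opt/cocos/prebuilt',
#                               '3.7', is_for_package=False)
#   modifier.modify_xcode_proj(
#       'frameworks/runtime-src/proj.ios_mac/HelloLua.xcodeproj/project.pbxproj',
#       'lua')
#   modifier.modify_vs_proj(
#       'frameworks/runtime-src/proj.win32/HelloLua.vcxproj', 'lua')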
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from xml.dom import minidom
from writers import gpo_editor_writer, xml_formatted_writer
class AdmxElementType:
'''The different types of ADMX elements that can be used to display a policy.
This is related to the 'type' field in policy_templates.json, but there isn't
a perfect 1:1 mapping. This class is also used when writing ADML files, to
ensure that the ADML generated from policy_templates.json is compatible with
  the ADMX generated from policy_templates.json.
'''
MAIN = 1
STRING = 2
MULTI_STRING = 3
INT = 4
ENUM = 5
LIST = 6
GROUP = 7
@staticmethod
def GetType(policy, allow_multi_strings=False):
'''Returns the ADMX element type that should be used for the given policy.
This logic is shared between the ADMX writer and the ADML writer, to ensure
    that the ADMX and ADML generated from policy_templates.json are compatible.
Args:
policy: A dict describing the policy, as found in policy_templates.json.
allow_multi_strings: If true, the use of multi-line textbox elements is
allowed, so this function will sometimes return MULTI_STRING. If false
it falls back to single-line textboxes instead by returning STRING.
Returns:
One of the enum values of AdmxElementType.
Raises:
Exception: If policy['type'] is not recognized.
'''
policy_type = policy['type']
policy_example = policy.get('example_value')
# TODO(olsen): Some policies are defined in policy_templates.json as type
# string, but the string is actually a JSON object. We should change the
# schema so they are 'dict' or similar, but until then, we use this
# heuristic to decide whether they are actually JSON and so could benefit
# from being displayed to the user as a multi-line string:
if (policy_type == 'string' and allow_multi_strings and
policy_example is not None and policy_example.strip().startswith('{')):
return AdmxElementType.MULTI_STRING
admx_element_type = AdmxElementType._POLICY_TYPE_MAP.get(policy_type)
if admx_element_type is None:
raise Exception('Unknown policy type %s.' % policy_type)
if (admx_element_type == AdmxElementType.MULTI_STRING and
not allow_multi_strings):
return AdmxElementType.STRING
return admx_element_type
AdmxElementType._POLICY_TYPE_MAP = {
'main': AdmxElementType.MAIN,
'string': AdmxElementType.STRING,
'dict': AdmxElementType.MULTI_STRING,
'external': AdmxElementType.MULTI_STRING,
'int': AdmxElementType.INT,
'int-enum': AdmxElementType.ENUM,
'string-enum': AdmxElementType.ENUM,
'list': AdmxElementType.LIST,
'string-enum-list': AdmxElementType.LIST,
'group': AdmxElementType.GROUP
}
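# Hedged illustration (not used by any writer): sample GetType() results for a
# few made-up policy definitions, kept inside a helper so nothing runs at
# import time.
def _admx_element_type_examples():
  assert AdmxElementType.GetType({'type': 'main'}) == AdmxElementType.MAIN
  assert AdmxElementType.GetType({'type': 'int-enum'}) == AdmxElementType.ENUM
  # 'dict' policies fall back to a single-line textbox unless multi-line
  # elements are explicitly allowed.
  assert AdmxElementType.GetType({'type': 'dict'}) == AdmxElementType.STRING
  assert AdmxElementType.GetType(
      {'type': 'dict'},
      allow_multi_strings=True) == AdmxElementType.MULTI_STRING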
def GetWriter(config):
  '''Factory method for instantiating the ADMXWriter. Every Writer needs a
GetWriter method because the TemplateFormatter uses this method to
instantiate a Writer.
'''
return ADMXWriter(['win', 'win7'], config)
class ADMXWriter(xml_formatted_writer.XMLFormattedWriter,
gpo_editor_writer.GpoEditorWriter):
'''Class for generating an ADMX policy template. It is used by the
PolicyTemplateGenerator to write the admx file.
'''
# DOM root node of the generated ADMX document.
_doc = None
# The ADMX "policies" element that contains the ADMX "policy" elements that
# are generated.
_active_policies_elem = None
def Init(self):
# Shortcut to platform-specific ADMX/ADM specific configuration.
assert len(self.platforms) <= 2
self.winconfig = self.config['win_config'][self.platforms[0]]
def _AdmlString(self, name):
'''Creates a reference to the named string in an ADML file.
Args:
name: Name of the referenced ADML string.
'''
name = name.replace('.', '_')
return '$(string.' + name + ')'
def _AdmlStringExplain(self, name):
'''Creates a reference to the named explanation string in an ADML file.
Args:
name: Name of the referenced ADML explanation.
'''
name = name.replace('.', '_')
return '$(string.' + name + '_Explain)'
def _AdmlPresentation(self, name):
'''Creates a reference to the named presentation element in an ADML file.
Args:
name: Name of the referenced ADML presentation element.
'''
return '$(presentation.' + name + ')'
def _AddPolicyNamespaces(self, parent, prefix, namespace):
'''Generates the ADMX "policyNamespace" element and adds the elements to the
passed parent element. The namespace of the generated ADMX document is
define via the ADMX "target" element. Used namespaces are declared with an
ADMX "using" element. ADMX "target" and "using" elements are children of the
ADMX "policyNamespace" element.
Args:
parent: The parent node to which all generated elements are added.
prefix: A logical name that can be used in the generated ADMX document to
        refer to this namespace.
namespace: Namespace of the generated ADMX document.
'''
policy_namespaces_elem = self.AddElement(parent, 'policyNamespaces')
attributes = {
'prefix': prefix,
'namespace': namespace,
}
self.AddElement(policy_namespaces_elem, 'target', attributes)
if 'admx_using_namespaces' in self.config:
prefix_namespace_map = self.config['admx_using_namespaces']
for prefix in prefix_namespace_map:
attributes = {
'prefix': prefix,
'namespace': prefix_namespace_map[prefix],
}
self.AddElement(policy_namespaces_elem, 'using', attributes)
attributes = {
'prefix': 'windows',
'namespace': 'Microsoft.Policies.Windows',
}
self.AddElement(policy_namespaces_elem, 'using', attributes)
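  # For reference (hedged example, prefix and namespace are made up): the
  # element generated above looks roughly like
  #   <policyNamespaces>
  #     <target prefix="chromium" namespace="Chromium.Policies.Chromium"/>
  #     <using prefix="windows" namespace="Microsoft.Policies.Windows"/>
  #   </policyNamespaces>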
def _AddCategory(self, parent, name, display_name, parent_category_name=None):
'''Adds an ADMX category element to the passed parent node. The following
snippet shows an example of a category element where "chromium" is the value
of the parameter name:
<category displayName="$(string.chromium)" name="chromium"/>
Each parent node can have only one category with a given name. Adding the
same category again with the same attributes is ignored, but adding it
again with different attributes is an error.
Args:
parent: The parent node to which all generated elements are added.
name: Name of the category.
display_name: Display name of the category.
parent_category_name: Name of the parent category. Defaults to None.
'''
existing = filter(lambda e: e.getAttribute('name') == name,
parent.getElementsByTagName('category'))
if existing:
assert len(existing) == 1
assert existing[0].getAttribute('name') == name
assert existing[0].getAttribute('displayName') == display_name
return
attributes = {
'name': name,
'displayName': display_name,
}
category_elem = self.AddElement(parent, 'category', attributes)
if parent_category_name:
attributes = {'ref': parent_category_name}
self.AddElement(category_elem, 'parentCategory', attributes)
def _AddCategories(self, categories):
'''Generates the ADMX "categories" element and adds it to the categories
main node. The "categories" element defines the category for the policies
defined in this ADMX document. Here is an example of an ADMX "categories"
element:
<categories>
<category displayName="$(string.googlechrome)" name="googlechrome">
<parentCategory ref="Google:Cat_Google"/>
</category>
</categories>
Args:
      categories: The categories path, e.g. ['google', 'googlechrome']. For
each level in the path a "category" element will be generated, unless
the level contains a ':', in which case it is treated as external
references and no element is generated. Except for the root level, each
level refers to its parent. Since the root level category has no parent
it does not require a parent reference.
'''
category_name = None
for category in categories:
parent_category_name = category_name
category_name = category
if (":" not in category_name):
self._AddCategory(self._categories_elem, category_name,
self._AdmlString(category_name), parent_category_name)
def _AddSupportedOn(self, parent, supported_os_list):
'''Generates the "supportedOn" ADMX element and adds it to the passed
parent node. The "supportedOn" element contains information about supported
Windows OS versions. The following code snippet contains an example of a
"supportedOn" element:
<supportedOn>
<definitions>
<definition name="$(supported_os)"
displayName="$(string.$(supported_os))"/>
</definitions>
...
</supportedOn>
Args:
parent: The parent element to which all generated elements are added.
      supported_os_list: List with all supported Win OSes.
'''
supported_on_elem = self.AddElement(parent, 'supportedOn')
definitions_elem = self.AddElement(supported_on_elem, 'definitions')
for supported_os in supported_os_list:
attributes = {
'name': supported_os,
'displayName': self._AdmlString(supported_os)
}
self.AddElement(definitions_elem, 'definition', attributes)
def _AddStringPolicy(self, parent, name, id=None):
'''Generates ADMX elements for a String-Policy and adds them to the
passed parent node.
'''
attributes = {
'id': id or name,
'valueName': name,
'maxLength': '1000000',
}
self.AddElement(parent, 'text', attributes)
def _AddMultiStringPolicy(self, parent, name):
'''Generates ADMX elements for a multi-line String-Policy and adds them to
the passed parent node.
'''
# We currently also show a single-line textbox - see http://crbug/829328
self._AddStringPolicy(parent, name, id=name + '_Legacy')
attributes = {
'id': name,
'valueName': name,
'maxLength': '1000000',
}
self.AddElement(parent, 'multiText', attributes)
def _AddIntPolicy(self, parent, policy):
'''Generates ADMX elements for an Int-Policy and adds them to the passed
parent node.
'''
#default max value for an integer
max = 2000000000
min = 0
if self.PolicyHasRestrictions(policy):
schema = policy['schema']
if 'minimum' in schema and schema['minimum'] >= 0:
min = schema['minimum']
if 'maximum' in schema and schema['maximum'] >= 0:
max = schema['maximum']
assert type(min) == int
assert type(max) == int
attributes = {
'id': policy['name'],
'valueName': policy['name'],
'maxValue': str(max),
'minValue': str(min),
}
self.AddElement(parent, 'decimal', attributes)
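  # For reference (hedged example, names and bounds made up): a policy whose
  # schema is {'minimum': 0, 'maximum': 10} produces roughly
  #   <decimal id="SomePolicy" valueName="SomePolicy"
  #            maxValue="10" minValue="0"/>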
def _AddEnumPolicy(self, parent, policy):
'''Generates ADMX elements for an Enum-Policy and adds them to the
passed parent element.
'''
name = policy['name']
items = policy['items']
attributes = {
'id': name,
'valueName': name,
}
enum_elem = self.AddElement(parent, 'enum', attributes)
for item in items:
attributes = {'displayName': self._AdmlString(name + "_" + item['name'])}
item_elem = self.AddElement(enum_elem, 'item', attributes)
value_elem = self.AddElement(item_elem, 'value')
value_string = str(item['value'])
if policy['type'] == 'int-enum':
self.AddElement(value_elem, 'decimal', {'value': value_string})
else:
self.AddElement(value_elem, 'string', {}, value_string)
def _AddListPolicy(self, parent, key, name):
'''Generates ADMX XML elements for a List-Policy and adds them to the
passed parent element.
'''
attributes = {
# The ID must be in sync with ID of the corresponding element in the
# ADML file.
'id': name + 'Desc',
'valuePrefix': '',
'key': key + '\\' + name,
}
self.AddElement(parent, 'list', attributes)
def _AddMainPolicy(self, parent):
    '''Generates ADMX elements for a Main-Policy and adds them to the
passed parent element.
'''
enabled_value_elem = self.AddElement(parent, 'enabledValue')
self.AddElement(enabled_value_elem, 'decimal', {'value': '1'})
disabled_value_elem = self.AddElement(parent, 'disabledValue')
self.AddElement(disabled_value_elem, 'decimal', {'value': '0'})
def PolicyHasRestrictions(self, policy):
if 'schema' in policy:
return any(keyword in policy['schema'] \
for keyword in ['minimum', 'maximum'])
return False
def _GetElements(self, policy_group_elem):
'''Returns the ADMX "elements" child from an ADMX "policy" element. If the
"policy" element has no "elements" child yet, a new child is created.
Args:
policy_group_elem: The ADMX "policy" element from which the child element
"elements" is returned.
Raises:
      Exception: The policy_group_elem is not an ADMX "policy" element.
'''
if policy_group_elem.tagName != 'policy':
raise Exception('Expected a "policy" element but got a "%s" element' %
policy_group_elem.tagName)
elements_list = policy_group_elem.getElementsByTagName('elements')
if len(elements_list) == 0:
return self.AddElement(policy_group_elem, 'elements')
elif len(elements_list) == 1:
return elements_list[0]
else:
raise Exception('There is supposed to be only one "elements" node but'
' there are %s.' % str(len(elements_list)))
def _GetAdmxElementType(self, policy):
'''Returns the ADMX element type for a particular Policy.'''
return AdmxElementType.GetType(policy, allow_multi_strings=False)
def _WritePolicy(self, policy, name, key, parent):
'''Generates ADMX elements for a Policy.'''
policies_elem = self._active_policies_elem
policy_name = policy['name']
attributes = {
'name': name,
'class': self.GetClass(policy),
'displayName': self._AdmlString(policy_name),
'explainText': self._AdmlStringExplain(policy_name),
'presentation': self._AdmlPresentation(policy_name),
'key': key,
}
is_win7_only = self.IsPolicyOnWin7Only(policy)
supported_key = ('win_supported_os_win7'
if is_win7_only else 'win_supported_os')
supported_on_text = self.config[supported_key]
# Store the current "policy" AMDX element in self for later use by the
# WritePolicy method.
policy_elem = self.AddElement(policies_elem, 'policy', attributes)
self.AddElement(policy_elem, 'parentCategory', {'ref': parent})
self.AddElement(policy_elem, 'supportedOn', {'ref': supported_on_text})
element_type = self._GetAdmxElementType(policy)
if element_type == AdmxElementType.MAIN:
self.AddAttribute(policy_elem, 'valueName', policy_name)
self._AddMainPolicy(policy_elem)
elif element_type == AdmxElementType.STRING:
parent = self._GetElements(policy_elem)
self._AddStringPolicy(parent, policy_name)
elif element_type == AdmxElementType.MULTI_STRING:
parent = self._GetElements(policy_elem)
self._AddMultiStringPolicy(parent, policy_name)
elif element_type == AdmxElementType.INT:
parent = self._GetElements(policy_elem)
self._AddIntPolicy(parent, policy)
elif element_type == AdmxElementType.ENUM:
parent = self._GetElements(policy_elem)
self._AddEnumPolicy(parent, policy)
elif element_type == AdmxElementType.LIST:
parent = self._GetElements(policy_elem)
self._AddListPolicy(parent, key, policy_name)
elif element_type == AdmxElementType.GROUP:
pass
else:
raise Exception('Unknown element type %s.' % element_type)
def WritePolicy(self, policy):
if self.CanBeMandatory(policy):
self._WritePolicy(policy, policy['name'],
self.winconfig['reg_mandatory_key_name'],
self._active_mandatory_policy_group_name)
def WriteRecommendedPolicy(self, policy):
self._WritePolicy(policy, policy['name'] + '_recommended',
self.winconfig['reg_recommended_key_name'],
self._active_recommended_policy_group_name)
def _BeginPolicyGroup(self, group, name, parent):
'''Generates ADMX elements for a Policy-Group.
'''
attributes = {
'name': name,
'displayName': self._AdmlString(group['name'] + '_group'),
}
category_elem = self.AddElement(self._categories_elem, 'category',
attributes)
attributes = {'ref': parent}
self.AddElement(category_elem, 'parentCategory', attributes)
def BeginPolicyGroup(self, group):
self._BeginPolicyGroup(group, group['name'],
self.winconfig['mandatory_category_path'][-1])
self._active_mandatory_policy_group_name = group['name']
def EndPolicyGroup(self):
self._active_mandatory_policy_group_name = \
self.winconfig['mandatory_category_path'][-1]
def BeginRecommendedPolicyGroup(self, group):
self._BeginPolicyGroup(group, group['name'] + '_recommended',
self.winconfig['recommended_category_path'][-1])
self._active_recommended_policy_group_name = group['name'] + '_recommended'
def EndRecommendedPolicyGroup(self):
self._active_recommended_policy_group_name = \
self.winconfig['recommended_category_path'][-1]
def BeginTemplate(self):
    '''Generates the skeleton of the ADMX template. An ADMX template contains
    an ADMX "policyDefinitions" element with five child nodes: "policies",
    "policyNamespaces", "resources", "supportedOn" and "categories".
'''
dom_impl = minidom.getDOMImplementation('')
self._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
if self._GetChromiumVersionString() is not None:
self.AddComment(self._doc.documentElement, self.config['build'] + \
' version: ' + self._GetChromiumVersionString())
policy_definitions_elem = self._doc.documentElement
policy_definitions_elem.attributes['revision'] = '1.0'
policy_definitions_elem.attributes['schemaVersion'] = '1.0'
self._AddPolicyNamespaces(policy_definitions_elem,
self.config['admx_prefix'],
self.winconfig['namespace'])
self.AddElement(policy_definitions_elem, 'resources',
{'minRequiredRevision': '1.0'})
self._AddSupportedOn(
policy_definitions_elem,
[self.config['win_supported_os'], self.config['win_supported_os_win7']])
self._categories_elem = self.AddElement(policy_definitions_elem,
'categories')
self._AddCategories(self.winconfig['mandatory_category_path'])
self._AddCategories(self.winconfig['recommended_category_path'])
self._active_policies_elem = self.AddElement(policy_definitions_elem,
'policies')
self._active_mandatory_policy_group_name = \
self.winconfig['mandatory_category_path'][-1]
self._active_recommended_policy_group_name = \
self.winconfig['recommended_category_path'][-1]
def GetTemplateText(self):
return self.ToPrettyXml(self._doc)
def GetClass(self, policy):
return 'Both'
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pytest
import subprocess
import time
import ray
from ray.utils import _random_string
from ray.tests.utils import (run_and_get_output, run_string_as_driver,
run_string_as_driver_nonblocking)
def test_error_isolation(call_ray_start):
redis_address = call_ray_start
# Connect a driver to the Ray cluster.
ray.init(redis_address=redis_address)
# There shouldn't be any errors yet.
assert len(ray.errors()) == 0
error_string1 = "error_string1"
error_string2 = "error_string2"
@ray.remote
def f():
raise Exception(error_string1)
# Run a remote function that throws an error.
with pytest.raises(Exception):
ray.get(f.remote())
# Wait for the error to appear in Redis.
while len(ray.errors()) != 1:
time.sleep(0.1)
print("Waiting for error to appear.")
# Make sure we got the error.
assert len(ray.errors()) == 1
assert error_string1 in ray.errors()[0]["message"]
# Start another driver and make sure that it does not receive this
# error. Make the other driver throw an error, and make sure it
# receives that error.
driver_script = """
import ray
import time
ray.init(redis_address="{}")
time.sleep(1)
assert len(ray.errors()) == 0
@ray.remote
def f():
raise Exception("{}")
try:
ray.get(f.remote())
except Exception as e:
pass
while len(ray.errors()) != 1:
print(len(ray.errors()))
time.sleep(0.1)
assert len(ray.errors()) == 1
assert "{}" in ray.errors()[0]["message"]
print("success")
""".format(redis_address, error_string2, error_string2)
out = run_string_as_driver(driver_script)
# Make sure the other driver succeeded.
assert "success" in out
# Make sure that the other error message doesn't show up for this
# driver.
assert len(ray.errors()) == 1
assert error_string1 in ray.errors()[0]["message"]
def test_remote_function_isolation(call_ray_start):
# This test will run multiple remote functions with the same names in
# two different drivers. Connect a driver to the Ray cluster.
redis_address = call_ray_start
ray.init(redis_address=redis_address)
# Start another driver and make sure that it can define and call its
# own commands with the same names.
driver_script = """
import ray
import time
ray.init(redis_address="{}")
@ray.remote
def f():
return 3
@ray.remote
def g(x, y):
return 4
for _ in range(10000):
result = ray.get([f.remote(), g.remote(0, 0)])
assert result == [3, 4]
print("success")
""".format(redis_address)
out = run_string_as_driver(driver_script)
@ray.remote
def f():
return 1
@ray.remote
def g(x):
return 2
for _ in range(10000):
result = ray.get([f.remote(), g.remote(0)])
assert result == [1, 2]
# Make sure the other driver succeeded.
assert "success" in out
def test_driver_exiting_quickly(call_ray_start):
# This test will create some drivers that submit some tasks and then
# exit without waiting for the tasks to complete.
redis_address = call_ray_start
ray.init(redis_address=redis_address)
# Define a driver that creates an actor and exits.
driver_script1 = """
import ray
ray.init(redis_address="{}")
@ray.remote
class Foo(object):
def __init__(self):
pass
Foo.remote()
print("success")
""".format(redis_address)
# Define a driver that creates some tasks and exits.
driver_script2 = """
import ray
ray.init(redis_address="{}")
@ray.remote
def f():
return 1
f.remote()
print("success")
""".format(redis_address)
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
out = run_string_as_driver(driver_script1)
# Make sure the first driver ran to completion.
assert "success" in out
out = run_string_as_driver(driver_script2)
# Make sure the second driver ran to completion.
assert "success" in out
def test_receive_late_worker_logs():
# Make sure that log messages from tasks appear on stdout even if the
# script exits quickly.
log_message = "some helpful debugging message"
# Define a driver that creates a task that prints something, ensures that
# the task runs, and then exits.
driver_script = """
import ray
import random
import time
log_message = "{}"
@ray.remote
class Actor(object):
def log(self):
print(log_message)
@ray.remote
def f():
print(log_message)
ray.init(num_cpus=2)
a = Actor.remote()
ray.get([a.log.remote(), f.remote()])
ray.get([a.log.remote(), f.remote()])
""".format(log_message)
for _ in range(2):
out = run_string_as_driver(driver_script)
assert out.count(log_message) == 4
@pytest.mark.parametrize(
"call_ray_start", ["ray start --head --num-cpus=1 --num-gpus=1"],
indirect=True)
def test_drivers_release_resources(call_ray_start):
redis_address = call_ray_start
# Define a driver that creates an actor and exits.
driver_script1 = """
import time
import ray
ray.init(redis_address="{}")
@ray.remote
def f(duration):
time.sleep(duration)
@ray.remote(num_gpus=1)
def g(duration):
time.sleep(duration)
@ray.remote(num_gpus=1)
class Foo(object):
def __init__(self):
pass
# Make sure some resources are available for us to run tasks.
ray.get(f.remote(0))
ray.get(g.remote(0))
# Start a bunch of actors and tasks that use resources. These should all be
# cleaned up when this driver exits.
foos = [Foo.remote() for _ in range(100)]
[f.remote(10 ** 6) for _ in range(100)]
print("success")
""".format(redis_address)
driver_script2 = (driver_script1 +
"import sys\nsys.stdout.flush()\ntime.sleep(10 ** 6)\n")
def wait_for_success_output(process_handle, timeout=10):
# Wait until the process prints "success" and then return.
start_time = time.time()
while time.time() - start_time < timeout:
output_line = ray.utils.decode(
process_handle.stdout.readline()).strip()
print(output_line)
if output_line == "success":
return
raise Exception("Timed out waiting for process to print success.")
# Make sure we can run this driver repeatedly, which means that resources
# are getting released in between.
for _ in range(5):
out = run_string_as_driver(driver_script1)
# Make sure the driver ran to completion.
assert "success" in out
# Also make sure that this works when the driver exits ungracefully.
process_handle = run_string_as_driver_nonblocking(driver_script2)
wait_for_success_output(process_handle)
# Kill the process ungracefully.
process_handle.kill()
def test_calling_start_ray_head():
# Test that we can call start-ray.sh with various command line
# parameters. TODO(rkn): This test only tests the --head code path. We
# should also test the non-head node code path.
# Test starting Ray with no arguments.
run_and_get_output(["ray", "start", "--head"])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with a redis port specified.
run_and_get_output(["ray", "start", "--head", "--redis-port", "6379"])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with a node IP address specified.
run_and_get_output(
["ray", "start", "--head", "--node-ip-address", "127.0.0.1"])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with the object manager and node manager ports
# specified.
run_and_get_output([
"ray", "start", "--head", "--object-manager-port", "12345",
"--node-manager-port", "54321"
])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with the number of CPUs specified.
run_and_get_output(["ray", "start", "--head", "--num-cpus", "2"])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with the number of GPUs specified.
run_and_get_output(["ray", "start", "--head", "--num-gpus", "100"])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with the max redis clients specified.
run_and_get_output(
["ray", "start", "--head", "--redis-max-clients", "100"])
subprocess.Popen(["ray", "stop"]).wait()
if "RAY_USE_NEW_GCS" not in os.environ:
# Test starting Ray with redis shard ports specified.
run_and_get_output([
"ray", "start", "--head", "--redis-shard-ports", "6380,6381,6382"
])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with all arguments specified.
run_and_get_output([
"ray", "start", "--head", "--redis-port", "6379",
"--redis-shard-ports", "6380,6381,6382", "--object-manager-port",
"12345", "--num-cpus", "2", "--num-gpus", "0",
"--redis-max-clients", "100", "--resources", "{\"Custom\": 1}"
])
subprocess.Popen(["ray", "stop"]).wait()
# Test starting Ray with invalid arguments.
with pytest.raises(Exception):
run_and_get_output(
["ray", "start", "--head", "--redis-address", "127.0.0.1:6379"])
subprocess.Popen(["ray", "stop"]).wait()
@pytest.mark.parametrize(
"call_ray_start", [
"ray start --head --num-cpus=1 " +
"--node-ip-address=localhost --redis-port=6379"
],
indirect=True)
def test_using_hostnames(call_ray_start):
ray.init(node_ip_address="localhost", redis_address="localhost:6379")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_connecting_in_local_case(ray_start_regular):
address_info = ray_start_regular
# Define a driver that just connects to Redis.
driver_script = """
import ray
ray.init(redis_address="{}")
print("success")
""".format(address_info["redis_address"])
out = run_string_as_driver(driver_script)
# Make sure the other driver succeeded.
assert "success" in out
def test_run_driver_twice(ray_start_regular):
# We used to have issue 2165 and 2288:
# https://github.com/ray-project/ray/issues/2165
# https://github.com/ray-project/ray/issues/2288
# Both complain that the driver hangs when run a second time. This test
# verifies the fix for those issues: it runs the same driver twice and
# checks that both runs succeed.
address_info = ray_start_regular
driver_script = """
import ray
import ray.tune as tune
import os
import time
def train_func(config, reporter): # add a reporter arg
for i in range(2):
time.sleep(0.1)
reporter(timesteps_total=i, mean_accuracy=i+97) # report metrics
os.environ["TUNE_RESUME_PROMPT_OFF"] = "True"
ray.init(redis_address="{}")
ray.tune.register_trainable("train_func", train_func)
tune.run_experiments({{
"my_experiment": {{
"run": "train_func",
"stop": {{"mean_accuracy": 99}},
"config": {{
"layer1": {{
"class_name": tune.grid_search(["a"]),
"config": {{"lr": tune.grid_search([1, 2])}}
}},
}},
"local_dir": os.path.expanduser("~/tmp")
}}
}})
print("success")
""".format(address_info["redis_address"])
for i in range(2):
out = run_string_as_driver(driver_script)
assert "success" in out
def test_driver_exiting_when_worker_blocked(call_ray_start):
# This test will create some drivers that submit some tasks and then
# exit without waiting for the tasks to complete.
redis_address = call_ray_start
ray.init(redis_address=redis_address)
# Define a driver that creates two tasks, one that runs forever and the
# other blocked on the first.
driver_script = """
import time
import ray
ray.init(redis_address="{}")
@ray.remote
def f():
time.sleep(10**6)
@ray.remote
def g():
ray.get(f.remote())
g.remote()
time.sleep(1)
print("success")
""".format(redis_address)
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
out = run_string_as_driver(driver_script)
# Make sure the driver ran to completion.
assert "success" in out
nonexistent_id_bytes = _random_string()
nonexistent_id_hex = ray.utils.binary_to_hex(nonexistent_id_bytes)
# Define a driver that creates one task that depends on a nonexistent
# object. This task will be queued as waiting to execute.
driver_script = """
import time
import ray
ray.init(redis_address="{}")
@ray.remote
def g(x):
return
g.remote(ray.ObjectID(ray.utils.hex_to_binary("{}")))
time.sleep(1)
print("success")
""".format(redis_address, nonexistent_id_hex)
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
out = run_string_as_driver(driver_script)
# Simulate the nonexistent dependency becoming available.
ray.worker.global_worker.put_object(
ray.ObjectID(nonexistent_id_bytes), None)
# Make sure the driver ran to completion.
assert "success" in out
@ray.remote
def f():
return 1
# Make sure we can still talk with the raylet.
ray.get(f.remote())
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseReshapeTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_2x3x4(self):
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
[1, 1, 3], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6())
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reshape(sp_input, shape=(1, 5, 2, 3))
self.assertAllEqual((1, 5, 2, 3), sp_output.get_shape())
def testSameShape(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testFeedSameShape(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testWorksWellWithTfShape(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
shape = array_ops.shape(sp_input) # tf.shape generates int32 output
sp_output = sparse_ops.sparse_reshape(sp_input, shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testFeedSameShapeWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testFeedNewShapeSameRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testFeedNewShapeSameRankWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testUpRank(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
def testFeedUpRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
def testFeedUpRankWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
def testFeedDownRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
def testFeedDownRankWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
def testFeedMultipleInferredDims(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
with self.assertRaisesOpError("only one output dimension may be -1"):
sess.run(sp_output, {sp_input: input_val})
def testProvideStaticallyMismatchedSizes(self):
input_val = self._SparseTensorValue_5x6()
sp_input = sparse_tensor.SparseTensor.from_value(input_val)
with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
sparse_ops.sparse_reshape(sp_input, [4, 7])
def testFeedMismatchedSizes(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
with self.assertRaisesOpError(
"Input to reshape is a tensor with 30 dense values"):
sess.run(sp_output, {sp_input: input_val})
def testFeedMismatchedSizesWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
with self.assertRaisesOpError("requested shape requires a multiple"):
sess.run(sp_output, {sp_input: input_val})
def testFeedPartialShapes(self):
with self.test_session(use_gpu=False):
# Incorporate new rank into shape information if known
sp_input = self._SparseTensorPlaceholder()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Incorporate known shape information about input indices in output
# indices
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Even if new_shape has no shape information, we know the ranks of
# output indices and shape
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
new_shape = array_ops.placeholder(dtypes.int64)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])
def testFeedDenseReshapeSemantics(self):
with self.test_session(use_gpu=False) as sess:
# Compute a random initial shape and new shape (each with rank between 2
# and 6), randomly sparsify the data, and check that the output of
# SparseReshape has the same semantics as a dense reshape.
factors = np.array([2] * 4 + [3] * 4 + [5] * 4) # 810k total elements
orig_rank = np.random.randint(2, 7)
orig_map = np.random.randint(orig_rank, size=factors.shape)
orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]
new_rank = np.random.randint(2, 7)
new_map = np.random.randint(new_rank, size=factors.shape)
new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]
orig_dense = np.random.uniform(size=orig_shape)
orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
orig_values = orig_dense[orig_dense < 0.5]
new_dense = np.reshape(orig_dense, new_shape)
new_indices = np.transpose(np.nonzero(new_dense < 0.5))
new_values = new_dense[new_dense < 0.5]
sp_input = self._SparseTensorPlaceholder()
input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
orig_shape)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, new_indices)
self.assertAllEqual(output_val.values, new_values)
self.assertAllEqual(output_val.dense_shape, new_shape)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2012 Vaibhav Bajpai
# Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import getpass
import os
import re
import six
import sys
import socket
import threading
from binascii import hexlify
try:
import selectors
except ImportError:
import selectors2 as selectors
from ncclient.capabilities import Capabilities
from ncclient.logging_ import SessionLoggerAdapter
import paramiko
from ncclient.transport.errors import AuthenticationError, SessionCloseError, SSHError, SSHUnknownHostError, NetconfFramingError
from ncclient.transport.session import Session
from ncclient.transport.session import NetconfBase
import logging
logger = logging.getLogger("ncclient.transport.ssh")
PORT_NETCONF_DEFAULT = 830
PORT_SSH_DEFAULT = 22
BUF_SIZE = 4096
# v1.0: RFC 4742
MSG_DELIM = "]]>]]>"
MSG_DELIM_LEN = len(MSG_DELIM)
# v1.1: RFC 6242
END_DELIM = '\n##\n'
TICK = 0.1
#
# Define delimiters for chunks and messages for NETCONF 1.1 chunk encoding.
# When matched:
#
# * result.group(0) will contain whole matched string
# * result.group(1) will contain the digit string for a chunk
# * result.group(2) will be defined if '##' is found
#
RE_NC11_DELIM = re.compile(br'\n(?:#([0-9]+)|(##))\n')
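# Illustrative sketch (not part of the original module): how RE_NC11_DELIM
# behaves on the two delimiter forms defined by RFC 6242.
#
#   >>> RE_NC11_DELIM.match(b'\n#42\n').groups()
#   (b'42', None)       # chunk header announcing a 42-byte chunk
#   >>> RE_NC11_DELIM.match(b'\n##\n').groups()
#   (None, b'##')       # end-of-chunks marker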
def default_unknown_host_cb(host, fingerprint):
"""An unknown host callback returns `True` if it finds the key acceptable, and `False` if not.
This default callback always returns `False`, which would lead to :meth:`connect` raising an :exc:`SSHUnknownHostError` exception.
Supply another valid callback if you need to verify the host key programmatically.
*host* is the hostname that needs to be verified
*fingerprint* is a hex string representing the host key fingerprint, colon-delimited e.g. `"4b:69:6c:72:6f:79:20:77:61:73:20:68:65:72:65:21"`
"""
return False
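# A minimal sketch of a custom unknown-host callback (an assumption for
# illustration, not part of the original module): accept the server only if
# its fingerprint matches a value pinned by the caller.
def make_pinned_fingerprint_cb(expected_fingerprint):
    """Return an unknown-host callback that accepts exactly one fingerprint."""
    def callback(host, fingerprint):
        # Accept the host key only when its fingerprint matches the pinned one.
        return fingerprint == expected_fingerprint
    return callback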
def _colonify(fp):
fp = fp.decode('UTF-8')
finga = fp[:2]
for idx in range(2, len(fp), 2):
finga += ":" + fp[idx:idx+2]
return finga
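# Illustrative example (sketch): given the hexlified fingerprint
# b'4b696c726f79', _colonify returns '4b:69:6c:72:6f:79'.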
if sys.version < '3':
def textify(buf):
return buf
else:
def textify(buf):
return buf.decode('UTF-8')
if sys.version < '3':
from six import StringIO
else:
from io import BytesIO as StringIO
class SSHSession(Session):
"Implements a :rfc:`4742` NETCONF session over SSH."
def __init__(self, device_handler):
capabilities = Capabilities(device_handler.get_capabilities())
Session.__init__(self, capabilities)
self._host = None
self._host_keys = paramiko.HostKeys()
self._transport = None
self._connected = False
self._channel = None
self._channel_id = None
self._channel_name = None
self._buffer = StringIO()
# parsing-related, see _parse()
self._device_handler = device_handler
self._parsing_state10 = 0
self._parsing_pos10 = 0
self._parsing_pos11 = 0
self._parsing_state11 = 0
self._expchunksize = 0
self._curchunksize = 0
self._inendpos = 0
self._size_num_list = []
self._message_list = []
self._closing = threading.Event()
self.logger = SessionLoggerAdapter(logger, {'session': self})
def _dispatch_message(self, raw):
self.logger.info("Received:\n%s", raw)
return super(SSHSession, self)._dispatch_message(raw)
def _parse(self):
"Messages ae delimited by MSG_DELIM. The buffer could have grown by a maximum of BUF_SIZE bytes everytime this method is called. Retains state across method calls and if a byte has been read it will not be considered again."
return self._parse10()
def _parse10(self):
"""Messages are delimited by MSG_DELIM. The buffer could have grown by
a maximum of BUF_SIZE bytes everytime this method is called. Retains
state across method calls and if a chunk has been read it will not be
considered again."""
self.logger.debug("parsing netconf v1.0")
buf = self._buffer
buf.seek(self._parsing_pos10)
if MSG_DELIM in buf.read().decode('UTF-8'):
buf.seek(0)
msg, _, remaining = buf.read().decode('UTF-8').partition(MSG_DELIM)
msg = msg.strip()
if sys.version < '3':
self._dispatch_message(msg.encode())
else:
self._dispatch_message(msg)
# create new buffer which contains remaining of old buffer
self._buffer = StringIO()
self._buffer.write(remaining.encode())
self._parsing_pos10 = 0
if len(remaining) > 0:
# There could be another entire message in the
# buffer, so we should try to parse again.
self.logger.debug('Trying another round of parsing since there is still data')
self._parse10()
else:
# handle case that MSG_DELIM is split over two chunks
self._parsing_pos10 = buf.tell() - MSG_DELIM_LEN
if self._parsing_pos10 < 0:
self._parsing_pos10 = 0
def _parse11(self):
"""Messages are split into chunks. Chunks and messages are delimited
by the regex #RE_NC11_DELIM defined earlier in this file. Each
time we get called here either a chunk delimiter or an
end-of-message delimiter should be found iff there is enough
data. If there is not enough data, we will wait for more. If a
delimiter is found in the wrong place, a #NetconfFramingError
will be raised."""
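# On-the-wire sketch of the chunked framing handled here (illustrative
# only): an 8-byte payload "<hello/>" sent as a single chunk arrives as
#
#   \n#8\n<hello/>\n##\n
#
# i.e. a chunk-size header, the chunk data, and the end-of-chunks marker
# that terminates the message.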
self.logger.debug("_parse11: starting")
# suck in whole string that we have (this is what we will work on in
# this function) and initialize a couple of useful values
self._buffer.seek(0, os.SEEK_SET)
data = self._buffer.getvalue()
data_len = len(data)
start = 0
self.logger.debug('_parse11: working with buffer of %d bytes', data_len)
while start < data_len:
# match to see if we found at least some kind of delimiter
self.logger.debug('_parse11: matching from %d bytes from start of buffer', start)
re_result = RE_NC11_DELIM.match(data[start:])
if not re_result:
# not found any kind of delimiter just break; this should only
# ever happen if we just have the first few characters of a
# message such that we don't yet have a full delimiter
self.logger.debug('_parse11: no delimiter found, buffer="%s"', data[start:].decode())
break
# save useful variables for reuse
re_start = re_result.start()
re_end = re_result.end()
self.logger.debug('_parse11: regular expression start=%d, end=%d', re_start, re_end)
# If the regex doesn't start at the beginning of the buffer,
# we're in trouble, so throw an error
if re_start != 0:
raise NetconfFramingError('_parse11: delimiter not at start of match buffer', data[start:])
if re_result.group(2):
# we've found the end of the message, need to form up
# whole message, save back remainder (if any) to buffer
# and dispatch the message
start += re_end
message = ''.join(self._message_list)
self._message_list = []
self.logger.debug('_parse11: found end of message delimiter')
self._dispatch_message(message)
break
elif re_result.group(1):
# we've found a chunk delimiter, and group(1) is the digit
# string telling us how many bytes must be available past the
# end of the delimiter before we can save off the next chunk
self.logger.debug('_parse11: found chunk delimiter')
digits = int(re_result.group(1))
self.logger.debug('_parse11: chunk size %d bytes', digits)
if (data_len-start) >= (re_end + digits):
# we have enough data for the chunk
fragment = textify(data[start+re_end:start+re_end+digits])
self._message_list.append(fragment)
start += re_end + digits
self.logger.debug('_parse11: appending %d bytes', digits)
self.logger.debug('_parse11: fragment = "%s"', fragment)
else:
# we don't have enough bytes, just break out for now
# after updating start pointer to start of new chunk
start += re_start
self.logger.debug('_parse11: not enough data for chunk yet')
self.logger.debug('_parse11: setting start to %d', start)
break
# Now out of the loop, need to see if we need to save back any content
if start > 0:
self.logger.debug(
'_parse11: saving back rest of message after %d bytes, original size %d',
start, data_len)
self._buffer = StringIO(data[start:])
if start < data_len:
self.logger.debug('_parse11: still have data, may have another full message!')
self._parse11()
self.logger.debug('_parse11: ending')
def load_known_hosts(self, filename=None):
"""Load host keys from an openssh :file:`known_hosts`-style file. Can
be called multiple times.
If *filename* is not specified, looks in the default locations, i.e. :file:`~/.ssh/known_hosts` and, on Windows, :file:`~/ssh/known_hosts`.
"""
if filename is None:
filename = os.path.expanduser('~/.ssh/known_hosts')
try:
self._host_keys.load(filename)
except IOError:
# for windows
filename = os.path.expanduser('~/ssh/known_hosts')
try:
self._host_keys.load(filename)
except IOError:
pass
else:
self._host_keys.load(filename)
def close(self):
self._closing.set()
if self._transport.is_active():
self._transport.close()
# Wait for the transport thread to close.
while self.is_alive() and (self is not threading.current_thread()):
self.join(10)
if self._channel:
self._channel.close()
self._channel = None
self._connected = False
# REMEMBER to update transport.rst if sig. changes, since it is hardcoded there
def connect(
self,
host,
port = PORT_NETCONF_DEFAULT,
timeout = None,
unknown_host_cb = default_unknown_host_cb,
username = None,
password = None,
key_filename = None,
allow_agent = True,
hostkey_verify = True,
hostkey_b64 = None,
look_for_keys = True,
ssh_config = None,
sock_fd = None,
bind_addr = None):
"""Connect via SSH and initialize the NETCONF session. First attempts the publickey authentication method and then password authentication.
To disable attempting publickey authentication altogether, call with *allow_agent* and *look_for_keys* as `False`.
*host* is the hostname or IP address to connect to
*port* is by default 830 (PORT_NETCONF_DEFAULT), but some devices use the default SSH port of 22 (PORT_SSH_DEFAULT) so this may need to be specified
*timeout* is an optional timeout for socket connect
*unknown_host_cb* is called when the server host key is not recognized. It takes two arguments, the hostname and the fingerprint (see the signature of :func:`default_unknown_host_cb`)
*username* is the username to use for SSH authentication
*password* is the password used if using password authentication, or the passphrase to use for unlocking keys that require it
*key_filename* is a filename where the private key to be used can be found
*allow_agent* enables querying SSH agent (if found) for keys
*hostkey_verify* enables hostkey verification from ~/.ssh/known_hosts
*hostkey_b64* only connect when server presents a public hostkey matching this (obtain from server /etc/ssh/ssh_host_*pub or ssh-keyscan)
*look_for_keys* enables looking in the usual locations for ssh keys (e.g. :file:`~/.ssh/id_*`)
*ssh_config* enables parsing of an OpenSSH configuration file, if set to its path, e.g. :file:`~/.ssh/config` or to True (in this case, use :file:`~/.ssh/config`).
*sock_fd* is an already open socket which shall be used for this connection. Useful for NETCONF outbound ssh. Use host=None together with a valid sock_fd number
*bind_addr* is a (local) source IP address to use, must be reachable from the remote device.
"""
if not (host or sock_fd):
raise SSHError("Missing host or socket fd")
self._host = host
# Optionally, parse .ssh/config
config = {}
if ssh_config is True:
ssh_config = "~/.ssh/config" if sys.platform != "win32" else "~/ssh/config"
if ssh_config is not None:
config = paramiko.SSHConfig()
with open(os.path.expanduser(ssh_config)) as ssh_config_file_obj:
config.parse(ssh_config_file_obj)
# Save default Paramiko SSH port so it can be reverted
paramiko_default_ssh_port = paramiko.config.SSH_PORT
# Change the default SSH port to the port specified by the user so expand_variables
# replaces %p with the passed-in port rather than 22 (the default paramiko.config.SSH_PORT)
paramiko.config.SSH_PORT = port
config = config.lookup(host)
# paramiko.config.SSHConfig.expand_variables is called by lookup, so we can now set the
# SSH port back to the default
paramiko.config.SSH_PORT = paramiko_default_ssh_port
host = config.get("hostname", host)
if username is None:
username = config.get("user")
if key_filename is None:
key_filename = config.get("identityfile")
if hostkey_verify:
userknownhostsfile = config.get("userknownhostsfile")
if userknownhostsfile:
self.load_known_hosts(os.path.expanduser(userknownhostsfile))
if timeout is None:
timeout = config.get("connecttimeout")
if timeout:
timeout = int(timeout)
if username is None:
username = getpass.getuser()
if sock_fd is None:
proxycommand = config.get("proxycommand")
if proxycommand:
self.logger.debug("Configuring Proxy. %s", proxycommand)
if not isinstance(proxycommand, six.string_types):
proxycommand = [os.path.expanduser(elem) for elem in proxycommand]
else:
proxycommand = os.path.expanduser(proxycommand)
sock = paramiko.proxy.ProxyCommand(proxycommand)
else:
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
except socket.error:
continue
try:
if bind_addr:
sock.bind((bind_addr, 0))
sock.connect(sa)
except socket.error:
sock.close()
continue
break
else:
raise SSHError("Could not open socket to %s:%s" % (host, port))
else:
if sys.version_info[0] < 3:
s = socket.fromfd(int(sock_fd), socket.AF_INET, socket.SOCK_STREAM)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, _sock=s)
else:
sock = socket.fromfd(int(sock_fd), socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
self._transport = paramiko.Transport(sock)
self._transport.set_log_channel(logger.name)
if config.get("compression") == 'yes':
self._transport.use_compression()
if hostkey_b64:
# If we need to connect with a specific hostkey, negotiate for only its type
hostkey_obj = None
for key_cls in [paramiko.DSSKey, paramiko.Ed25519Key, paramiko.RSAKey, paramiko.ECDSAKey]:
try:
hostkey_obj = key_cls(data=base64.b64decode(hostkey_b64))
except paramiko.SSHException:
# Not a key of this type - try the next
pass
if not hostkey_obj:
# We've tried all known host key types and haven't found a suitable one to use - bail
raise SSHError("Couldn't find suitable paramiko key class for host key %s" % hostkey_b64)
self._transport._preferred_keys = [hostkey_obj.get_name()]
elif self._host_keys:
# Else set preferred host keys to those we possess for the host
# (avoids situation where known_hosts contains a valid key for the host, but that key type is not selected during negotiation)
if port == PORT_SSH_DEFAULT:
known_hosts_lookup = host
else:
known_hosts_lookup = '[%s]:%s' % (host, port)
known_host_keys_for_this_host = self._host_keys.lookup(known_hosts_lookup)
if known_host_keys_for_this_host:
self._transport._preferred_keys = [x.key.get_name() for x in known_host_keys_for_this_host._entries]
# Connect
try:
self._transport.start_client()
except paramiko.SSHException as e:
raise SSHError('Negotiation failed: %s' % e)
server_key_obj = self._transport.get_remote_server_key()
fingerprint = _colonify(hexlify(server_key_obj.get_fingerprint()))
if hostkey_verify:
is_known_host = False
# For looking up known_hosts entries for nonstandard ssh ports (i.e. not 22)
# we enclose the host in brackets and append the port number
if port == PORT_SSH_DEFAULT:
known_hosts_lookup = host
else:
known_hosts_lookup = '[%s]:%s' % (host, port)
if hostkey_b64:
# If hostkey specified, remote host /must/ use that hostkey
if(hostkey_obj.get_name() == server_key_obj.get_name() and hostkey_obj.asbytes() == server_key_obj.asbytes()):
is_known_host = True
else:
# Check known_hosts
is_known_host = self._host_keys.check(known_hosts_lookup, server_key_obj)
if not is_known_host and not unknown_host_cb(host, fingerprint):
raise SSHUnknownHostError(known_hosts_lookup, fingerprint)
# Authenticating with our private key/identity
if key_filename is None:
key_filenames = []
elif isinstance(key_filename, (str, bytes)):
key_filenames = [key_filename]
else:
key_filenames = key_filename
self._auth(username, password, key_filenames, allow_agent, look_for_keys)
self._connected = True # there was no error authenticating
self._closing.clear()
# TODO: leopoul: Review, test, and if needed rewrite this part
subsystem_names = self._device_handler.get_ssh_subsystem_names()
for subname in subsystem_names:
self._channel = self._transport.open_session()
self._channel_id = self._channel.get_id()
channel_name = "%s-subsystem-%s" % (subname, str(self._channel_id))
self._channel.set_name(channel_name)
try:
self._channel.invoke_subsystem(subname)
except paramiko.SSHException as e:
self.logger.info("%s (subsystem request rejected)", e)
handle_exception = self._device_handler.handle_connection_exceptions(self)
# Ignore the exception and keep trying the remaining subsystem
# names until we find one that can connect; vendor-specific
# exception handling is delegated to the device handler above.
if not handle_exception:
continue
self._channel_name = self._channel.get_name()
self._post_connect()
return
raise SSHError("Could not open connection, possibly due to unacceptable"
" SSH subsystem name.")
def _auth(self, username, password, key_filenames, allow_agent,
look_for_keys):
saved_exception = None
for key_filename in key_filenames:
for cls in (paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey):
try:
key = cls.from_private_key_file(key_filename, password)
self.logger.debug("Trying key %s from %s",
hexlify(key.get_fingerprint()),
key_filename)
self._transport.auth_publickey(username, key)
return
except Exception as e:
saved_exception = e
self.logger.debug(e)
if allow_agent:
for key in paramiko.Agent().get_keys():
try:
self.logger.debug("Trying SSH agent key %s",
hexlify(key.get_fingerprint()))
self._transport.auth_publickey(username, key)
return
except Exception as e:
saved_exception = e
self.logger.debug(e)
keyfiles = []
if look_for_keys:
rsa_key = os.path.expanduser("~/.ssh/id_rsa")
dsa_key = os.path.expanduser("~/.ssh/id_dsa")
ecdsa_key = os.path.expanduser("~/.ssh/id_ecdsa")
if os.path.isfile(rsa_key):
keyfiles.append((paramiko.RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((paramiko.DSSKey, dsa_key))
if os.path.isfile(ecdsa_key):
keyfiles.append((paramiko.ECDSAKey, ecdsa_key))
# look in ~/ssh/ for windows users:
rsa_key = os.path.expanduser("~/ssh/id_rsa")
dsa_key = os.path.expanduser("~/ssh/id_dsa")
ecdsa_key = os.path.expanduser("~/ssh/id_ecdsa")
if os.path.isfile(rsa_key):
keyfiles.append((paramiko.RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((paramiko.DSSKey, dsa_key))
if os.path.isfile(ecdsa_key):
keyfiles.append((paramiko.ECDSAKey, ecdsa_key))
for cls, filename in keyfiles:
try:
key = cls.from_private_key_file(filename, password)
self.logger.debug("Trying discovered key %s in %s",
hexlify(key.get_fingerprint()), filename)
self._transport.auth_publickey(username, key)
return
except Exception as e:
saved_exception = e
self.logger.debug(e)
if password is not None:
try:
self._transport.auth_password(username, password)
return
except Exception as e:
saved_exception = e
self.logger.debug(e)
if saved_exception is not None:
# need pep-3134 to do this right
raise AuthenticationError(repr(saved_exception))
raise AuthenticationError("No authentication methods available")
def run(self):
chan = self._channel
q = self._q
def start_delim(data_len): return '\n#%s\n' % (data_len)
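# e.g. (sketch) start_delim(8) returns '\n#8\n', so an 8-byte payload sent
# over a NETCONF 1.1 session is framed as '\n#8\n' + payload + END_DELIM below.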
try:
s = selectors.DefaultSelector()
s.register(chan, selectors.EVENT_READ)
self.logger.debug('selector type = %s', s.__class__.__name__)
while True:
# Wake up every TICK seconds to check whether there is something
# to send, or sooner if there is something to read (select returns
# chan in the readable list).
events = s.select(timeout=TICK)
if events:
data = chan.recv(BUF_SIZE)
if data:
self._buffer.seek(0, os.SEEK_END)
self._buffer.write(data)
if self._base == NetconfBase.BASE_11:
self._parse11()
else:
self._parse10()
elif self._closing.is_set():
# End of session, expected
break
else:
# End of session, unexpected
raise SessionCloseError(self._buffer.getvalue())
if not q.empty() and chan.send_ready():
self.logger.debug("Sending message")
data = q.get()
if self._base == NetconfBase.BASE_11:
data = "%s%s%s" % (start_delim(len(data)), data, END_DELIM)
else:
data = "%s%s" % (data, MSG_DELIM)
self.logger.info("Sending:\n%s", data)
while data:
n = chan.send(data)
if n <= 0:
raise SessionCloseError(self._buffer.getvalue(), data)
data = data[n:]
except Exception as e:
self.logger.debug("Broke out of main loop, error=%r", e)
self._dispatch_error(e)
self.close()
@property
def host(self):
"""Host this session is connected to, or None if not connected."""
if hasattr(self, '_host'):
return self._host
return None
@property
def transport(self):
"Underlying `paramiko.Transport <http://www.lag.net/paramiko/docs/paramiko.Transport-class.html>`_ object. This makes it possible to call methods like :meth:`~paramiko.Transport.set_keepalive` on it."
return self._transport
|
|
import sys
import time
import unittest
import mock
from promstats import Stats, _PY_35
version_info = sys.version_info
def _patch_url_opener(test_case):
"""A shortcut for patching URL opener on different Py versions."""
if version_info.major >= 3:
patch_module = 'urllib.request.OpenerDirector.open'
else:
patch_module = 'urllib2.OpenerDirector.open'
return mock.patch(patch_module)(test_case)
class StatsTestCase(unittest.TestCase):
def _assertMetricsCollected(self, metric_instance, etalon_data):
"""Verify the collected metrics against etalon data."""
collected = metric_instance.collect()[0]
collected_samples = collected.samples
for metric_value in etalon_data:
self.assertIn(metric_value, collected_samples)
def test_increment(self):
stats = Stats('greasyspoon')
stats.increment('bacon', tags=['spam:eggs', 'scrambled_eggs:ham'])
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(bacon_metric._type, 'counter')
self.assertSetEqual(
set(bacon_metric._labelnames), {'scrambled_eggs', 'spam'})
self._assertMetricsCollected(
bacon_metric,
[('bacon', {'scrambled_eggs': 'ham', 'spam': 'eggs'}, 1.0)]
)
def test_increment_value(self):
stats = Stats('greasyspoon')
stats.increment(
'bacon', value=9, tags=['spam:eggs', 'scrambled_eggs:ham'])
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(bacon_metric._type, 'counter')
self.assertSetEqual(
set(bacon_metric._labelnames), {'scrambled_eggs', 'spam'})
self._assertMetricsCollected(
bacon_metric,
[('bacon', {'scrambled_eggs': 'ham', 'spam': 'eggs'}, 9.0)]
)
def test_increment_wrong_tag(self):
stats = Stats('greasyspoon')
with mock.patch('promstats._util.logger.warning') as warn:
stats.increment('bacon', ['spam:eggs', 'a'])
warn.assert_called_with(
'Your tag %s is not in proper format, skipping it.'
' It should be a semicolon-delimited string,'
' representing a key-value pair.',
'a',
)
def test_gauge(self):
stats = Stats('greasyspoon')
stats.gauge(
'bacon', 2.71828, ['spam:eggs', 'scrambled_eggs:ham'])
stats.gauge(
'bacon', 3.14159, ['spam:sausage', 'scrambled_eggs:eggs'])
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(bacon_metric._type, 'gauge')
self.assertSetEqual(
set(bacon_metric._labelnames), {'scrambled_eggs', 'spam'})
self._assertMetricsCollected(
bacon_metric,
[
(
'bacon',
{'scrambled_eggs': 'ham', 'spam': 'eggs'}, 2.71828),
(
'bacon',
{'scrambled_eggs': 'eggs', 'spam': 'sausage'}, 3.14159),
],
)
def test_gauge_wo_tags(self):
stats = Stats('greasyspoon')
stats.gauge('bacon', 2.71828)
bacon_metric = stats.metrics.get('bacon')
self._assertMetricsCollected(bacon_metric, [('bacon', {}, 2.71828)])
def test_summary(self):
stats = Stats('greasyspoon')
stats.summary(
'bacon',
2.71828,
['spam:eggs', 'scrambled_eggs:ham'],
)
stats.summary(
'bacon',
3.14159,
['spam:sausage', 'scrambled_eggs:eggs'],
)
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(bacon_metric._type, 'summary')
self.assertSetEqual(
set(bacon_metric._labelnames), {'scrambled_eggs', 'spam'})
self._assertMetricsCollected(
bacon_metric,
[
(
'bacon_count',
{'scrambled_eggs': 'eggs', 'spam': 'sausage'},
1.0,
),
(
'bacon_sum',
{'scrambled_eggs': 'eggs', 'spam': 'sausage'},
3.14159,
),
(
'bacon_count',
{'scrambled_eggs': 'ham', 'spam': 'eggs'},
1.0,
),
(
'bacon_sum',
{'scrambled_eggs': 'ham', 'spam': 'eggs'},
2.71828,
),
],
)
def test_histogram(self):
stats = Stats('greasyspoon')
stats.histogram(
'bacon', 2.71828, ['spam:eggs', 'scrambled_eggs:ham'])
stats.histogram(
'bacon', 3.14159, ['spam:sausage', 'scrambled_eggs:eggs'])
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(
bacon_metric._type, 'histogram')
self.assertSetEqual(
set(bacon_metric._labelnames), {'scrambled_eggs', 'spam'})
self._assertMetricsCollected(
bacon_metric,
[
('bacon_bucket',
{'le': '0.005', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.01', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.025', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.05', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.075', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.1', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.25', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.5', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '0.75', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '1.0', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '2.5', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
0.0),
('bacon_bucket',
{'le': '5.0', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
1.0),
('bacon_bucket',
{'le': '7.5', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
1.0),
('bacon_bucket',
{'le': '10.0', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
1.0),
('bacon_bucket',
{'le': '+Inf', 'spam': 'sausage', 'scrambled_eggs': 'eggs'},
1.0),
('bacon_count', {'spam': 'sausage', 'scrambled_eggs': 'eggs'},
1.0),
('bacon_sum', {'spam': 'sausage', 'scrambled_eggs': 'eggs'},
3.14159),
('bacon_bucket',
{'le': '0.005', 'spam': 'eggs', 'scrambled_eggs': 'ham'},
0.0),
('bacon_bucket',
{'le': '0.01', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '0.025', 'spam': 'eggs', 'scrambled_eggs': 'ham'},
0.0),
('bacon_bucket',
{'le': '0.05', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '0.075', 'spam': 'eggs', 'scrambled_eggs': 'ham'},
0.0),
('bacon_bucket',
{'le': '0.1', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '0.25', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '0.5', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '0.75', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '1.0', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '2.5', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 0.0),
('bacon_bucket',
{'le': '5.0', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 1.0),
('bacon_bucket',
{'le': '7.5', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 1.0),
('bacon_bucket',
{'le': '10.0', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 1.0),
('bacon_bucket',
{'le': '+Inf', 'spam': 'eggs', 'scrambled_eggs': 'ham'}, 1.0),
('bacon_count', {'spam': 'eggs', 'scrambled_eggs': 'ham'},
1.0),
('bacon_sum', {'spam': 'eggs', 'scrambled_eggs': 'ham'},
2.71828)]
)
def test_timed(self):
stats = Stats('greasyspoon')
@stats.timed(
'bacon',
['sausage:eggs', 'ham:scrambled_eggs'],
verbose_name='Number of eggs in a basket.')
def measured():
time.sleep(1)
measured()
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(bacon_metric._type, 'histogram')
self.assertSetEqual(set(bacon_metric._labelnames), {'sausage', 'ham'})
# The last entry in samples - a concrete timing.
self.assertGreater(bacon_metric.collect()[0].samples[-1][-1], 1)
def test_timed_repr(self):
stats = Stats('greasyspoon')
timer = stats.timed(
'bacon', ['sausage:eggs', 'ham:scrambled_eggs'],
verbose_name='Number of eggs in a basket.')
self.assertEqual(repr(timer), 'Timer')
timer.__enter__()
self.assertRegexpMatches(repr(timer), 'Timer<([0-9.e+-]+)>')
def func():
pass # pragma: no cover
timer(func)
self.assertRegexpMatches(repr(timer), r'Timer<([0-9.e+-]+)>\[func\]')
def test_timed_context(self):
stats = Stats('greasyspoon')
ret_value = mock.Mock()
def measured():
time.sleep(1)
return ret_value
with stats.timed(
'bacon',
['sausage:eggs', 'ham:scrambled_eggs'],
verbose_name='Number of eggs in a basket.'):
res = measured()
self.assertIs(res, ret_value)
bacon_metric = stats.metrics.get('bacon')
self.assertEqual(bacon_metric._type, 'histogram')
self.assertSetEqual(set(bacon_metric._labelnames), {'sausage', 'ham'})
# The last entry in samples - a concrete timing.
self.assertGreater(bacon_metric.collect()[0].samples[-1][-1], 1)
def test_push_custom_handler(self):
pushgateway_handler = mock.MagicMock()
stats = Stats('greasyspoon', pushgateway_handler=pushgateway_handler)
stats.increment(
'sausage',
['bacon:scrambled_eggs'],
verbose_name='Name of sausages eat.',
)
stats.push()
pushgateway_handler.assert_called_once_with(
data=b'# HELP sausage Name of sausages eat.\n'
b'# TYPE sausage counter\n'
b'sausage{bacon="scrambled_eggs"} 1.0\n',
headers=[
(
'Content-Type',
'text/plain; version=0.0.4; charset=utf-8',
),
],
method='POST',
timeout=None,
url='http://localhost:9112/metrics/job/greasyspoon',
)
@_patch_url_opener
def test_push_custom_uri(self, request_open):
mocked_response = mock.MagicMock()
mocked_response.code = 200
request_open.return_value = mocked_response
stats = Stats(
'greasyspoon',
pushgateway_uri='http://pushgateway:2000/',
)
stats.increment(
'sausage',
['bacon:scrambled_eggs'],
verbose_name='Name of sausages eat.',
)
stats.push()
self.assertEqual(
request_open.call_args[0][0].get_full_url(),
'http://pushgateway:2000//metrics/job/greasyspoon'
)
@_patch_url_opener
def test_no_tags_given_to_histogram(self, _):
"""Tests against the bug fixed in 0.1.1"""
stats = Stats('greasyspoon')
stats.histogram('sausage', 1.0)
self.assertIn('sausage', stats.metrics)
@_patch_url_opener
def test_pushadd(self, request_open):
mocked_response = mock.MagicMock()
mocked_response.code = 200
request_open.return_value = mocked_response
stats = Stats('greasyspoon')
stats.pushgateway_uri = 'magicponies:65630'
stats.increment(
'sausage',
['bacon:scrambled_eggs'],
verbose_name='Name of sausages eat.',
)
stats.histogram(
'bacon', 2.71828, ['spam:eggs', 'scrambled_eggs:ham'])
stats.histogram(
'bacon', 3.14159, ['spam:sausage', 'scrambled_eggs:eggs'])
stats.gauge(
'spam',
2.718281828,
['sausage:bacon'],
verbose_name='Number of spam emails sent.',
)
stats.pushadd()
stats_push_request = request_open.call_args[0][0]
self.assertEqual(stats_push_request.get_method(), 'PUT')
self.assertEqual(
'http://magicponies:65630/metrics/job/greasyspoon',
stats_push_request.get_full_url(),
)
sent_data = stats_push_request.data
self.assertIn(
b'HELP sausage Name of sausages eat.\n# TYPE sausage counter',
sent_data,
)
self.assertIn(
b'HELP bacon \n# TYPE bacon histogram',
sent_data,
)
self.assertIn(
b'HELP spam Number of spam emails sent.\n# TYPE spam gauge',
sent_data,
)
@_patch_url_opener
def test_push(self, request_open):
mocked_response = mock.MagicMock()
mocked_response.code = 200
request_open.return_value = mocked_response
stats = Stats('greasyspoon')
stats.pushgateway_uri = 'magicponies:65630'
stats.increment(
'sausage',
['bacon:scrambled_eggs'], verbose_name='Name of sausages eat.')
stats.histogram(
'bacon',
2.71828,
['spam:eggs', 'scrambled_eggs:ham'])
stats.histogram(
'bacon',
3.14159,
['spam:sausage', 'scrambled_eggs:eggs'])
stats.gauge(
'spam',
2.718281828,
['sausage:bacon'],
verbose_name='Number of spam emails sent.')
stats.push()
stats_push_request = request_open.call_args[0][0]
self.assertEqual(stats_push_request.get_method(), 'POST')
self.assertEqual(
'http://magicponies:65630/metrics/job/greasyspoon',
stats_push_request.get_full_url())
sent_data = stats_push_request.data
self.assertIn(
b'HELP sausage Name of sausages eat.\n# TYPE sausage counter',
sent_data,
)
self.assertIn(
b'HELP bacon \n# TYPE bacon histogram',
sent_data,
)
self.assertIn(
b'HELP spam Number of spam emails sent.\n# TYPE spam gauge',
sent_data,
)
if _PY_35:
from ._async import AsyncTestMixin
test_asynctimed = AsyncTestMixin.test_asynctimed
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
|
"""Build Environment used for isolation during sdist building
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
import logging
import os
import sys
import textwrap
from collections import OrderedDict
from distutils.sysconfig import get_python_lib
from sysconfig import get_paths
from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet
from pip import __file__ as pip_location
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import open_spinner
if MYPY_CHECK_RUNNING:
from typing import Tuple, Set, Iterable, Optional, List
from pip._internal.index.package_finder import PackageFinder
logger = logging.getLogger(__name__)
class _Prefix:
def __init__(self, path):
# type: (str) -> None
self.path = path
self.setup = False
self.bin_dir = get_paths(
'nt' if os.name == 'nt' else 'posix_prefix',
vars={'base': path, 'platbase': path}
)['scripts']
# Note: prefer distutils' sysconfig to get the
# library paths so PyPy is correctly supported.
purelib = get_python_lib(plat_specific=False, prefix=path)
platlib = get_python_lib(plat_specific=True, prefix=path)
if purelib == platlib:
self.lib_dirs = [purelib]
else:
self.lib_dirs = [purelib, platlib]
class BuildEnvironment(object):
"""Creates and manages an isolated environment to install build deps
"""
def __init__(self):
# type: () -> None
self._temp_dir = TempDirectory(kind="build-env")
self._prefixes = OrderedDict((
(name, _Prefix(os.path.join(self._temp_dir.path, name)))
for name in ('normal', 'overlay')
))
self._bin_dirs = [] # type: List[str]
self._lib_dirs = [] # type: List[str]
for prefix in reversed(list(self._prefixes.values())):
self._bin_dirs.append(prefix.bin_dir)
self._lib_dirs.extend(prefix.lib_dirs)
# Customize site to:
# - ensure .pth files are honored
# - prevent access to system site packages
system_sites = {
os.path.normcase(site) for site in (
get_python_lib(plat_specific=False),
get_python_lib(plat_specific=True),
)
}
self._site_dir = os.path.join(self._temp_dir.path, 'site')
if not os.path.exists(self._site_dir):
os.mkdir(self._site_dir)
with open(os.path.join(self._site_dir, 'sitecustomize.py'), 'w') as fp:
fp.write(textwrap.dedent(
'''
import os, site, sys
# First, drop system-sites related paths.
original_sys_path = sys.path[:]
known_paths = set()
for path in {system_sites!r}:
site.addsitedir(path, known_paths=known_paths)
system_paths = set(
os.path.normcase(path)
for path in sys.path[len(original_sys_path):]
)
original_sys_path = [
path for path in original_sys_path
if os.path.normcase(path) not in system_paths
]
sys.path = original_sys_path
# Second, add lib directories,
# ensuring .pth files are processed.
for path in {lib_dirs!r}:
assert path not in sys.path
site.addsitedir(path)
'''
).format(system_sites=system_sites, lib_dirs=self._lib_dirs))
def __enter__(self):
self._save_env = {
name: os.environ.get(name, None)
for name in ('PATH', 'PYTHONNOUSERSITE', 'PYTHONPATH')
}
path = self._bin_dirs[:]
old_path = self._save_env['PATH']
if old_path:
path.extend(old_path.split(os.pathsep))
pythonpath = [self._site_dir]
os.environ.update({
'PATH': os.pathsep.join(path),
'PYTHONNOUSERSITE': '1',
'PYTHONPATH': os.pathsep.join(pythonpath),
})
def __exit__(self, exc_type, exc_val, exc_tb):
for varname, old_value in self._save_env.items():
if old_value is None:
os.environ.pop(varname, None)
else:
os.environ[varname] = old_value
def cleanup(self):
# type: () -> None
self._temp_dir.cleanup()
def check_requirements(self, reqs):
# type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]]
"""Return 2 sets:
- conflicting requirements: set of (installed, wanted) reqs tuples
- missing requirements: set of reqs
"""
missing = set()
conflicting = set()
if reqs:
ws = WorkingSet(self._lib_dirs)
for req in reqs:
try:
if ws.find(Requirement.parse(req)) is None:
missing.add(req)
except VersionConflict as e:
conflicting.add((str(e.args[0].as_requirement()),
str(e.args[1])))
return conflicting, missing
def install_requirements(
self,
finder, # type: PackageFinder
requirements, # type: Iterable[str]
prefix_as_string, # type: str
message # type: Optional[str]
):
# type: (...) -> None
prefix = self._prefixes[prefix_as_string]
assert not prefix.setup
prefix.setup = True
if not requirements:
return
args = [
sys.executable, os.path.dirname(pip_location), 'install',
'--ignore-installed', '--no-user', '--prefix', prefix.path,
'--no-warn-script-location',
] # type: List[str]
if logger.getEffectiveLevel() <= logging.DEBUG:
args.append('-v')
for format_control in ('no_binary', 'only_binary'):
formats = getattr(finder.format_control, format_control)
args.extend(('--' + format_control.replace('_', '-'),
','.join(sorted(formats or {':none:'}))))
index_urls = finder.index_urls
if index_urls:
args.extend(['-i', index_urls[0]])
for extra_index in index_urls[1:]:
args.extend(['--extra-index-url', extra_index])
else:
args.append('--no-index')
for link in finder.find_links:
args.extend(['--find-links', link])
for host in finder.trusted_hosts:
args.extend(['--trusted-host', host])
if finder.allow_all_prereleases:
args.append('--pre')
args.append('--')
args.extend(requirements)
with open_spinner(message) as spinner:
call_subprocess(args, spinner=spinner)
class NoOpBuildEnvironment(BuildEnvironment):
"""A no-op drop-in replacement for BuildEnvironment
"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def cleanup(self):
pass
def install_requirements(self, finder, requirements, prefix, message):
raise NotImplementedError()
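# Illustrative usage sketch -- not part of the original module. It assumes a
# configured PackageFinder instance (here called `finder`), as pip's
# wheel-building code normally supplies.
def _example_build_environment_usage(finder):
    env = BuildEnvironment()
    with env:
        # Check which build requirements are already satisfied, then install
        # the missing ones into the 'normal' prefix of the isolated env.
        conflicting, missing = env.check_requirements(['setuptools', 'wheel'])
        if missing:
            env.install_requirements(finder, missing, 'normal',
                                     "Installing build dependencies")
    env.cleanup()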
|
|
import signal
import sys
import unittest
from unittest import mock
import asyncio
from asyncio import base_subprocess
from asyncio import subprocess
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
if sys.platform != 'win32':
from asyncio import unix_events
# Program that blocks (sleeps for an hour)
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']
# Program copying input to output
PROGRAM_CAT = [
sys.executable, '-c',
';'.join(('import sys',
'data = sys.stdin.buffer.read()',
'sys.stdout.buffer.write(data)'))]
class TestSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, *args, **kwargs):
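        # Replace the real child process with a mock so the transport's
        # bookkeeping can be tested without spawning an actual subprocess.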
self._proc = mock.Mock()
self._proc.stdin = None
self._proc.stdout = None
self._proc.stderr = None
class SubprocessTransportTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
self.set_event_loop(self.loop)
def create_transport(self, waiter=None):
protocol = mock.Mock()
protocol.connection_made._is_coroutine = False
protocol.process_exited._is_coroutine = False
transport = TestSubprocessTransport(
self.loop, protocol, ['test'], False,
None, None, None, 0, waiter=waiter)
return (transport, protocol)
def test_proc_exited(self):
waiter = asyncio.Future(loop=self.loop)
transport, protocol = self.create_transport(waiter)
transport._process_exited(6)
self.loop.run_until_complete(waiter)
self.assertEqual(transport.get_returncode(), 6)
self.assertTrue(protocol.connection_made.called)
self.assertTrue(protocol.process_exited.called)
self.assertTrue(protocol.connection_lost.called)
self.assertEqual(protocol.connection_lost.call_args[0], (None,))
self.assertFalse(transport._closed)
self.assertIsNone(transport._loop)
self.assertIsNone(transport._proc)
self.assertIsNone(transport._protocol)
# methods must raise ProcessLookupError if the process exited
self.assertRaises(ProcessLookupError,
transport.send_signal, signal.SIGTERM)
self.assertRaises(ProcessLookupError, transport.terminate)
self.assertRaises(ProcessLookupError, transport.kill)
transport.close()
class SubprocessMixin:
def test_stdin_stdout(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
proc = yield from asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
loop=self.loop)
# feed data
proc.stdin.write(data)
yield from proc.stdin.drain()
proc.stdin.close()
# get output and exitcode
data = yield from proc.stdout.read()
exitcode = yield from proc.wait()
return (exitcode, data)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
exitcode, stdout = self.loop.run_until_complete(task)
self.assertEqual(exitcode, 0)
self.assertEqual(stdout, b'some data')
def test_communicate(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
proc = yield from asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
loop=self.loop)
stdout, stderr = yield from proc.communicate(data)
return proc.returncode, stdout
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
exitcode, stdout = self.loop.run_until_complete(task)
self.assertEqual(exitcode, 0)
self.assertEqual(stdout, b'some data')
def test_shell(self):
create = asyncio.create_subprocess_shell('exit 7',
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 7)
def test_start_new_session(self):
# start the new process in a new session
create = asyncio.create_subprocess_shell('exit 8',
start_new_session=True,
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 8)
def test_kill(self):
args = PROGRAM_BLOCKED
create = asyncio.create_subprocess_exec(*args, loop=self.loop)
proc = self.loop.run_until_complete(create)
proc.kill()
returncode = self.loop.run_until_complete(proc.wait())
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_terminate(self):
args = PROGRAM_BLOCKED
create = asyncio.create_subprocess_exec(*args, loop=self.loop)
proc = self.loop.run_until_complete(create)
proc.terminate()
returncode = self.loop.run_until_complete(proc.wait())
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_send_signal(self):
code = 'import time; print("sleeping", flush=True); time.sleep(3600)'
args = [sys.executable, '-c', code]
create = asyncio.create_subprocess_exec(*args,
stdout=subprocess.PIPE,
loop=self.loop)
proc = self.loop.run_until_complete(create)
@asyncio.coroutine
def send_signal(proc):
# basic synchronization to wait until the program is sleeping
line = yield from proc.stdout.readline()
self.assertEqual(line, b'sleeping\n')
proc.send_signal(signal.SIGHUP)
returncode = (yield from proc.wait())
return returncode
returncode = self.loop.run_until_complete(send_signal(proc))
self.assertEqual(-signal.SIGHUP, returncode)
def prepare_broken_pipe_test(self):
# buffer large enough to feed the whole pipe buffer
large_data = b'x' * support.PIPE_MAX_SIZE
        # the program ends before stdin can be fed
create = asyncio.create_subprocess_exec(
sys.executable, '-c', 'pass',
stdin=subprocess.PIPE,
loop=self.loop)
proc = self.loop.run_until_complete(create)
return (proc, large_data)
def test_stdin_broken_pipe(self):
proc, large_data = self.prepare_broken_pipe_test()
@asyncio.coroutine
def write_stdin(proc, data):
proc.stdin.write(data)
yield from proc.stdin.drain()
coro = write_stdin(proc, large_data)
# drain() must raise BrokenPipeError or ConnectionResetError
with test_utils.disable_logger():
self.assertRaises((BrokenPipeError, ConnectionResetError),
self.loop.run_until_complete, coro)
self.loop.run_until_complete(proc.wait())
def test_communicate_ignore_broken_pipe(self):
proc, large_data = self.prepare_broken_pipe_test()
# communicate() must ignore BrokenPipeError when feeding stdin
with test_utils.disable_logger():
self.loop.run_until_complete(proc.communicate(large_data))
self.loop.run_until_complete(proc.wait())
def test_pause_reading(self):
limit = 10
size = (limit * 2 + 1)
@asyncio.coroutine
def test_pause_reading():
code = '\n'.join((
'import sys',
'sys.stdout.write("x" * %s)' % size,
'sys.stdout.flush()',
))
connect_read_pipe = self.loop.connect_read_pipe
@asyncio.coroutine
def connect_read_pipe_mock(*args, **kw):
transport, protocol = yield from connect_read_pipe(*args, **kw)
transport.pause_reading = mock.Mock()
transport.resume_reading = mock.Mock()
return (transport, protocol)
self.loop.connect_read_pipe = connect_read_pipe_mock
proc = yield from asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
limit=limit,
loop=self.loop)
stdout_transport = proc._transport.get_pipe_transport(1)
stdout, stderr = yield from proc.communicate()
# The child process produced more than limit bytes of output,
# the stream reader transport should pause the protocol to not
# allocate too much memory.
return (stdout, stdout_transport)
# Issue #22685: Ensure that the stream reader pauses the protocol
# when the child process produces too much data
stdout, transport = self.loop.run_until_complete(test_pause_reading())
self.assertEqual(stdout, b'x' * size)
self.assertTrue(transport.pause_reading.called)
self.assertTrue(transport.resume_reading.called)
def test_stdin_not_inheritable(self):
# Tulip issue #209: stdin must not be inheritable, otherwise
# the Process.communicate() hangs
@asyncio.coroutine
def len_message(message):
code = 'import sys; data = sys.stdin.read(); print(len(data))'
proc = yield from asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
close_fds=False,
loop=self.loop)
stdout, stderr = yield from proc.communicate(message)
exitcode = yield from proc.wait()
return (stdout, exitcode)
output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
self.assertEqual(output.rstrip(), b'3')
self.assertEqual(exitcode, 0)
def test_cancel_process_wait(self):
# Issue #23140: cancel Process.wait()
@asyncio.coroutine
def cancel_wait():
proc = yield from asyncio.create_subprocess_exec(
*PROGRAM_BLOCKED,
loop=self.loop)
# Create an internal future waiting on the process exit
task = self.loop.create_task(proc.wait())
self.loop.call_soon(task.cancel)
try:
yield from task
except asyncio.CancelledError:
pass
# Cancel the future
task.cancel()
# Kill the process and wait until it is done
proc.kill()
yield from proc.wait()
self.loop.run_until_complete(cancel_wait())
def test_cancel_make_subprocess_transport_exec(self):
@asyncio.coroutine
def cancel_make_transport():
coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
loop=self.loop)
task = self.loop.create_task(coro)
self.loop.call_soon(task.cancel)
try:
yield from task
except asyncio.CancelledError:
pass
# ignore the log:
# "Exception during subprocess creation, kill the subprocess"
with test_utils.disable_logger():
self.loop.run_until_complete(cancel_make_transport())
def test_cancel_post_init(self):
@asyncio.coroutine
def cancel_make_transport():
coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
task = self.loop.create_task(coro)
self.loop.call_soon(task.cancel)
try:
yield from task
except asyncio.CancelledError:
pass
# ignore the log:
# "Exception during subprocess creation, kill the subprocess"
with test_utils.disable_logger():
self.loop.run_until_complete(cancel_make_transport())
test_utils.run_briefly(self.loop)
def test_close_kill_running(self):
@asyncio.coroutine
def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
transport, protocol = yield from create
kill_called = False
def kill():
nonlocal kill_called
kill_called = True
orig_kill()
proc = transport.get_extra_info('subprocess')
orig_kill = proc.kill
proc.kill = kill
returncode = transport.get_returncode()
transport.close()
yield from transport._wait()
return (returncode, kill_called)
# Ignore "Close running child process: kill ..." log
with test_utils.disable_logger():
returncode, killed = self.loop.run_until_complete(kill_running())
self.assertIsNone(returncode)
# transport.close() must kill the process if it is still running
self.assertTrue(killed)
test_utils.run_briefly(self.loop)
def test_close_dont_kill_finished(self):
@asyncio.coroutine
def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
transport, protocol = yield from create
proc = transport.get_extra_info('subprocess')
            # kill the process (but asyncio is not notified immediately)
proc.kill()
proc.wait()
proc.kill = mock.Mock()
proc_returncode = proc.poll()
transport_returncode = transport.get_returncode()
transport.close()
return (proc_returncode, transport_returncode, proc.kill.called)
# Ignore "Unknown child process pid ..." log of SafeChildWatcher,
# emitted because the test already consumes the exit status:
# proc.wait()
with test_utils.disable_logger():
result = self.loop.run_until_complete(kill_running())
test_utils.run_briefly(self.loop)
proc_returncode, transport_return_code, killed = result
self.assertIsNotNone(proc_returncode)
self.assertIsNone(transport_return_code)
# transport.close() must not kill the process if it finished, even if
# the transport was not notified yet
self.assertFalse(killed)
if sys.platform != 'win32':
# Unix
class SubprocessWatcherMixin(SubprocessMixin):
Watcher = None
def setUp(self):
policy = asyncio.get_event_loop_policy()
self.loop = policy.new_event_loop()
self.set_event_loop(self.loop)
watcher = self.Watcher()
watcher.attach_loop(self.loop)
policy.set_child_watcher(watcher)
self.addCleanup(policy.set_child_watcher, None)
class SubprocessSafeWatcherTests(SubprocessWatcherMixin,
test_utils.TestCase):
Watcher = unix_events.SafeChildWatcher
class SubprocessFastWatcherTests(SubprocessWatcherMixin,
test_utils.TestCase):
Watcher = unix_events.FastChildWatcher
else:
# Windows
class SubprocessProactorTests(SubprocessMixin, test_utils.TestCase):
def setUp(self):
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
if __name__ == '__main__':
unittest.main()
|
|
import re
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.contacts import Contacts
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch, metrics
from kivy.lang import Builder
# lazy imports for factory so that widgets can be used in kv
Factory.register('InstallWizard',
module='electrum_gui.kivy.uix.dialogs.installwizard')
Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down; timeout is set to forever
# so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.switch import Switch
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def on_quotes(self, d):
#Logger.info("on_quotes")
pass
def on_history(self, d):
#Logger.info("on_history")
if self.history_screen:
self.history_screen.update()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self.update_status()
if self.history_screen:
self.history_screen.update()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
if self.history_screen:
self.history_screen.update()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
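    # Example (illustrative, not from the original source): assuming the
    # default base unit 'mBTC' maps to a decimal point of 5 in `base_units`,
    # get_amount('1.5 mBTC') returns int(10**5 * Decimal('1.5')) == 150000
    # satoshis, the integer amount used throughout the wallet code.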
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Reports the current orientation of the window.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
wallet = ObjectProperty(None)
    '''Holds the Electrum wallet.
    :attr:`wallet` is an `ObjectProperty`, defaults to None.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.qrscanner = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
super(ElectrumWindow, self).__init__(**kwargs)
        self.title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
#self.config = self.gui_object.config
self.contacts = Contacts(self.electrum_config)
self.invoices = InvoiceStore(self.electrum_config)
        # create triggers so as to limit updates to at most twice per second
self._trigger_update_wallet =\
Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status =\
Clock.create_trigger(self.update_status, .5)
self._trigger_notify_transactions = \
Clock.create_trigger(self.notify_transactions, 5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def on_pr(self, pr):
if pr.verify(self.contacts):
key = self.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.bitcoin import base_decode
from electrum.transaction import Transaction
try:
text = base_decode(data, None, base=43).encode('hex')
tx = Transaction(text)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if self.send_screen is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.open()
def qr_dialog(self, title, data, show_text=False):
from uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
intent = Intent("com.google.zxing.client.android.SCAN")
intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
try:
PythonActivity.mActivity.startActivityForResult(intent, 0)
except:
self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.renpy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
        ''' This is the start point of the Kivy UI.
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
Logger.info("dpi: {} {}".format(metrics.dpi, metrics.dpi_rounded))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.renpy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def load_wallet_by_name(self, wallet_path):
if not wallet_path:
return
config = self.electrum_config
storage = WalletStorage(wallet_path)
Logger.info('Electrum: Check for existing wallet')
if storage.file_exists:
wallet = Wallet(storage)
action = wallet.get_action()
else:
action = 'new'
if action is not None:
# start installation wizard
Logger.debug('Electrum: Wallet not found. Launching install wizard')
wizard = Factory.InstallWizard(config, self.network, storage)
wizard.bind(on_wizard_complete=lambda instance, wallet: self.load_wallet(wallet))
wizard.run(action)
else:
self.load_wallet(wallet)
self.on_resume()
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.wallet.stop_threads()
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
self.is_exit = False
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
if self._settings_dialog is None:
from uix.dialogs.settings import SettingsDialog
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
'''
from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.icon = "icons/electrum.png"
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction']
self.network.register_callback(self.on_network, interests)
#self.wallet = None
self.tabs = self.root.ids['tabs']
def on_network(self, event, *args):
if event == 'updated':
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_notify_transactions(*args)
@profiler
def load_wallet(self, wallet):
self.stop_wallet()
self.wallet = wallet
self.wallet.start_threads(self.network)
self.current_account = self.wallet.storage.get('current_account', None)
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
self.notify_transactions()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
self.status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
self.status = _("Synchronizing...")
elif server_lag > 1:
self.status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_account_balance(self.current_account)
text = self.format_amount(c+x+u)
self.status = str(text.strip() + ' ' + self.base_unit)
else:
self.status = _("Not connected")
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
amount, fee = self.wallet.get_max_amount(self.electrum_config, inputs, addr, None)
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
#if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@profiler
def notify_transactions(self, *dt):
if not self.network or not self.network.is_connected():
return
# temporarily disabled for merge
return
iface = self.network
ptfn = iface.pending_transactions_for_notifications
if len(ptfn) > 0:
            # Combine the transactions if there are more than three
tx_amount = len(ptfn)
if(tx_amount >= 3):
total_amount = 0
for tx in ptfn:
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
if(v > 0):
total_amount += v
                self.notify(_("{txs} new transactions received. Total amount "
                              "received in the new transactions: {amount} "
                              "{unit}").format(txs=tx_amount,
                              amount=self.format_amount(total_amount),
                              unit=self.base_unit))
iface.pending_transactions_for_notifications = []
else:
for tx in iface.pending_transactions_for_notifications:
if tx:
iface.pending_transactions_for_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
if(v > 0):
self.notify(
_("{txs} new transaction received. {amount} {unit}").
format(txs=tx_amount, amount=self.format_amount(v),
unit=self.base_unit))
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
import os
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.qrscanner:
self.qrscanner.stop()
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
        if self.qrscanner and self.qrscanner.get_parent_window():
self.qrscanner.start()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error message bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
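    # Illustrative call (sketch, not from the original source):
    #     self.show_info_bubble(text=_('Text copied to clipboard.'),
    #                           duration=2, arrow_pos=None, modal=False)
    # A duration of 0 keeps the bubble on screen until it is tapped.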
def tx_dialog(self, tx):
from uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, txid):
self.show_info(txid)
if ok and pr:
pr.set_paid(tx.hash())
self.invoices.save()
self.update_tab('invoices')
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.use_encryption:
self.password_dialog(msg, f, args)
else:
apply(f, args + (None,))
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.use_encryption and password is None:
return
try:
seed = self.wallet.get_seed(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
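    # The PIN-change flow below chains three password dialogs: ask for the
    # current PIN (if the wallet is encrypted), ask for the new PIN, then ask
    # for a confirmation before calling wallet.update_password().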
def change_password(self, cb):
if self.wallet.use_encryption:
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.use_encryption:
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
def callback(pw):
Clock.schedule_once(lambda x: apply(f, args + (pw,)), 0.1)
if self._password_dialog is None:
from uix.dialogs.password_dialog import PasswordDialog
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
|
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2015 Dave Jones <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The color module defines a class for representing a color, along with various
ancillary classes which can be used to manipulate aspects of a color.
.. note::
All classes in this module are available from the :mod:`picamera` namespace
without having to import :mod:`picamera.color` directly.
The following classes are defined in the module:
Color
=====
.. autoclass:: Color
:members:
Red
===
.. autoclass:: Red
:members:
Green
=====
.. autoclass:: Green
:members:
Blue
====
.. autoclass:: Blue
:members:
Hue
===
.. autoclass:: Hue
:members:
Saturation
==========
.. autoclass:: Saturation
:members:
Lightness
=========
.. autoclass:: Lightness
:members:
"""
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's zip equivalent to Py3's
try:
from itertools import izip as zip
except ImportError:
pass
# Make Py2's str equivalent to Py3's
str = type('')
import colorsys
from math import pi, sqrt
from fractions import Fraction
from collections import namedtuple
# From the CSS Color Module Level 3 specification, section 4.3
# <http://www.w3.org/TR/css3-color/#svg-color>
NAMED_COLORS = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgreen': '#006400',
'darkgrey': '#a9a9a9',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'grey': '#808080',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgreen': '#90ee90',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
class Red(float):
"""
Represents the red component of a :class:`Color` for use in
transformations. Instances of this class can be constructed directly with a
float value, or by querying the :attr:`Color.red` attribute. Addition,
subtraction, and multiplication are supported with :class:`Color`
instances. For example::
>>> Color.from_rgb(0, 0, 0) + Red(0.5)
<Color "#7f0000">
>>> Color('#f00') - Color('#900').red
<Color "#660000">
>>> (Red(0.1) * Color('red')).red
Red(0.1)
"""
def __repr__(self):
return "Red(%s)" % self
class Green(float):
"""
Represents the green component of a :class:`Color` for use in
transformations. Instances of this class can be constructed directly with
a float value, or by querying the :attr:`Color.green` attribute. Addition,
subtraction, and multiplication are supported with :class:`Color`
instances. For example::
>>> Color(0, 0, 0) + Green(0.1)
<Color "#001900">
>>> Color.from_yuv(1, -0.4, -0.6) - Green(1)
<Color "#50002f">
>>> (Green(0.5) * Color('white')).rgb
(Red(1.0), Green(0.5), Blue(1.0))
"""
def __repr__(self):
return "Green(%s)" % self
class Blue(float):
"""
Represents the blue component of a :class:`Color` for use in
transformations. Instances of this class can be constructed directly with
a float value, or by querying the :attr:`Color.blue` attribute. Addition,
subtraction, and multiplication are supported with :class:`Color`
instances. For example::
>>> Color(0, 0, 0) + Blue(0.2)
<Color "#000033">
>>> Color.from_hls(0.5, 0.5, 1.0) - Blue(1)
<Color "#00fe00">
>>> Blue(0.9) * Color('white')
<Color "#ffffe5">
"""
def __repr__(self):
return "Blue(%s)" % self
class Hue(float):
"""
Represents the hue of a :class:`Color` for use in transformations.
Instances of this class can be constructed directly with a float value in
the range [0.0, 1.0) representing an angle around the `HSL hue wheel`_. As
this is a circular mapping, 0.0 and 1.0 effectively mean the same thing,
i.e. out of range values will be normalized into the range [0.0, 1.0).
The class can also be constructed with the keyword arguments ``deg`` or
``rad`` if you wish to specify the hue value in degrees or radians instead,
respectively. Instances can also be constructed by querying the
:attr:`Color.hue` attribute.
Addition, subtraction, and multiplication are supported with :class:`Color`
instances. For example::
>>> Color(1, 0, 0).hls
(0.0, 0.5, 1.0)
>>> (Color(1, 0, 0) + Hue(deg=180)).hls
(0.5, 0.5, 1.0)
Note that whilst multiplication by a :class:`Hue` doesn't make much sense,
it is still supported. However, the circular nature of a hue value can lead
    to surprising effects. In particular, since 1.0 is equivalent to 0.0 the
following may be observed::
>>> (Hue(1.0) * Color.from_hls(0.5, 0.5, 1.0)).hls
(0.0, 0.5, 1.0)
.. _HSL hue wheel: https://en.wikipedia.org/wiki/Hue
"""
def __new__(cls, n=None, deg=None, rad=None):
if n is not None:
return super(Hue, cls).__new__(cls, n % 1.0)
elif deg is not None:
return super(Hue, cls).__new__(cls, (deg / 360.0) % 1.0)
elif rad is not None:
return super(Hue, cls).__new__(cls, (rad / (2 * pi)) % 1.0)
else:
raise ValueError('You must specify a value, or deg or rad')
def __repr__(self):
return "Hue(deg=%s)" % self.deg
@property
def deg(self):
return self * 360.0
@property
def rad(self):
return self * 2 * pi
class Lightness(float):
"""
Represents the lightness of a :class:`Color` for use in transformations.
Instances of this class can be constructed directly with a float value, or
by querying the :attr:`Color.lightness` attribute. Addition, subtraction,
and multiplication are supported with :class:`Color` instances. For
example::
>>> Color(0, 0, 0) + Lightness(0.1)
<Color "#191919">
>>> Color.from_rgb_bytes(0x80, 0x80, 0) - Lightness(0.2)
<Color "#191900">
>>> Lightness(0.9) * Color('wheat')
<Color "#f0cd8d">
"""
def __repr__(self):
return "Lightness(%s)" % self
class Saturation(float):
"""
Represents the saturation of a :class:`Color` for use in transformations.
Instances of this class can be constructed directly with a float value, or
by querying the :attr:`Color.saturation` attribute. Addition, subtraction,
and multiplication are supported with :class:`Color` instances. For
example::
>>> Color(0.9, 0.9, 0.6) + Saturation(0.1)
<Color "#ebeb92">
>>> Color('red') - Saturation(1)
<Color "#7f7f7f">
>>> Saturation(0.5) * Color('wheat')
<Color "#e4d9c3">
"""
def __repr__(self):
return "Lightness(%s)" % self
clamp_float = lambda v: max(0.0, min(1.0, v))
clamp_bytes = lambda v: max(0, min(255, v))
make_linear = lambda c: c / 12.92 if c <= 0.04045 else ((c + 0.055) / 1.055) ** 2.4
matrix_mult = lambda m, n: (
sum(mval * nval for mval, nval in zip(mrow, n))
for mrow in m
)
class Color(namedtuple('Color', ('red', 'green', 'blue'))):
"""
The Color class is a tuple which represents a color as red, green, and
blue components.
The class has a flexible constructor which allows you to create an instance
from a variety of color systems including `RGB`_, `Y'UV`_, `Y'IQ`_, `HLS`_,
and `HSV`_. There are also explicit constructors for each of these systems
to allow you to force the use of a system in your code. For example, an
instance of :class:`Color` can be constructed in any of the following
ways::
>>> Color('#f00')
<Color "#ff0000">
>>> Color('green')
<Color "#008000">
>>> Color(0, 0, 1)
<Color "#0000ff">
>>> Color(hue=0, saturation=1, value=0.5)
<Color "#7f0000">
>>> Color(y=0.4, u=-0.05, v=0.615)
<Color "#ff0f4c">
The specific forms that the default constructor will accept are enumerated
below:
+------------------------------+------------------------------------------+
| Style | Description |
+==============================+==========================================+
| Single positional parameter | Equivalent to calling |
| | :meth:`Color.from_string`. |
+------------------------------+------------------------------------------+
| Three positional parameters | Equivalent to calling |
| | :meth:`Color.from_rgb` if all three |
| | parameters are between 0.0 and 1.0, or |
| | :meth:`Color.from_rgb_bytes` otherwise. |
+------------------------------+ |
| Three named parameters, | |
| "r", "g", "b" | |
+------------------------------+ |
| Three named parameters, | |
| "red", "green", "blue" | |
+------------------------------+------------------------------------------+
| Three named parameters, | Equivalent to calling |
| "y", "u", "v" | :meth:`Color.from_yuv` if "y" is between |
| | 0.0 and 1.0, "u" is between -0.436 and |
| | 0.436, and "v" is between -0.615 and |
| | 0.615, or :meth:`Color.from_yuv_bytes` |
| | otherwise. |
+------------------------------+------------------------------------------+
| Three named parameters, | Equivalent to calling |
| "y", "i", "q" | :meth:`Color.from_yiq`. |
+------------------------------+------------------------------------------+
| Three named parameters, | Equivalent to calling |
| "h", "l", "s" | :meth:`Color.from_hls`. |
+------------------------------+ |
| Three named parameters, | |
| "hue", "lightness", | |
| "saturation" | |
+------------------------------+------------------------------------------+
| Three named parameters | Equivalent to calling |
| "h", "s", "v" | :meth:`Color.from_hsv` |
+------------------------------+ |
| Three named parameters | |
| "hue", "saturation", "value" | |
+------------------------------+------------------------------------------+
If the constructor parameters do not conform to any of the variants in the
table above, a :exc:`ValueError` will be thrown.
Internally, the color is *always* represented as 3 float values
corresponding to the red, green, and blue components of the color. These
values take a value from 0.0 to 1.0 (least to full intensity). The class
provides several attributes which can be used to convert one color system
into another::
>>> Color('#f00').hls
(0.0, 0.5, 1.0)
>>> Color.from_string('green').hue
Hue(deg=120.0)
>>> Color.from_rgb_bytes(0, 0, 255).yuv
(0.114, 0.435912, -0.099978)
As :class:`Color` derives from tuple, instances are immutable. While this
provides the advantage that they can be used as keys in a dict, it does
mean that colors themselves cannot be directly manipulated (e.g. by
reducing the red component).
    However, several auxiliary classes in the module provide the ability to
perform simple transformations of colors via operators which produce a new
:class:`Color` instance. For example::
>>> Color('red') - Red(0.5)
<Color "#7f0000">
>>> Color('green') + Red(0.5)
<Color "#7f8000">
>>> Color.from_hls(0.5, 0.5, 1.0)
<Color "#00feff">
>>> Color.from_hls(0.5, 0.5, 1.0) * Lightness(0.8)
<Color "#00cbcc">
>>> (Color.from_hls(0.5, 0.5, 1.0) * Lightness(0.8)).hls
(0.5, 0.4, 1.0)
From the last example above one can see that even attributes not directly
stored by the color (such as lightness) can be manipulated in this fashion.
In this case a :class:`Color` instance is constructed from HLS (hue,
lightness, saturation) values with a lightness of 0.5. This is multiplied
by a :class:`Lightness` instance with a value of 0.8 which constructs a new
:class:`Color` with the same hue and saturation, but a lightness of 0.5 *
0.8 = 0.4.
If an instance is converted to a string (with :func:`str`) it will return a
string containing the 7-character HTML code for the color (e.g. "#ff0000"
for red). As can be seen in the examples above, a similar representation is
returned for :func:`repr`.
.. _RGB: https://en.wikipedia.org/wiki/RGB_color_space
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. _Y'IQ: https://en.wikipedia.org/wiki/YIQ
.. _HLS: https://en.wikipedia.org/wiki/HSL_and_HSV
.. _HSV: https://en.wikipedia.org/wiki/HSL_and_HSV
"""
def __new__(cls, *args, **kwargs):
def from_rgb(r, g, b):
if 0.0 <= r <= 1.0 and 0.0 <= g <= 1.0 and 0.0 <= b <= 1.0:
return cls.from_rgb(r, g, b)
else:
return cls.from_rgb_bytes(r, g, b)
def from_yuv(y, u, v):
if 0.0 <= y <= 1.0 and -0.436 <= u <= 0.436 and -0.615 <= v <= 0.615:
return cls.from_yuv(y, u, v)
else:
return cls.from_yuv_bytes(y, u, v)
if kwargs:
try:
return {
frozenset('rgb'): from_rgb,
frozenset('yuv'): from_yuv,
frozenset('yiq'): cls.from_yiq,
frozenset('hls'): cls.from_hls,
frozenset('hsv'): cls.from_hsv,
frozenset(('red', 'green', 'blue')):
lambda red, green, blue: from_rgb(red, green, blue),
frozenset(('hue', 'lightness', 'saturation')):
lambda hue, lightness, saturation: cls.from_hls(hue, lightness, saturation),
frozenset(('hue', 'saturation', 'value')):
lambda hue, saturation, value: cls.from_hsv(hue, saturation, value),
}[frozenset(kwargs.keys())](**kwargs)
except KeyError:
pass
else:
if len(args) == 1:
return cls.from_string(args[0])
elif len(args) == 3:
return from_rgb(*args)
raise ValueError('Unable to construct Color from provided arguments')
@classmethod
def from_string(cls, s):
"""
Construct a :class:`Color` from a 4 or 7 character CSS-like
representation (e.g. "#f00" or "#ff0000" for red), or from one of the
named colors (e.g. "green" or "wheat") from the `CSS standard`_. Any
other string format will result in a :exc:`ValueError`.
.. _CSS standard: http://www.w3.org/TR/css3-color/#svg-color
"""
if isinstance(s, bytes):
s = s.decode('ascii')
if s.startswith('#'):
if len(s) == 7:
return cls.from_rgb_bytes(
int(s[1:3], base=16),
int(s[3:5], base=16),
int(s[5:7], base=16)
)
elif len(s) == 4:
return cls.from_rgb_bytes(
int(s[1:2], base=16) * 0x11,
int(s[2:3], base=16) * 0x11,
int(s[3:4], base=16) * 0x11
)
raise ValueError('Unrecognized color format "%s"' % s)
try:
return cls.from_string(NAMED_COLORS[s.lower()])
except KeyError:
raise ValueError('Unrecognized color name "%s"' % s)
@classmethod
def from_rgb(cls, r, g, b):
"""
Construct a :class:`Color` from three `RGB`_ float values between 0.0
and 1.0.
"""
return super(Color, cls).__new__(cls, r, g, b)
@classmethod
def from_rgb_bytes(cls, r, g, b):
"""
Construct a :class:`Color` from three `RGB`_ byte values between 0 and
255.
"""
return super(Color, cls).__new__(cls, r / 255.0, g / 255.0, b / 255.0)
@classmethod
def from_yuv(cls, y, u, v):
"""
Construct a :class:`Color` from three `Y'UV`_ float values. The Y value
may be between 0.0 and 1.0. U may be between -0.436 and 0.436, while
V may be between -0.615 and 0.615.
"""
return super(Color, cls).__new__(
cls,
clamp_float(y + 1.14 * v),
clamp_float(y - 0.395 * u - 0.581 * v),
clamp_float(y + 2.033 * u),
)
@classmethod
def from_yuv_bytes(cls, y, u, v):
"""
Construct a :class:`Color` from three `Y'UV`_ byte values between 0 and
255. The U and V values are biased by 128 to prevent negative values as
is typical in video applications. The Y value is biased by 16 for the
same purpose.
"""
c = y - 16
d = u - 128
e = v - 128
return cls.from_rgb_bytes(
clamp_bytes((298 * c + 409 * e + 128) >> 8),
clamp_bytes((298 * c - 100 * d - 208 * e + 128) >> 8),
clamp_bytes((298 * c + 516 * d + 128) >> 8),
)
@classmethod
def from_yiq(cls, y, i, q):
"""
Construct a :class:`Color` from three `Y'IQ`_ float values. Y' can be
between 0.0 and 1.0, while I and Q can be between -1.0 and 1.0.
"""
return super(Color, cls).__new__(cls, *colorsys.yiq_to_rgb(y, i, q))
@classmethod
def from_hls(cls, h, l, s):
"""
Construct a :class:`Color` from `HLS`_ (hue, lightness, saturation)
floats between 0.0 and 1.0.
"""
return super(Color, cls).__new__(cls, *colorsys.hls_to_rgb(h, l, s))
@classmethod
def from_hsv(cls, h, s, v):
"""
Construct a :class:`Color` from `HSV`_ (hue, saturation, value) floats
between 0.0 and 1.0.
"""
return super(Color, cls).__new__(cls, *colorsys.hsv_to_rgb(h, s, v))
def __add__(self, other):
if isinstance(other, Red):
return Color(clamp_float(self.red + other), self.green, self.blue)
elif isinstance(other, Green):
return Color(self.red, clamp_float(self.green + other), self.blue)
elif isinstance(other, Blue):
return Color(self.red, self.green, clamp_float(self.blue + other))
elif isinstance(other, Hue):
h, l, s = self.hls
return Color.from_hls((h + other) % 1.0, l, s)
elif isinstance(other, Lightness):
h, l, s = self.hls
return Color.from_hls(h, clamp_float(l + other), s)
elif isinstance(other, Saturation):
h, l, s = self.hls
return Color.from_hls(h, l, clamp_float(s + other))
return NotImplemented
def __radd__(self, other):
# Addition is commutative
if isinstance(other, (Red, Green, Blue, Hue, Lightness, Saturation)):
return self.__add__(other)
return NotImplemented
def __sub__(self, other):
if isinstance(other, Red):
return Color(clamp_float(self.red - other), self.green, self.blue)
elif isinstance(other, Green):
return Color(self.red, clamp_float(self.green - other), self.blue)
elif isinstance(other, Blue):
return Color(self.red, self.green, clamp_float(self.blue - other))
elif isinstance(other, Hue):
h, l, s = self.hls
return Color.from_hls((h - other) % 1.0, l, s)
elif isinstance(other, Lightness):
h, l, s = self.hls
return Color.from_hls(h, clamp_float(l - other), s)
elif isinstance(other, Saturation):
h, l, s = self.hls
return Color.from_hls(h, l, clamp_float(s - other))
return NotImplemented
def __rsub__(self, other):
if isinstance(other, Red):
return Color(clamp_float(other - self.red), self.green, self.blue)
elif isinstance(other, Green):
return Color(self.red, clamp_float(other - self.green), self.blue)
elif isinstance(other, Blue):
return Color(self.red, self.green, clamp_float(other - self.blue))
elif isinstance(other, Hue):
h, l, s = self.hls
return Color.from_hls((other - h) % 1.0, l, s)
elif isinstance(other, Lightness):
h, l, s = self.hls
return Color.from_hls(h, clamp_float(other - l), s)
elif isinstance(other, Saturation):
h, l, s = self.hls
return Color.from_hls(h, l, clamp_float(other - s))
return NotImplemented
def __mul__(self, other):
if isinstance(other, Red):
return Color(clamp_float(self.red * other), self.green, self.blue)
elif isinstance(other, Green):
return Color(self.red, clamp_float(self.green * other), self.blue)
elif isinstance(other, Blue):
return Color(self.red, self.green, clamp_float(self.blue * other))
elif isinstance(other, Hue):
h, l, s = self.hls
return Color.from_hls((h * other) % 1.0, l, s)
elif isinstance(other, Lightness):
h, l, s = self.hls
return Color.from_hls(h, clamp_float(l * other), s)
elif isinstance(other, Saturation):
h, l, s = self.hls
return Color.from_hls(h, l, clamp_float(s * other))
return NotImplemented
def __rmul__(self, other):
# Multiplication is commutative
if isinstance(other, (Red, Green, Blue, Hue, Lightness, Saturation)):
            return self.__mul__(other)
        return NotImplemented
def __str__(self):
return '#%02x%02x%02x' % self.rgb_bytes
def __repr__(self):
return '<Color "%s">' % str(self)
@property
def rgb(self):
"""
Returns a 3-tuple of (red, green, blue) float values (between 0.0 and
1.0).
"""
return (self.red, self.green, self.blue)
@property
def rgb_bytes(self):
"""
Returns a 3-tuple of (red, green, blue) byte values.
"""
return (
int(self.red * 255),
int(self.green * 255),
int(self.blue * 255),
)
@property
def yuv(self):
"""
Returns a 3-tuple of (y, u, v) float values; y values can be between
0.0 and 1.0, u values are between -0.436 and 0.436, and v values are
between -0.615 and 0.615.
"""
r, g, b = self.rgb
y = 0.299 * r + 0.587 * g + 0.114 * b
return (
y,
0.492 * (b - y),
0.877 * (r - y),
)
@property
def yuv_bytes(self):
"""
Returns a 3-tuple of (y, u, v) byte values. Y values are biased by 16
in the result to prevent negatives. U and V values are biased by 128
for the same purpose.
"""
r, g, b = self.rgb_bytes
return (
(( 66 * r + 129 * g + 25 * b + 128) >> 8) + 16,
((-38 * r - 73 * g + 112 * b + 128) >> 8) + 128,
((112 * r - 94 * g - 18 * b + 128) >> 8) + 128,
)
@property
def yiq(self):
"""
Returns a 3-tuple of (y, i, q) float values; y values can be between
0.0 and 1.0, whilst i and q values can be between -1.0 and 1.0.
"""
return colorsys.rgb_to_yiq(self.red, self.green, self.blue)
@property
def cie_xyz(self):
"""
Returns a 3-tuple of (X, Y, Z) float values representing the color in
        the `CIE 1931 color space`_.
.. _CIE 1931 color space: https://en.wikipedia.org/wiki/CIE_1931_color_space
"""
return tuple(matrix_mult(
((0.4124564, 0.3575761, 0.1804375),
(0.2126729, 0.7151522, 0.0721750),
(0.0193339, 0.1191920, 0.9503041),
),
(make_linear(self.red),
make_linear(self.green),
make_linear(self.blue)
)
))
@property
def cie_lab(self):
"""
Returns a 3-tuple of (L*, a*, b*) float values representing the color
        in the `CIE Lab color space`_ with the `D65 standard illuminant`_.
.. _CIE Lab color space: https://en.wikipedia.org/wiki/Lab_color_space
.. _D65 standard illuminant: https://en.wikipedia.org/wiki/Illuminant_D65
"""
K = Fraction(1, 3) * Fraction(29, 6) ** 2
e = Fraction(6, 29) ** 3
D65 = (0.95047, 1.0, 1.08883)
x, y, z = (n / m for n, m in zip(self.cie_xyz, D65))
fx, fy, fz = (
n ** Fraction(1, 3) if n > e else K * n + Fraction(4, 29)
for n in (x, y, z)
)
return (116 * fy - 16, 500 * (fx - fy), 200 * (fy - fz))
@property
def cie_luv(self):
"""
Returns a 3-tuple of (L*, u*, v*) float values representing the color
        in the `CIE Luv color space`_ with the `D65 standard illuminant`_.
        .. _CIE Luv color space: https://en.wikipedia.org/wiki/CIELUV
        .. _D65 standard illuminant: https://en.wikipedia.org/wiki/Illuminant_D65
"""
U = lambda x, y, z: 4 * x / (x + 15 * y + 3 * z)
V = lambda x, y, z: 9 * y / (x + 15 * y + 3 * z)
K = Fraction(29, 3) ** 3
e = Fraction(6, 29) ** 3
D65 = (0.95047, 1.0, 1.08883)
XYZ = self.cie_xyz
yr = XYZ[1] / D65[1]
L = 116 * yr ** Fraction(1, 3) - 16 if yr > e else K * yr
u = 13 * L * (U(*XYZ) - U(*D65))
v = 13 * L * (V(*XYZ) - V(*D65))
return (L, u, v)
@property
def hls(self):
"""
Returns a 3-tuple of (hue, lightness, saturation) float values (between
0.0 and 1.0).
"""
return colorsys.rgb_to_hls(self.red, self.green, self.blue)
@property
def hsv(self):
"""
Returns a 3-tuple of (hue, saturation, value) float values (between 0.0
and 1.0).
"""
return colorsys.rgb_to_hsv(self.red, self.green, self.blue)
@property
def red(self):
"""
Returns the red component of the color as a :class:`Red` instance which
can be used in operations with other :class:`Color` instances.
"""
# super() calls needed here to avoid recursion
return Red(super(Color, self).red)
@property
def green(self):
"""
Returns the green component of the color as a :class:`Green` instance
which can be used in operations with other :class:`Color` instances.
"""
return Green(super(Color, self).green)
@property
def blue(self):
"""
Returns the blue component of the color as a :class:`Blue` instance
which can be used in operations with other :class:`Color` instances.
"""
return Blue(super(Color, self).blue)
@property
def hue(self):
"""
Returns the hue of the color as a :class:`Hue` instance which can be
used in operations with other :class:`Color` instances.
"""
return Hue(self.hls[0])
@property
def lightness(self):
"""
Returns the lightness of the color as a :class:`Lightness` instance
which can be used in operations with other :class:`Color` instances.
"""
return Lightness(self.hls[1])
@property
def saturation(self):
"""
Returns the saturation of the color as a :class:`Saturation` instance
which can be used in operations with other :class:`Color` instances.
"""
return Saturation(self.hls[2])
def difference(self, other, method='cie1976'):
"""
Determines the difference between this color and *other* using the
specified *method*. The *method* is specified as a string, and the
following methods are valid:
* 'euclid' - Calculate the `Euclidian distance`_. This is by far the
fastest method, but also the least accurate in terms of human
perception.
* 'cie1976' - This is the default method. Use the `CIE 1976`_ formula
for calculating the difference between two colors in CIE Lab space.
* 'cie1994' - Use the `CIE 1994`_ formula with the "graphic arts" bias
for calculating the difference.
* 'cie2000' - Use the `CIE 2000`_ formula for calculating the
difference.
* 'cmc1984a' - Use the `CMC l:c`_ formula for calculating the
          difference with a 2:1 (acceptability) ratio.
* 'cmc1984i' - Use the `CMC l:c`_ formula for calculating the
difference with a 1:1 (imperceptibility) ratio.
Note that the Euclidian distance will be significantly different to
the other calculations; effectively this just measures the distance
between the two colors by treating them as coordinates in a three
dimensional Euclidian space. All other methods are means of calculating
a `Delta E`_ value in which 2.3 is considered a `just-noticeable
difference`_ (JND).
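        For example, comparing two named colors (a sketch; the resulting float
        depends on the chosen method):
        >>> delta_fast = Color('red').difference(Color('blue'), method='euclid')
        >>> delta_lab = Color('red').difference(Color('blue'))  # CIE 1976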
.. _Delta E: https://en.wikipedia.org/wiki/Color_difference
        .. _just-noticeable difference: https://en.wikipedia.org/wiki/Just-noticeable_difference
.. _Euclidian distance: https://en.wikipedia.org/wiki/Euclidean_distance
.. _CIE 1976: https://en.wikipedia.org/wiki/Color_difference#CIE76
.. _CIE 1994: https://en.wikipedia.org/wiki/Color_difference#CIE94
.. _CIE 2000: https://en.wikipedia.org/wiki/Color_difference#CIEDE2000
.. _CMC l:c: https://en.wikipedia.org/wiki/Color_difference#CMC_l:c_.281984.29
"""
if method == 'euclid':
return sqrt(sum((Cs - Co) ** 2 for Cs, Co in zip(self, other)))
elif method == 'cie1976':
return sqrt(sum((Cs - Co) ** 2 for Cs, Co in zip(self.cie_lab, other.cie_lab)))
elif method == 'cie1994':
raise NotImplementedError
elif method == 'cie2000':
raise NotImplementedError
elif method == 'cmc1984a':
raise NotImplementedError
elif method == 'cmc1984i':
raise NotImplementedError
else:
raise ValueError('invalid method: %s' % method)
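# A minimal usage sketch: it assumes the Color class above together with the
# Red, Lightness, and NAMED_COLORS definitions elsewhere in this module, and
# only demonstrates construction, channel arithmetic, and difference().
if __name__ == '__main__':
    base = Color('#ff8000')           # CSS-like string construction
    darker = base - Lightness(0.2)    # shift the HLS lightness down
    redder = base + Red(0.1)          # clamped addition on the red channel
    print('%s -> %s / %s' % (base, darker, redder))
    print('Delta E (CIE 1976) to wheat: %.3f' % base.difference(Color('wheat')))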
|
|
"""
=============================
Generic SpectralModel wrapper
=============================
.. moduleauthor:: Adam Ginsburg <[email protected]>
"""
import numpy as np
from pyspeckit.mpfit import mpfit,mpfitException
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import copy
from astropy import log
import matplotlib.cbook as mpcb
from . import fitter
from . import mpfit_messages
from pyspeckit.specwarnings import warn
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        warn( "OrderedDict is required for modeling. If you have python <2.7, install the ordereddict module." )
class SpectralModel(fitter.SimpleFitter):
"""
    A wrapper class for a spectral model. Includes internal functions to
generate multi-component models, annotations, integrals, and individual
components. The declaration can be complex, since you should name
individual variables, set limits on them, set the units the fit will be
performed in, and set the annotations to be used. Check out some
of the hyperfine codes (hcn, n2hp) for examples.
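    Examples
    --------
    A minimal single-Gaussian declaration (a sketch assuming only numpy; the
    parameter names and limits below are illustrative, not required values):
    >>> import numpy as np
    >>> def gaussian(x, amplitude, shift, width):
    ...     return amplitude * np.exp(-(x - shift)**2 / (2. * width**2))
    >>> gauss_fitter = SpectralModel(gaussian, 3,
    ...     parnames=['amplitude', 'shift', 'width'],
    ...     parlimited=[(False, False), (False, False), (True, False)],
    ...     parlimits=[(0, 0), (0, 0), (0, 0)])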
"""
def __init__(self, modelfunc, npars,
shortvarnames=("A","\\Delta x","\\sigma"),
fitunits=None,
centroid_par=None,
fwhm_func=None,
fwhm_pars=None,
integral_func=None,
use_lmfit=False, **kwargs):
"""
Spectral Model Initialization
Create a Spectral Model class for data fitting
Parameters
----------
modelfunc : function
the model function to be fitted. Should take an X-axis
(spectroscopic axis) as an input followed by input parameters.
Returns an array with the same shape as the input X-axis
npars : int
number of parameters required by the model
parnames : list (optional)
a list or tuple of the parameter names
parvalues : list (optional)
the initial guesses for the input parameters (defaults to ZEROS)
parlimits : list (optional)
the upper/lower limits for each variable (defaults to ZEROS)
parfixed : list (optional)
Can declare any variables to be fixed (defaults to ZEROS)
parerror : list (optional)
technically an output parameter... hmm (defaults to ZEROS)
partied : list (optional)
not the past tense of party. Can declare, via text, that
some parameters are tied to each other. Defaults to zeros like the
others, but it's not clear if that's a sensible default
fitunits : str (optional)
convert X-axis to these units before passing to model
parsteps : list (optional)
            minimum step size for each parameter (defaults to ZEROS)
        npeaks : int (optional)
default number of peaks to assume when fitting (can be overridden)
shortvarnames : list (optional)
TeX names of the variables to use when annotating
Returns
-------
A tuple containing (model best-fit parameters, the model, parameter
errors, chi^2 value)
"""
self.modelfunc = modelfunc
if self.__doc__ is None:
self.__doc__ = modelfunc.__doc__
elif modelfunc.__doc__ is not None:
self.__doc__ += modelfunc.__doc__
self.npars = npars
self.default_npars = npars
self.fitunits = fitunits
# this needs to be set once only
self.shortvarnames = shortvarnames
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
self.use_lmfit = use_lmfit
# default name of parameter that represents the profile centroid
self.centroid_par = centroid_par
# FWHM function and parameters
self.fwhm_func = fwhm_func
self.fwhm_pars = fwhm_pars
# analytic integral function
self.integral_func = integral_func
def __call__(self, *args, **kwargs):
        use_lmfit = kwargs.pop('use_lmfit', self.use_lmfit)
if use_lmfit:
return self.lmfitter(*args,**kwargs)
return self.fitter(*args,**kwargs)
def make_parinfo(self, **kwargs):
return self._make_parinfo(**kwargs)[0]
def _make_parinfo(self, params=None, parnames=None, parvalues=None,
parlimits=None, parlimited=None, parfixed=None,
parerror=None, partied=None, fitunits=None,
parsteps=None, npeaks=1, parinfo=None, names=None,
values=None, limits=None, limited=None, fixed=None,
error=None, tied=None, steps=None, negamp=None,
limitedmin=None, limitedmax=None, minpars=None,
maxpars=None, vheight=False, debug=False, **kwargs):
"""
Generate a `ParinfoList` that matches the inputs
This code is complicated - it can take inputs in a variety of different
forms with different priority. It will return a `ParinfoList` (and
therefore must have values within parameter ranges)
"""
# for backwards compatibility - partied = tied, etc.
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",","):
shortvarname = varname.replace("par","")
if locals()[shortvarname] is not None:
# HACK! locals() failed for unclear reasons...
exec("%s = %s" % (varname,shortvarname))
if params is not None and parvalues is not None:
raise ValueError("parvalues and params both specified; they're redundant so that's not allowed.")
elif params is not None and parvalues is None:
parvalues = params
if parnames is not None:
self.parnames = parnames
elif parnames is None and self.parnames is not None:
parnames = self.parnames
elif self.default_parinfo is not None and parnames is None:
parnames = [p['parname'] for p in self.default_parinfo]
if limitedmin is not None:
if limitedmax is not None:
parlimited = zip(limitedmin,limitedmax)
else:
parlimited = zip(limitedmin,(False,)*len(parnames))
elif limitedmax is not None:
parlimited = zip((False,)*len(parnames),limitedmax)
elif self.default_parinfo is not None and parlimited is None:
parlimited = [p['limited'] for p in self.default_parinfo]
if minpars is not None:
if maxpars is not None:
parlimits = zip(minpars,maxpars)
else:
parlimits = zip(minpars,(False,)*len(parnames))
elif maxpars is not None:
parlimits = zip((False,)*len(parnames),maxpars)
elif self.default_parinfo is not None and parlimits is None:
parlimits = [p['limits'] for p in self.default_parinfo]
self.npeaks = npeaks
# the height / parvalue popping needs to be done before the temp_pardict is set in order to make sure
# that the height guess isn't assigned to the amplitude
self.vheight = vheight
if (vheight and len(self.parinfo) == self.default_npars and
len(parvalues) == self.default_npars + 1):
# if the right number of parameters are passed, the first is the height
self.parinfo = [ {'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':"" } ]
elif vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars:
# if you're one par short, guess zero
self.parinfo = [ {'n':0, 'value': 0, 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':"" } ]
elif vheight and len(self.parinfo) == self.default_npars+1 and len(parvalues) == self.default_npars+1:
# the right numbers are passed *AND* there is already a height param
self.parinfo = [ {'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':"" } ]
#heightparnum = (i for i,s in self.parinfo if 'HEIGHT' in s['parname'])
#for hpn in heightparnum:
# self.parinfo[hpn]['value'] = parvalues[0]
elif vheight:
raise ValueError('VHEIGHT is specified but a case was found that did not allow it to be included.')
else:
self.parinfo = []
if debug: log.debug("After VHEIGHT parse len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight))
# this is a clever way to turn the parameter lists into a dict of lists
# clever = hard to read
temp_pardict = OrderedDict([(varname, np.zeros(self.npars*self.npeaks, dtype='bool'))
if locals()[varname] is None else (varname, list(locals()[varname]) )
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")])
temp_pardict['parlimits'] = parlimits if parlimits is not None else [(0,0)] * (self.npars*self.npeaks)
temp_pardict['parlimited'] = parlimited if parlimited is not None else [(False,False)] * (self.npars*self.npeaks)
for k,v in temp_pardict.iteritems():
if (self.npars*self.npeaks) / len(v) > 1:
temp_pardict[k] = list(v) * ((self.npars*self.npeaks) / len(v))
# generate the parinfo dict
# note that 'tied' must be a blank string (i.e. ""), not False, if it is not set
# parlimited, parfixed, and parlimits are all two-element items (tuples or lists)
self.parinfo += [ {'n':ii+self.npars*jj+vheight,
'value':float(temp_pardict['parvalues'][ii+self.npars*jj]),
'step':temp_pardict['parsteps'][ii+self.npars*jj],
'limits':temp_pardict['parlimits'][ii+self.npars*jj],
'limited':temp_pardict['parlimited'][ii+self.npars*jj],
'fixed':temp_pardict['parfixed'][ii+self.npars*jj],
'parname':temp_pardict['parnames'][ii].upper()+"%0i" % jj,
'error':float(temp_pardict['parerror'][ii+self.npars*jj]),
'tied':temp_pardict['partied'][ii+self.npars*jj] if temp_pardict['partied'][ii+self.npars*jj] else ""}
for jj in xrange(self.npeaks)
for ii in xrange(self.npars) ] # order matters!
if debug: log.debug("After Generation step len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight))
if debug > True: import pdb; pdb.set_trace()
# special keyword to specify emission/absorption lines
if negamp is not None:
if negamp:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (p['limited'][0], True)
p['limits'] = (p['limits'][0], 0)
else:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (True, p['limited'][1])
p['limits'] = (0, p['limits'][1])
# This is effectively an override of all that junk above (3/11/2012)
# Much of it is probably unnecessary, but it was easier to do this than
# rewrite the above
self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# New feature: scaleability
for par in self.parinfo:
if par.parname.lower().strip('0123456789') in ('amplitude','amp'):
par.scaleable = True
return self.parinfo, kwargs
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Simple wrapper to deal with N independent peaks for a given spectral model
"""
if pars is None:
pars = self.parinfo
elif not isinstance(pars, ParinfoList):
try:
partemp = copy.copy(self.parinfo)
partemp._from_Parameters(pars)
pars = partemp
except AttributeError:
if debug:
log.debug("Reading pars as LMPar failed.")
if debug > 1:
import pdb; pdb.set_trace()
pass
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = zip(*pars.items())
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
else:
parvals = list(pars)
if debug:
log.debug("pars to n_modelfunc: {0}, parvals:{1}".format(pars, parvals))
def L(x):
v = np.zeros(len(x))
if self.vheight:
v += parvals[0]
# use len(pars) instead of self.npeaks because we want this to work
# independent of the current best fit
for jj in xrange((len(parvals)-self.vheight)/self.npars):
lower_parind = jj*self.npars+self.vheight
upper_parind = (jj+1)*self.npars+self.vheight
v += self.modelfunc(x, *parvals[lower_parind:upper_parind], **kwargs)
return v
return L
def mpfitfun(self,x,y,err=None):
"""
Wrapper function to compute the fit residuals in an mpfit-friendly format
"""
if err is None:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))
return [0,residuals]
else:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))/err
return [0,residuals]
return f
def lmfitfun(self,x,y,err=None,debug=False):
"""
Wrapper function to compute the fit residuals in an lmfit-friendly format
"""
def f(p):
#pars = [par.value for par in p.values()]
kwargs = {}
kwargs.update(self.modelfunc_kwargs)
if debug: log.debug("Pars, kwarg keys: {0},{1}".format(p,kwargs.keys()))
if err is None:
return (y-self.n_modelfunc(p,**kwargs)(x))
else:
return (y-self.n_modelfunc(p,**kwargs)(x))/err
return f
def lmfitter(self, xax, data, err=None, parinfo=None, quiet=True, debug=False, **kwargs):
"""
Use lmfit instead of mpfit to do the fitting
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
If false, print out some messages about the fitting
"""
try:
import lmfit
except ImportError as e:
raise ImportError( "Could not import lmfit, try using mpfit instead." )
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'convert_to_unit') and self.fitunits is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
xax.convert_to_unit(self.fitunits, quiet=quiet)
elif self.fitunits is not None:
raise TypeError("X axis does not have a convert method")
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
err[np.isnan(data) + np.isinf(data)] = np.inf
data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
if debug:
log.debug(parinfo)
LMParams = parinfo.as_Parameters()
if debug:
log.debug("LMParams: "+"\n".join([repr(p) for p in LMParams.values()]))
log.debug("parinfo: {0}".format(parinfo))
minimizer = lmfit.minimize(self.lmfitfun(xax,np.array(data),err,debug=debug),LMParams,**kwargs)
if not quiet:
log.info("There were %i function evaluations" % (minimizer.nfev))
#modelpars = [p.value for p in parinfo.values()]
#modelerrs = [p.stderr for p in parinfo.values() if p.stderr is not None else 0]
self.LMParams = LMParams
self.parinfo._from_Parameters(LMParams)
if debug:
log.debug(LMParams)
log.debug(parinfo)
self.mp = minimizer
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
modelkwargs = {}
modelkwargs.update(self.modelfunc_kwargs)
self.model = self.n_modelfunc(self.parinfo, **modelkwargs)(xax)
if hasattr(minimizer,'chisqr'):
chi2 = minimizer.chisqr
else:
try:
chi2 = (((data-self.model)/err)**2).sum()
except TypeError:
chi2 = ((data-self.model)**2).sum()
if np.isnan(chi2):
warn( "Warning: chi^2 is nan" )
if hasattr(self.mp,'ier') and self.mp.ier not in [1,2,3,4]:
log.warning("Fitter failed: %s, %s" % (self.mp.message, self.mp.lmdif_message))
return self.mpp,self.model,self.mpperr,chi2
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, **kwargs):
"""
Run the fitter using mpfit.
kwargs will be passed to _make_parinfo and mpfit.
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
pass to mpfit. If False, will print out the parameter values for
each iteration of the fitter
veryverbose : bool
print out a variety of mpfit output parameters
debug : bool
raise an exception (rather than a warning) if chi^2 is nan
"""
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
else:
if debug: log.debug("Using user-specified parinfo dict")
# clean out disallowed kwargs (don't want to pass them to mpfit)
#throwaway, kwargs = self._make_parinfo(debug=debug, **kwargs)
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'as_unit') and self.fitunits is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
# xax.convert_to_unit(self.fitunits, quiet=quiet)
xax = xax.as_unit(self.fitunits, quiet=quiet, **kwargs)
elif self.fitunits is not None:
raise TypeError("X axis does not have a convert method")
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
err[np.isnan(data) + np.isinf(data)] = np.inf
data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
if debug:
for p in parinfo: log.debug( p )
log.debug( "\n".join(["%s %i: tied: %s value: %s" % (p['parname'],p['n'],p['tied'],p['value']) for p in parinfo]) )
mp = mpfit(self.mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,**kwargs)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
if "parameters are not within PARINFO limits" in mp.errmsg:
log.warn( parinfo )
raise mpfitException(mp.errmsg)
for i,(p,e) in enumerate(zip(mpp,mpperr)):
self.parinfo[i]['value'] = p
self.parinfo[i]['error'] = e
if veryverbose:
log.info("Fit status: {0}".format(mp.status))
log.info("Fit error message: {0}".format(mp.errmsg))
log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
for i,p in enumerate(mpp):
log.info("{0}: {1} +/- {2}".format(self.parinfo[i]['parname'],
p,mpperr[i]))
log.info("Chi2: {0} Reduced Chi2: {1} DOF:{2}".format(mp.fnorm,
mp.fnorm/(len(data)-len(mpp)),
len(data)-len(mpp)))
self.mp = mp
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_modelfunc(self.parinfo,**self.modelfunc_kwargs)(xax)
if debug:
log.debug("Modelpars: {0}".format(self.mpp))
if np.isnan(chi2):
if debug:
raise ValueError("Error: chi^2 is nan")
else:
log.warn("Warning: chi^2 is nan")
return mpp,self.model,mpperr,chi2
def slope(self, xinp):
"""
Find the local slope of the model at location x
(x must be in xax's units)
"""
if hasattr(self, 'model'):
dm = np.diff(self.model)
# convert requested x to pixels
xpix = self.xax.x_to_pix(xinp)
dmx = np.average(dm[xpix-1:xpix+1])
if np.isfinite(dmx):
return dmx
else:
return 0
def annotations(self, shortvarnames=None, debug=False):
"""
Return a list of TeX-formatted labels
The values and errors are formatted so that only the significant digits
are displayed. Rounding is performed using the decimal package.
Parameters
----------
shortvarnames : list
A list of variable names (tex is allowed) to include in the
annotations. Defaults to self.shortvarnames
Examples
--------
>>> # Annotate a Gaussian
>>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
"""
from decimal import Decimal # for formatting
svn = self.shortvarnames if shortvarnames is None else shortvarnames
# if pars need to be replicated....
if len(svn) < self.npeaks*self.npars:
svn = svn * self.npeaks
parvals = self.parinfo.values
parerrs = self.parinfo.errors
loop_list = [(parvals[ii+jj*self.npars+self.vheight],
parerrs[ii+jj*self.npars+self.vheight],
svn[ii+jj*self.npars],
self.parinfo.fixed[ii+jj*self.npars+self.vheight],
jj)
for jj in range(self.npeaks) for ii in range(self.npars)]
label_list = []
for (value, error, varname, fixed, varnumber) in loop_list:
            if debug: log.debug(" ".join(str(x) for x in (value, error, varname, fixed, varnumber)))
if fixed or error==0:
label = ("$%s(%i)$=%8s" % (varname,varnumber,
Decimal("%g" % value).quantize( Decimal("%0.6g" % (value)) )))
else:
label = ("$%s(%i)$=%8s $\\pm$ %8s" % (varname,varnumber,
Decimal("%g" % value).quantize( Decimal("%0.2g" % (min(np.abs([value,error])))) ),
Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),))
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
def components(self, xarr, pars, **kwargs):
"""
Return a numpy ndarray of shape [npeaks x modelshape] of the
independent components of the fits
"""
modelcomponents = np.array(
[self.modelfunc(xarr,
*pars[i*self.npars:(i+1)*self.npars],
**dict(self.modelfunc_kwargs.items()+kwargs.items()))
for i in range(self.npeaks)])
if len(modelcomponents.shape) == 3:
newshape = [modelcomponents.shape[0]*modelcomponents.shape[1], modelcomponents.shape[2]]
modelcomponents = np.reshape(modelcomponents, newshape)
return modelcomponents
def integral(self, modelpars, dx=None, **kwargs):
"""
Extremely simple integrator:
IGNORES modelpars;
just sums self.model
"""
if dx is not None:
return (self.model*dx).sum()
else:
return self.model.sum()
def analytic_integral(self, modelpars=None, npeaks=None, npars=None):
"""
        Placeholder for analytic integrals; these must be defined for individual models
"""
if self.integral_func is None:
raise NotImplementedError("Analytic integrals must be implemented independently for each model type")
# all of these parameters are allowed to be overwritten
if modelpars is None:
modelpars = self.parinfo.values
if npeaks is None:
npeaks = self.npeaks
if npars is None:
npars = self.npars
return np.sum([
self.integral_func(modelpars[npars*ii:npars*(1+ii)])
for ii in xrange(npeaks)])
def component_integrals(self, xarr, dx=None):
"""
Compute the integrals of each component
"""
components = self.components(xarr, self.parinfo.values)
if dx is None:
dx = 1
integrals = [com.sum()*dx for com in components]
return integrals
def analytic_fwhm(self, parinfo=None):
"""
Return the FWHMa of the model components *if* a fwhm_func has been
defined
Done with incomprehensible list comprehensions instead of nested for
loops... readability sacrificed for speed and simplicity. This is
unpythonic.
"""
if self.fwhm_func is None and self.fwhm_pars is None:
            raise TypeError("fwhm_func not implemented for model %s" % self.__class__.__name__)
if parinfo is None:
parinfo = self.parinfo
fwhm = [self.fwhm_func(
*[self.parinfo[str.upper(p+'%i' % n)] for p in self.fwhm_pars]
)
for n in xrange(self.npeaks)]
return fwhm
def analytic_centroids(self, centroidpar=None):
"""
Return the *analytic* centroids of the model components
Parameters
----------
centroidpar : None or string
The name of the parameter in the fit that represents the centroid
*some models have default centroid parameters - these will be used
if centroidpar is unspecified*
Returns
-------
List of the centroid values (even if there's only 1)
"""
if centroidpar is None:
centroidpar = self.centroid_par
centr = [par.value
for par in self.parinfo
if str.upper(centroidpar) in par.parname]
return centr
def computed_centroid(self, xarr=None):
"""
Return the *computed* centroid of the model
Parameters
----------
xarr : None or np.ndarray
The X coordinates of the model over which the centroid should be
computed. If unspecified, the centroid will be in pixel units
"""
if xarr is None:
xarr = np.arange(self.model.size)
centr = (self.model*xarr).sum() / self.model.sum()
return centr
def logp(self, xarr, data, error, pars=None):
"""
Return the log probability of the model. If the parameter is out of
range, return -inf
"""
if pars is None:
pars = self.parinfo
else:
parinfo = copy.copy(self.parinfo)
for value,parameter in zip(pars,parinfo):
try:
parameter.value = value
except ValueError:
return -np.inf
model = self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
difference = np.abs(data-model)
# prob = 1/(2*np.pi)**0.5/error * exp(-difference**2/(2.*error**2))
#logprob = np.log(1./(2.*np.pi)**0.5/error) * (-difference**2/(2.*error**2))
logprob = (-difference**2/(2.*error**2))
totallogprob = np.sum(logprob)
return totallogprob
def get_emcee_sampler(self, xarr, data, error, **kwargs):
"""
Get an emcee walker for the data & model
Parameters
----------
xarr : pyspeckit.units.SpectroscopicAxis
data : np.ndarray
error : np.ndarray
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
>>> p0 = sp.specfit.parinfo
>>> emcee_sampler.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)
return sampler
def get_emcee_ensemblesampler(self, xarr, data, error, nwalkers, **kwargs):
"""
Get an emcee walker ensemble for the data & model
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> nwalkers = sp.specfit.fitter.npars * 2
>>> emcee_ensemble = sp.specfit.fitter.get_emcee_ensemblesampler(sp.xarr, sp.data, sp.error, nwalkers)
>>> p0 = np.array([sp.specfit.parinfo.values] * nwalkers)
>>> p0 *= np.random.randn(*p0.shape) / 10. + 1.0
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
sampler = emcee.EnsembleSampler(nwalkers,
self.npars*self.npeaks+self.vheight,
probfunc, **kwargs)
return sampler
def get_pymc(self, xarr, data, error, use_fitted_values=False, inf=np.inf,
use_adaptive=False, return_dict=False, **kwargs):
"""
Create a pymc MCMC sampler. Defaults to 'uninformative' priors
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error)
>>> MCwithpriors = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error, use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
old_errsettings = np.geterr()
try:
import pymc
finally:
# pymc breaks error settings
np.seterr(**old_errsettings)
#def lowerlimit_like(x,lolim):
# "lower limit (log likelihood - set very positive for unacceptable values)"
# return (x>=lolim) / 1e10
#def upperlimit_like(x,uplim):
# "upper limit"
# return (x<=uplim) / 1e10
#LoLim = pymc.distributions.stochastic_from_dist('lolim', logp=lowerlimit_like, dtype=np.float, mv=False)
#UpLim = pymc.distributions.stochastic_from_dist('uplim', logp=upperlimit_like, dtype=np.float, mv=False)
funcdict = {}
# very, very worrisome: pymc changes the values of parinfo
parcopy = copy.deepcopy(self.parinfo)
for par in parcopy:
lolim = par.limits[0] if par.limited[0] else -inf
uplim = par.limits[1] if par.limited[1] else inf
if par.fixed:
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, par.value, par.value, value=par.value)
elif use_fitted_values:
if par.error > 0:
if any(par.limited):
try:
funcdict[par.parname] = pymc.distributions.TruncatedNormal(par.parname, par.value, 1./par.error**2, lolim, uplim)
except AttributeError:
# old versions used this?
funcdict[par.parname] = pymc.distributions.TruncNorm(par.parname, par.value, 1./par.error**2, lolim, uplim)
else:
funcdict[par.parname] = pymc.distributions.Normal(par.parname, par.value, 1./par.error**2)
else:
if any(par.limited):
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lolim, uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
elif any(par.limited):
lolim = par.limits[0] if par.limited[0] else -1e10
uplim = par.limits[1] if par.limited[1] else 1e10
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lower=lolim, upper=uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
d = dict(funcdict)
def modelfunc(xarr, pars=parcopy, **kwargs):
for k,v in kwargs.iteritems():
if k in pars.keys():
pars[k].value = v
return self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
funcdict['xarr'] = xarr
funcdet=pymc.Deterministic(name='f',eval=modelfunc,parents=funcdict,doc="The model function")
d['f'] = funcdet
datamodel = pymc.distributions.Normal('data',mu=funcdet,tau=1/np.asarray(error)**2,observed=True,value=np.asarray(data))
d['data']=datamodel
if return_dict:
return d
mc = pymc.MCMC(d)
if use_adaptive:
mc.use_step_method(pymc.AdaptiveMetropolis,[d[p] for p in self.parinfo.names])
return mc
class AstropyModel(SpectralModel):
def __init__(self, model, shortvarnames=None, **kwargs):
"""
Override the SpectralModel initialization
"""
        if self.__doc__ is not None: # how do you extend a docstring really?
self.__doc__ += SpectralModel.__doc__
if shortvarnames is None:
shortvarnames = model.param_names
super(AstropyModel,self).__init__(model, len(model.parameters),
shortvarnames=shortvarnames,
model=model,
**kwargs)
self.mp = None
self.vheight = False
self.npeaks = 1
def _make_parinfo(self, model=None):
self.parinfo = ParinfoList([
Parinfo(parname=name,value=value)
for name,value in zip(model.param_names,model.parameters)])
return self.parinfo, {}
def _parse_parinfo(self, parinfo):
"""
Parse a ParinfoList into astropy.models parameters
"""
if len(parinfo) > self.npars:
if len(parinfo) % self.npars != 0:
raise ValueError("Need to have an integer number of models")
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, params=None, npeaks=None, **kwargs):
import astropy.models as models
if npeaks is not None and npeaks > 1:
raise NotImplementedError("Astropy models cannot be used to fit multiple peaks yet")
if parinfo is not None:
self._parse_parinfo(parinfo)
if params is not None:
self.modelfunc.parameters = params
self.astropy_fitter = models.fitting.NonLinearLSQFitter(self.modelfunc)
if err is None:
self.astropy_fitter(xax, data, **kwargs)
else:
self.astropy_fitter(xax, data, weights=1./err**2, **kwargs)
mpp = self.astropy_fitter.fitpars
cov = self.astropy_fitter.covar
if cov is None:
mpperr = np.zeros(len(mpp))
else:
mpperr = cov.diagonal()
self.model = self.astropy_fitter.model(xax)
if err is None:
chi2 = ((data-self.model)**2).sum()
else:
chi2 = ((data-self.model)**2/err**2).sum()
        # update object parameters
self.modelfunc.parameters = mpp
self._make_parinfo(self.modelfunc)
return mpp,self.model,mpperr,chi2
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Only deals with single-peak functions
"""
try:
self._parse_parinfo(pars)
except AttributeError:
self.modelfunc.parameters = pars
return self.modelfunc
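if __name__ == '__main__':
    # A minimal end-to-end sketch: declare a single-Gaussian SpectralModel and
    # fit synthetic data with the mpfit-based fitter. The parameter names,
    # limits, and guesses below are illustrative only.
    def gaussian(x, amplitude, shift, width):
        # simple Gaussian profile evaluated on a unitless x axis
        return amplitude * np.exp(-(x - shift)**2 / (2. * width**2))
    demo_fitter = SpectralModel(gaussian, 3,
                                parnames=['amplitude', 'shift', 'width'],
                                parlimited=[(False, False), (False, False), (True, False)],
                                parlimits=[(0, 0), (0, 0), (0, 0)])
    demo_x = np.linspace(-5, 5, 200)
    demo_data = gaussian(demo_x, 1.0, 0.3, 0.8) + np.random.randn(200) * 0.05
    demo_pars, demo_model, demo_errs, demo_chi2 = demo_fitter.fitter(
        demo_x, demo_data, err=np.ones(200) * 0.05, params=[0.8, 0.0, 1.0])
    log.info("Recovered parameters: {0}".format(demo_pars))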
|
|
"""A script testing the extraction pipeline of RHEA
Steps
1) Initialise Format, Extractor and RadialVelocity
2) Define file paths for science, flat and dark frames
3) Extract/import spectra
4) Create/import reference spectra
5) Calculate radial velocities
6) Plot radial velocities
"""
import numpy as np
try:
import pyfits
except ImportError:
import astropy.io.fits as pyfits
import pymfe
import glob
from astropy.time import Time
import astropy.coordinates as coordinates
from astropy.coordinates import SkyCoord
from astropy import units as u
import PyAstronomy.pyasl as pyasl
import pdb
#===============================================================================
# Parameters/Constants/Variables/Initialisation
#===============================================================================
# Constants/Variables
do_bcor = False
med_cut = 0.6
plot_title = "gammaCrucis"
coord = SkyCoord('01 44 04.08338 -15 56 14.9262',unit=(u.hourangle, u.deg))
# Initialise objects
rhea2_format = pymfe.rhea.Format()
#rhea2_format.fib_image_width_in_pix = 7.0 #Attempted over-write as a test
rhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=False)
xx, wave, blaze = rhea2_format.spectral_format()
rv = pymfe.rv.RadialVelocity()
#Q-factor test showed that with 10 orders, we should be getting 4m/s rms per frame
# 3e8/5e3/np.sqrt(4e4*0.3*2000*10)
# c/(Q*sqrt(Ncounts*ccdgain*npix_per_order*norders))
#dd = pyfits.getdata(files[0])
#plt.imshow(dd.T, aspect='auto',cmap=cm.gray,interpolation='nearest')
#plt.plot(xx.T + dd.shape[1]/2)
#===============================================================================
# File paths (Observations, Flats and Darks, save/load directories)
#===============================================================================
# Science Frames
#star = "gammaCrucis"
#star = "tauCeti"
star = "thar"
#star = "sun"
base_path = "/priv/mulga1/jbento/rhea2_data/20160221_sun/"
files = glob.glob(base_path + "*" + star + "*[0123456789].fit*") #FIT for non-Th/Ar
# Flats and Darks
#star_dark = pyfits.getdata(base_path + "Dark frames\\Masterdark_target.fit")
star_dark = pyfits.getdata(base_path + "20151130_Masterdark_thar.fit")
#flat_dark = pyfits.getdata(base_path + "Dark frames\\Masterdark_flat.fit")
flat_files = [base_path + "20151130_Masterflat_calibrated.fit"]*len(files)
files.sort()
# Remove bad section... only for sun #!!!MJI: Dodgy. Why remove for raw *and* extracted files?
#files.pop(912)
#files.pop(912)
#files.pop(912)
print(len(files))
# Set to empty (length-0) arrays when extracting ThAr
#star_dark = np.empty(0)
flat_dark = np.empty(0)
flat_files = np.empty(0)
# Extracted spectra output
out_path = "/priv/mulga1/mireland/rhea/Solar_Extracted/"
extracted_files = glob.glob(out_path + "*" + star + "*[0123456789]_extracted.fits")
extracted_files.sort()
#extracted_files.pop(912)
#extracted_files.pop(912)
#extracted_files.pop(912)
print(len(extracted_files))
# Saved reference spectrum
ref_path = out_path + "reference_spectrum_74gammaCrucis.fits"
ref_path = out_path + "reference_spectrum_17_thar.fits"
# RV csv output
base_rv_path = out_path + star
#===============================================================================
# Extract and save spectra
#===============================================================================
# Extract spectra
#fluxes, vars, bcors, mjds = rv.extract_spectra(files, rhea2_extract,
# star_dark=star_dark,
# flat_files=flat_files,
# flat_dark=flat_dark,
# coord=coord, do_bcor=do_bcor)
# Save spectra (Make sure to save "wave" generated from rhea2_format)
#rv.save_fluxes(files, fluxes, vars, bcors, wave, mjds, out_path)
#===============================================================================
# Create and save/import reference spectrum
#===============================================================================
# OPTION 1: Create and save a new reference spectrum
# Load the first 10 observations to use as a reference
#!!!MJI Dodgy thing 1: to hack the extracted_files used, there are TWO
# locations below to hack it.
#fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files)
#wave_ref, ref_spect = rv.create_ref_spect(wave, fluxes, vars, bcors,
# med_cut=med_cut,gauss_sdev=1.0)
#rv.save_ref_spect(extracted_files, ref_spect, vars, wave_ref, bcors, mjds,
# out_path, star)
# OPTION 2: Import a pre-existing reference spectrum
ref_spect, vars_ref, wave_ref, bcors_ref, mjds_ref = rv.load_ref_spect(ref_path)
#===============================================================================
# Barycentrically correct based on the sun's location from moment to moment
#===============================================================================
# This loop is messy and there is probably a nicer way to do this...but it works
# The Linux servers are not happy with opening much more than 100 files,
# crashing and displaying a 'too many open files' warning. This is despite each .fits
# file being closed when the data have been loaded from it. A similar issue does
# not occur when initially extracting the files (975 were extracted in one go
# with no issues).
# Parameters to process files in batches of "increment"
num_files = len(extracted_files)
num_rvs_extracted = 0
increment = 100
low = 0
high = increment
all_rvs_calculated = False
# Will be concatenated at end to give final arrays
rv_list = []
rv_sig_list = []
bcors_list = []
mjds_list = []
# Obviously cannot open more files than exist
if high > num_files:
high = num_files
while not all_rvs_calculated:
num_rvs_extracted += high - low
# Load in a segment of files
fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files[low:high])
# bcors = []
# Calculate the barycentric correction for each observation, based on the
# instantaneous position of the sun
# for t in mjds:
# time = Time(t, format="mjd")
# coord = SkyCoord(coordinates.get_sun(time))
# location = location=('151.2094','-33.865',100.0)
#
# bcors.append(1e3*pyasl.helcorr(float(location[0]), float(location[1]),
# location[2], coord.ra.deg, coord.dec.deg, time.jd)[0] )
nf = fluxes.shape[0]
nm = fluxes.shape[1]
ny = fluxes.shape[2]
# Calculate the RVs
rvs, rv_sigs, fitted_spects = rv.calculate_rv_shift(wave_ref, ref_spect, fluxes, vars,
bcors, wave, return_fitted_spects=True, bad_threshold=20)
rv_list.append(rvs)
rv_sig_list.append(rv_sigs)
bcors_list.append(bcors)
mjds_list.append(mjds)
# Move to next segment
low += increment
high += increment
if high > num_files:
high = num_files
if num_rvs_extracted == num_files:
all_rvs_calculated = True
# Done, join together and save
all_rvs = np.concatenate(rv_list)
all_rv_sigs = np.concatenate(rv_sig_list)
all_bcors = np.concatenate(bcors_list)
all_mjds = np.concatenate(mjds_list)
#===============================================================================
# Save the extracted radial velocities
#===============================================================================
# Save RVs
bcor_rvs = all_rvs - all_bcors.repeat(nm).reshape( (num_files,nm) )
rv.save_rvs(all_rvs, all_rv_sigs, all_bcors, all_mjds, bcor_rvs, base_rv_path)
#Some plotting code...
#plt.plot(wave[13], fluxes[0][13])
#plt.plot(wave[13], fitted_spects[0][13])
#plt.xlabel('Wavelength')
#plt.ylabel('Flux')
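#===============================================================================
# Plot radial velocities
#===============================================================================
# A minimal plotting sketch for the final step: it assumes matplotlib is
# available and that all_rvs/all_rv_sigs are (num_files, norders) arrays as
# produced above; the order index chosen here is purely illustrative.
import matplotlib.pyplot as plt
order_to_plot = 0
plt.errorbar(all_mjds, bcor_rvs[:, order_to_plot],
             yerr=all_rv_sigs[:, order_to_plot], fmt='.')
plt.xlabel('MJD')
plt.ylabel('Barycentric-corrected RV')
plt.title(plot_title)
plt.show()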
|
|
import hashlib
import warnings
import logging
import io
import unittest
import ssl
import socket
from itertools import chain
from mock import patch, Mock
from urllib3 import add_stderr_logger, disable_warnings
from urllib3.util.request import make_headers, rewind_body, _FAILEDTELL
from urllib3.util.retry import Retry
from urllib3.util.timeout import Timeout
from urllib3.util.url import (
get_host,
parse_url,
split_first,
Url,
)
from urllib3.util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
_const_compare_digest_backport,
)
from urllib3.exceptions import (
LocationParseError,
TimeoutStateError,
InsecureRequestWarning,
SNIMissingWarning,
InvalidHeader,
UnrewindableBodyError,
)
from urllib3.util.connection import (
allowed_gai_family,
_has_ipv6
)
from urllib3.util import is_fp_closed, ssl_
from urllib3.packages import six
from . import clear_warnings
# This number represents a time in seconds, it doesn't mean anything in
# isolation. Setting to a high-ish value to avoid conflicts with the smaller
# numbers used for timeouts
TIMEOUT_EPOCH = 1000
class TestUtil(unittest.TestCase):
def test_get_host(self):
url_host_map = {
# Hosts
'http://google.com/mail': ('http', 'google.com', None),
'http://google.com/mail/': ('http', 'google.com', None),
'google.com/mail': ('http', 'google.com', None),
'http://google.com/': ('http', 'google.com', None),
'http://google.com': ('http', 'google.com', None),
'http://www.google.com': ('http', 'www.google.com', None),
'http://mail.google.com': ('http', 'mail.google.com', None),
'http://google.com:8000/mail/': ('http', 'google.com', 8000),
'http://google.com:8000': ('http', 'google.com', 8000),
'https://google.com': ('https', 'google.com', None),
'https://google.com:8000': ('https', 'google.com', 8000),
'http://user:[email protected]:1234': ('http', '127.0.0.1', 1234),
'http://google.com/foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com?foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com#foo=http://bar:42/baz': ('http', 'google.com', None),
# IPv4
'173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7/test': ('http', '173.194.35.7', None),
'http://173.194.35.7:80': ('http', '173.194.35.7', 80),
'http://173.194.35.7:80/test': ('http', '173.194.35.7', 80),
# IPv6
'[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]/test': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]:80': ('http', '[2a00:1450:4001:c01::67]', 80),
'http://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80),
# More IPv6 from http://www.ietf.org/rfc/rfc2732.txt
'http://[fedc:ba98:7654:3210:fedc:ba98:7654:3210]:8000/index.html': (
'http', '[fedc:ba98:7654:3210:fedc:ba98:7654:3210]', 8000),
'http://[1080:0:0:0:8:800:200c:417a]/index.html': (
'http', '[1080:0:0:0:8:800:200c:417a]', None),
'http://[3ffe:2a00:100:7031::1]': ('http', '[3ffe:2a00:100:7031::1]', None),
'http://[1080::8:800:200c:417a]/foo': ('http', '[1080::8:800:200c:417a]', None),
'http://[::192.9.5.5]/ipng': ('http', '[::192.9.5.5]', None),
'http://[::ffff:129.144.52.38]:42/index.html': ('http', '[::ffff:129.144.52.38]', 42),
'http://[2010:836b:4179::836b:4179]': ('http', '[2010:836b:4179::836b:4179]', None),
}
for url, expected_host in url_host_map.items():
returned_host = get_host(url)
self.assertEqual(returned_host, expected_host)
def test_invalid_host(self):
# TODO: Add more tests
invalid_host = [
'http://google.com:foo',
'http://::1/',
'http://::1:80/',
'http://google.com:-80',
six.u('http://google.com:\xb2\xb2'), # \xb2 = ^2
]
for location in invalid_host:
self.assertRaises(LocationParseError, get_host, location)
def test_host_normalization(self):
"""
Asserts the scheme and hosts with a normalizable scheme are
converted to lower-case.
"""
url_host_map = {
# Hosts
'HTTP://GOOGLE.COM/mail/': ('http', 'google.com', None),
'GOogle.COM/mail': ('http', 'google.com', None),
'HTTP://GoOgLe.CoM:8000/mail/': ('http', 'google.com', 8000),
'HTTP://user:[email protected]:1234': ('http', 'example.com', 1234),
'173.194.35.7': ('http', '173.194.35.7', None),
'HTTP://173.194.35.7': ('http', '173.194.35.7', None),
'HTTP://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80),
'HTTP://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/index.html': (
'http', '[fedc:ba98:7654:3210:fedc:ba98:7654:3210]', 8000),
'HTTPS://[1080:0:0:0:8:800:200c:417A]/index.html': (
'https', '[1080:0:0:0:8:800:200c:417a]', None),
'abOut://eXamPlE.com?info=1': ('about', 'eXamPlE.com', None),
'http+UNIX://%2fvar%2frun%2fSOCKET/path': (
'http+unix', '%2fvar%2frun%2fSOCKET', None),
}
for url, expected_host in url_host_map.items():
returned_host = get_host(url)
self.assertEqual(returned_host, expected_host)
def test_parse_url_normalization(self):
"""Assert parse_url normalizes the scheme/host, and only the scheme/host"""
test_urls = [
('HTTP://GOOGLE.COM/MAIL/', 'http://google.com/MAIL/'),
('HTTP://JeremyCline:[email protected]:8080/',
'http://JeremyCline:[email protected]:8080/'),
('HTTPS://Example.Com/?Key=Value', 'https://example.com/?Key=Value'),
('Https://Example.Com/#Fragment', 'https://example.com/#Fragment'),
]
for url, expected_normalized_url in test_urls:
actual_normalized_url = parse_url(url).url
self.assertEqual(actual_normalized_url, expected_normalized_url)
parse_url_host_map = [
('http://google.com/mail', Url('http', host='google.com', path='/mail')),
('http://google.com/mail/', Url('http', host='google.com', path='/mail/')),
('http://google.com/mail', Url('http', host='google.com', path='mail')),
('google.com/mail', Url(host='google.com', path='/mail')),
('http://google.com/', Url('http', host='google.com', path='/')),
('http://google.com', Url('http', host='google.com')),
('http://google.com?foo', Url('http', host='google.com', path='', query='foo')),
# Path/query/fragment
('', Url()),
('/', Url(path='/')),
('#?/!google.com/?foo#bar', Url(path='', fragment='?/!google.com/?foo#bar')),
('/foo', Url(path='/foo')),
('/foo?bar=baz', Url(path='/foo', query='bar=baz')),
('/foo?bar=baz#banana?apple/orange', Url(path='/foo',
query='bar=baz',
fragment='banana?apple/orange')),
# Port
('http://google.com/', Url('http', host='google.com', path='/')),
('http://google.com:80/', Url('http', host='google.com', port=80, path='/')),
('http://google.com:80', Url('http', host='google.com', port=80)),
# Auth
('http://foo:bar@localhost/', Url('http', auth='foo:bar', host='localhost', path='/')),
('http://foo@localhost/', Url('http', auth='foo', host='localhost', path='/')),
('http://foo:bar@baz@localhost/', Url('http',
auth='foo:bar@baz',
host='localhost',
path='/')),
('http://@', Url('http', host=None, auth=''))
]
non_round_tripping_parse_url_host_map = {
# Path/query/fragment
'?': Url(path='', query=''),
'#': Url(path='', fragment=''),
# Empty Port
'http://google.com:': Url('http', host='google.com'),
'http://google.com:/': Url('http', host='google.com', path='/'),
}
def test_parse_url(self):
for url, expected_Url in chain(self.parse_url_host_map,
self.non_round_tripping_parse_url_host_map.items()):
returned_Url = parse_url(url)
self.assertEqual(returned_Url, expected_Url)
def test_unparse_url(self):
for url, expected_Url in self.parse_url_host_map:
self.assertEqual(url, expected_Url.url)
def test_parse_url_invalid_IPv6(self):
self.assertRaises(ValueError, parse_url, '[::1')
def test_Url_str(self):
U = Url('http', host='google.com')
self.assertEqual(str(U), U.url)
def test_request_uri(self):
url_host_map = {
'http://google.com/mail': '/mail',
'http://google.com/mail/': '/mail/',
'http://google.com/': '/',
'http://google.com': '/',
'': '/',
'/': '/',
'?': '/?',
'#': '/',
'/foo?bar=baz': '/foo?bar=baz',
}
for url, expected_request_uri in url_host_map.items():
returned_url = parse_url(url)
self.assertEqual(returned_url.request_uri, expected_request_uri)
def test_netloc(self):
url_netloc_map = {
'http://google.com/mail': 'google.com',
'http://google.com:80/mail': 'google.com:80',
'google.com/foobar': 'google.com',
'google.com:12345': 'google.com:12345',
}
for url, expected_netloc in url_netloc_map.items():
self.assertEqual(parse_url(url).netloc, expected_netloc)
def test_make_headers(self):
self.assertEqual(
make_headers(accept_encoding=True),
{'accept-encoding': 'gzip,deflate'})
self.assertEqual(
make_headers(accept_encoding='foo,bar'),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=['foo', 'bar']),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=True, user_agent='banana'),
{'accept-encoding': 'gzip,deflate', 'user-agent': 'banana'})
self.assertEqual(
make_headers(user_agent='banana'),
{'user-agent': 'banana'})
self.assertEqual(
make_headers(keep_alive=True),
{'connection': 'keep-alive'})
self.assertEqual(
make_headers(basic_auth='foo:bar'),
{'authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(proxy_basic_auth='foo:bar'),
{'proxy-authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(disable_cache=True),
{'cache-control': 'no-cache'})
def test_rewind_body(self):
body = io.BytesIO(b'test data')
self.assertEqual(body.read(), b'test data')
# Assert the file object has been consumed
self.assertEqual(body.read(), b'')
# Rewind it back to just be b'data'
rewind_body(body, 5)
self.assertEqual(body.read(), b'data')
def test_rewind_body_failed_tell(self):
body = io.BytesIO(b'test data')
body.read() # Consume body
# Simulate failed tell()
body_pos = _FAILEDTELL
self.assertRaises(UnrewindableBodyError, rewind_body, body, body_pos)
def test_rewind_body_bad_position(self):
body = io.BytesIO(b'test data')
body.read() # Consume body
# Pass non-integer position
self.assertRaises(ValueError, rewind_body, body, None)
self.assertRaises(ValueError, rewind_body, body, object())
def test_rewind_body_failed_seek(self):
class BadSeek():
def seek(self, pos, offset=0):
raise IOError
self.assertRaises(UnrewindableBodyError, rewind_body, BadSeek(), 2)
def test_split_first(self):
test_cases = {
('abcd', 'b'): ('a', 'cd', 'b'),
('abcd', 'cb'): ('a', 'cd', 'b'),
('abcd', ''): ('abcd', '', None),
('abcd', 'a'): ('', 'bcd', 'a'),
('abcd', 'ab'): ('', 'bcd', 'a'),
}
for input, expected in test_cases.items():
output = split_first(*input)
self.assertEqual(output, expected)
def test_add_stderr_logger(self):
handler = add_stderr_logger(level=logging.INFO) # Don't actually print debug
logger = logging.getLogger('urllib3')
self.assertTrue(handler in logger.handlers)
logger.debug('Testing add_stderr_logger')
logger.removeHandler(handler)
def test_disable_warnings(self):
with warnings.catch_warnings(record=True) as w:
clear_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
disable_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
def _make_time_pass(self, seconds, timeout, time_mock):
""" Make some time pass for the timeout object """
time_mock.return_value = TIMEOUT_EPOCH
timeout.start_connect()
time_mock.return_value = TIMEOUT_EPOCH + seconds
return timeout
def test_invalid_timeouts(self):
try:
Timeout(total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(connect=2, total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(read=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(connect=False)
self.fail("boolean values should throw exception")
except ValueError as e:
self.assertTrue('cannot be a boolean' in str(e))
try:
Timeout(read=True)
self.fail("boolean values should throw exception")
except ValueError as e:
self.assertTrue('cannot be a boolean' in str(e))
try:
Timeout(connect=0)
self.fail("value <= 0 should throw exception")
except ValueError as e:
self.assertTrue('less than or equal' in str(e))
try:
Timeout(read="foo")
self.fail("string value should not be allowed")
except ValueError as e:
self.assertTrue('int, float or None' in str(e))
@patch('urllib3.util.timeout.current_time')
def test_timeout(self, current_time):
timeout = Timeout(total=3)
# make 'no time' elapse
timeout = self._make_time_pass(seconds=0, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 3)
self.assertEqual(timeout.connect_timeout, 3)
timeout = Timeout(total=3, connect=2)
self.assertEqual(timeout.connect_timeout, 2)
timeout = Timeout()
self.assertEqual(timeout.connect_timeout, Timeout.DEFAULT_TIMEOUT)
# Connect takes 5 seconds, leaving 5 seconds for read
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=5, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 5)
# Connect takes 2 seconds, read timeout still 7 seconds
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=2, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=10, read=7)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=None, read=None, connect=None)
self.assertEqual(timeout.connect_timeout, None)
self.assertEqual(timeout.read_timeout, None)
self.assertEqual(timeout.total, None)
timeout = Timeout(5)
self.assertEqual(timeout.total, 5)
def test_timeout_str(self):
timeout = Timeout(connect=1, read=2, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=2, total=3)")
timeout = Timeout(connect=1, read=None, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=None, total=3)")
@patch('urllib3.util.timeout.current_time')
def test_timeout_elapsed(self, current_time):
current_time.return_value = TIMEOUT_EPOCH
timeout = Timeout(total=3)
self.assertRaises(TimeoutStateError, timeout.get_connect_duration)
timeout.start_connect()
self.assertRaises(TimeoutStateError, timeout.start_connect)
current_time.return_value = TIMEOUT_EPOCH + 2
self.assertEqual(timeout.get_connect_duration(), 2)
current_time.return_value = TIMEOUT_EPOCH + 37
self.assertEqual(timeout.get_connect_duration(), 37)
def test_resolve_cert_reqs(self):
self.assertEqual(resolve_cert_reqs(None), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_NONE), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_REQUIRED), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('REQUIRED'), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('CERT_REQUIRED'), ssl.CERT_REQUIRED)
def test_resolve_ssl_version(self):
self.assertEqual(resolve_ssl_version(ssl.PROTOCOL_TLSv1), ssl.PROTOCOL_TLSv1)
self.assertEqual(resolve_ssl_version("PROTOCOL_TLSv1"), ssl.PROTOCOL_TLSv1)
self.assertEqual(resolve_ssl_version("TLSv1"), ssl.PROTOCOL_TLSv1)
self.assertEqual(resolve_ssl_version(ssl.PROTOCOL_SSLv23), ssl.PROTOCOL_SSLv23)
def test_is_fp_closed_object_supports_closed(self):
class ClosedFile(object):
@property
def closed(self):
return True
self.assertTrue(is_fp_closed(ClosedFile()))
def test_is_fp_closed_object_has_none_fp(self):
class NoneFpFile(object):
@property
def fp(self):
return None
self.assertTrue(is_fp_closed(NoneFpFile()))
def test_is_fp_closed_object_has_fp(self):
class FpFile(object):
@property
def fp(self):
return True
self.assertTrue(not is_fp_closed(FpFile()))
def test_is_fp_closed_object_has_neither_fp_nor_closed(self):
class NotReallyAFile(object):
pass
self.assertRaises(ValueError, is_fp_closed, NotReallyAFile())
def test_ssl_wrap_socket_loads_the_cert_chain(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, sock=socket,
certfile='/path/to/certfile')
mock_context.load_cert_chain.assert_called_once_with(
'/path/to/certfile', None)
@patch('urllib3.util.ssl_.create_urllib3_context')
def test_ssl_wrap_socket_creates_new_context(self,
create_urllib3_context):
socket = object()
ssl_wrap_socket(sock=socket, cert_reqs='CERT_REQUIRED')
create_urllib3_context.assert_called_once_with(
None, 'CERT_REQUIRED', ciphers=None
)
def test_ssl_wrap_socket_loads_verify_locations(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_certs='/path/to/pem',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
'/path/to/pem', None)
def test_ssl_wrap_socket_loads_certificate_directories(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_cert_dir='/path/to/pems',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
None, '/path/to/pems')
def test_ssl_wrap_socket_with_no_sni(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
def test_ssl_wrap_socket_with_no_sni_warns(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
with patch('warnings.warn') as warn:
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
self.assertTrue(warn.call_count >= 1)
warnings = [call[0][1] for call in warn.call_args_list]
self.assertTrue(SNIMissingWarning in warnings)
def test_const_compare_digest_fallback(self):
target = hashlib.sha256(b'abcdef').digest()
self.assertTrue(_const_compare_digest_backport(target, target))
prefix = target[:-1]
self.assertFalse(_const_compare_digest_backport(target, prefix))
suffix = target + b'0'
self.assertFalse(_const_compare_digest_backport(target, suffix))
incorrect = hashlib.sha256(b'xyz').digest()
self.assertFalse(_const_compare_digest_backport(target, incorrect))
def test_has_ipv6_disabled_on_compile(self):
with patch('socket.has_ipv6', False):
self.assertFalse(_has_ipv6('::1'))
def test_has_ipv6_enabled_but_fails(self):
with patch('socket.has_ipv6', True):
with patch('socket.socket') as mock:
instance = mock.return_value
instance.bind = Mock(side_effect=Exception('No IPv6 here!'))
self.assertFalse(_has_ipv6('::1'))
def test_has_ipv6_enabled_and_working(self):
with patch('socket.has_ipv6', True):
with patch('socket.socket') as mock:
instance = mock.return_value
instance.bind.return_value = True
self.assertTrue(_has_ipv6('::1'))
def test_ip_family_ipv6_enabled(self):
with patch('urllib3.util.connection.HAS_IPV6', True):
self.assertEqual(allowed_gai_family(), socket.AF_UNSPEC)
def test_ip_family_ipv6_disabled(self):
with patch('urllib3.util.connection.HAS_IPV6', False):
self.assertEqual(allowed_gai_family(), socket.AF_INET)
def test_parse_retry_after(self):
invalid = [
"-1",
"+1",
"1.0",
six.u("\xb2"), # \xb2 = ^2
]
retry = Retry()
for value in invalid:
self.assertRaises(InvalidHeader, retry.parse_retry_after, value)
self.assertEqual(retry.parse_retry_after("0"), 0)
self.assertEqual(retry.parse_retry_after("1000"), 1000)
self.assertEqual(retry.parse_retry_after("\t42 "), 42)
|
|
# -*- coding: utf-8 -*-
"""
Survey module with XForm Survey objects and utility functions.
"""
import codecs
import os
import re
import tempfile
import xml.etree.ElementTree as ETree
from collections import defaultdict
from datetime import datetime
from functools import lru_cache
from pyxform import constants
from pyxform.errors import PyXFormError, ValidationError
from pyxform.external_instance import ExternalInstance
from pyxform.instance import SurveyInstance
from pyxform.instance_info import InstanceInfo
from pyxform.question import Question
from pyxform.section import Section
from pyxform.survey_element import SurveyElement
from pyxform.utils import (
BRACKETED_TAG_REGEX,
LAST_SAVED_INSTANCE_NAME,
LAST_SAVED_REGEX,
NSMAP,
PatchedText,
get_languages_with_bad_tags,
has_dynamic_label,
node,
)
from pyxform.validators import enketo_validate, odk_validate
RE_PULLDATA = re.compile(r"(pulldata\s*\(\s*)(.*?),")
def register_nsmap():
"""Function to register NSMAP namespaces with ETree"""
for prefix, uri in NSMAP.items():
prefix_no_xmlns = prefix.replace("xmlns", "").replace(":", "")
ETree.register_namespace(prefix_no_xmlns, uri)
register_nsmap()
@lru_cache(maxsize=None)
def is_parent_a_repeat(survey, xpath):
"""
Returns the XPATH of the first repeat of the given xpath in the survey,
otherwise False will be returned.
"""
parent_xpath = "/".join(xpath.split("/")[:-1])
if not parent_xpath:
return False
if survey.any_repeat(parent_xpath):
return parent_xpath
return is_parent_a_repeat(survey, parent_xpath)
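# Illustrative sketch of the lookup above (hypothetical form structure, not from
# a real survey): if /data/repeat_a is a repeat and group_a is a plain group,
#   is_parent_a_repeat(survey, "/data/repeat_a/group_a/name") -> "/data/repeat_a"
#   is_parent_a_repeat(survey, "/data/name") -> False
# The recursion walks up one path segment at a time until survey.any_repeat()
# matches an ancestor, or the path runs out.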
@lru_cache(maxsize=None)
def share_same_repeat_parent(survey, xpath, context_xpath, reference_parent=False):
"""
Returns a tuple of the number of steps from the context xpath to the shared
repeat parent and the xpath to the target xpath from the shared repeat
parent.
For example,
xpath = /data/repeat_a/group_a/name
context_xpath = /data/repeat_a/group_b/age
returns (2, '/group_a/name')
"""
def _get_steps_and_target_xpath(context_parent, xpath_parent, include_parent=False):
parts = []
steps = 1
if not include_parent:
remainder_xpath = xpath[len(xpath_parent) :]
context_parts = context_xpath[len(xpath_parent) + 1 :].split("/")
xpath_parts = xpath[len(xpath_parent) + 1 :].split("/")
else:
split_idx = len(xpath_parent.split("/"))
context_parts = context_xpath.split("/")[split_idx - 1 :]
xpath_parts = xpath.split("/")[split_idx - 1 :]
remainder_xpath = "/".join(xpath_parts)
for index, item in enumerate(context_parts[:-1]):
try:
if xpath[len(context_parent) + 1 :].split("/")[index] != item:
steps = len(context_parts[index:])
parts = xpath_parts[index:]
break
else:
parts = remainder_xpath.split("/")[index + 2 :]
except IndexError:
steps = len(context_parts[index - 1 :])
parts = xpath_parts[index - 1 :]
break
return (steps, "/" + "/".join(parts) if parts else remainder_xpath)
context_parent = is_parent_a_repeat(survey, context_xpath)
xpath_parent = is_parent_a_repeat(survey, xpath)
if context_parent and xpath_parent and xpath_parent in context_parent:
if (not context_parent == xpath_parent and reference_parent) or bool(
is_parent_a_repeat(survey, context_parent)
):
context_shared_ancestor = is_parent_a_repeat(survey, context_parent)
if context_shared_ancestor == xpath_parent:
# Check if context_parent is a child repeat of the xpath_parent
# If the context_parent is a child of the xpath_parent reference the entire
# xpath_parent in the generated nodeset
context_parent = context_shared_ancestor
elif context_parent == xpath_parent and context_shared_ancestor:
# If the context_parent is a child of another
# repeat and is equal to the xpath_parent
# we avoid referencing the context_parent and instead reference the shared
# ancestor
reference_parent = False
return _get_steps_and_target_xpath(context_parent, xpath_parent, reference_parent)
elif context_parent and xpath_parent:
# Check if context_parent and xpath_parent share a common
# repeat ancestor
context_shared_ancestor = is_parent_a_repeat(survey, context_parent)
xpath_shared_ancestor = is_parent_a_repeat(survey, xpath_parent)
if (
xpath_shared_ancestor
and context_shared_ancestor
and xpath_shared_ancestor == context_shared_ancestor
):
return _get_steps_and_target_xpath(
context_shared_ancestor, xpath_shared_ancestor
)
return (None, None)
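# Worked example of the docstring above (assuming /data/repeat_a is a repeat and
# group_a / group_b are non-repeating groups inside it):
#   xpath         = "/data/repeat_a/group_a/name"
#   context_xpath = "/data/repeat_a/group_b/age"
#   share_same_repeat_parent(survey, xpath, context_xpath) -> (2, "/group_a/name")
# i.e. step up two levels from the context node, then follow /group_a/name down.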
class Survey(Section):
"""
Survey class - represents the full XForm XML.
"""
FIELDS = Section.FIELDS.copy()
FIELDS.update(
{
"_xpath": dict,
"_created": datetime.now, # This can't be dumped to json
"setvalues_by_triggering_ref": dict,
"title": str,
"id_string": str,
"sms_keyword": str,
"sms_separator": str,
"sms_allow_media": bool,
"sms_date_format": str,
"sms_datetime_format": str,
"sms_response": str,
constants.COMPACT_PREFIX: str,
constants.COMPACT_DELIMITER: str,
"file_name": str,
"default_language": str,
"_translations": dict,
"submission_url": str,
"auto_send": str,
"auto_delete": str,
"public_key": str,
"instance_xmlns": str,
"version": str,
"choices": dict,
"style": str,
"attribute": dict,
"namespaces": str,
}
) # yapf: disable
def validate(self):
if self.id_string in [None, "None"]:
raise PyXFormError("Survey cannot have an empty id_string")
super(Survey, self).validate()
self._validate_uniqueness_of_section_names()
def _validate_uniqueness_of_section_names(self):
root_node_name = self.name
section_names = []
for element in self.iter_descendants():
if isinstance(element, Section):
if element.name in section_names:
if element.name == root_node_name:
# The root node name is rarely explicitly set; explain
# the problem in a more helpful way (#510)
raise PyXFormError(
'The name "%s" is the same as the form name. '
"Use a different section name "
'(or change the form name in the "name" column of '
"the settings sheet)." % element.name
)
raise PyXFormError(
"There are two sections with the name %s." % element.name
)
section_names.append(element.name)
def get_nsmap(self):
"""Add additional namespaces"""
namespaces = getattr(self, constants.NAMESPACES, None)
if namespaces and isinstance(namespaces, str):
nslist = [
ns.split("=")
for ns in namespaces.split()
if len(ns.split("=")) == 2 and ns.split("=")[0] != ""
]
xmlns = "xmlns:"
nsmap = NSMAP.copy()
nsmap.update(
dict(
[
(xmlns + k, v.replace('"', "").replace("'", ""))
for k, v in nslist
if xmlns + k not in nsmap
]
)
)
return nsmap
return NSMAP
def xml(self):
"""
calls necessary preparation methods, then returns the xml.
"""
self.validate()
self._setup_xpath_dictionary()
for triggering_reference in self.setvalues_by_triggering_ref.keys():
if not (re.match(BRACKETED_TAG_REGEX, triggering_reference)):
raise PyXFormError(
"Only references to other fields are allowed in the 'trigger' column."
)
# try to resolve reference and fail if can't
self.insert_xpaths(triggering_reference, self)
body_kwargs = {}
if hasattr(self, constants.STYLE) and getattr(self, constants.STYLE):
body_kwargs["class"] = getattr(self, constants.STYLE)
nsmap = self.get_nsmap()
return node(
"h:html",
node("h:head", node("h:title", self.title), self.xml_model()),
node("h:body", *self.xml_control(), **body_kwargs),
**nsmap
)
def get_setvalues_for_question_name(self, question_name):
return self.setvalues_by_triggering_ref.get("${%s}" % question_name)
@staticmethod
def _generate_static_instances(list_name, choice_list):
"""
Generates <instance> elements for static data
(e.g. choices for select type questions)
Note that per commit message 0578242 and in xls2json.py R539, an
instance is only output for select items defined in the choices sheet
when the item has a choice_filter, and it is that way for backwards
compatibility.
"""
instance_element_list = []
multi_language = isinstance(choice_list[0].get("label"), dict)
has_media = bool(choice_list[0].get("media"))
for idx, choice in enumerate(choice_list):
choice_element_list = []
# Add a unique id to the choice element in case there is itext
# it references
if (
multi_language
or has_media
or has_dynamic_label(choice_list, multi_language)
):
itext_id = "-".join([list_name, str(idx)])
choice_element_list.append(node("itextId", itext_id))
for name, value in sorted(choice.items()):
if isinstance(value, str) and name != "label":
choice_element_list.append(node(name, str(value)))
if (
not multi_language
and not has_media
and not has_dynamic_label(choice_list, multi_language)
and isinstance(value, str)
and name == "label"
):
choice_element_list.append(node(name, str(value)))
instance_element_list.append(node("item", *choice_element_list))
return InstanceInfo(
type="choice",
context="survey",
name=list_name,
src=None,
instance=node("instance", node("root", *instance_element_list), id=list_name),
)
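# Rough shape of the instance returned above for a hypothetical multi-language
# choice list named "yes_no" (itextId elements appear only when itext is needed):
#   <instance id="yes_no">
#     <root>
#       <item><itextId>yes_no-0</itextId><name>yes</name></item>
#       <item><itextId>yes_no-1</itextId><name>no</name></item>
#     </root>
#   </instance>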
@staticmethod
def _generate_external_instances(element):
if isinstance(element, ExternalInstance):
name = element["name"]
extension = element["type"].split("-")[0]
prefix = "file-csv" if extension == "csv" else "file"
src = "jr://{}/{}.{}".format(prefix, name, extension)
return InstanceInfo(
type="external",
context="[type: {t}, name: {n}]".format(
t=element["parent"]["type"], n=element["parent"]["name"]
),
name=name,
src=src,
instance=node("instance", id=name, src=src),
)
return None
@staticmethod
def _validate_external_instances(instances):
"""
Must have unique names.
- Duplications could come from across groups; this checks the form.
- Errors are pooled together into a (hopefully) helpful message.
"""
seen = {}
for i in instances:
element = i.name
if seen.get(element) is None:
seen[element] = [i]
else:
seen[element].append(i)
errors = []
for element, copies in seen.items():
if len(copies) > 1:
contexts = ", ".join(x.context for x in copies)
errors.append(
"Instance names must be unique within a form. "
"The name '{i}' was found {c} time(s), "
"under these contexts: {contexts}".format(
i=element, c=len(copies), contexts=contexts
)
)
if errors:
raise ValidationError("\n".join(errors))
@staticmethod
def _generate_pulldata_instances(element):
def get_pulldata_functions(element):
"""
Returns a list of different pulldata(... function strings if
pulldata function is defined at least once for any of:
calculate, constraint, readonly, required, relevant
:param: element (pyxform.survey.Survey):
"""
functions_present = []
for formula_name in constants.EXTERNAL_INSTANCES:
if "pulldata(" in str(element["bind"].get(formula_name)):
functions_present.append(element["bind"][formula_name])
if "pulldata(" in str(element["choice_filter"]):
functions_present.append(element["choice_filter"])
if "pulldata(" in str(element["default"]):
functions_present.append(element["default"])
return functions_present
def get_instance_info(element, file_id):
uri = "jr://file-csv/{}.csv".format(file_id)
return InstanceInfo(
type=u"pulldata",
context="[type: {t}, name: {n}]".format(
t=element[u"parent"][u"type"], n=element[u"parent"][u"name"]
),
name=file_id,
src=uri,
instance=node("instance", id=file_id, src=uri),
)
pulldata_usages = get_pulldata_functions(element)
if len(pulldata_usages) > 0:
pulldata_instances = []
for usage in pulldata_usages:
for call_match in re.finditer(RE_PULLDATA, usage):
groups = call_match.groups()
if len(groups) == 2:
first_argument = ( # first argument to pulldata()
groups[1].replace("'", "").replace('"', "").strip()
)
pulldata_instances.append(
get_instance_info(element, first_argument)
)
return pulldata_instances
return None
@staticmethod
def _generate_from_file_instances(element):
itemset = element.get("itemset")
if itemset and (itemset.endswith(".csv") or itemset.endswith(".xml")):
file_id, ext = os.path.splitext(itemset)
uri = "jr://%s/%s" % (
"file" if ext == ".xml" else "file-%s" % ext[1:],
itemset,
)
return InstanceInfo(
type="file",
context="[type: {t}, name: {n}]".format(
t=element["parent"]["type"], n=element["parent"]["name"]
),
name=file_id,
src=uri,
instance=node("instance", id=file_id, src=uri),
)
return None
@staticmethod
def _generate_last_saved_instance(element):
"""True if a last-saved instance should be generated, False otherwise."""
for expression_type in constants.EXTERNAL_INSTANCES:
last_saved_expression = re.search(
LAST_SAVED_REGEX, str(element["bind"].get(expression_type))
)
if last_saved_expression:
return True
return re.search(LAST_SAVED_REGEX, str(element["choice_filter"])) or re.search(
LAST_SAVED_REGEX, str(element["default"])
)
@staticmethod
def _get_last_saved_instance():
name = "__last-saved" # double underscore used to minimize risk of name conflicts
uri = "jr://instance/last-saved"
return InstanceInfo(
type="instance",
context=None,
name=name,
src=uri,
instance=node("instance", id=name, src=uri),
)
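# The InstanceInfo above renders as a single secondary-instance reference,
# roughly: <instance id="__last-saved" src="jr://instance/last-saved"/>
# _generate_instances() below makes sure it is emitted at most once per form.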
def _generate_instances(self):
"""
Get instances from all the different ways that they may be generated.
An opportunity to validate instances before output to the XML model.
Instance names used for the id attribute are generated as follows:
- xml-external: item name value (for type==xml-external)
- pulldata: first arg to calculation->pulldata()
- select from file: file name arg to type->itemset
- choices: list_name (for type==select_*)
- last-saved: static name of jr://instance/last-saved
Validation and business rules for output of instances:
- xml-external item name must be unique across the XForm and the form
is considered invalid if there is a duplicate name. This differs from
other item types which allow duplicates if not in the same group.
- for all instance sources, if the same instance name is encountered,
the following rules are used to allow re-using instances but prevent
overwriting conflicting instances:
- same id, same src URI: skip adding the second (duplicate) instance
- same id, different src URI: raise an error
- otherwise: output the instance
There are two other things currently supported by pyxform that involve
external files and are not explicitly handled here, but may be relevant
to future efforts to harmonise / simplify external data workflows:
- `search` appearance/function: works a lot like pulldata but the csv
isn't made explicit in the form.
- `select_one_external`: implicitly relies on a `itemsets.csv` file and
uses XPath-like expressions for querying.
"""
instances = []
generate_last_saved = False
for i in self.iter_descendants():
i_ext = self._generate_external_instances(element=i)
i_pull = self._generate_pulldata_instances(element=i)
i_file = self._generate_from_file_instances(element=i)
if not generate_last_saved:
generate_last_saved = self._generate_last_saved_instance(element=i)
for x in [i_ext, i_pull, i_file]:
if x is not None:
instances += x if isinstance(x, list) else [x]
if generate_last_saved:
instances += [self._get_last_saved_instance()]
# Append last so the choice instance is excluded on a name clash.
for name, value in self.choices.items():
instances += [
self._generate_static_instances(list_name=name, choice_list=value)
]
# Check that external instances have unique names.
if instances:
ext_only = [x for x in instances if x.type == "external"]
self._validate_external_instances(instances=ext_only)
seen = {}
for i in instances:
if i.name in seen.keys() and seen[i.name].src != i.src:
# Instance id exists with different src URI -> error.
msg = (
"The same instance id will be generated for different "
"external instance source URIs. Please check the form."
" Instance name: '{i}', Existing type: '{e}', "
"Existing URI: '{iu}', Duplicate type: '{d}', "
"Duplicate URI: '{du}', Duplicate context: '{c}'.".format(
i=i.name,
iu=seen[i.name].src,
e=seen[i.name].type,
d=i.type,
du=i.src,
c=i.context,
)
)
raise PyXFormError(msg)
elif i.name in seen.keys() and seen[i.name].src == i.src:
# Instance id exists with same src URI -> ok, don't duplicate.
continue
else:
# Instance doesn't exist yet -> add it.
yield i.instance
seen[i.name] = i
def xml_model(self):
"""
Generate the xform <model> element
"""
self._setup_translations()
self._setup_media()
self._add_empty_translations()
model_kwargs = {"odk:xforms-version": constants.CURRENT_XFORMS_VERSION}
model_children = []
if self._translations:
model_children.append(self.itext())
model_children += [node("instance", self.xml_instance())]
model_children += list(self._generate_instances())
model_children += self.xml_bindings()
model_children += self.xml_actions()
if self.submission_url or self.public_key or self.auto_send or self.auto_delete:
submission_attrs = dict()
if self.submission_url:
submission_attrs["action"] = self.submission_url
submission_attrs["method"] = "post"
if self.public_key:
submission_attrs["base64RsaPublicKey"] = self.public_key
if self.auto_send:
submission_attrs["orx:auto-send"] = self.auto_send
if self.auto_delete:
submission_attrs["orx:auto-delete"] = self.auto_delete
submission_node = node("submission", **submission_attrs)
model_children.insert(0, submission_node)
return node("model", *model_children, **model_kwargs)
def xml_instance(self, **kwargs):
result = Section.xml_instance(self, **kwargs)
# set these first to prevent overwriting id and version
for key, value in self.attribute.items():
result.setAttribute(str(key), value)
result.setAttribute("id", self.id_string)
# add instance xmlns attribute to the instance node
if self.instance_xmlns:
result.setAttribute("xmlns", self.instance_xmlns)
if self.version:
result.setAttribute("version", self.version)
if self.prefix:
result.setAttribute("odk:prefix", self.prefix)
if self.delimiter:
result.setAttribute("odk:delimiter", self.delimiter)
return result
def _add_to_nested_dict(self, dicty, path, value):
if len(path) == 1:
dicty[path[0]] = value
return
if path[0] not in dicty:
dicty[path[0]] = {}
self._add_to_nested_dict(dicty[path[0]], path[1:], value)
def _setup_translations(self):
"""
set up the self._translations dict which will be referenced in the
setup media and itext functions
"""
def _setup_choice_translations(name, choice_value, itext_id):
for media_type_or_language, value in choice_value.items(): # noqa
if isinstance(value, dict):
for language, val in value.items():
self._add_to_nested_dict(
self._translations,
[language, itext_id, media_type_or_language],
val,
)
else:
if name == "media":
self._add_to_nested_dict(
self._translations,
[self.default_language, itext_id, media_type_or_language],
value,
)
else:
self._add_to_nested_dict(
self._translations,
[media_type_or_language, itext_id, "long"],
value,
)
self._translations = defaultdict(dict) # pylint: disable=W0201
for element in self.iter_descendants():
# Skip creation of translations for choices in filtered selects
# The creation of these translations is done further below in this
# function
parent = element.get("parent")
if parent and not parent.get("choice_filter"):
for d in element.get_translations(self.default_language):
translation_path = d["path"]
form = "long"
if "guidance_hint" in d["path"]:
translation_path = d["path"].replace("guidance_hint", "hint")
form = "guidance"
self._translations[d["lang"]][translation_path] = self._translations[
d["lang"]
].get(translation_path, {})
self._translations[d["lang"]][translation_path].update(
{
form: {
"text": d["text"],
"output_context": d["output_context"],
}
}
)
# This code sets up translations for choices in filtered selects.
for list_name, choice_list in self.choices.items():
multi_language = isinstance(choice_list[0].get("label"), dict)
has_media = bool(choice_list[0].get("media"))
if (
not multi_language
and not has_media
and not has_dynamic_label(choice_list, multi_language)
):
continue
for idx, choice in zip(range(len(choice_list)), choice_list):
for name, choice_value in choice.items():
itext_id = "-".join([list_name, str(idx)])
if isinstance(choice_value, dict):
_setup_choice_translations(name, choice_value, itext_id)
elif name == "label":
self._add_to_nested_dict(
self._translations,
[self.default_language, itext_id, "long"],
choice_value,
)
def _add_empty_translations(self):
"""
Adds translations so that every itext element has the same elements \
across every language.
When translations are not provided "-" will be used.
This disables any of the default_language fallback functionality.
"""
paths = {}
for lang, translation in self._translations.items():
for path, content in translation.items():
paths[path] = paths.get(path, set()).union(content.keys())
for lang, translation in self._translations.items():
for path, content_types in paths.items():
if path not in self._translations[lang]:
self._translations[lang][path] = {}
for content_type in content_types:
if content_type not in self._translations[lang][path]:
self._translations[lang][path][content_type] = "-"
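# Illustrative before/after for the padding above (hypothetical, simplified data):
#   before: {"en": {"/data/q1:label": {"long": "Name"}}, "fr": {}}
#   after:  {"en": {"/data/q1:label": {"long": "Name"}},
#            "fr": {"/data/q1:label": {"long": "-"}}}
# Every (path, content type) pair seen in any language is filled with "-" where
# missing, so itext lookups never need the default_language fallback.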
def _setup_media(self):
"""
Traverse the survey, find all the media, and put it into the \
_translations data structure which looks like this:
{language : {element_xpath : {media_type : media}}}
It matches the xform nesting order.
"""
def _set_up_media_translations(media_dict, translation_key):
# This is probably papering over a real problem, but anyway,
# in py3, sometimes if an item is on an xform with multiple
# languages and the item only has media defined in "default"
# (e.g. no "image" vs. "image::lang"), the media dict will be
# nested inside of a dict with key "default", e.g.
# {"default": {"image": "my_image.jpg"}}
media_dict_default = media_dict.get("default", None)
if isinstance(media_dict_default, dict):
media_dict = media_dict_default
for media_type, possibly_localized_media in media_dict.items():
if media_type not in SurveyElement.SUPPORTED_MEDIA:
raise PyXFormError("Media type: " + media_type + " not supported")
if isinstance(possibly_localized_media, dict):
# media is localized
localized_media = possibly_localized_media
else:
# media is not localized so create a localized version
# using the default language
localized_media = {self.default_language: possibly_localized_media}
for language, media in localized_media.items():
# Create the required dictionaries in _translations,
# then add media as a leaf value:
if language not in self._translations:
self._translations[language] = {}
translations_language = self._translations[language]
if translation_key not in translations_language:
translations_language[translation_key] = {}
translations_trans_key = translations_language[translation_key]
if media_type not in translations_trans_key:
translations_trans_key[media_type] = {}
translations_trans_key[media_type] = media
if not self._translations:
self._translations = defaultdict(dict) # pylint: disable=W0201
for survey_element in self.iter_descendants():
# Skip set up of media for choices in filtered selects.
# Translations for the media content should have been set up
# in _setup_translations
parent = survey_element.get("parent")
if parent and not parent.get("choice_filter"):
translation_key = survey_element.get_xpath() + ":label"
media_dict = survey_element.get("media")
_set_up_media_translations(media_dict, translation_key)
def itext(self):
"""
This function creates the survey's itext nodes from _translations
@see _setup_media _setup_translations
itext nodes are localized images/audio/video/text
@see http://code.google.com/p/opendatakit/wiki/XFormDesignGuidelines
"""
result = []
for lang, translation in self._translations.items():
if lang == self.default_language:
result.append(node("translation", lang=lang, default="true()"))
else:
result.append(node("translation", lang=lang))
for label_name, content in translation.items():
itext_nodes = []
label_type = label_name.partition(":")[-1]
if not isinstance(content, dict):
raise Exception()
for media_type, media_value in content.items():
if isinstance(media_value, dict):
value, output_inserted = self.insert_output_values(
media_value["text"], context=media_value["output_context"]
)
else:
value, output_inserted = self.insert_output_values(media_value)
if label_type == "hint":
if media_type == "guidance":
itext_nodes.append(
node(
"value",
value,
form="guidance",
toParseString=output_inserted,
)
)
else:
itext_nodes.append(
node("value", value, toParseString=output_inserted)
)
continue
if media_type == "long":
# I'm ignoring long types for now because I don't know
# how they are supposed to work.
itext_nodes.append(
node("value", value, toParseString=output_inserted)
)
elif media_type == "image":
if value != "-":
itext_nodes.append(
node(
"value",
"jr://images/" + value,
form=media_type,
toParseString=output_inserted,
)
)
else:
if value != "-":
itext_nodes.append(
node(
"value",
"jr://" + media_type + "/" + value,
form=media_type,
toParseString=output_inserted,
)
)
result[-1].appendChild(node("text", *itext_nodes, id=label_name))
return node("itext", *result)
def date_stamp(self):
"""Returns a date string with the format of %Y_%m_%d."""
return self._created.strftime("%Y_%m_%d")
def _to_ugly_xml(self):
return '<?xml version="1.0"?>' + self.xml().toxml()
def _to_pretty_xml(self):
"""
I want the to_xml method to validate, by default, the xml we are
producing.
"""
# Hacky way of pretty printing xml without adding extra white
# space to text
# TODO: check out pyxml
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
xml_with_linebreaks = self.xml().toprettyxml(indent=" ")
text_re = re.compile(r"(>)\n\s*(\s[^<>\s].*?)\n\s*(\s</)", re.DOTALL)
output_re = re.compile(r"\n.*(<output.*>)\n(\s\s)*")
pretty_xml = text_re.sub(lambda m: "".join(m.group(1, 2, 3)), xml_with_linebreaks)
inline_output = output_re.sub(r"\g<1>", pretty_xml)
return '<?xml version="1.0"?>\n' + inline_output
def __repr__(self):
return self.__unicode__()
def __unicode__(self):
return "<pyxform.survey.Survey instance at %s>" % hex(id(self))
def _setup_xpath_dictionary(self):
self._xpath = {} # pylint: disable=attribute-defined-outside-init
for element in self.iter_descendants():
if isinstance(element, (Question, Section)):
if element.name in self._xpath:
self._xpath[element.name] = None
else:
self._xpath[element.name] = element.get_xpath()
def _var_repl_function(
self, matchobj, context, use_current=False, reference_parent=False
):
"""
Given a dictionary of xpaths, return a function we can use to
replace ${varname} with the xpath to varname.
"""
name = matchobj.group(2)
last_saved = matchobj.group(1) is not None
is_indexed_repeat = matchobj.string.find("indexed-repeat(") > -1
indexed_repeat_regex = re.compile(r"indexed-repeat\([^)]+\)")
function_args_regex = re.compile(r"\b[^()]+\((.*)\)$")
instance_regex = re.compile(r"instance\([^)]+.+")
bracket_regex = re.compile(r"\[([^]]+)\]")
def _in_secondary_instance_predicate():
"""
check if ${} expression represented by matchobj
is in a predicate for a path expression for a secondary instance
"""
if instance_regex.search(matchobj.string) is not None:
bracket_regex_match_iter = bracket_regex.finditer(matchobj.string)
# Check whether current ${varname} is in the correct bracket_regex_match
for bracket_regex_match in bracket_regex_match_iter:
if (
matchobj.start() >= bracket_regex_match.start()
and matchobj.end() <= bracket_regex_match.end()
):
return True
return False
return False
def _relative_path(name):
"""Given name in ${name}, return relative xpath to ${name}."""
return_path = None
xpath, context_xpath = self._xpath[name], context.get_xpath()
# share same root, i.e. repeat_a from /data/repeat_a/...
if (
len(context_xpath.split("/")) > 2
and xpath.split("/")[2] == context_xpath.split("/")[2]
):
# if context xpath and target xpath fall under the same
# repeat use relative xpath referencing.
steps, ref_path = share_same_repeat_parent(
self, xpath, context_xpath, reference_parent
)
if steps:
ref_path = ref_path if ref_path.endswith(name) else "/%s" % name
prefix = " current()/" if use_current else " "
return_path = prefix + "/".join([".."] * steps) + ref_path + " "
return return_path
def _is_return_relative_path():
"""Determine condition to return relative xpath of current ${name}."""
indexed_repeat_relative_path_args_index = [0, 1, 3, 5]
current_matchobj = matchobj
if not last_saved and context:
if not is_indexed_repeat:
return True
# It is possible to have multiple indexed-repeat in an expression
indexed_repeats_iter = indexed_repeat_regex.finditer(matchobj.string)
for indexed_repeat in indexed_repeats_iter:
# Make sure current ${name} is in the correct indexed-repeat
if current_matchobj.end() > indexed_repeat.end():
try:
next(indexed_repeats_iter)
continue
except StopIteration:
return True
# ${name} outside of an indexed-repeat always uses a relative path
if (
current_matchobj.end() < indexed_repeat.start()
or current_matchobj.start() > indexed_repeat.end()
):
return True
indexed_repeat_name_index = None
indexed_repeat_args = (
function_args_regex.match(indexed_repeat.group())
.group(1)
.split(",")
)
name_arg = "${{{0}}}".format(name)
for idx, arg in enumerate(indexed_repeat_args):
if name_arg in arg.strip():
indexed_repeat_name_index = idx
return (
indexed_repeat_name_index is not None
and indexed_repeat_name_index
not in indexed_repeat_relative_path_args_index
)
return False
intro = (
"There has been a problem trying to replace %s with the "
"XPath to the survey element named '%s'." % (matchobj.group(0), name)
)
if name not in self._xpath:
raise PyXFormError(intro + " There is no survey element with this name.")
if self._xpath[name] is None:
raise PyXFormError(
intro + " There are multiple survey elements" " with this name."
)
if _is_return_relative_path():
if not use_current:
use_current = _in_secondary_instance_predicate()
relative_path = _relative_path(name)
if relative_path:
return relative_path
last_saved_prefix = (
"instance('" + LAST_SAVED_INSTANCE_NAME + "')" if last_saved else ""
)
return " " + last_saved_prefix + self._xpath[name] + " "
def insert_xpaths(self, text, context, use_current=False, reference_parent=False):
"""
Replace all instances of ${var} with the xpath to var.
"""
def _var_repl_function(matchobj):
return self._var_repl_function(
matchobj, context, use_current, reference_parent
)
return re.sub(BRACKETED_TAG_REGEX, _var_repl_function, str(text))
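# Illustrative call (hypothetical question name "age"):
#   survey.insert_xpaths("${age} > 18", context=some_question)
# yields roughly " /data/age  > 18"; when the target and context share a repeat
# ancestor, _var_repl_function above emits a relative reference instead
# (e.g. " ../../age ", or a current()-prefixed form inside secondary-instance
# predicates).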
def _var_repl_output_function(self, matchobj, context):
"""
A regex substitution function that will replace
${varname} with an output element that has the xpath to varname.
"""
return '<output value="' + self._var_repl_function(matchobj, context) + '" />'
def insert_output_values(self, text, context=None):
"""
Replace all the ${variables} in text with xpaths.
Returns that and a boolean indicating if there were any ${variables}
present.
"""
def _var_repl_output_function(matchobj):
return self._var_repl_output_function(matchobj, context)
# There was a bug where escaping is completely turned off in labels
# where variable replacement is used.
# For example, `${name} < 3` causes an error but `< 3` does not.
# This is my hacky fix for it, which does string escaping prior to
# variable replacement:
text_node = PatchedText()
text_node.data = text
xml_text = text_node.toxml()
# need to make sure we have reason to replace
# since at this point < is &lt;,
# the net effect &lt; gets translated again to &amp;lt;
if str(xml_text).find("{") != -1:
result = re.sub(BRACKETED_TAG_REGEX, _var_repl_output_function, str(xml_text))
return result, not result == xml_text
return text, False
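# Illustrative result (hypothetical field "name"):
#   insert_output_values("Hello ${name}", context=some_element)
#   -> ('Hello <output value=" /data/name " />', True)
# Text with no ${...} references is returned unchanged with False.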
# pylint: disable=too-many-arguments
def print_xform_to_file(
self, path=None, validate=True, pretty_print=True, warnings=None, enketo=False
):
"""
Print the xForm to a file and optionally validate it as well by
throwing exceptions and adding warnings to the warnings array.
"""
if warnings is None:
warnings = []
if not path:
path = self._print_name + ".xml"
try:
with codecs.open(path, mode="w", encoding="utf-8") as file_obj:
if pretty_print:
file_obj.write(self._to_pretty_xml())
else:
file_obj.write(self._to_ugly_xml())
except Exception as error:
if os.path.exists(path):
os.unlink(path)
raise error
if validate:
warnings.extend(odk_validate.check_xform(path))
if enketo:
warnings.extend(enketo_validate.check_xform(path))
# Warn if one or more translations are missing a valid IANA subtag
translations = self._translations.keys()
if translations:
bad_languages = get_languages_with_bad_tags(translations)
if bad_languages:
warnings.append(
"\tThe following language declarations do not contain "
"valid machine-readable codes: "
+ ", ".join(bad_languages)
+ ". "
+ "Learn more: http://xlsform.org#multiple-language-support"
)
def to_xml(self, validate=True, pretty_print=True, warnings=None, enketo=False):
"""
Generates the XForm XML.
validate is True by default - pass the XForm XML through ODK Validator.
pretty_print is True by default - formats the XML for readability.
warnings - if a list is passed it stores all warnings generated
enketo - pass the XForm XML though Enketo Validator.
Return XForm XML string.
"""
# On Windows, NamedTemporaryFile must be opened exclusively.
# So it must be explicitly created, opened, closed, and removed.
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
try:
# this will throw an exception if the xml is not valid
self.print_xform_to_file(
path=tmp.name,
validate=validate,
pretty_print=pretty_print,
warnings=warnings,
enketo=enketo,
)
finally:
if os.path.exists(tmp.name):
os.remove(tmp.name)
if pretty_print:
return self._to_pretty_xml()
return self._to_ugly_xml()
def instantiate(self):
"""
Instantiate as in return a instance of SurveyInstance for collected
data.
"""
return SurveyInstance(self)
|
|
# -*- coding: utf-8 -*-
"""
This module offers timezone implementations subclassing the abstract
:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format files
(usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, etc.), TZ
environment string (in all known formats), given ranges (with help from
relative deltas), local machine timezone, fixed offset timezone, and UTC
timezone.
"""
import datetime
import struct
import time
import sys
import os
import bisect
import copy
from contextlib import contextmanager
from six import string_types, PY3
from ._common import tzname_in_python2, _tzinfo
try:
from .win import tzwin, tzwinlocal
except ImportError:
tzwin = tzwinlocal = None
ZERO = datetime.timedelta(0)
EPOCH = datetime.datetime.utcfromtimestamp(0)
EPOCHORDINAL = EPOCH.toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
if not isinstance(other, (tzutc, tzoffset)):
return NotImplemented
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
def __eq__(self, other):
if not isinstance(other, tzoffset):
return NotImplemented
return self._offset == other._offset
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
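# Minimal usage sketch (values chosen only for illustration):
#   tzutc().utcoffset(None)                  -> datetime.timedelta(0)
#   tzoffset("BRST", -7200).tzname(None)     -> "BRST"
#   tzoffset("BRST", -7200).utcoffset(None)  -> datetime.timedelta(seconds=-7200)
#   tzutc() == tzoffset("UTC", 0)            -> True (a zero offset compares equal)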
class tzlocal(_tzinfo):
def __init__(self):
super(tzlocal, self).__init__()
self._std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
self._dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
self._dst_offset = self._std_offset
self._dst_saved = self._dst_offset - self._std_offset
self._hasdst = bool(self._dst_saved)
def utcoffset(self, dt):
if dt is None and self._hasdst:
return None
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if dt is None and self._hasdst:
return None
if self._isdst(dt):
return self._dst_offset - self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
# >>> import tz, datetime
# >>> t = tz.tzlocal()
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
#
# Here is a more stable implementation:
#
if not self._hasdst:
return False
dstval = self._naive_is_dst(dt)
# Check for ambiguous times:
if not dstval and self._fold is not None:
dst_fold_offset = self._naive_is_dst(dt - self._dst_saved)
if dst_fold_offset:
return self._fold
return dstval
def _naive_is_dst(self, dt):
timestamp = _datetime_to_timestamp(dt)
return time.localtime(timestamp + time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return NotImplemented
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr",
"isstd", "isgmt", "dstoffset"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return NotImplemented
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt and
self.dstoffset == other.dstoffset)
def __ne__(self, other):
return not (self == other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class _tzfile(object):
"""
Lightweight class for holding the relevant transition and time zone
information read from binary tzfiles.
"""
attrs = ['trans_list', 'trans_idx', 'ttinfo_list',
'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
def __init__(self, **kwargs):
for attr in self.attrs:
setattr(self, attr, kwargs.get(attr, None))
class tzfile(_tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://ftp.iana.org/tz/tz*.tar.gz
def __init__(self, fileobj, filename=None):
super(tzfile, self).__init__()
file_opened_here = False
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
file_opened_here = True
elif filename is not None:
self._filename = filename
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
if fileobj is not None:
if not file_opened_here:
fileobj = _ContextWrapper(fileobj)
with fileobj as file_stream:
tzobj = self._read_tzfile(file_stream)
self._set_tzdata(tzobj)
def _set_tzdata(self, tzobj):
""" Set the time zone data of this object from a _tzfile object """
# Copy the relevant attributes over as private attributes
for attr in _tzfile.attrs:
setattr(self, '_' + attr, getattr(tzobj, attr))
def _read_tzfile(self, fileobj):
out = _tzfile()
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
out.trans_list = list(struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4)))
else:
out.trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
out.trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
out.trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now (but read anyway for correct file position)
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# Build ttinfo list
out.ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = 60 * ((gmtoff + 30) // 60)
tti = _ttinfo()
tti.offset = gmtoff
tti.dstoffset = datetime.timedelta(0)
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
out.ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
out.ttinfo_std = None
out.ttinfo_dst = None
out.ttinfo_before = None
if out.ttinfo_list:
if not out.trans_list:
out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
else:
for i in range(timecnt-1, -1, -1):
tti = out.trans_idx[i]
if not out.ttinfo_std and not tti.isdst:
out.ttinfo_std = tti
elif not out.ttinfo_dst and tti.isdst:
out.ttinfo_dst = tti
if out.ttinfo_std and out.ttinfo_dst:
break
else:
if out.ttinfo_dst and not out.ttinfo_std:
out.ttinfo_std = out.ttinfo_dst
for tti in out.ttinfo_list:
if not tti.isdst:
out.ttinfo_before = tti
break
else:
out.ttinfo_before = out.ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = None
for i, tti in enumerate(out.trans_idx):
if not tti.isdst:
offset = tti.offset
laststdoffset = offset
else:
if laststdoffset is not None:
# Store the DST offset as well and update it in the list
tti.dstoffset = tti.offset - laststdoffset
out.trans_idx[i] = tti
offset = laststdoffset or 0
out.trans_list[i] += offset
# In case we missed any DST offsets on the way in for some reason, make
# a second pass over the list, looking for the /next/ DST offset.
laststdoffset = None
for i in reversed(range(len(out.trans_idx))):
tti = out.trans_idx[i]
if tti.isdst:
if not (tti.dstoffset or laststdoffset is None):
tti.dstoffset = tti.offset - laststdoffset
else:
laststdoffset = tti.offset
if not isinstance(tti.dstoffset, datetime.timedelta):
tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
out.trans_idx[i] = tti
out.trans_idx = tuple(out.trans_idx)
out.trans_list = tuple(out.trans_list)
return out
def _find_last_transition(self, dt):
# If there's no list, there are no transitions to find
if not self._trans_list:
return None
timestamp = _datetime_to_timestamp(dt)
# Find where the timestamp fits in the transition list - if the
# timestamp is a transition time, it's part of the "after" period.
idx = bisect.bisect_right(self._trans_list, timestamp)
# We want to know when the previous transition was, so subtract off 1
return idx - 1
def _get_ttinfo(self, idx):
# For no list or after the last transition, default to _ttinfo_std
if idx is None or (idx + 1) == len(self._trans_list):
return self._ttinfo_std
# If there is a list and the time is before it, return _ttinfo_before
if idx < 0:
return self._ttinfo_before
return self._trans_idx[idx]
def _find_ttinfo(self, dt):
idx = self._resolve_ambiguous_time(dt)
return self._get_ttinfo(idx)
def _resolve_ambiguous_time(self, dt, idx=None):
if idx is None:
idx = self._find_last_transition(dt)
# If we're fold-naive or we have no transitions, return the index.
if self._fold is None or idx is None:
return idx
timestamp = _datetime_to_timestamp(dt)
tti = self._get_ttinfo(idx)
if idx > 0:
# Calculate the difference in offsets from the current to previous
od = self._get_ttinfo(idx - 1).offset - tti.offset
tt = self._trans_list[idx] # Transition time
if timestamp < tt + od:
if self._fold:
return idx - 1
else:
return idx
if idx < len(self._trans_list):
# Calculate the difference in offsets from the previous to current
od = self._get_ttinfo(idx + 1).offset - tti.offset
tt = self._trans_list[idx + 1]
if timestamp > tt - od:
if self._fold:
return idx + 1
else:
return idx
return idx
def utcoffset(self, dt):
if dt is None:
return None
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.dstoffset
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return NotImplemented
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
return self.__reduce_ex__(None)
def __reduce_ex__(self, protocol):
return (self.__class__, (None, self._filename), self.__dict__)
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if dt is None:
return None
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year, 1, 1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return NotImplemented
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
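# Illustrative usage sketch (not part of the original module; the zone below is
# only an example): when a DST abbreviation is given without explicit start/end,
# tzrange falls back to the default rules built above (first Sunday of April to
# last Sunday of October, i.e. the pre-2007 US rules).
def _example_tzrange():
    est5edt = tzrange("EST", -5 * 3600, "EDT")
    # July 1st falls inside the default DST window, so the offset is UTC-4.
    return datetime.datetime(2003, 7, 1, tzinfo=est5edt).utcoffset()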
class tzstr(tzrange):
def __init__(self, s):
global parser
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
if dt is None:
return None
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % repr(self._tzid)
__reduce__ = object.__reduce__
class tzical(object):
def __init__(self, fileobj):
global rrule
from dateutil import rrule
if isinstance(fileobj, string_types):
self._s = fileobj
# ical should be encoded in UTF-8 with CRLF
fileobj = open(fileobj, 'r')
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = repr(fileobj)
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return list(self._vtz.keys())
def get(self, tzid=None):
if tzid is None:
keys = list(self._vtz.keys())
if len(keys) == 0:
raise ValueError("no timezones defined")
elif len(keys) > 1:
raise ValueError("more than one timezone available")
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
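        # For example, "+0200" parses to 7200 and "-051643" to -19003 (seconds).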
s = s.strip()
if not s:
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1, +1)[s[0] == '+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError("invalid offset: "+s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError("empty string")
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError("unknown component: "+value)
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError("component not closed: "+comptype)
if not tzid:
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError(
"at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError(
"mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError(
"mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError("invalid component end: "+value)
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError(
"unsupported %s parm: %s " % (name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError(
"unsupported TZOFFSETTO parm: "+parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError(
"unsupported TZNAME parm: "+parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError("unsupported property: "+name)
else:
if name == "TZID":
if parms:
raise ValueError(
"unsupported TZID parm: "+parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError("unsupported property: "+name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
name = name[1:]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin is not None:
try:
tz = tzwin(name)
except WindowsError:
tz = None
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
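# Minimal sketch of the lookup order implemented above (zone names are only
# examples; what is actually found depends on the system's zoneinfo database):
# no name -> $TZ or the local zone; a known name -> a tzfile from TZPATHS or the
# bundled dateutil.zoneinfo; otherwise a tzstr/tzutc/tzlocal fallback.
def _example_gettz():
    return gettz(), gettz("Europe/Paris"), gettz("EST5EDT")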
def _total_seconds(td):
# Python 2.6 doesn't have a total_seconds() method on timedelta objects
return ((td.seconds + td.days * 86400) * 1000000 +
td.microseconds) // 1000000
_total_seconds = getattr(datetime.timedelta, 'total_seconds', _total_seconds)
def _datetime_to_timestamp(dt):
"""
Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
since January 1, 1970, ignoring the time zone.
"""
return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
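# For example, assuming EPOCH is the naive 1970-01-01 datetime defined earlier
# in this module, _datetime_to_timestamp(datetime.datetime(1970, 1, 2)) == 86400.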
class _ContextWrapper(object):
"""
Class for wrapping contexts so that they are passed through in a
with statement.
"""
def __init__(self, context):
self.context = context
def __enter__(self):
return self.context
def __exit__(*args, **kwargs):
pass
# vim:ts=4:sw=4:et
|
|
import time
from collections import OrderedDict, defaultdict
from functools import reduce, wraps
from inspect import signature
import matplotlib.pyplot as plt
from rfho import as_list
import tensorflow as tf
import rfho as rf
try:
from IPython.display import IFrame
import IPython
except ImportError:
print('Looks like IPython is not installed...')
IFrame, IPython = None, None
import gzip
import os
import _pickle as pickle
import numpy as np
try:
from tabulate import tabulate
except ImportError:
print('Might want to install library "tabulate" for a better dictionary printing')
tabulate = None
def join_paths(*paths):
return reduce(lambda acc, new_path: os.path.join(acc, new_path), paths)
SAVE_SETTINGS = {
'NOTEBOOK_TITLE': ''
}
_EXP_ROOT_FOLDER = os.getenv('RFHO_EXP_FOLDER')
if _EXP_ROOT_FOLDER is None:
print('Environment variable RFHO_EXP_FOLDER not found. Folder "Experiments" in the current directory will be used')
_EXP_ROOT_FOLDER = join_paths(os.getcwd(), 'Experiments')
print('Experiment save directory is ', _EXP_ROOT_FOLDER)
FOLDER_NAMINGS = { # TODO should go into a settings file?
'EXP_ROOT': _EXP_ROOT_FOLDER,
'OBJ_DIR': 'Obj_data',
'PLOTS_DIR': 'Plots',
'MODELS_DIR': 'Models',
'GEPHI_DIR': 'GePhi',
}
def check_or_create_dir(directory, notebook_mode=True, create=True):
if not os.path.exists(directory) and create:
os.mkdir(directory)
print('folder', directory, 'has been created')
if notebook_mode and SAVE_SETTINGS['NOTEBOOK_TITLE']:
directory = join_paths(directory, SAVE_SETTINGS['NOTEBOOK_TITLE']) # += '/' + settings['NOTEBOOK_TITLE']
if not os.path.exists(directory) and create:
os.mkdir(directory)
print('folder ', directory, 'has been created')
return directory
def save_fig(name, root_dir=None, notebook_mode=True, default_overwrite=False, extension='pdf', **savefig_kwargs):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['PLOTS_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.%s' % (name, extension)) # directory + '/%s.pdf' % name
if not default_overwrite and os.path.isfile(filename):
# if IPython is not None:
# IPython.display.display(tuple(IFrame(filename, width=800, height=600))) # FIXME not working!
overwrite = input('A file named %s already exists. Overwrite (Leave string empty for NO!)?' % filename)
if not overwrite:
print('No changes done.')
return
plt.savefig(filename, **savefig_kwargs)
# print('file saved')
def save_obj(obj, name, root_dir=None, notebook_mode=True, default_overwrite=False):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['OBJ_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.pkgz' % name) # directory + '/%s.pkgz' % name
if not default_overwrite and os.path.isfile(filename):
overwrite = input('A file named %s already exists. Overwrite (Leave string empty for NO!)?' % filename)
if not overwrite:
print('No changes done.')
return
print('Overwriting...')
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f)
# print('File saved!')
def save_text(text, name, root_dir=None, notebook_mode=True, default_overwrite=False):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.txt' % name) # directory + '/%s.pkgz' % name
if not default_overwrite and os.path.isfile(filename):
overwrite = input('A file named %s already exists. Overwrite (Leave string empty for NO!)?' % filename)
if not overwrite:
print('No changes done.')
return
print('Overwriting...')
with open(filename, "w") as text_file:
text_file.write(text)
def load_obj(name, root_dir=None, notebook_mode=True):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['OBJ_DIR']),
notebook_mode=notebook_mode, create=False)
filename = join_paths(directory, name if name.endswith('.pkgz') else name + '.pkgz')
with gzip.open(filename, 'rb') as f:
return pickle.load(f)
def save_model(session, model, step, root_dir=None, notebook_mode=True):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['MODELS_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s' % model.name)
model.saver.save(session, filename, global_step=step)
def load_model(session, model, step, root_dir=None, notebook_mode=True):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['MODELS_DIR']),
notebook_mode=notebook_mode, create=False)
filename = join_paths(directory, model.name)
model.saver.restore(session, filename + "-" + str(step))
def save_adjacency_matrix_for_gephi(matrix, name, root_dir=None, notebook_mode=True, class_names=None):
if root_dir is None: root_dir = os.getcwd()
directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['GEPHI_DIR']),
notebook_mode=notebook_mode)
filename = join_paths(directory, '%s.csv' % name)
m, n = np.shape(matrix)
assert m == n, '%s should be a square matrix.' % matrix
if not class_names:
class_names = [str(k) for k in range(n)]
left = np.array([class_names]).T
matrix = np.hstack([left, matrix])
up = np.vstack([[''], left]).T
matrix = np.vstack([up, matrix])
np.savetxt(filename, matrix, delimiter=';', fmt='%s')
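# Illustrative sketch of the resulting CSV layout (made-up 2x2 data with
# class_names=['a', 'b']); the first row and column hold the labels:
#   ;a;b
#   a;m00;m01
#   b;m10;m11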
def save_setting(local_variables, root_dir=None, excluded=None, default_overwrite=False, collect_data=True,
notebook_mode=True, do_print=True, append_string=''):
dictionary = generate_setting_dict(local_variables, excluded=excluded)
if do_print:
if tabulate:
print(tabulate(dictionary.items(), headers=('settings var names', 'values')))
else:
print('SETTING:')
for k, v in dictionary.items():
print(k, v, sep=': ')
print()
if collect_data: save_obj(dictionary, 'setting' + append_string,
root_dir=root_dir,
default_overwrite=default_overwrite,
notebook_mode=notebook_mode)
def generate_setting_dict(local_variables, excluded=None):
"""
Generates a dictionary of (name, values) of local variables (typically obtained by vars()) that
can be saved at the beginning of the experiment. Furthermore, if an object obj in local_variables implements the
function setting(), it saves the result of obj.setting() as value in the dictionary.
:param local_variables:
:param excluded: (optional, default []) variable or list of variables to be excluded.
:return: A dictionary
"""
excluded = as_list(excluded) or []
setting_dict = {k: v.setting() if hasattr(v, 'setting') else v
for k, v in local_variables.items() if v not in excluded}
import datetime
setting_dict['datetime'] = str(datetime.datetime.now())
return setting_dict
class Timer:
"""
Stopwatch class for timing the experiments. Uses `time` module.
"""
_div_unit = {'ms': 1. / 1000,
'sec': 1.,
'min': 60.,
'hr': 3600.}
def __init__(self, unit='sec', round_off=True):
self._starting_times = []
self._stopping_times = []
self._running = False
self.round_off = round_off
assert unit in Timer._div_unit
self.unit = unit
def reset(self):
self._starting_times = []
self._stopping_times = []
self._running = False
def start(self):
if not self._running:
self._starting_times.append(time.time())
self._running = True
return self
def stop(self):
if self._running:
self._stopping_times.append(time.time())
self._running = False
return self
def raw_elapsed_time_list(self):
def _maybe_add_last():
t2 = self._stopping_times if len(self._starting_times) == len(self._stopping_times) else \
self._stopping_times + [time.time()]
return zip(self._starting_times, t2)
return [t2 - t1 for t1, t2 in _maybe_add_last()]
def elapsed_time(self):
res = sum(self.raw_elapsed_time_list()) / Timer._div_unit[self.unit]
return res if not self.round_off else int(res)
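# Quick usage sketch (not part of the original file): start/stop may be called
# repeatedly and elapsed_time() sums all recorded intervals in the chosen unit.
def _example_timer():
    tm = Timer(unit='ms', round_off=False)
    tm.start()
    time.sleep(0.05)
    tm.stop()
    return tm.elapsed_time()  # roughly 50 (milliseconds)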
class Saver:
"""
Class for recording experiment data
"""
SKIP = 'SKIP' # skip saving value in save_dict
def __init__(self, experiment_names, *items, append_date_to_name=True,
root_directory=FOLDER_NAMINGS['EXP_ROOT'],
timer=None, do_print=True, collect_data=True, default_overwrite=False):
"""
Initialize a saver to collect data. (Intended to be used together with OnlinePlotStream.)
:param experiment_names: string or list of strings which represent the name of the folder (and sub-folders)
in which the experiment data will be stored.
:param items: a list of (from pairs to at most) 5-tuples that represent the things you want to save.
The first arg of each tuple should be a string that will be the key of the save_dict.
Then there can be either a callable with signature (step) -> result or () -> result,
or tensorflow objects, in which case you should pass the various args in this order:
fetches: tensor or list of tensors to compute;
feeds (optional): to be passed to tf.Session.run. Can be a
callable with signature (step) -> feed_dict
options (optional): to be passed to tf.Session.run
run_metadata (optional): to be passed to tf.Session.run
:param timer: optional timer object. If None creates a new one. If false does not register time.
If None or a Timer, it adds to the save_dict an entry that records the elapsed time.
The time required to perform data collection and saving is not counted, since typically
the aim is to record the true algorithm execution time!
:param root_directory: string, name of root directory (default ~HOME/Experiments)
:param do_print: (optional, default True) will print by default `save_dict` each time method `save` is executed
:param collect_data: (optional, default True) will save by default `save_dict` each time
method `save` is executed
"""
experiment_names = as_list(experiment_names)
if append_date_to_name:
from datetime import datetime
experiment_names += [datetime.today().strftime('%d-%m-%y__%Hh%Mm')]
self.experiment_names = list(experiment_names)
if not os.path.isabs(experiment_names[0]):
self.directory = join_paths(root_directory) # otherwise assume no use of root_directory
if collect_data:
check_or_create_dir(root_directory, notebook_mode=False)
else: self.directory = ''
for name in self.experiment_names:
self.directory = join_paths(self.directory, name)
check_or_create_dir(self.directory, notebook_mode=False, create=collect_data)
self.do_print = do_print
self.collect_data = collect_data
self.default_overwrite = default_overwrite
assert isinstance(timer, Timer) or timer is None or timer is False, 'timer param not good...'
if timer is None:
timer = Timer()
self.timer = timer
self.clear_items()
self.add_items(*items)
# noinspection PyAttributeOutsideInit
def clear_items(self):
"""
Removes all previously inserted items
:return:
"""
self._processed_items = []
self._step = -1
@staticmethod
def process_items(*items):
"""
Add items to the save dictionary
:param items: a list of (from pairs to at most) 5-tuples that represent the things you want to save.
The first arg of each tuple should be a string that will be the key of the save_dict.
Then there can be either a callable with signature (step) -> result or () -> result
or tensorflow things... In this second case you should pass the following args in this order:
fetches: tensor or list of tensors to compute;
feeds (optional): to be passed to tf.Session.run. Can be a
callable with signature (step) -> feed_dict
or () -> feed_dict
options (optional): to be passed to tf.Session.run
run_metadata (optional): to be passed to tf.Session.run
:return: None
"""
assert len(items) == 0 or isinstance(items[0], str), 'Check items! first arg %s. Should be a string.' \
' All args: %s' % (items[0], items)
processed_args = []
k = 0
while k < len(items):
part = [items[k]]
k += 1
while k < len(items) and not isinstance(items[k], str):
part.append(items[k])
k += 1
assert len(part) >= 2, 'Check args! Last part %s' % part
if callable(part[1]): # python stuff
if len(part) == 2: part.append(True) # always true default condition
else: # tensorflow stuff
part += [None] * (6 - len(part))  # pad to: name, fetches, feeds, condition, options, run_metadata
if part[3] is None: part[3] = True # default condition
processed_args.append(part)
# self._processed_items += processed_args
# return [pt[0] for pt in processed_args]
return processed_args
def add_items(self, *items):
"""
Adds internally items to this saver
:param items:
:return:
"""
processed_items = Saver.process_items(*items)
self._processed_items += processed_items
return [pt[0] for pt in processed_items]
def save(self, step=None, session=None, append_string="", do_print=None, collect_data=None,
processed_items=None, _res=None):
"""
Builds and saves a dictionary with the keys and values specified at construction time or by method
`add_items`
:param processed_items: optional, processed item list (returned by add_items)
if None uses internally stored items
:param session: Optional tensorflow session, otherwise uses default session
:param step: optional step, if None (default) uses internal step
(int preferred, otherwise does not work well with `pack_save_dictionaries`).
:param append_string: (optional str) string appended to the file name after `str(step)`
:param do_print: (default as object field)
:param collect_data: (default as object field)
:param _res: used internally by context managers
:return: the dictionary
"""
from tensorflow import get_default_session
if step is None:
self._step += 1
step = self._step
if not processed_items: processed_items = self._processed_items
if do_print is None: do_print = self.do_print
if collect_data is None: collect_data = self.collect_data
if session:
ss = session
else:
ss = get_default_session()
if ss is None and do_print: print('WARNING, No tensorflow session available')
if self.timer: self.timer.stop()
def _maybe_call(_method):
if not callable(_method): return _method
if len(signature(_method).parameters) == 0:
return _method()
elif len(signature(_method).parameters) == 1:
return _method(step)
else: # complete signature?
return _method(step, _res)
save_dict = OrderedDict([(pt[0], _maybe_call(pt[1]) if callable(pt[1])
else ss.run(pt[1], feed_dict=_maybe_call(pt[2]),
options=pt[4], run_metadata=pt[5])
if _maybe_call(pt[2 if callable(pt[1]) else 3]) else Saver.SKIP)
for pt in processed_items]
)
if self.timer: save_dict['Elapsed time (%s)' % self.timer.unit] = self.timer.elapsed_time()
if do_print:
if tabulate:
print(tabulate(save_dict.items(), headers=('Step %s' % step, 'Values'), floatfmt='.5f'))
else:
print('SAVE DICT:')
for key, v in save_dict.items():
print(key, v, sep=': ')
print()
if collect_data:
self.save_obj(save_dict, str(step) + append_string)
if self.timer: self.timer.start()
return save_dict
def pack_save_dictionaries(self, name='all', append_string='', erase_others=True):
"""
Creates an unique file starting from file created by method `save`.
The file contains a dictionary whose keys are the save_dict keys and whose values are lists of the values gathered from the original files.
:param name:
:param append_string:
:param erase_others:
:return: The generated dictionary
"""
import glob
all_files = sorted(glob.glob(join_paths(
self.directory, FOLDER_NAMINGS['OBJ_DIR'], '[0-9]*%s.pkgz' % append_string)),
key=os.path.getctime) # sort by creation time
if len(all_files) == 0:
print('No file found')
return
objs = [load_obj(path, root_dir='', notebook_mode=False) for path in all_files]
# packed_dict = OrderedDict([(k, []) for k in objs[0]])
# noinspection PyArgumentList
packed_dict = defaultdict(list, OrderedDict())
for obj in objs:
[packed_dict[k].append(v) for k, v in obj.items()]
self.save_obj(packed_dict, name=name + append_string)
if erase_others:
[os.remove(f) for f in all_files]
return packed_dict
def record(self, *what, append_string=''):  # TODO this is an initial (maybe bad) idea.
"""
Context manager for this saver: saves the recorded items at each hyper-iteration.
:param what:
:param append_string:
:return:
"""
return Records.on_hyperiteration(self, *what, append_string=append_string) # FIXME to be finished
def save_text(self, text, name):
return save_text(text=text, name=name, root_dir=self.directory, default_overwrite=self.default_overwrite,
notebook_mode=False)
def save_fig(self, name, extension='pdf', **savefig_kwargs):
"""
Object-oriented version of `save_fig`
:param extension:
:param name: name of the figure (.pdf extension automatically added)
:return:
"""
return save_fig(name, root_dir=self.directory, extension=extension,
default_overwrite=self.default_overwrite, notebook_mode=False,
**savefig_kwargs)
def save_obj(self, obj, name):
"""
Object-oriented version of `save_obj`
:param obj: object to save
:param name: name of the file (.pkgz extension automatically added)
:return:
"""
return save_obj(obj, name, root_dir=self.directory,
default_overwrite=self.default_overwrite, notebook_mode=False)
def save_adjacency_matrix_for_gephi(self, matrix, name, class_names=None):
"""
Object-oriented version of `save_adjacency_matrix_for_gephi`
:param matrix:
:param name:
:param class_names:
:return:
"""
return save_adjacency_matrix_for_gephi(matrix, name, root_dir=self.directory,
notebook_mode=False, class_names=class_names)
def save_setting(self, local_variables, excluded=None, append_string=''):
"""
Object-oriented version of `save_setting`
:param local_variables:
:param excluded:
:param append_string:
:return:
"""
excluded = as_list(excluded or [])
excluded.append(self) # no reason to save itself...
return save_setting(local_variables, root_dir=self.directory, excluded=excluded,
default_overwrite=self.default_overwrite, collect_data=self.collect_data,
notebook_mode=False, do_print=self.do_print, append_string=append_string)
def load_obj(self, name):
"""
Object-oriented version of `load_obj`
:param name: name of the file (.pkgz extension automatically added)
:return: unpacked object
"""
return load_obj(name, root_dir=self.directory, notebook_mode=False)
def save_model(self, session, model, step):
save_model(session, model, step, root_dir=self.directory, notebook_mode=False)
def load_model(self, session, model, step):
load_model(session, model, step, root_dir=self.directory)
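# Sketch of the `items` format handled by `Saver.process_items`/`add_items` (the
# keys below are made up and only the plain-python form is shown): each string
# key is followed by a callable and an optional condition, which defaults to True.
def _example_saver_items():
    saver = Saver('items_demo', append_date_to_name=False, timer=False,
                  do_print=False, collect_data=False)
    saver.add_items('step_id', lambda step: step,
                    'noise', lambda step: float(np.random.randn()))
    return saver.save(0)  # OrderedDict with keys 'step_id' and 'noise'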
# noinspection PyPep8Naming
def Loader(folder_name):
"""
Utility method for creating a `Saver` intended only for loading previously saved data:
it does not create a timer, does not append the date to the name and does not collect
new data. Just give the folder name of the saved experiment.
:param folder_name: (string or list of strings)
either absolute or relative, in which case root_directory will be used
:return: a `Saver` object
"""
return Saver(folder_name, append_date_to_name=False, timer=False,
collect_data=False)
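# Hedged example (the folder name is hypothetical): reload the data saved by a
# previous run and packed with `pack_save_dictionaries` (default name 'all').
def _example_loader():
    loader = Loader('previous_experiment/12-01-18__10h30m')
    return loader.load_obj('all')  # dict mapping each recorded key to a list of values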
class Records:
"""
Contains (for the moment) static convenience methods for recording quantities
"""
class on_hyperiteration:
"""
context for record at each hyperiteration
"""
def __init__(self, saver, *record_what, append_string='', do_print=None, collect_data=None):
self.saver = saver
self.append_string = append_string
if self.append_string: self.append_string = '__' + self.append_string
self.do_print = do_print
self.collect_data = collect_data
self._unwrapped = []
self._record_what = record_what or []
self._processed_items = []
self._step = 0
def __enter__(self):
self._wrap()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.collect_data:
if exc_tb:
self.saver.save_obj((str(exc_type), str(exc_val), str(exc_tb)),
'exception' + self.append_string)
self.saver.pack_save_dictionaries(append_string=self.append_string)
self._unwrap()
# TODO is this a good thing? or should we leave it to do manually
self.saver.clear_items()
if self.saver.timer: self.saver.timer.stop()
def _wrap(self):
self._unwrapped.append(rf.HyperOptimizer.initialize)
rf.HyperOptimizer.initialize = self._initialize_wrapper(rf.HyperOptimizer.initialize)
self._unwrapped.append(rf.HyperOptimizer.run)
rf.HyperOptimizer.run = self._saver_wrapper(rf.HyperOptimizer.run) # mmm...
def _unwrap(self):
rf.HyperOptimizer.initialize = self._unwrapped[0]
rf.HyperOptimizer.run = self._unwrapped[1]
def _saver_wrapper(self, f):
@wraps(f)
def _saver_wrapped(*args, **kwargs):
res = f(*args, **kwargs)
self._execute_save(res, *args, **kwargs)
return res
return _saver_wrapped
def _initialize_wrapper(self, f): # this should be good since
@wraps(f)
def _initialize_wrapped(*args, **kwargs):
first_init = f(*args, **kwargs)
# add savers just at the first initialization
if first_init:
self._processed_items += rf.flatten_list(
[Saver.process_items(*e(*args, **kwargs)) for e in self._record_what])
self._execute_save('INIT', *args, **kwargs)
return first_init
return _initialize_wrapped
# noinspection PyUnusedLocal
def _execute_save(self, res, *args, **kwargs): # maybe args and kwargs could be useful...
self.saver.save(step=self._step, append_string=self.append_string,
processed_items=self._processed_items,
do_print=self.do_print, collect_data=self.collect_data,
_res=res)
self._step += 1
# noinspection PyClassHasNoInit,PyPep8Naming
class on_forward(on_hyperiteration): # context class
"""
Saves at every iteration (before call of method `step_forward`)
"""
def _wrap(self):
self._unwrapped.append(rf.HyperOptimizer.initialize)
rf.HyperOptimizer.initialize = self._initialize_wrapper(rf.HyperOptimizer.initialize)
self._unwrapped.append(rf.ForwardHG.step_forward)
rf.ForwardHG.step_forward = self._saver_wrapper(rf.ForwardHG.step_forward) # mmm...
def _unwrap(self):
rf.HyperOptimizer.initialize = self._unwrapped[0]
rf.ForwardHG.step_forward = self._unwrapped[1]
@staticmethod
def direct(*items):
"""
Everything passed in `items` is passed directly to `Saver`.
:param items:
:return:
"""
# noinspection PyUnusedLocal
def _call(*args, **kwargs):
return items
return _call
@staticmethod
def norms_of_z():
"""
Records the norms of the Z variables of `ForwardHG` (`hg.zs`).
:return: a record-helper function (to be passed to `Saver.record` or `Records.on_hyperiteration`)
"""
def _call(*args, **kwargs):
hg = args[0]
if isinstance(hg, rf.HyperOptimizer): hg = hg.hyper_gradients # guess most common case
assert isinstance(hg, rf.ForwardHG)
_rs = Records.tensors(*hg.zs, op=tf.norm)(args, kwargs)
return _rs
return _call
@staticmethod
def norms_of_d_dynamics_d_hypers(fd=None):
"""
In `ForwardHG` records the norm of the partial derivatives of the dynamics w.r.t. the hyperparameters.
:param fd:
:return:
"""
if fd is None: fd = lambda stp, rs: rs
def _call(*args, **kwargs):
hg = args[0]
if isinstance(hg, rf.HyperOptimizer):
hg = hg.hyper_gradients # guess most common case
assert isinstance(hg, rf.ForwardHG)
_rs = Records.tensors(*hg.d_dynamics_d_hypers, op=tf.norm,
fd=fd,
condition=lambda stp, rs: rs != 'INIT')(args, kwargs)
return _rs
return _call
@staticmethod
def hyperparameters():
"""
Simple one! record all hyperparameter values, assuming the usage of `HyperOptimizer`
:return: a function
"""
# noinspection PyUnusedLocal
def _call(*args, **kwargs):
hyper_optimizer = args[0]
assert isinstance(hyper_optimizer, rf.HyperOptimizer)
return rf.flatten_list(
[rf.simple_name(hyp), hyp]
for hyp in hyper_optimizer.hyper_list)
return _call
@staticmethod
def hypergradients():
"""
Record all hypergradient values, assuming the usage of `HyperOptimizer`
:return:
"""
# noinspection PyUnusedLocal
def _call(*args, **kwargs):
hyper_optimizer = args[0]
assert isinstance(hyper_optimizer, rf.HyperOptimizer)
return rf.flatten_list(
['grad::' + rf.simple_name(hyp), hyper_optimizer.hyper_gradients.hyper_gradients_dict[hyp]]
for hyp in hyper_optimizer.hyper_list)
return _call
@staticmethod
def tensors(*tensors, key=None, scope=None, name_contains=None,
rec_name='', op=tf.identity, fd=None,
condition=True):
"""
Records the given tensors, which can be passed directly, looked up by name,
by collection key, or by a substring of their name.
:param name_contains: record all tensors which name contains this string. Can be a list.
:type condition: bool | function
:param condition: optional condition for triggering the saving of tensors, can have different
signatures
:param tensors: varargs of tensor names
:param scope: optional for collections
:param key: to record collections
:param op: optional operation to apply to each tensor
:param rec_name: optional name to prepend to all tensors recorded by this
:param fd: # given to _process_feed_dicts_for_rec
:return:
"""
if rec_name: rec_name += '::' # maybe find a better way
def _call(*args, **_kwargs):
if tensors:
_tensors = [tf.get_default_graph().get_tensor_by_name(tns + ':0') if isinstance(tns, str)
else tns for tns in tensors]
elif key:
_tensors = tf.get_collection(key, scope=scope)
elif name_contains:
_names = rf.flatten_list([[n.name for n in tf.get_default_graph().as_graph_def().node
if nc in n.name] for nc in as_list(name_contains)])
return Records.tensors(*_names, rec_name=rec_name, op=op, fd=fd, condition=True)(*args, **_kwargs)
else:
raise NotImplementedError('One of tensors, key or name_contains must be given')
# try with dictionary of form (string (simple name of placeholder), data)
_rs2 = rf.flatten_list([rec_name + rf.simple_name(tns.name),
op(tns),
Records._process_feed_dicts_for_rec(fd, *args, **_kwargs),
condition]
for tns in _tensors)
return _rs2
return _call
@staticmethod
def model(): # TODO discuss with others to see what's best way to save models...
"""
Should save the model(s) in a useful way..
:return:
"""
raise NotImplementedError()
@staticmethod
def setting(): # TODO I have no precise idea on how to implement this...
"""
Should save experiment meta-info like params, dataset, beginning/end...
name of experiment function, git version and so on.
:return:
"""
raise NotImplementedError()
@staticmethod
def _process_feed_dicts_for_rec(fd, *args, **kwargs):
# TODO add more functionality...
"""
# try with dictionary of form (string (simple name of placeholder), data)
:param fd:
:param args: # might be useful??
:param kwargs:
:return:
"""
if fd is None or callable(fd): return fd
def _std_process_dict(_dict):
return {tf.get_default_graph().get_tensor_by_name(n + ':0'): v for n, v in _dict.items()}
def _fds():
if isinstance(fd, dict):
_rs = _std_process_dict(fd)
elif isinstance(fd, (list, tuple)): # (x, y, dataset)
if len(fd) == 3 and isinstance(fd[2], rf.Dataset): # very common scenario
_rs = {tf.get_default_graph().get_tensor_by_name(fd[0] + ':0'): fd[2].data,
tf.get_default_graph().get_tensor_by_name(fd[1] + ':0'): fd[2].target,
}
else:
raise NotImplementedError('feed dict format not understood')
else:
raise NotImplementedError('feed dict format not understood')
return _rs
return _fds
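# Hedged sketch of how the record helpers above are meant to be combined with a
# Saver (the hyper-optimizer object and the loop below are assumptions about
# typical rfho usage, not taken from this file):
#
#   saver = Saver('exp_name')
#   with saver.record(Records.hyperparameters(), Records.hypergradients()):
#       hyper_opt.initialize()
#       for _ in range(n_hyper_steps):
#           hyper_opt.run(T)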
if __name__ == '__main__':
sav1 = Saver('tbd',
'random', lambda step: np.random.randn(),
default_overwrite=True
)
sav1.timer.start()
sav1.save(0)
time.sleep(2)
sav1.save(1)
time.sleep(1)
sav1.save(2)
sav1.pack_save_dictionaries()
|
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
import json
import multiprocessing
import optparse
import os
import shlex
import sys
# Add testrunner to the path.
sys.path.insert(
0,
os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
from testrunner.testproc.timeout import TimeoutProc
BASE_DIR = (
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)))))
DEFAULT_OUT_GN = 'out.gn'
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
# This needs to stay in sync with test/bot_default.isolate.
"bot_default": [
"debugger",
"mjsunit",
"cctest",
"wasm-spec-tests",
"inspector",
"webkit",
"mkgrokdump",
"fuzzer",
"message",
"preparser",
"intl",
"unittests",
],
# This needs to stay in sync with test/default.isolate.
"default": [
"debugger",
"mjsunit",
"cctest",
"wasm-spec-tests",
"inspector",
"mkgrokdump",
"fuzzer",
"message",
"preparser",
"intl",
"unittests",
],
# This needs to stay in sync with test/d8_default.isolate.
"d8_default": [
# TODO(machenbach): uncomment after infra side lands.
#"debugger",
"mjsunit",
"webkit",
#"message",
#"preparser",
#"intl",
],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
"debugger",
"mjsunit",
"cctest",
"inspector",
"webkit",
"intl",
],
"unittests": [
"unittests",
],
}
# Double the timeout for these:
SLOW_ARCHS = ["arm",
"mips",
"mipsel",
"mips64",
"mips64el",
"s390",
"s390x",
"arm64"]
class ModeConfig(object):
def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
self.flags = flags
self.timeout_scalefactor = timeout_scalefactor
self.status_mode = status_mode
self.execution_mode = execution_mode
DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort"]
MODES = {
"debug": ModeConfig(
flags=DEBUG_FLAGS,
timeout_scalefactor=4,
status_mode="debug",
execution_mode="debug",
),
"optdebug": ModeConfig(
flags=DEBUG_FLAGS,
timeout_scalefactor=4,
status_mode="debug",
execution_mode="debug",
),
"release": ModeConfig(
flags=RELEASE_FLAGS,
timeout_scalefactor=1,
status_mode="release",
execution_mode="release",
),
# Normal trybot release configuration. There, dchecks are always on which
# implies debug is set. Hence, the status file needs to assume debug-like
# behavior/timeouts.
"tryrelease": ModeConfig(
flags=RELEASE_FLAGS,
timeout_scalefactor=1,
status_mode="debug",
execution_mode="release",
),
# This mode requires v8 to be compiled with dchecks and slow dchecks.
"slowrelease": ModeConfig(
flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
timeout_scalefactor=2,
status_mode="debug",
execution_mode="release",
),
}
PROGRESS_INDICATORS = {
'verbose': progress.VerboseProgressIndicator,
'dots': progress.DotsProgressIndicator,
'color': progress.ColorProgressIndicator,
'mono': progress.MonochromeProgressIndicator,
}
class TestRunnerError(Exception):
pass
class BuildConfig(object):
def __init__(self, build_config):
# In V8 land, GN's x86 is called ia32.
if build_config['v8_target_cpu'] == 'x86':
self.arch = 'ia32'
else:
self.arch = build_config['v8_target_cpu']
self.is_debug = build_config['is_debug']
self.asan = build_config['is_asan']
self.cfi_vptr = build_config['is_cfi']
self.dcheck_always_on = build_config['dcheck_always_on']
self.gcov_coverage = build_config['is_gcov_coverage']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
self.no_snap = not build_config['v8_use_snapshot']
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
self.ubsan_vptr = build_config['is_ubsan_vptr']
# Export only for MIPS target
if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
self.mips_arch_variant = build_config['mips_arch_variant']
self.mips_use_msa = build_config['mips_use_msa']
def __str__(self):
detected_options = []
if self.asan:
detected_options.append('asan')
if self.cfi_vptr:
detected_options.append('cfi_vptr')
if self.dcheck_always_on:
detected_options.append('dcheck_always_on')
if self.gcov_coverage:
detected_options.append('gcov_coverage')
if self.msan:
detected_options.append('msan')
if self.no_i18n:
detected_options.append('no_i18n')
if self.no_snap:
detected_options.append('no_snap')
if self.predictable:
detected_options.append('predictable')
if self.tsan:
detected_options.append('tsan')
if self.ubsan_vptr:
detected_options.append('ubsan_vptr')
return '\n'.join(detected_options)
class BaseTestRunner(object):
def __init__(self, basedir=None):
self.basedir = basedir or BASE_DIR
self.outdir = None
self.build_config = None
self.mode_name = None
self.mode_options = None
def execute(self, sys_args=None):
if sys_args is None: # pragma: no cover
sys_args = sys.argv[1:]
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
if options.swarming:
# Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print ' '.join(sys.argv)
self._load_build_config(options)
try:
self._process_default_options(options)
self._process_options(options)
except TestRunnerError:
parser.print_help()
raise
args = self._parse_test_args(args)
suites = self._get_suites(args, options)
self._prepare_suites(suites, options)
self._setup_env()
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
tests = [t for s in suites for t in s.tests]
return self._do_execute(tests, args, options)
except TestRunnerError:
return utils.EXIT_CODE_INTERNAL_ERROR
except KeyboardInterrupt:
return utils.EXIT_CODE_INTERRUPTED
def _create_parser(self):
parser = optparse.OptionParser()
parser.usage = '%prog [options] [tests]'
parser.description = """TESTS: %s""" % (TEST_MAP["default"])
self._add_parser_default_options(parser)
self._add_parser_options(parser)
return parser
def _add_parser_default_options(self, parser):
parser.add_option("--gn", help="Scan out.gn for the last built"
" configuration",
default=False, action="store_true")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
parser.add_option("--buildbot", help="DEPRECATED!",
default=False, action="store_true")
parser.add_option("--arch",
help="The architecture to run tests for")
parser.add_option("-m", "--mode",
help="The test mode in which to run (uppercase for ninja"
" and buildbot builds): %s" % MODES.keys())
parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
"directory will be used")
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run")
parser.add_option("--swarming", default=False, action="store_true",
help="Indicates running test driver on swarming.")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type=int)
# Shard
parser.add_option("--shard-count", default=1, type=int,
help="Split tests into this number of shards")
parser.add_option("--shard-run", default=1, type=int,
help="Run this shard from the split up tests.")
# Progress
parser.add_option("-p", "--progress",
choices=PROGRESS_INDICATORS.keys(), default="mono",
help="The style of progress indicator (verbose, dots, "
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite", default="v8tests",
help="The testsuite name in the JUnit output file")
# Rerun
parser.add_option("--rerun-failures-count", default=0, type=int,
help="Number of times to rerun each failing test case. "
"Very slow tests will be rerun only once.")
parser.add_option("--rerun-failures-max", default=100, type=int,
help="Maximum number of failing test cases to rerun")
# Test config
parser.add_option("--command-prefix", default="",
help="Prepended to each shell command used to run a test")
parser.add_option("--extra-flags", action="append", default=[],
help="Additional flags to pass to each test command")
parser.add_option("--isolates", action="store_true", default=False,
help="Whether to test isolates")
parser.add_option("--no-harness", "--noharness",
default=False, action="store_true",
help="Run without test harness of a given suite")
parser.add_option("--random-seed", default=0, type=int,
help="Default seed for initializing random generator")
parser.add_option("-t", "--timeout", default=60, type=int,
help="Timeout for single test in seconds")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="Verbose output")
# TODO(machenbach): Temporary options for rolling out new test runner
# features.
parser.add_option("--mastername", default='',
help="Mastername property from infrastructure. Not "
"setting this option indicates manual usage.")
parser.add_option("--buildername", default='',
help="Buildername property from infrastructure. Not "
"setting this option indicates manual usage.")
def _add_parser_options(self, parser):
pass
def _parse_args(self, parser, sys_args):
options, args = parser.parse_args(sys_args)
if any(map(lambda v: v and ',' in v,
[options.arch, options.mode])): # pragma: no cover
print 'Multiple arch/mode are deprecated'
raise TestRunnerError()
return options, args
def _load_build_config(self, options):
for outdir in self._possible_outdirs(options):
try:
self.build_config = self._do_load_build_config(outdir, options.verbose)
except TestRunnerError:
pass
if not self.build_config: # pragma: no cover
print 'Failed to load build config'
raise TestRunnerError
print 'Build found: %s' % self.outdir
if str(self.build_config):
print '>>> Autodetected:'
print self.build_config
# Returns possible build paths in order:
# gn
# outdir
# outdir/arch.mode
# Each path is provided in two versions: <path> and <path>/mode for buildbot.
def _possible_outdirs(self, options):
def outdirs():
if options.gn:
yield self._get_gn_outdir()
return
yield options.outdir
if options.arch and options.mode:
yield os.path.join(options.outdir,
'%s.%s' % (options.arch, options.mode))
for outdir in outdirs():
yield os.path.join(self.basedir, outdir)
# buildbot option
if options.mode:
yield os.path.join(self.basedir, outdir, options.mode)
def _get_gn_outdir(self):
gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
latest_timestamp = -1
latest_config = None
for gn_config in os.listdir(gn_out_dir):
gn_config_dir = os.path.join(gn_out_dir, gn_config)
if not os.path.isdir(gn_config_dir):
continue
if os.path.getmtime(gn_config_dir) > latest_timestamp:
latest_timestamp = os.path.getmtime(gn_config_dir)
latest_config = gn_config
if latest_config:
print(">>> Latest GN build found: %s" % latest_config)
return os.path.join(DEFAULT_OUT_GN, latest_config)
def _do_load_build_config(self, outdir, verbose=False):
build_config_path = os.path.join(outdir, "v8_build_config.json")
if not os.path.exists(build_config_path):
if verbose:
print("Didn't find build config: %s" % build_config_path)
raise TestRunnerError()
with open(build_config_path) as f:
try:
build_config_json = json.load(f)
except Exception: # pragma: no cover
print("%s exists but contains invalid json. Is your build up-to-date?"
% build_config_path)
raise TestRunnerError()
# In auto-detect mode the outdir is always where we found the build config.
# This ensures that we'll also take the build products from there.
self.outdir = os.path.dirname(build_config_path)
return BuildConfig(build_config_json)
def _process_default_options(self, options):
# We don't use the mode for more path-magic.
# Therefore transform the buildbot mode here to fix build_config value.
if options.mode:
options.mode = self._buildbot_to_v8_mode(options.mode)
build_config_mode = 'debug' if self.build_config.is_debug else 'release'
if options.mode:
if options.mode not in MODES: # pragma: no cover
print '%s mode is invalid' % options.mode
raise TestRunnerError()
if MODES[options.mode].execution_mode != build_config_mode:
print ('execution mode (%s) for %s is inconsistent with build config '
'(%s)' % (
MODES[options.mode].execution_mode,
options.mode,
build_config_mode))
raise TestRunnerError()
self.mode_name = options.mode
else:
self.mode_name = build_config_mode
self.mode_options = MODES[self.mode_name]
if options.arch and options.arch != self.build_config.arch:
print('--arch value (%s) inconsistent with build config (%s).' % (
options.arch, self.build_config.arch))
raise TestRunnerError()
if options.shell_dir: # pragma: no cover
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
if options.j == 0:
options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
def _buildbot_to_v8_mode(self, config):
"""Convert buildbot build configs to configs understood by the v8 runner.
V8 configs are always lower case and without the additional _x64 suffix
for 64 bit builds on windows with ninja.
"""
mode = config[:-4] if config.endswith('_x64') else config
return mode.lower()
def _process_options(self, options):
pass
def _setup_env(self):
# Use the v8 root as cwd as some test cases use "load" with relative paths.
os.chdir(self.basedir)
# Many tests assume an English interface.
os.environ['LANG'] = 'en_US.UTF-8'
symbolizer_option = self._get_external_symbolizer_option()
if self.build_config.asan:
asan_options = [
symbolizer_option,
'allow_user_segv_handler=1',
'allocator_may_return_null=1',
]
if not utils.GuessOS() in ['macos', 'windows']:
# LSAN is not available on mac and windows.
asan_options.append('detect_leaks=1')
else:
asan_options.append('detect_leaks=0')
os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
if self.build_config.cfi_vptr:
os.environ['UBSAN_OPTIONS'] = ":".join([
'print_stacktrace=1',
'print_summary=1',
'symbolize=1',
symbolizer_option,
])
if self.build_config.ubsan_vptr:
os.environ['UBSAN_OPTIONS'] = ":".join([
'print_stacktrace=1',
symbolizer_option,
])
if self.build_config.msan:
os.environ['MSAN_OPTIONS'] = symbolizer_option
if self.build_config.tsan:
suppressions_file = os.path.join(
self.basedir,
'tools',
'sanitizers',
'tsan_suppressions.txt')
os.environ['TSAN_OPTIONS'] = " ".join([
symbolizer_option,
'suppressions=%s' % suppressions_file,
'exit_code=0',
'report_thread_leaks=0',
'history_size=7',
'report_destroy_locked=0',
])
def _get_external_symbolizer_option(self):
external_symbolizer_path = os.path.join(
self.basedir,
'third_party',
'llvm-build',
'Release+Asserts',
'bin',
'llvm-symbolizer',
)
if utils.IsWindows():
# Quote, because sanitizers might confuse colon as option separator.
external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
return 'external_symbolizer_path=%s' % external_symbolizer_path
def _parse_test_args(self, args):
if not args:
args = self._get_default_suite_names()
# Expand arguments with grouped tests. The args should reflect the list
# of suites as otherwise filters would break.
def expand_test_group(name):
return TEST_MAP.get(name, [name])
return reduce(list.__add__, map(expand_test_group, args), [])
def _get_suites(self, args, options):
names = self._args_to_suite_names(args)
return self._load_suites(names, options)
def _args_to_suite_names(self, args):
# Use default tests if no test configuration was provided at the cmd line.
all_names = set(utils.GetSuitePaths(os.path.join(self.basedir, 'test')))
args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
return [name for name in args_names if name in all_names]
def _get_default_suite_names(self):
return []
def _load_suites(self, names, options):
test_config = self._create_test_config(options)
def load_suite(name):
if options.verbose:
print '>>> Loading test suite: %s' % name
return testsuite.TestSuite.LoadTestSuite(
os.path.join(self.basedir, 'test', name),
test_config)
return map(load_suite, names)
def _prepare_suites(self, suites, options):
self._load_status_files(suites, options)
for s in suites:
s.ReadTestCases()
def _load_status_files(self, suites, options):
# simd_mips is true if SIMD is fully supported on MIPS
variables = self._get_statusfile_variables(options)
for s in suites:
s.ReadStatusFile(variables)
def _get_statusfile_variables(self, options):
simd_mips = (
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant == "r6" and
self.build_config.mips_use_msa)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
# target_arch != v8_target_arch in the dumped build config.
return {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"endurance_fuzzer": False,
"gc_fuzzer": False,
"gc_stress": False,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"mode": self.mode_options.status_mode,
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": False,
"predictable": self.build_config.predictable,
"simd_mips": simd_mips,
"simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": False,
"system": utils.GuessOS(),
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
def _create_test_config(self, options):
timeout = options.timeout * self._timeout_scalefactor(options)
return TestConfig(
command_prefix=options.command_prefix,
extra_flags=options.extra_flags,
isolates=options.isolates,
mode_flags=self.mode_options.flags,
no_harness=options.no_harness,
noi18n=self.build_config.no_i18n,
random_seed=options.random_seed,
shell_dir=self.outdir,
timeout=timeout,
verbose=options.verbose,
)
def _timeout_scalefactor(self, options):
factor = self.mode_options.timeout_scalefactor
# Simulators are slow, therefore allow a longer timeout.
if self.build_config.arch in SLOW_ARCHS:
factor *= 2
# Predictable mode is slower.
if self.build_config.predictable:
factor *= 2
return factor
# TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options):
raise NotImplementedError()
def _prepare_procs(self, procs):
procs = filter(None, procs)
for i in xrange(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
def _create_shard_proc(self, options):
myid, count = self._get_shard_info(options)
if count == 1:
return None
return ShardProc(myid - 1, count)
def _get_shard_info(self, options):
"""
Returns pair:
(id of the current shard [1; number of shards], number of shards)
"""
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
shard_count = int(
os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
shard_run = os.environ.get('GTEST_SHARD_INDEX')
if shard_run is not None:
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
shard_run = int(shard_run) + 1
else:
shard_run = options.shard_run
if options.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if options.shard_count != shard_count: # pragma: no cover
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if (options.shard_run > 1 and
options.shard_run != shard_run): # pragma: no cover
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if shard_run < 1 or shard_run > shard_count:
# TODO(machenbach): Turn this into an assert. If that's wrong on the
# bots, printing will be quite useless. Or refactor this code to make
# sure we get a return code != 0 after testing if we got here.
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return 1, 1
return shard_run, shard_count
def _create_progress_indicators(self, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
if options.junitout:
procs.append(progress.JUnitTestProgressIndicator(options.junitout,
options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
return procs
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
def _create_signal_proc(self):
return SignalProc()
def _create_rerun_proc(self, options):
if not options.rerun_failures_count:
return None
return RerunProc(options.rerun_failures_count,
options.rerun_failures_max)
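# Illustrative sketch (not part of the original runner): the shard selection in
# _get_shard_info() above gives the GTEST_TOTAL_SHARDS/GTEST_SHARD_INDEX
# environment variables (set e.g. by swarming) precedence over the
# --shard-count/--shard-run command-line options, and converts the 0-based
# GTEST_SHARD_INDEX into the 1-based shard_run used internally.
def _example_resolve_shard(environ, shard_count_opt=1, shard_run_opt=1):
  shard_count = int(environ.get('GTEST_TOTAL_SHARDS', shard_count_opt))
  shard_index = environ.get('GTEST_SHARD_INDEX')
  if shard_index is not None:
    shard_run = int(shard_index) + 1
  else:
    shard_run = shard_run_opt
  return shard_run, shard_count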
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to convert SavedModel to frozen GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.lite.python.convert import tensor_name
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
def _log_tensor_details(tensor_info):
"""Log tensor details: name, shape, and type."""
for key in tensor_info:
val = tensor_info[key]
dtype = types_pb2.DataType.Name(val.dtype)
if val.tensor_shape.unknown_rank:
shape = "unknown_rank"
else:
dims = [str(dim.size) for dim in val.tensor_shape.dim]
shape = "({})".format(", ".join(dims))
logging.info("Tensor's key in saved_model's tensor_map: %s", key)
logging.info(" tensor name: %s, shape: %s, type: %s", val.name, shape,
dtype)
def _get_meta_graph_def(saved_model_dir, tag_set):
"""Validate saved_model and extract MetaGraphDef.
Args:
saved_model_dir: saved_model path to convert.
tag_set: Set of tag(s) of the MetaGraphDef to load.
Returns:
The meta_graph_def used for tflite conversion.
Raises:
ValueError: No valid MetaGraphDef for given tag_set.
"""
with session.Session(graph=ops.Graph()) as sess:
return loader.load(sess, tag_set, saved_model_dir)
def _get_signature_def(meta_graph, signature_key):
"""Get the signature def from meta_graph with given signature_key.
Args:
meta_graph: meta_graph_def.
signature_key: signature_def in the meta_graph_def.
Returns:
The signature_def used for tflite conversion.
Raises:
ValueError: Given signature_key is not valid for this meta_graph.
"""
signature_def_map = meta_graph.signature_def
signature_def_keys = set(signature_def_map.keys())
logging.info(
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
"following keys: %s", signature_def_keys)
if signature_key not in signature_def_keys:
raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
"values are '{}'.".format(signature_key,
",".join(signature_def_keys)))
return signature_def_map[signature_key]
def _get_inputs_outputs(signature_def):
"""Get inputs and outputs from SignatureDef.
Args:
signature_def: SignatureDef in the meta_graph_def for conversion.
Returns:
The inputs and outputs in the graph for conversion.
"""
inputs_tensor_info = signature_def.inputs
outputs_tensor_info = signature_def.outputs
logging.info("input tensors info: ")
_log_tensor_details(inputs_tensor_info)
logging.info("output tensors info: ")
_log_tensor_details(outputs_tensor_info)
def gather_names(tensor_info):
return [tensor_info[key].name for key in tensor_info]
inputs = gather_names(inputs_tensor_info)
outputs = gather_names(outputs_tensor_info)
return inputs, outputs
def _get_tensors(graph, signature_def_tensor_names=None,
user_tensor_names=None):
"""Gets the tensors associated with the tensor names.
  Either signature_def_tensor_names or user_tensor_names should be provided. If
  the user provides tensor names, the tensors associated with those names are
  returned. Otherwise, the tensors associated with the names in the
  SignatureDef are returned.
Args:
graph: GraphDef representing graph.
signature_def_tensor_names: Tensor names stored in either the inputs or
outputs of a SignatureDef. (default None)
user_tensor_names: Tensor names provided by the user. (default None)
Returns:
List of tensors.
Raises:
ValueError:
      signature_def_tensor_names and user_tensor_names are undefined or empty.
user_tensor_names are not valid.
"""
tensors = []
if user_tensor_names:
# Sort the tensor names.
user_tensor_names = sorted(user_tensor_names)
tensors = get_tensors_from_tensor_names(graph, user_tensor_names)
elif signature_def_tensor_names:
tensors = [
graph.get_tensor_by_name(name)
for name in sorted(signature_def_tensor_names)
]
else:
# Throw ValueError if signature_def_tensors and user_tensor_names are both
# either undefined or empty.
raise ValueError(
"Specify either signature_def_tensor_names or user_tensor_names")
return tensors
def get_tensors_from_tensor_names(graph, tensor_names):
"""Gets the Tensors associated with the `tensor_names` in the provided graph.
Args:
graph: TensorFlow Graph.
tensor_names: List of strings that represent names of tensors in the graph.
Returns:
A list of Tensor objects in the same order the names are provided.
Raises:
ValueError:
tensor_names contains an invalid tensor name.
"""
# Get the list of all of the tensors.
tensor_name_to_tensor = {
tensor_name(tensor): tensor for op in graph.get_operations()
for tensor in op.values()
}
# Get the tensors associated with tensor_names.
tensors = []
invalid_tensors = []
for name in tensor_names:
tensor = tensor_name_to_tensor.get(name)
if tensor is None:
invalid_tensors.append(name)
else:
tensors.append(tensor)
# Throw ValueError if any user input names are not valid tensors.
if invalid_tensors:
raise ValueError("Invalid tensors '{}' were found.".format(
",".join(invalid_tensors)))
return tensors
def set_tensor_shapes(tensors, shapes):
"""Sets Tensor shape for each tensor if the shape is defined.
Args:
tensors: TensorFlow ops.Tensor.
shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
"""
if shapes:
for tensor in tensors:
shape = shapes.get(tensor_name(tensor))
if shape is not None:
tensor.set_shape(shape)
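# Illustrative sketch (not part of the original module): builds a tiny graph to
# show how get_tensors_from_tensor_names() and set_tensor_shapes() are used
# together. It assumes tensor_name() strips the output index, so tensors are
# referred to as "foo" rather than "foo:0".
def _example_tensor_names_and_shapes():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  graph = ops.Graph()
  with graph.as_default():
    array_ops.placeholder(dtypes.float32, shape=None, name="foo")
  tensors = get_tensors_from_tensor_names(graph, ["foo"])
  set_tensor_shapes(tensors, {"foo": [1, 16, 16, 3]})
  return tensors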
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key):
"""Converts a SavedModel to a frozen graph.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input arrays
from SignatureDef when none are provided.
input_shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
frozen_graph_def: Frozen GraphDef.
in_tensors: List of input tensors for the graph.
out_tensors: List of output tensors for the graph.
Raises:
ValueError:
SavedModel doesn't contain a MetaGraphDef identified by tag_set.
signature_key is not in the MetaGraphDef.
assets/ directory is in the MetaGraphDef.
input_shapes does not match the length of input_arrays.
input_arrays or output_arrays are not valid.
"""
# Read SignatureDef.
meta_graph = _get_meta_graph_def(saved_model_dir, tag_set)
signature_def = _get_signature_def(meta_graph, signature_key)
inputs, outputs = _get_inputs_outputs(signature_def)
# Check SavedModel for assets directory.
collection_def = meta_graph.collection_def
if constants.ASSETS_KEY in collection_def:
raise ValueError("SavedModels with assets/ directory are not supported.")
graph = ops.Graph()
with session.Session(graph=graph) as sess:
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
# Gets input and output tensors.
# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
in_tensors = _get_tensors(graph, inputs, input_arrays)
out_tensors = _get_tensors(graph, outputs, output_arrays)
set_tensor_shapes(in_tensors, input_shapes)
output_names = [node.split(":")[0] for node in outputs]
frozen_graph_def = tf_graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), output_names)
return frozen_graph_def, in_tensors, out_tensors
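# Illustrative usage sketch (not part of the original module): the SavedModel
# directory is a hypothetical placeholder; "serve" and "serving_default" are
# the conventional serving tag and signature key, but depend on how the model
# was exported.
def _example_freeze_saved_model(saved_model_dir="/tmp/saved_model"):
  return freeze_saved_model(
      saved_model_dir=saved_model_dir,
      input_arrays=None,   # fall back to the SignatureDef inputs
      input_shapes=None,
      output_arrays=None,  # fall back to the SignatureDef outputs
      tag_set=set(["serve"]),
      signature_key="serving_default")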
|
|
from __future__ import unicode_literals
import collections
import copy
import datetime
import numbers
import operator
import re
import scorched.strings
import scorched.exc
import scorched.dates
from functools import reduce
from collections.abc import Iterable
from scorched.compat import str
from scorched.compat import basestring
from scorched.compat import python_2_unicode_compatible
PARSERS = ("edismax", "dismax")
def is_iter(val):
return not isinstance(val, basestring) and isinstance(val, Iterable)
@python_2_unicode_compatible
class LuceneQuery(object):
default_term_re = re.compile(r'^\w+$')
def __init__(self, option_flag=None, original=None,
multiple_tags_allowed=False):
self.normalized = False
if original is None:
self.option_flag = option_flag
self.multiple_tags_allowed = multiple_tags_allowed
self.terms = collections.defaultdict(set)
self.phrases = collections.defaultdict(set)
self.ranges = set()
self.subqueries = []
self._and = True
self._or = self._not = self._pow = False
self.boosts = []
else:
self.option_flag = original.option_flag
self.multiple_tags_allowed = original.multiple_tags_allowed
self.terms = copy.copy(original.terms)
self.phrases = copy.copy(original.phrases)
self.ranges = copy.copy(original.ranges)
self.subqueries = copy.copy(original.subqueries)
self._or = original._or
self._and = original._and
self._not = original._not
self._pow = original._pow
self.boosts = copy.copy(original.boosts)
def clone(self):
return LuceneQuery(original=self)
def options(self):
opts = {}
s = self.__unicode_special__()
if s:
opts[self.option_flag] = s
return opts
# Below, we sort all our value_sets - this is for predictability when
# testing.
def serialize_term_queries(self, terms):
s = []
for name, value_set in list(terms.items()):
if name:
tmp = [u'%s:%s' % (name, self.to_query(value))
for value in value_set]
if name == '*':
tmp = [u'%s:%s' % (name, value)
for value in value_set]
s += tmp
else:
s += [self.to_query(value) for value in value_set]
return sorted(s)
def to_solr(self, value):
if isinstance(value, bool):
return u"true" if value else u"false"
if isinstance(value, datetime.datetime):
return str(scorched.dates.solr_date(value))
return str(value)
def to_query(self, value):
if isinstance(value, scorched.strings.DismaxString):
ret = value
elif isinstance(value, scorched.strings.WildcardString):
ret = value.escape_for_lqs_term()
else:
ret = scorched.strings.RawString(
self.to_solr(value)).escape_for_lqs_term()
return ret
range_query_templates = {
"any": u"[* TO *]",
"lt": u"{* TO %s}",
"lte": u"[* TO %s]",
"gt": u"{%s TO *}",
"gte": u"[%s TO *]",
"rangeexc": u"{%s TO %s}",
"range": u"[%s TO %s]",
}
def serialize_range_queries(self):
s = []
for name, rel, values in sorted(self.ranges):
range_s = self.range_query_templates[rel]
if values:
values = values[0]
if not is_iter(values):
values = [values]
values = sorted(values)
values = [self.to_query(v) for v in values]
range_s = self.range_query_templates[rel] % tuple(
values)
s.append(u"%s:%s" % (name, range_s))
return s
def child_needs_parens(self, child):
if len(child) == 1:
return False
elif self._or:
return not (child._or or child._pow)
elif (self._and or self._not):
return not (child._and or child._not or child._pow)
elif self._pow is not False:
return True
else:
return True
@staticmethod
def merge_term_dicts(*args):
d = collections.defaultdict(set)
for arg in args:
for k, v in list(arg.items()):
d[k].update(v)
return dict((k, v) for k, v in list(d.items()))
def normalize(self):
if self.normalized:
return self, False
mutated = False
_subqueries = []
_terms = self.terms
_phrases = self.phrases
_ranges = self.ranges
for s in self.subqueries:
_s, changed = s.normalize()
if not _s or changed:
mutated = True
if _s:
if (_s._and and self._and) or (_s._or and self._or):
mutated = True
_terms = self.merge_term_dicts(_terms, _s.terms)
_phrases = self.merge_term_dicts(_phrases, _s.phrases)
_ranges = _ranges.union(_s.ranges)
_subqueries.extend(_s.subqueries)
else:
_subqueries.append(_s)
if mutated:
newself = self.clone()
newself.terms = _terms
newself.phrases = _phrases
newself.ranges = _ranges
newself.subqueries = _subqueries
self = newself
if self._not:
if not len(self.subqueries):
newself = self.clone()
newself._not = False
newself._and = True
self = newself
mutated = True
elif len(self.subqueries) == 1:
if self.subqueries[0]._not:
newself = self.clone()
newself.subqueries = self.subqueries[0].subqueries
newself._not = False
newself._and = True
self = newself
mutated = True
else:
raise ValueError
elif self._pow:
if not len(self.subqueries):
newself = self.clone()
newself._pow = False
self = newself
mutated = True
elif self._and or self._or:
if not self.terms and not self.phrases and not self.ranges \
and not self.boosts:
if len(self.subqueries) == 1:
self = self.subqueries[0]
mutated = True
self.normalized = True
return self, mutated
    def __str__(self):
return self.__unicode_special__(force_serialize=True)
def __unicode_special__(self, level=0, op=None, force_serialize=False):
if not self.normalized:
self, _ = self.normalize()
if self.boosts:
# Clone and rewrite to effect the boosts.
newself = self.clone()
newself.boosts = []
boost_queries = [self.Q(**kwargs) ** boost_score
for kwargs, boost_score in self.boosts]
newself = newself | (newself & reduce(operator.or_, boost_queries))
newself, _ = newself.normalize()
return newself.__unicode_special__(level=level,
force_serialize=force_serialize)
else:
alliter = [self.serialize_term_queries(self.terms),
self.serialize_term_queries(self.phrases),
self.serialize_range_queries()]
u = []
for iterator in alliter:
u.extend(iterator)
for q in self.subqueries:
op_ = u'OR' if self._or else u'AND'
if self.child_needs_parens(q):
u.append(
u"(%s)" % q.__unicode_special__(
level=level + 1, op=op_))
else:
u.append(
u"%s" % q.__unicode_special__(level=level + 1, op=op_))
if self._and:
if (not force_serialize and
level == 0 and
self.multiple_tags_allowed):
return u
else:
return u' AND '.join(u)
elif self._or:
return u' OR '.join(u)
elif self._not:
assert len(u) == 1
if level == 0 or (level == 1 and op == "AND"):
return u'NOT %s' % u[0]
else:
return u'(*:* AND NOT %s)' % u[0]
elif self._pow is not False:
assert len(u) == 1
return u"%s^%s" % (u[0], self._pow)
else:
raise ValueError
def __len__(self):
# How many terms in this (sub) query?
if len(self.subqueries) == 1:
subquery_length = len(self.subqueries[0])
else:
subquery_length = len(self.subqueries)
return sum([sum(len(v) for v in list(self.terms.values())),
sum(len(v) for v in list(self.phrases.values())),
len(self.ranges),
subquery_length])
def Q(self, *args, **kwargs):
q = LuceneQuery()
q.add(args, kwargs)
return q
def __bool__(self):
return bool(self.terms) or bool(self.phrases) or bool(self.ranges) or \
bool(self.subqueries)
def __or__(self, other):
q = LuceneQuery()
q._and = False
q._or = True
q.subqueries = [self, other]
return q
def __and__(self, other):
q = LuceneQuery()
q.subqueries = [self, other]
return q
def __invert__(self):
q = LuceneQuery()
q._and = False
q._not = True
q.subqueries = [self]
return q
def __pow__(self, value):
try:
float(value)
except ValueError:
raise ValueError("Non-numeric value supplied for boost")
q = LuceneQuery()
q.subqueries = [self]
q._and = False
q._pow = value
return q
def add(self, args, kwargs):
self.normalized = False
_args = []
for arg in args:
if isinstance(arg, LuceneQuery):
self.subqueries.append(arg)
else:
_args.append(arg)
args = _args
try:
terms_or_phrases = kwargs.pop("__terms_or_phrases")
except KeyError:
terms_or_phrases = None
for value in args:
self.add_exact(None, value, terms_or_phrases)
for k, v in list(kwargs.items()):
try:
field_name, rel = k.split("__")
except ValueError:
field_name, rel = k, 'eq'
if not field_name:
if (k, v) != ("*", "*"):
# the only case where wildcards in field names are allowed
raise ValueError("%s is not a valid field name" % k)
if rel == 'eq':
self.add_exact(field_name, v, terms_or_phrases)
else:
self.add_range(field_name, rel, v)
def add_exact(self, field_name, values, term_or_phrase):
# We let people pass in a list of values to match.
# This really only makes sense for text fields or
# multivalued fields.
if not is_iter(values):
values = [values]
# We can only do a field_name == "*" if:
if not field_name or field_name == "*":
if len(values) == 1 and values[0] == "*":
self.terms["*"].add("*")
return
insts = values
for inst in insts:
this_term_or_phrase = term_or_phrase or self.term_or_phrase(inst)
if isinstance(inst, numbers.Number):
this_term_or_phrase = 'terms'
getattr(self, this_term_or_phrase)[field_name].add(inst)
def add_range(self, field_name, rel, value):
if rel not in self.range_query_templates:
raise scorched.exc.SolrError("No such relation '%s' defined" % rel)
insts = (value,)
if rel in ('range', 'rangeexc'):
try:
assert len(value) == 2
except (AssertionError, TypeError):
raise scorched.exc.SolrError(
"'%s__%s' argument must be a length-2 iterable" % (
field_name, rel))
elif rel == 'any':
if value is not True:
                raise scorched.exc.SolrError(
                    "'%s__%s' argument must be True" % (field_name, rel))
insts = ()
self.ranges.add((field_name, rel, insts))
def term_or_phrase(self, arg, force=None):
return 'terms' if self.default_term_re.match(str(arg)) else 'phrases'
def add_boost(self, kwargs, boost_score):
self.boosts.append((kwargs, boost_score))
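# Illustrative sketch (not part of scorched): keyword arguments are split on
# "__" into a field name and a relation, so plain kwargs become term/phrase
# queries and *__lt/gt/range/... kwargs become Lucene range queries. The
# serialized output shown is approximate.
def _example_lucene_query():
    q = LuceneQuery().Q(title="silas", price__lt=10)
    return str(q)  # roughly: 'title:silas AND price:{* TO 10}'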
class BaseSearch(object):
"""Base class for common search options management"""
option_modules = ('query_obj', 'filter_obj', 'paginator',
'more_like_this', 'highlighter', 'postings_highlighter',
'faceter', 'grouper', 'sorter', 'facet_querier',
'debugger', 'spellchecker', 'requesthandler',
'field_limiter', 'parser', 'pivoter', 'facet_ranger',
'term_vectors', 'stat')
def _init_common_modules(self):
self.query_obj = LuceneQuery(u'q')
self.filter_obj = LuceneQuery(u'fq',
multiple_tags_allowed=True)
self.paginator = PaginateOptions()
self.highlighter = HighlightOptions()
self.postings_highlighter = PostingsHighlightOptions()
self.faceter = FacetOptions()
self.pivoter = FacetPivotOptions()
self.grouper = GroupOptions()
self.sorter = SortOptions()
self.debugger = DebugOptions()
self.spellchecker = SpellcheckOptions()
self.requesthandler = RequestHandlerOption()
self.field_limiter = FieldLimitOptions()
self.facet_ranger = FacetRangeOptions()
self.facet_querier = FacetQueryOptions()
self.term_vectors = TermVectorOptions()
self.stat = StatOptions()
def clone(self):
return self.__class__(interface=self.interface, original=self)
def Q(self, *args, **kwargs):
q = LuceneQuery()
q.add(args, kwargs)
return q
def query(self, *args, **kwargs):
newself = self.clone()
newself.query_obj.add(args, kwargs)
return newself
def query_by_term(self, *args, **kwargs):
return self.query(__terms_or_phrases="terms", *args, **kwargs)
def query_by_phrase(self, *args, **kwargs):
return self.query(__terms_or_phrases="phrases", *args, **kwargs)
def boost_relevancy(self, boost_score, **kwargs):
if not self.query_obj:
raise TypeError("Can't boost the relevancy of an empty query")
try:
float(boost_score)
except ValueError:
raise ValueError("Non-numeric boost value supplied")
newself = self.clone()
newself.query_obj.add_boost(kwargs, boost_score)
return newself
def filter(self, *args, **kwargs):
newself = self.clone()
newself.filter_obj.add(args, kwargs)
return newself
def filter_by_term(self, *args, **kwargs):
return self.filter(__terms_or_phrases="terms", *args, **kwargs)
def filter_by_phrase(self, *args, **kwargs):
return self.filter(__terms_or_phrases="phrases", *args, **kwargs)
def facet_by(self, fields, **kwargs):
newself = self.clone()
newself.faceter.update(fields, **kwargs)
return newself
def facet_range(self, fields, **kwargs):
newself = self.clone()
newself.facet_ranger.update(fields, **kwargs)
return newself
def pivot_by(self, fields, **kwargs):
newself = self.clone()
newself.pivoter.update(fields, **kwargs)
return newself
def group_by(self, field, **kwargs):
newself = self.clone()
kwargs['field'] = field
if 'ngroups' not in kwargs:
kwargs['ngroups'] = True
newself.grouper.update(None, **kwargs)
return newself
def facet_query(self, *args, **kwargs):
newself = self.clone()
newself.facet_querier.update(self.Q(*args, **kwargs))
return newself
def highlight(self, fields=None, **kwargs):
newself = self.clone()
newself.highlighter.update(fields, **kwargs)
return newself
def postings_highlight(self, fields=None, **kwargs):
newself = self.clone()
newself.postings_highlighter.update(fields, **kwargs)
return newself
def mlt(self, fields, query_fields=None, **kwargs):
newself = self.clone()
newself.more_like_this.update(fields, query_fields, **kwargs)
return newself
def term_vector(self, fields=None, **kwargs):
newself = self.clone()
newself.term_vectors.update(fields, **kwargs)
return newself
def alt_parser(self, parser, **kwargs):
if parser not in PARSERS:
raise scorched.exc.SolrError(
"Parser (%s) is not supported choose between (%s)" % (
parser, PARSERS))
newself = self.clone()
if parser == 'dismax':
newself.parser = DismaxOptions()
elif parser == 'edismax':
newself.parser = EdismaxOptions()
newself.parser.update(**kwargs)
return newself
def paginate(self, start=None, rows=None):
newself = self.clone()
newself.paginator.update(start, rows)
return newself
def debug(self):
newself = self.clone()
newself.debugger.update(True)
return newself
def spellcheck(self):
newself = self.clone()
newself.spellchecker.update(True)
return newself
def set_requesthandler(self, handler):
newself = self.clone()
newself.requesthandler.update(handler)
return newself
def sort_by(self, field):
newself = self.clone()
newself.sorter.update(field)
return newself
def field_limit(self, fields=None, score=False, all_fields=False):
newself = self.clone()
newself.field_limiter.update(fields, score, all_fields)
return newself
def options(self):
options = {}
for option_module in self.option_modules:
if hasattr(self, option_module):
_attr = getattr(self, option_module)
options.update(_attr.options())
return options
def results_as(self, constructor):
newself = self.clone()
return newself
def stats(self, fields, **kwargs):
newself = self.clone()
newself.stat.update(fields, **kwargs)
return newself
def params(self):
return params_from_dict(**self.options())
def constructor(self, result, constructor):
construct_docs = lambda docs: [constructor(**d) for d in docs]
result.result.docs = construct_docs(result.result.docs)
for key in result.more_like_these:
result.more_like_these[key].docs = construct_docs(
result.more_like_these[key].docs)
return result
_count = None
def count(self):
if self._count is None:
# We haven't gotten the count yet. Get it. Clone self for this
# query or else we'll set rows=0 for remainder.
newself = self.clone()
r = newself.paginate(None, 0).execute()
if r.groups:
total = getattr(r.groups, r.group_field)['ngroups']
else:
total = r.result.numFound
# Set the cache
self._count = total
return self._count
def __getitem__(self, key):
if isinstance(key, int):
start, rows = key, 1
elif isinstance(key, slice):
start, rows = key.start, key.stop-key.start
else:
raise TypeError('Subscript must be int or slice')
return self.paginate(start, rows).execute()
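# Illustrative sketch (not part of scorched): every option method clones the
# search before modifying it (see the comment in count() above), so calls chain
# freely without mutating the original object. `si` stands for a hypothetical
# SolrInterface instance.
def _example_chained_search(si):
    return (SolrSearch(si)
            .query(title="silas")
            .filter(price__lt=10)
            .facet_by("author")
            .paginate(start=0, rows=20)
            .options())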
class SolrSearch(BaseSearch):
def __init__(self, interface, original=None):
self.interface = interface
if original is None:
self.more_like_this = MoreLikeThisOptions()
self._init_common_modules()
else:
for opt in self.option_modules:
if hasattr(original, opt):
_attr = getattr(original, opt)
setattr(self, opt, _attr.clone())
def options(self):
options = super(SolrSearch, self).options()
if 'q' not in options:
options['q'] = '*:*' # search everything
return options
def execute(self, constructor=None):
ret = self.interface.search(**self.options())
if constructor:
ret = self.constructor(ret, constructor)
return ret
def cursor(self, constructor=None, rows=None):
if self.paginator.start is not None:
raise ValueError(
"cannot use the start parameter and cursors at the same time")
search = self
if rows:
search = search.paginate(rows=rows)
return SolrCursor(search, constructor)
class SolrCursor:
def __init__(self, search, constructor):
self.search = search
self.constructor = constructor
def __iter__(self):
cursor_mark = "*"
while True:
options = self.search.options()
options['cursorMark'] = cursor_mark
ret = self.search.interface.search(**options)
if self.constructor:
ret = self.search.constructor(ret, self.constructor)
for item in ret:
yield item
if ret.next_cursor_mark == cursor_mark:
break
cursor_mark = ret.next_cursor_mark
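# Illustrative usage sketch (not part of scorched): `si` stands for a
# hypothetical SolrInterface. Solr's cursorMark pagination additionally
# requires a sort that includes the collection's uniqueKey field, and the
# cursor cannot be combined with an explicit start offset (see cursor() above).
def _example_cursor_iteration(si):
    search = SolrSearch(si).query(text="silas").sort_by("id")
    for doc in search.cursor(rows=100):
        yield doc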
class MltSolrSearch(BaseSearch):
"""Manage parameters to build a MoreLikeThisHandler query"""
trivial_encodings = [
"utf_8", "u8", "utf", "utf8", "ascii", "646", "us_ascii"]
def __init__(self, interface, content=None, content_charset=None, url=None,
original=None):
self.interface = interface
if original is None:
if content is not None and url is not None:
raise ValueError(
"Cannot specify both content and url")
if content is not None:
if content_charset is None:
content_charset = 'utf-8'
if isinstance(content, str):
content = content.encode('utf-8')
elif content_charset.lower(
).replace('-', '_') not in self.trivial_encodings:
content = content.decode(content_charset).encode('utf-8')
self.content = content
self.url = url
self.more_like_this = MoreLikeThisHandlerOptions()
self._init_common_modules()
else:
self.content = original.content
self.url = original.url
for opt in self.option_modules:
if hasattr(original, opt):
_attr = getattr(original, opt)
setattr(self, opt, _attr.clone())
def query(self, *args, **kwargs):
if self.content is not None or self.url is not None:
raise ValueError(
"Cannot specify query as well as content on an MltSolrSearch")
return super(MltSolrSearch, self).query(*args, **kwargs)
def query_by_term(self, *args, **kwargs):
if self.content is not None or self.url is not None:
raise ValueError(
"Cannot specify query as well as content on an MltSolrSearch")
return super(MltSolrSearch, self).query_by_term(*args, **kwargs)
def query_by_phrase(self, *args, **kwargs):
if self.content is not None or self.url is not None:
raise ValueError(
"Cannot specify query as well as content on an MltSolrSearch")
return super(MltSolrSearch, self).query_by_phrase(*args, **kwargs)
def Q(self, *args, **kwargs):
if self.content is not None or self.url is not None:
raise ValueError(
"Cannot specify query as well as content on an MltSolrSearch")
return super(MltSolrSearch, self).Q(*args, **kwargs)
def boost_relevancy(self, *args, **kwargs):
if self.content is not None or self.url is not None:
raise ValueError(
"Cannot specify query as well as content on an MltSolrSearch")
return super(MltSolrSearch, self).boost_relevancy(*args, **kwargs)
def options(self):
options = super(MltSolrSearch, self).options()
if self.url is not None:
options['stream.url'] = self.url
if 'q' not in options:
options['q'] = '*:*' # search everything
return options
def execute(self, constructor=None):
ret = self.interface.mlt_search(content=self.content, **self.options())
if constructor:
ret = self.constructor(ret, constructor)
return ret
class Options(object):
def clone(self):
return self.__class__(self)
def invalid_value(self, msg=""):
assert False, msg
def update(self, fields=None, **kwargs):
if fields:
if not is_iter(fields):
fields = [fields]
for field in set(fields) - set(self.fields):
self.fields[field] = {}
elif kwargs:
fields = [None]
checked_kwargs = self.check_opts(kwargs)
for k, v in list(checked_kwargs.items()):
for field in fields:
self.fields[field][k] = v
def check_opts(self, kwargs):
checked_kwargs = {}
for k, v in list(kwargs.items()):
if k not in self.opts:
raise scorched.exc.SolrError(
"No such option for %s: %s" % (self.option_name, k))
opt_type = self.opts[k]
try:
if isinstance(opt_type, (list, tuple)):
assert v in opt_type
elif isinstance(opt_type, type):
v = opt_type(v)
else:
v = opt_type(self, v)
            except Exception:
raise scorched.exc.SolrError(
"Invalid value for %s option %s: %s" % (self.option_name,
k, v))
checked_kwargs[k] = v
return checked_kwargs
def options(self):
opts = {}
if self.fields:
opts[self.option_name] = True
fields = [field for field in self.fields if field]
self.field_names_in_opts(opts, fields)
for field_name, field_opts in list(self.fields.items()):
if not field_name:
for field_opt, v in list(field_opts.items()):
opts['%s.%s' % (self.option_name, field_opt)] = v
else:
for field_opt, v in list(field_opts.items()):
opts['f.%s.%s.%s' %
(field_name, self.option_name, field_opt)] = v
return opts
class FacetOptions(Options):
option_name = "facet"
opts = {
"prefix": str,
"sort": [True, False, "count", "index"],
"limit": int,
"offset":
lambda self, x: int(x) >= 0 and int(x) or self.invalid_value(),
"mincount":
lambda self, x: int(x) >= 0 and int(x) or self.invalid_value(),
"missing": bool,
"method": ["enum", "fc"],
"enum.cache.minDf": int,
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
else:
self.fields = copy.copy(original.fields)
def field_names_in_opts(self, opts, fields):
if fields:
opts["facet.field"] = sorted(fields)
class FacetRangeOptions(Options):
option_name = "facet.range"
opts = {
"start": str,
"end": str,
"gap": str,
"hardend": bool,
"limit": int,
"mincount": int,
"include": ["lower", "upper", "edge", "outer", "all"],
"other": ["before", "after", "between", "none", "all"],
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
else:
self.fields = copy.copy(original.fields)
def field_names_in_opts(self, opts, fields):
opts['facet'] = True
opts[self.option_name] = list(self.fields.keys())
def options(self):
'''
Override options so we can move limit & mincount from facet.range to
facet.
'''
opts = super(FacetRangeOptions, self).options()
for field in self.fields.keys():
for key in ('limit', 'mincount'):
oldkey = 'f.%s.facet.range.%s' % (field, key)
newkey = 'f.%s.facet.%s' % (field, key)
if oldkey in opts:
opts[newkey] = opts[oldkey]
del opts[oldkey]
return opts
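# Illustrative sketch (not part of scorched): per-field facet.range options are
# emitted as f.<field>.facet.range.* keys, except limit/mincount, which are
# rewritten to f.<field>.facet.* as described in options() above.
def _example_facet_range_options():
    opts = FacetRangeOptions()
    opts.update("price", start="0", end="100", gap="10", mincount=1)
    return opts.options()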
class FacetPivotOptions(Options):
option_name = "facet.pivot"
opts = {
"mincount":
lambda self, x: int(x) >= 0 and int(x) or self.invalid_value(),
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
else:
self.fields = copy.copy(original.fields)
def field_names_in_opts(self, opts, fields):
opts["facet"] = True
if fields:
field_opts = {}
for field in fields:
field_opts.update(self.fields[field])
del(self.fields[field])
self.fields[None] = field_opts
opts["facet.pivot"] = ','.join(sorted(fields))
class GroupOptions(Options):
option_name = "group"
opts = {
"field": str,
"limit": int,
"main": bool,
"ngroups": bool
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
else:
self.fields = copy.copy(original.fields)
def field_names_in_opts(self, opts, fields):
if fields:
opts["facet.field"] = sorted(fields)
class DismaxOptions(Options):
_name = "dismax"
option_name = "defType"
opts = {
"f": dict,
"qf": dict,
"mm": int,
"pf": dict,
"ps": int,
"qs": int,
"tie": float,
"bq": str,
"bf": str,
}
def __init__(self, original=None):
if original is None:
self.kwargs = {}
else:
self.kwargs = original.kwargs.copy()
def update(self, **kwargs):
checked_kwargs = self.check_opts(kwargs)
for f in ('qf', 'pf'):
field = kwargs.get(f, {})
for k, v in list(field.items()):
if v is not None:
try:
v = float(v)
except ValueError:
raise scorched.exc.SolrError(
"'%s' has non-numerical boost value" % k)
self.kwargs.update(checked_kwargs)
def options(self):
opts = {}
opts[self.option_name] = self._name
for opt_name, opt_value in list(self.kwargs.items()):
opt_type = self.opts[opt_name]
opts[opt_name] = opt_type(opt_value)
if opt_name in ("qf", "pf"):
qf_arg = []
items = sorted(list(opt_value.items()), reverse=True)
for k, v in items:
if v is None:
qf_arg.append(k)
else:
qf_arg.append("%s^%s" % (k, float(v)))
opts[opt_name] = " ".join(qf_arg)
return opts
class EdismaxOptions(DismaxOptions):
_name = "edismax"
def options(self):
opts = super(EdismaxOptions, self).options()
if 'f' in opts:
f = opts.pop('f')
for field, aliases in f.items():
opts['f.%s.qf' % field] = ' '.join(aliases)
return opts
class HighlightOptions(Options):
option_name = "hl"
opts = {"snippets": int,
"fragsize": int,
"mergeContinuous": bool,
"requireFieldMatch": bool,
"maxAnalyzedChars": int,
"alternateField": str,
"maxAlternateFieldLength": int,
"formatter": ["simple"],
"simple.pre": str,
"simple.post": str,
"fragmenter": str,
"useFastVectorHighlighter": bool, # available as of Solr 3.1
"usePhraseHighlighter": bool,
"highlightMultiTerm": bool,
"regex.slop": float,
"regex.pattern": str,
"regex.maxAnalyzedChars": int,
"boundaryScanner": str,
"bs.maxScan": str,
"bs.chars": str,
"bs.type": str,
"bs.language": str,
"bs.country": str,
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
else:
self.fields = copy.copy(original.fields)
def field_names_in_opts(self, opts, fields):
if fields:
opts["hl.fl"] = ",".join(sorted(fields))
class PostingsHighlightOptions(Options):
option_name = "hl"
opts = {"snippets": int,
"tag.pre": str,
"tag.post": str,
"tag.ellipsis": str,
"defaultSummary": bool,
"encoder": str,
"score.k1": float,
"score.b": float,
"score.pivot": float,
"bs.type": str,
"bs.language": str,
"bs.country": str,
"bs.variant": str,
"maxAnalyzedChars": str,
"multiValuedSeperatorChar": str
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
else:
self.fields = copy.copy(original.fields)
def field_names_in_opts(self, opts, fields):
if fields:
opts["hl.fl"] = ",".join(sorted(fields))
class MoreLikeThisOptions(Options):
option_name = "mlt"
opts = {"count": int,
"mintf": int,
"mindf": int,
"maxdf": int,
"minwl": int,
"maxwl": int,
"maxqt": int,
"maxntp": int,
"boost": bool,
}
def __init__(self, original=None):
if original is None:
self.fields = set()
self.query_fields = {}
self.kwargs = {}
else:
self.fields = copy.copy(original.fields)
self.query_fields = copy.copy(original.query_fields)
self.kwargs = copy.copy(original.kwargs)
def update(self, fields, query_fields=None, **kwargs):
if fields is None:
return
if not is_iter(fields):
fields = [fields]
self.fields.update(fields)
if query_fields is not None:
for k, v in list(query_fields.items()):
if k not in self.fields:
raise scorched.exc.SolrError(
"'%s' specified in query_fields but not fields" % k)
if v is not None:
try:
v = float(v)
except ValueError:
raise scorched.exc.SolrError(
"'%s' has non-numerical boost value" % k)
self.query_fields.update(query_fields)
checked_kwargs = self.check_opts(kwargs)
self.kwargs.update(checked_kwargs)
def options(self):
opts = {}
if self.fields:
opts['mlt'] = True
opts['mlt.fl'] = ','.join(sorted(self.fields))
if self.query_fields:
qf_arg = []
items = sorted(list(self.query_fields.items()), reverse=True)
for k, v in items:
if v is None:
qf_arg.append(k)
else:
qf_arg.append("%s^%s" % (k, float(v)))
opts["mlt.qf"] = " ".join(qf_arg)
for opt_name, opt_value in list(self.kwargs.items()):
opt_type = self.opts[opt_name]
opts["mlt.%s" % opt_name] = opt_type(opt_value)
return opts
class MoreLikeThisHandlerOptions(MoreLikeThisOptions):
opts = {
'match.include': bool,
'match.offset': int,
'interestingTerms': ["list", "details", "none"],
}
opts.update(MoreLikeThisOptions.opts)
del opts['count']
def options(self):
opts = {}
if self.fields:
opts['mlt.fl'] = ','.join(sorted(self.fields))
if self.query_fields:
qf_arg = []
for k, v in list(self.query_fields.items()):
if v is None:
qf_arg.append(k)
else:
qf_arg.append("%s^%s" % (k, float(v)))
opts["mlt.qf"] = " ".join(qf_arg)
for opt_name, opt_value in list(self.kwargs.items()):
opts["mlt.%s" % opt_name] = opt_value
return opts
class TermVectorOptions(Options):
option_name = "tv"
opts = {
"all": bool,
"df": bool,
"offsets": bool,
"positions": bool,
"payloads": bool,
"tf": bool,
"tf_idf": bool,
}
def __init__(self, original=None):
if original is None:
self.fields = collections.defaultdict(dict)
self.enabled = False
else:
self.fields = copy.copy(original.fields)
self.enabled = original.enabled
def field_names_in_opts(self, opts, fields):
if fields:
opts["tv.fl"] = ",".join(sorted(fields))
def update(self, fields=None, **kwargs):
super(TermVectorOptions, self).update(fields, **kwargs)
self.enabled = True
def options(self):
opts = super(TermVectorOptions, self).options()
if self.enabled and not opts:
opts = {"tv": True}
return opts
class PaginateOptions(Options):
def __init__(self, original=None):
if original is None:
self.start = None
self.rows = None
else:
self.start = original.start
self.rows = original.rows
def update(self, start, rows):
if start is not None:
if start < 0:
raise scorched.exc.SolrError(
"paginator start index must be 0 or greater")
self.start = start
if rows is not None:
if rows < 0:
raise scorched.exc.SolrError(
"paginator rows must be 0 or greater")
self.rows = rows
def options(self):
opts = {}
if self.start is not None:
opts['start'] = self.start
if self.rows is not None:
opts['rows'] = self.rows
return opts
class SortOptions(Options):
option_name = "sort"
def __init__(self, original=None):
if original is None:
self.fields = []
else:
self.fields = copy.copy(original.fields)
def update(self, field):
# We're not allowing function queries a la Solr1.5
if field.startswith('-'):
order = "desc"
field = field[1:]
elif field.startswith('+'):
order = "asc"
field = field[1:]
else:
order = "asc"
self.fields.append([order, field])
def options(self):
if self.fields:
return {"sort": ", ".join(
"%s %s" % (field, order) for order, field in self.fields)}
else:
return {}
class DebugOptions(Options):
    # XXX should be changed to the 'debug' parameter added in Solr 4.0:
# https://wiki.apache.org/solr/CommonQueryParameters#Debugging
option_name = "debugQuery"
def __init__(self, original=None):
if original is None:
self.debug = False
else:
self.debug = original.debug
def update(self, debug):
self.debug = debug
def options(self):
if self.debug:
return {"debugQuery": True}
else:
return {}
class SpellcheckOptions(Options):
option_name = "spellcheck"
def __init__(self, original=None):
if original is None:
self.spellcheck = False
else:
self.spellcheck = original.spellcheck
def update(self, spellcheck):
self.spellcheck = spellcheck
def options(self):
if self.spellcheck:
return {"spellcheck": True}
else:
return {}
class RequestHandlerOption(Options):
option_name = "qt"
def __init__(self, original=None):
if original is None:
# XXX 'standard' is deprecated
# https://wiki.apache.org/solr/SolrRequestHandler#Old_handleSelect.3Dtrue_Resolution_.28qt_param.29
self.handler = None
else:
self.handler = original.handler
def update(self, handler):
self.handler = handler
def options(self):
ret = {}
if self.handler:
ret = {"qt": self.handler}
return ret
class FieldLimitOptions(Options):
option_name = "fl"
def __init__(self, original=None):
if original is None:
self.fields = set()
self.score = False
self.all_fields = False
else:
self.fields = copy.copy(original.fields)
self.score = original.score
self.all_fields = original.all_fields
def update(self, fields=None, score=False, all_fields=False):
if fields is None:
fields = []
if not is_iter(fields):
fields = [fields]
self.fields.update(fields)
self.score = score
self.all_fields = all_fields
def options(self):
opts = {}
if self.all_fields:
            fields = set(["*"])  # the literal "*" field, i.e. all stored fields
else:
fields = self.fields
if self.score:
fields.add("score")
if fields:
opts['fl'] = ','.join(sorted(fields))
return opts
class FacetQueryOptions(Options):
def __init__(self, original=None):
if original is None:
self.queries = []
else:
self.queries = [q.clone() for q in original.queries]
def update(self, query):
self.queries.append(query)
def options(self):
if self.queries:
return {'facet.query': [str(q) for q in self.queries],
'facet': True}
else:
return {}
class StatOptions(Options):
option_name = "stats"
opts = {
"stats.facet": str,
}
# NOTE: Solr documentation indicates stats.facet is a legacy parameter,
# recommends using stats.field with facet.pivot instead
def __init__(self, original=None):
if original is None:
self.stats = False
self.facet = None
self.fields = collections.defaultdict(dict)
else:
self.stats = original.stats
self.fields = copy.copy(original.fields)
self.facet = original.facet
def update(self, fields=None, **kwargs):
if 'facet' in kwargs:
self.facet = kwargs['facet']
del kwargs['facet']
super(StatOptions, self).update(fields, **kwargs)
self.stats = True
def field_names_in_opts(self, opts, fields):
if fields:
opts["stats.field"] = sorted(fields)
def options(self):
opts = super(StatOptions, self).options()
# stats = True set based on option_name
if self.facet:
opts['stats.facet'] = self.facet
return opts
def params_from_dict(**kwargs):
utf8_params = []
for k, vs in list(kwargs.items()):
if isinstance(k, bytes):
k = k.decode('utf-8')
# We allow for multivalued options with lists.
if not is_iter(vs):
vs = [vs]
for v in vs:
if isinstance(v, bool):
v = b"true" if v else b"false"
if isinstance(v, str):
v = v.encode('utf-8')
if isinstance(v, numbers.Number):
v = str(v).encode('utf-8')
utf8_params.append((k, v))
return sorted(utf8_params)
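# Illustrative sketch (not part of scorched): params_from_dict() flattens an
# options dict into a sorted list of (key, utf-8 encoded value) pairs, which is
# the form most HTTP layers expect. Under Python 3 this would yield, roughly:
#   [('facet', b'true'), ('q', b'*:*'), ('rows', b'10')]
def _example_params_from_dict():
    return params_from_dict(q='*:*', rows=10, facet=True)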
|
|
from validator import Validator
from collections import defaultdict
import re
class EtherValidator(Validator):
def __init__(self, rule):
self.corpus = rule[0]
self.doc = rule[1]
self.domain = rule[2]
self.name = rule[3]
self.operator = rule[4]
self.argument = rule[5]
def _apply_exists(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) == 0:
report += "Column named '" + self.name + "' not found<br/>"
return report, tooltip, cells
def _apply_doesntexist(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) > 0:
report += "Columns named '" + self.name + "' are not allowed<br/>"
cells += [letter + "1" for letter in col_letters]
return report, tooltip, cells
def _apply_span_equals_number(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) == 0:
report += "Column named " + self.name + " not found<br/>"
return report, tooltip, cells
for letter in col_letters:
for cell in parsed_ether[letter]:
if cell.row == "1":
continue
if self.argument == "1":
if cell.span != "1":
report += "Cell " + cell.col + cell.row + ": span is not 1<br/>"
cells.append(cell.col + cell.row)
else:
if cell.span != "" and cell.span != self.argument:
report += "Cell " + cell.col + cell.row + ": span is not " + self.argument + "<br/>"
cells.append(cell.col + cell.row)
return report, tooltip, cells
def _apply_regex(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
for letter in col_letters:
for cell in parsed_ether[letter]:
if cell.row == "1":
continue
match = re.search(self.argument, cell.content)
if match is None:
report += ("Cell " + cell.col + cell.row
+ ": content does not match pattern " + self.argument + "<br/>")
tooltip += ("Cell " + cell.col + cell.row + ":<br/>"
+ "Content: " + cell.content + "<br/>"
+ "Pattern: " + self.argument + "<br/>")
cells.append(cell.col + cell.row)
return report, tooltip, cells
def _binary_op_check_cols_exist(self, colmap):
name_letters = colmap[self.name]
arg_letters = colmap[self.argument]
if len(name_letters) == 0:
if self.operator != "==":
return "Column named " + self.name + " not found<br/>"
if len(arg_letters) == 0:
if self.operator != "==":
return "Column named " + self.argument + " not found<br/>"
return ""
def _binary_op_setup(self, parsed_ether):
colmap = parsed_ether['__colmap__'] # name -> list of col letters
name_letters = colmap[self.name]
arg_letters = colmap[self.argument]
name_tuples = defaultdict(list)
arg_tuples = defaultdict(list)
start_rows = defaultdict(list)
all_rows = []
for letter in name_letters:
for cell in parsed_ether[letter]:
start_rows[letter].append(cell.row)
# "de-merge" cell so we have an entry for every row in its span with its letter and content
                for i in range(int(cell.span or 1)):  # an empty span means a single row
row = str(int(cell.row) + i)
name_tuples[row].append((letter, cell.content))
all_rows.append(row)
# same as above with arg_letters
for letter in arg_letters:
for cell in parsed_ether[letter]:
start_rows[letter].append(cell.row)
                for i in range(int(cell.span or 1)):
row = str(int(cell.row) + i)
arg_tuples[row].append((letter, cell.content))
if row not in all_rows:
all_rows.append(row)
name_start_cells = []
name_start_rows = set() # for O(1) lookup
for letter in name_letters:
name_start_cells += [(letter, row) for row in start_rows[letter]]
name_start_rows = name_start_rows.union(set(row for row in start_rows[letter]))
arg_start_cells = []
arg_start_rows = set()
for letter in arg_letters:
arg_start_cells += [(letter, row) for row in start_rows[letter]]
arg_start_rows = arg_start_rows.union(set(row for row in start_rows[letter]))
return name_letters, arg_letters, name_tuples, arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, arg_start_cells, arg_start_rows
def _apply_subspan(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
# check to see if all cells in rhs are contained within cells on lhs
if row in arg_tuples and row not in name_tuples:
for letter, _ in arg_tuples[row]:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " must appear in the span of a cell in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_equal_span_length(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
if row == "1":
continue
name_len = len(name_tuples[row])
arg_len = len(arg_tuples[row])
if name_len > arg_len:
for letter, _ in name_tuples[row][arg_len:]:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
elif arg_len > name_len:
for letter, _ in arg_tuples[row][name_len:]:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
for letter, row in name_start_cells:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
for letter, row in arg_start_cells:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_equal_span_length_and_content(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
if row == "1":
continue
name_len = len(name_tuples[row])
arg_len = len(arg_tuples[row])
if name_len > arg_len:
for letter, _ in name_tuples[row][arg_len:]:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
elif arg_len > name_len:
for letter, _ in arg_tuples[row][name_len:]:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
for i in range(min(len(name_tuples[row]), len(arg_tuples[row]))):
name_letter, name_content = name_tuples[row][i]
arg_letter, arg_content = arg_tuples[row][i]
if arg_content != name_content and (row in start_rows[arg_letter] or row in start_rows[name_letter]):
cells.append(name_letter + row)
cells.append(arg_letter + row)
report += ("Cells " + name_letter + row
+ " and " + arg_letter + row
+ " must have equivalent content.<br/>")
for letter, row in name_start_cells:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
for letter, row in arg_start_cells:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_rule(self, parsed_ether):
if self.name is None:
return "", "", []
if self.operator == "exists":
return self._apply_exists(parsed_ether)
if self.operator == "doesntexist":
return self._apply_doesntexist(parsed_ether)
elif self.operator == "|":
return self._apply_span_equals_number(parsed_ether)
elif self.operator == "~":
return self._apply_regex(parsed_ether)
elif self.operator == ">":
return self._apply_subspan(parsed_ether)
elif self.operator == "=":
return self._apply_equal_span_length(parsed_ether)
elif self.operator == "==":
return self._apply_equal_span_length_and_content(parsed_ether)
else:
raise Exception("Unknown EtherCalc validation operator: '" + str(self.operator) + "'")
def applies(self, doc_name, doc_corpus):
if self.corpus is not None and re.search(self.corpus, doc_corpus) is None:
return False
if self.doc is not None and re.search(self.doc, doc_name) is None:
return False
return True
def validate(self, parsed_ether):
report, tooltip, cells = self._apply_rule(parsed_ether)
return {"report": report,
"tooltip": tooltip,
"cells": cells}
|
|
"""
Copyright: Jev Kuznetsov
Licence: BSD
Interface to interactive brokers together with gui widgets
"""
import sys
# import os
from time import sleep
from PyQt4.QtCore import (SIGNAL, SLOT)
from PyQt4.QtGui import (QApplication, QFileDialog, QDialog, QVBoxLayout, QHBoxLayout, QDialogButtonBox,
QTableView, QPushButton, QWidget, QLabel, QLineEdit, QGridLayout, QHeaderView)
import ib
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from ib.ext.Order import Order
import logger as logger
from qtpandas import DataFrameModel, TableView
from eventSystem import Sender
import numpy as np
import pandas
from pandas import DataFrame, Index
from datetime import datetime
import os
import datetime as dt
import time
priceTicks = {1: 'bid', 2: 'ask', 4: 'last', 6: 'high', 7: 'low', 9: 'close', 14: 'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol, secType='STK', exchange='SMART', currency='USD'):
""" contract factory function """
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = secType
contract.m_exchange = exchange
contract.m_currency = currency
return contract
def _str2datetime(s):
""" convert string to datetime """
return datetime.strptime(s, '%Y%m%d')
def readActivityFlex(fName):
"""
    Parse a trade log in a csv file produced by an IB 'Activity Flex Query'.
    The file should contain these columns:
['Symbol','TradeDate','Quantity','TradePrice','IBCommission']
Returns:
A DataFrame with parsed trade data
"""
import csv
rows = []
with open(fName, 'rb') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
header = ['TradeDate', 'Symbol', 'Quantity', 'TradePrice', 'IBCommission']
types = dict(zip(header, [_str2datetime, str, int, float, float]))
idx = dict(zip(header, [rows[0].index(h) for h in header]))
data = dict(zip(header, [[] for h in header]))
for row in rows[1:]:
print row
for col in header:
val = types[col](row[idx[col]])
data[col].append(val)
return DataFrame(data)[header].sort(column='TradeDate')
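# Hedged usage sketch (illustrative only; the file name is an assumption):
#
#     trades = readActivityFlex('activity_flex.csv')
#     print trades.head()
#
# The CSV must contain the columns listed in the docstring above; each value is
# coerced with the per-column converters defined in `types`.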
class Subscriptions(DataFrameModel, Sender):
""" a data table containing price & subscription data """
def __init__(self, tws=None):
super(Subscriptions, self).__init__()
self.df = DataFrame() # this property holds the data in a table format
self._nextId = 1
self._id2symbol = {} # id-> symbol lookup dict
self._header = ['id', 'position', 'bid', 'ask', 'last'] # columns of the _data table
# register callbacks
if tws is not None:
tws.register(self.priceHandler, message.TickPrice)
tws.register(self.accountHandler, message.UpdatePortfolio)
def add(self, symbol, subId=None):
"""
        Add a subscription to the data table.
        Returns: subscription id
"""
if subId is None:
subId = self._nextId
data = dict(zip(self._header, [subId, 0, np.nan, np.nan, np.nan]))
row = DataFrame(data, index=Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
self._nextId = subId + 1
self._rebuildIndex()
self.emit(SIGNAL("layoutChanged()"))
return subId
def priceHandler(self, msg):
""" handler function for price updates. register this with ibConnection class """
if priceTicks[msg.field] not in self._header: # do nothing for ticks that are not in _data table
return
self.df[priceTicks[msg.field]][self._id2symbol[msg.tickerId]] = msg.price
#notify viewer
col = self._header.index(priceTicks[msg.field])
row = self.df.index.tolist().index(self._id2symbol[msg.tickerId])
idx = self.createIndex(row, col)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), idx, idx)
def accountHandler(self, msg):
if msg.contract.m_symbol in self.df.index.tolist():
self.df['position'][msg.contract.m_symbol] = msg.position
def _rebuildIndex(self):
""" udate lookup dictionary id-> symbol """
symbols = self.df.index.tolist()
ids = self.df['id'].values.tolist()
self._id2symbol = dict(zip(ids, symbols))
def __repr__(self):
return str(self.df)
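# Hedged usage sketch (illustrative): without a tws connection the table only
# tracks rows; the price columns stay NaN until priceHandler receives ticks.
#
#     subs = Subscriptions()          # no callbacks registered
#     subs.add('SPY')                 # returns the subscription id (1)
#     subs.add('XLE')                 # next id (2)
#     print subs                      # __repr__ shows the underlying DataFrame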
class Broker(object):
"""
Broker class acts as a wrapper around ibConnection
from ibPy. It tracks current subscriptions and provides
    data models to viewers.
"""
def __init__(self, name='broker'):
""" initialize broker class
"""
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self.tws = ibConnection() # tws interface
self.nextValidOrderId = None
self.dataModel = Subscriptions(self.tws) # data container
self.tws.registerAll(self.defaultHandler)
#self.tws.register(self.debugHandler,message.TickPrice)
self.tws.register(self.nextValidIdHandler, 'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True, '')
def subscribeStk(self, symbol, secType='STK', exchange='SMART', currency='USD'):
""" subscribe to stock data """
self.log.debug('Subscribing to ' + symbol)
# if symbol in self.data.symbols:
# print 'Already subscribed to {0}'.format(symbol)
# return
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self.dataModel.add(symbol)
self.tws.reqMktData(subId, c, '', False)
self.contracts[symbol] = c
return subId
@property
def data(self):
return self.dataModel.df
def placeOrder(self, symbol, shares, limit=None, exchange='SMART', transmit=0):
""" place an order on already subscribed contract """
if symbol not in self.contracts.keys():
self.log.error("Can't place order, not subscribed to %s" % symbol)
return
action = {-1: 'SELL', 1: 'BUY'}
o = Order()
o.m_orderId = self.getOrderId()
o.m_action = action[cmp(shares, 0)]
o.m_totalQuantity = abs(shares)
o.m_transmit = transmit
if limit is not None:
o.m_orderType = 'LMT'
o.m_lmtPrice = limit
self.log.debug('Placing %s order for %i %s (id=%i)' % (o.m_action, o.m_totalQuantity, symbol, o.m_orderId))
self.tws.placeOrder(o.m_orderId, self.contracts[symbol], o)
def getOrderId(self):
self.nextValidOrderId += 1
return self.nextValidOrderId - 1
def unsubscribeStk(self, symbol):
self.log.debug('Function not implemented')
def disconnect(self):
self.tws.disconnect()
def __del__(self):
"""destructor, clean up """
print 'Broker is cleaning up after itself.'
self.tws.disconnect()
def debugHandler(self, msg):
print msg
def defaultHandler(self, msg):
""" default message handler """
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def nextValidIdHandler(self, msg):
self.nextValidOrderId = msg.orderId
self.log.debug('Next valid order id:{0}'.format(self.nextValidOrderId))
def saveData(self, fname):
""" save current dataframe to csv """
self.log.debug("Saving data to {0}".format(fname))
self.dataModel.df.to_csv(fname)
# def __getattr__(self, name):
# """ x.__getattr__('name') <==> x.name
# an easy way to call ibConnection methods
# @return named attribute from instance tws
# """
# return getattr(self.tws, name)
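# Hedged usage sketch (illustrative; requires a running TWS/IB Gateway session):
# the sign of `shares` selects the action via the action dict in placeOrder, so
# positive quantities BUY and negative quantities SELL; `limit` switches the
# order to LMT, and the default transmit=0 is passed straight to Order.m_transmit.
#
#     broker = Broker()
#     broker.subscribeStk('SPY')
#     broker.placeOrder('SPY', 100, limit=55.1)   # BUY 100, limit order
#     broker.placeOrder('SPY', -100)              # SELL 100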
class _HistDataHandler(object):
""" handles incoming messages """
def __init__(self, tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler, message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open': [], 'high': [], 'low': [], 'close': [], 'volume': [], 'count': [], 'WAP': []}
def msgHandler(self, msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date, dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
""" return downloaded data as a DataFrame """
df = DataFrame(data=self._data, index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self, debug=False):
self._log = logger.getLogger('DLD')
self._log.debug(
            'Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler, message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self, msg):
print '[debug]', msg
def requestData(self, contract, endDateTime, durationStr='1 D', barSizeSetting='30 secs', whatToShow='TRADES',
useRTH=1, formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH,
formatDate)
self._reqId += 1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self, contract, dateTuple):
""" get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
"""
openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime, closeTime, freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract, t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
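# Hedged usage sketch (illustrative; needs a TWS connection and market data
# permissions). getIntradayData walks a 30-minute grid between 16:00 and 22:00
# of the given date and concatenates the individual requestData results.
#
#     dl = Downloader(debug=False)
#     df = dl.getIntradayData(createContract('SPY'), (2013, 5, 24))
#     df.to_csv('SPY_intraday.csv')   # output file name is an assumption
#     dl.disconnect()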
class TimeKeeper(object):
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~') + '/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
""" adds a timestamp of current request"""
with open(self.dataFile, 'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat) + '\n')
def nrRequests(self, timeSpan=600):
""" return number of requests in past timespan (s) """
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile, 'r') as f:
lines = f.readlines()
for line in lines:
if now - dt.datetime.strptime(line.strip(), self._timeFormat) < delta:
requests += 1
if requests == 0: # erase all contents if no requests are relevant
open(self.dataFile, 'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
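# Hedged usage sketch (illustrative): TimeKeeper persists request timestamps in
# ~/twpData/requests.txt so the pacing survives restarts; Downloader.requestData
# waits while nrRequests(timeSpan=600) exceeds 59.
#
#     tk = TimeKeeper()
#     tk.addRequest()
#     print tk.nrRequests(timeSpan=600)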
#---------------test functions-----------------
def dummyHandler(msg):
print msg
def testConnection():
""" a simple test to check working of streaming prices etc """
tws = ibConnection()
tws.registerAll(dummyHandler)
tws.connect()
c = createContract('SPY')
tws.reqMktData(1, c, '', False)
sleep(3)
print 'testConnection done.'
def testSubscriptions():
s = Subscriptions()
s.add('SPY')
#s.add('XLE')
print s
def testBroker():
b = Broker()
sleep(2)
b.subscribeStk('SPY')
b.subscribeStk('XLE')
b.subscribeStk('GOOG')
b.placeOrder('ABC', 125, 55.1)
sleep(3)
return b
#---------------------GUI stuff--------------------------------------------
class AddSubscriptionDlg(QDialog):
def __init__(self, parent=None):
super(AddSubscriptionDlg, self).__init__(parent)
symbolLabel = QLabel('Symbol')
self.symbolEdit = QLineEdit()
secTypeLabel = QLabel('secType')
self.secTypeEdit = QLineEdit('STK')
exchangeLabel = QLabel('exchange')
self.exchangeEdit = QLineEdit('SMART')
currencyLabel = QLabel('currency')
self.currencyEdit = QLineEdit('USD')
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
lay = QGridLayout()
lay.addWidget(symbolLabel, 0, 0)
lay.addWidget(self.symbolEdit, 0, 1)
lay.addWidget(secTypeLabel, 1, 0)
lay.addWidget(self.secTypeEdit, 1, 1)
lay.addWidget(exchangeLabel, 2, 0)
lay.addWidget(self.exchangeEdit, 2, 1)
lay.addWidget(currencyLabel, 3, 0)
lay.addWidget(self.currencyEdit, 3, 1)
lay.addWidget(buttonBox, 4, 0, 1, 2)
self.setLayout(lay)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
self.setWindowTitle("Add subscription")
class BrokerWidget(QWidget):
def __init__(self, broker, parent=None):
super(BrokerWidget, self).__init__(parent)
self.broker = broker
self.dataTable = TableView()
self.dataTable.setModel(self.broker.dataModel)
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
#self.dataTable.resizeColumnsToContents()
dataLabel = QLabel('Price Data')
dataLabel.setBuddy(self.dataTable)
dataLayout = QVBoxLayout()
dataLayout.addWidget(dataLabel)
dataLayout.addWidget(self.dataTable)
addButton = QPushButton("&Add Symbol")
saveDataButton = QPushButton("&Save Data")
#deleteButton = QPushButton("&Delete")
buttonLayout = QVBoxLayout()
buttonLayout.addWidget(addButton)
buttonLayout.addWidget(saveDataButton)
buttonLayout.addStretch()
layout = QHBoxLayout()
layout.addLayout(dataLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.connect(addButton, SIGNAL('clicked()'), self.addSubscription)
self.connect(saveDataButton, SIGNAL('clicked()'), self.saveData)
#self.connect(deleteButton,SIGNAL('clicked()'),self.deleteSubscription)
def addSubscription(self):
dialog = AddSubscriptionDlg(self)
if dialog.exec_():
self.broker.subscribeStk(str(dialog.symbolEdit.text()), str(dialog.secTypeEdit.text()),
str(dialog.exchangeEdit.text()), str(dialog.currencyEdit.text()))
def saveData(self):
""" save data to a .csv file """
fname = unicode(QFileDialog.getSaveFileName(self, caption="Save data to csv", filter='*.csv'))
if fname:
self.broker.saveData(fname)
# def deleteSubscription(self):
# pass
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.resize(640, 480)
self.setWindowTitle('Broker test')
self.broker = Broker()
self.broker.subscribeStk('SPY')
self.broker.subscribeStk('XLE')
self.broker.subscribeStk('GOOG')
brokerWidget = BrokerWidget(self.broker, self)
lay = QVBoxLayout()
lay.addWidget(brokerWidget)
self.setLayout(lay)
def startGui():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
import ib
    print 'ibpy version:', ib.version
#testConnection()
#testBroker()
#testSubscriptions()
print message.messageTypeNames()
startGui()
print 'All done'
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MixFavourite.user'
db.alter_column(u'spa_mixfavourite', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
def backwards(self, orm):
# Changing field 'MixFavourite.user'
db.alter_column(u'spa_mixfavourite', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['spa.UserProfile']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._activity': {
'Meta': {'object_name': '_Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 5, 25, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 5, 25, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 5, 25, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'event_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Venue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'stream_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 25, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.mixdownload': {
'Meta': {'object_name': 'MixDownload', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'downloads'", 'to': "orm['spa.Mix']"})
},
'spa.mixfavourite': {
'Meta': {'object_name': 'MixFavourite'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favourites'", 'to': "orm['spa.Mix']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': u"orm['auth.User']"})
},
'spa.mixlike': {
'Meta': {'object_name': 'MixLike', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'likes'", 'to': "orm['spa.Mix']"})
},
'spa.mixplay': {
'Meta': {'object_name': 'MixPlay', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': "orm['spa.Mix']"})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 5, 25, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers_rel_+'", 'null': 'True', 'to': "orm['spa.UserProfile']"}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'following_rel_+'", 'null': 'True', 'to': "orm['spa.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa']
|
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a :epkg:`sphinx` extension for a quote.
"""
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx.locale import _
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.statemachine import StringList
from sphinx.util.nodes import nested_parse_with_titles
class quote_node(nodes.admonition):
"""
Defines ``quote`` node.
"""
pass
class QuoteNode(BaseAdmonition):
"""
    A ``quote`` entry, displayed in the form of an admonition.
It takes the following options:
* *author*
* *book*
* *year*
* *pages*
* *tag*
* *source*
* *lid* or *label*
* *index*, additional index words beside the title and the author
* *date*, if the text was written or declared at specific date
Example::
.. quote::
:author: author
:book: book
:year: year
:pages: pages (optional)
:tag: something
:lid: id (used for further reference)
:source: optional
:index: word
A monkey could...
"""
node_class = quote_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'author': directives.unchanged,
'book': directives.unchanged,
'year': directives.unchanged,
'pages': directives.unchanged,
'tag': directives.unchanged,
'lid': directives.unchanged,
'label': directives.unchanged,
'source': directives.unchanged,
'class': directives.class_option,
'index': directives.unchanged,
'date': directives.unchanged,
}
def run(self):
"""
        Builds the quote text.
"""
env = self.state.document.settings.env if hasattr(
self.state.document.settings, "env") else None
docname = None if env is None else env.docname
if docname is not None:
docname = docname.replace("\\", "/").split("/")[-1]
if not self.options.get('class'):
self.options['class'] = ['admonition-quote']
# body
(quote,) = super(QuoteNode, self).run()
if isinstance(quote, nodes.system_message):
return [quote] # pragma: no cover
# mid
tag = self.options.get('tag', 'quotetag').strip()
if len(tag) == 0:
raise ValueError("tag is empty") # pragma: no cover
def __(text):
if text:
return _(text)
return ""
# book
author = __(self.options.get('author', "").strip())
book = __(self.options.get('book', "").strip())
pages = __(self.options.get('pages', "").strip())
year = __(self.options.get('year', "").strip())
source = __(self.options.get('source', "").strip())
index = __(self.options.get('index', "").strip())
date = __(self.options.get('date', "").strip())
indexes = []
if index:
indexes.append(index) # pragma: no cover
# add a label
lid = self.options.get('lid', self.options.get('label', None))
if lid:
tnl = ['', ".. _{0}:".format(lid), ""]
else:
tnl = [] # pragma: no cover
if author:
tnl.append("**{0}**, ".format(author))
indexes.append(author)
if book:
tnl.append("*{0}*".format(book))
indexes.append(book)
if pages:
tnl.append(", {0}".format(pages))
if date:
tnl.append(" ({0})".format(date))
if source:
if source.startswith("http"):
tnl.append(", `source <{0}>`_".format(source))
else:
tnl.append(", {0}".format(source))
tnl.append('')
tnl.append(".. index:: " + ", ".join(indexes))
tnl.append('')
content = StringList(tnl)
content = content + self.content
node = quote_node()
try:
nested_parse_with_titles(self.state, content, node)
except Exception as e: # pragma: no cover
from sphinx.util import logging
logger = logging.getLogger("blogpost")
logger.warning(
"[blogpost] unable to parse '{0}' - '{1}' - {2}".format(author, book, e))
raise e
node['tag'] = tag
node['author'] = author
node['pages'] = pages
node['year'] = year
node['label'] = lid
node['source'] = source
node['book'] = book
node['index'] = index
node['content'] = '\n'.join(self.content)
node['classes'] += ["quote"]
return [node]
def visit_quote_node(self, node):
"""
visit_quote_node
"""
self.visit_admonition(node)
def depart_quote_node(self, node):
"""
depart_quote_node,
see https://github.com/sphinx-doc/sphinx/blob/master/sphinx/writers/html.py
"""
self.depart_admonition(node)
def visit_quote_node_rst(self, node):
"""
visit_quote_node
"""
self.new_state(0)
self.add_text(".. quote::")
for k, v in sorted(node.attributes.items()):
if k in ("content", 'classes'):
continue
if v:
self.new_state(4)
self.add_text(":{0}: {1}".format(k, v))
self.end_state(wrap=False, end=None)
self.add_text(self.nl)
self.new_state(4)
self.add_text(node['content'])
self.end_state()
self.end_state()
raise nodes.SkipNode
def depart_quote_node_rst(self, node):
"""
depart_quote_node,
see https://github.com/sphinx-doc/sphinx/blob/master/sphinx/writers/html.py
"""
pass
def setup(app):
"""
    setup for ``quote`` (sphinx)
"""
if hasattr(app, "add_mapping"):
app.add_mapping('quote', quote_node)
app.add_node(quote_node,
html=(visit_quote_node, depart_quote_node),
epub=(visit_quote_node, depart_quote_node),
elatex=(visit_quote_node, depart_quote_node),
latex=(visit_quote_node, depart_quote_node),
text=(visit_quote_node, depart_quote_node),
md=(visit_quote_node, depart_quote_node),
rst=(visit_quote_node_rst, depart_quote_node_rst))
app.add_directive('quote', QuoteNode)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
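# Hedged usage sketch (not part of the extension): enabling the directive in a
# Sphinx project is assumed to amount to listing this module in conf.py, e.g.
#
#     extensions = [
#         'mypackage.sphinx_quote_extension',   # hypothetical dotted path to this file
#     ]
#
# after which documents can use the ``.. quote::`` directive shown in the
# QuoteNode docstring above.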
|
|
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
u'\u2028': '\\u2028',
u'\u2029': '\\u2029',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
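# Hedged doctest-style sketch (illustrative): non-ASCII and control characters
# are escaped, and astral characters become surrogate pairs.
#
#     >>> py_encode_basestring_ascii(u'caf\xe9\n')
#     '"caf\\u00e9\\n"'
#     >>> py_encode_basestring_ascii(u'\U0001d11e')
#     '"\\ud834\\udd1e"'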
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict, namedtuple | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=True, namedtuple_as_object=True,
tuple_as_array=True):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be a unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
        If use_decimal is true (the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
If namedtuple_as_object is true (the default), objects with
``_asdict()`` methods will be encoded as JSON objects.
If tuple_as_array is true (the default), tuple (and subclasses) will
be encoded as JSON arrays.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
self.namedtuple_as_object = namedtuple_as_object
self.tuple_as_array = tuple_as_array
if indent is not None and not isinstance(indent, basestring):
indent = indent * ' '
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
key_memo = {}
if (_one_shot and c_make_encoder is not None
and self.indent is None):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array)
try:
return _iterencode(o, 0)
finally:
key_memo.clear()
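# Hedged doctest-style sketch (illustrative): the constructor options above map
# directly onto the serialized output; compact separators drop whitespace and
# sort_keys gives a deterministic member order.
#
#     >>> JSONEncoder(sort_keys=True, separators=(',', ':')).encode({'b': 1, 'a': 2})
#     '{"a":2,"b":1}'
#     >>> JSONEncoder(indent='  ').encode([1])
#     '[\n  1\n]'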
class JSONEncoderForHTML(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. ``&amp;``) because they are not expanded
within <script> tags.
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
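# Hedged doctest-style sketch (illustrative): the three HTML-sensitive
# characters are rewritten as unicode escapes so the payload stays inert
# inside a <script> block.
#
#     >>> JSONEncoderForHTML().encode(["</script>", "a&b"])
#     '["\\u003c/script\\u003e", "a\\u0026b"]'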
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal, _namedtuple_as_object, _tuple_as_array,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
Decimal=Decimal,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
if isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
else:
_asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
if _asdict and callable(_asdict):
chunks = _iterencode_dict(_asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
if isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
else:
_asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
if _asdict and callable(_asdict):
chunks = _iterencode_dict(_asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, list):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
else:
_asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
if _asdict and callable(_asdict):
for chunk in _iterencode_dict(_asdict(), _current_indent_level):
yield chunk
elif (_tuple_as_array and isinstance(o, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointable object SavedModel save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import adam
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
class _ModelWithOptimizer(training.Model):
def __init__(self):
super(_ModelWithOptimizer, self).__init__()
self.dense = core.Dense(1)
self.optimizer = adam.AdamOptimizer(0.01)
@def_function.function(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)))
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {"loss": loss}
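# Hedged usage sketch (illustrative; the export path is an assumption): the
# tests below exercise the same pattern as plain user code, i.e. trace the
# tf.function once and then hand the object plus a signature to save.save:
#
#     model = _ModelWithOptimizer()
#     model(constant_op.constant([[1., 2.]]), constant_op.constant([1.]))
#     save.save(model, "/tmp/model_with_optimizer", model.call)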
class SaveTest(test.TestCase):
def _import_and_infer(
self, save_dir, inputs,
signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = ops.Graph()
with graph.as_default(), self.session(graph) as session:
model = loader.load(session, [], save_dir)
signature = model.signature_def[signature_key]
self.assertEqual(set(inputs.keys()), set(signature.inputs.keys()))
feed_dict = {}
for arg_name in inputs.keys():
feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = (
inputs[arg_name])
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
def test_method_save_signature(self):
root = tracking.Checkpointable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, root.f)
self.assertEqual(
{"output_0": 2.},
self._import_and_infer(save_dir, {"x": 1.}))
def test_method_save_concrete(self):
root = tracking.Checkpointable()
root.f = def_function.function(
lambda z: {"out": 2. * z})
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root,
save_dir,
{"non_default_key": root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))})
self.assertEqual(
{"out": 2.},
self._import_and_infer(
save_dir, {"z": 1.}, signature_key="non_default_key"))
def test_non_concrete_error(self):
root = tracking.Checkpointable()
root.f = def_function.function(lambda x: 2. * x)
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(
ValueError, "must be converted to concrete functions"):
save.save(root, save_dir, root.f)
def test_nested_inputs(self):
root = tracking.Checkpointable()
root.f = def_function.function(
lambda x: 2. * x[0],
input_signature=([tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)],))
root.f([constant_op.constant(1.), constant_op.constant(1.)])
# Concrete functions must always have uniquely named Tensor inputs. Save
# relies on this.
with self.assertRaisesRegexp(
ValueError, "two arguments named 'x'"):
root.f.get_concrete_function()
def test_nested_outputs(self):
root = tracking.Checkpointable()
root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(
ValueError, "non-flat outputs"):
save.save(root, save_dir, to_save)
def test_nested_dict_outputs(self):
root = tracking.Checkpointable()
root.f = def_function.function(
lambda x: {"a": 2. * x, "b": (3. * x, 4. * x)})
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(
ValueError, "dictionary containing non-Tensor value"):
save.save(root, save_dir, to_save)
def test_variable(self):
root = tracking.Checkpointable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v1 * root.v2 * x)
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, to_save)
self.assertAllEqual({"output_0": 12.},
self._import_and_infer(save_dir, {"x": 2.}))
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
first_loss = model(x, y)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir, model.call)
second_loss = model(x, y)
self.assertNotEqual(first_loss, second_loss)
self.assertAllClose(
second_loss,
self._import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]}))
def test_trivial_save_exception(self):
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(ValueError, "signature"):
save.save(tracking.Checkpointable(), save_dir)
def test_single_method_default_signature(self):
model = _ModelWithOptimizer()
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model(x, y)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
self.assertIn("loss",
self._import_and_infer(save_dir,
{"x": [[3., 4.]], "y": [2.]}))
def test_single_function_default_signature(self):
model = tracking.Checkpointable()
model.f = def_function.function(lambda: 3., input_signature=())
model.f()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
self.assertAllClose({"output_0": 3.},
self._import_and_infer(save_dir, {}))
def test_ambiguous_signatures(self):
model = _ModelWithOptimizer()
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model(x, y)
model.second_function = def_function.function(lambda: 1.)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegexp(ValueError, "call.*second_function"):
save.save(model, save_dir)
def test_docstring(self):
class Adder(util.Checkpoint):
@def_function.function(input_signature=[tensor_spec.TensorSpec(
shape=None, dtype=dtypes.float32)])
def add(self, x):
return x + x + 1.
to_save = Adder()
to_save.add(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
self.assertAllClose({"output_0": 7.},
self._import_and_infer(save_dir, {"x": 3.}))
def test_default_attr_stripping(self):
class Complex(util.Checkpoint):
@def_function.function(input_signature=[])
def __call__(self):
return math_ops.complex(
constant_op.constant(1.),
constant_op.constant(2.),
name="complex")
to_save = Complex()
to_save()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
graph = ops.Graph()
with graph.as_default(), self.session(graph) as session:
loader.load(session, [], save_dir)
func, = graph._functions.values()
complex_node, = [
node for node in func.definition.node_def if node.op == "Complex"]
self.assertNotIn("T", complex_node.attr)
self.assertNotIn("Tout", complex_node.attr)
class MemoryTests(test.TestCase):
def setUp(self):
self._model = _ModelWithOptimizer()
@test_util.assert_no_garbage_created
def test_no_reference_cycles(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
self._model(x, y)
if sys.version_info[0] < 3:
# TODO(allenl): debug reference cycles in Python 2.x
self.skipTest("This test only works in Python 3+. Reference cycles are "
"created in older Python versions.")
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(self._model, save_dir, self._model.call)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
"""
Interact with the Censys Search API through the command line.
"""
import os
import sys
import csv
import time
import json
import argparse
from pathlib import Path
from typing import Union, Optional, List, Tuple
import requests
from censys.base import CensysAPIBase
from censys.config import get_config, write_config, DEFAULT
from censys.ipv4 import CensysIPv4
from censys.websites import CensysWebsites
from censys.certificates import CensysCertificates
from censys.exceptions import (
CensysCLIException,
CensysNotFoundException,
CensysUnauthorizedException,
)
Fields = List[str]
Results = List[dict]
Index = Union[CensysIPv4, CensysWebsites, CensysCertificates]
class CensysAPISearch:
"""
This class searches the Censys API, taking in options from the command line and
returning the results to a CSV or JSON file, or to stdout.
Args:
api_id (str, optional): The API ID provided by Censys.
api_secret (str, optional): The API secret provided by Censys.
start_page (int, optional): Page number to start from. Defaults to 1.
max_pages (int, optional): The maximum number of pages. Defaults to 10.
"""
csv_fields: Fields = list()
"""A list of fields to be used by the CSV writer."""
def __init__(self, **kwargs):
self.api_user = kwargs.get("api_id")
self.api_pass = kwargs.get("api_secret")
self.start_page = kwargs.get("start_page", 1)
self.max_pages = kwargs.get("max_pages", 10)
@staticmethod
def _write_csv(file_path: str, search_results: Results, fields: Fields) -> bool:
"""
This method writes the search results to a new file in CSV format.
Args:
file_path (str): Name of the file to write to on the disk.
search_results (Results): A list of results from the query.
fields (Fields): A list of fields to write as headers.
Returns:
            bool: True if the file was written successfully.
"""
with open(file_path, "w") as output_file:
if search_results and isinstance(search_results, list):
                # Write the header row using the caller-supplied fields
writer = csv.DictWriter(output_file, fieldnames=fields)
writer.writeheader()
for result in search_results:
# Use the Dict writer to process and write results to CSV
writer.writerow(result)
print(f"Wrote results to file {file_path}")
            # Returns True once the file has been written successfully.
return True
@staticmethod
def _write_json(file_path: str, search_results: Results) -> bool:
"""
This method writes the search results to a new file in JSON format.
Args:
file_path (str): Name of the file to write to on the disk.
search_results (Results): A list of results from the query.
Returns:
            bool: True if the file was written successfully.
"""
with open(file_path, "w") as output_file:
# Since the results are already in JSON, just write them to a file.
json.dump(search_results, output_file, indent=4)
print(f"Wrote results to file {file_path}")
return True
@staticmethod
def _write_screen(search_results: Results) -> bool:
"""
        This method writes the search results to the screen (stdout).
        Args:
            search_results (Results): A list of results from the query.
        Returns:
            bool: True if written to the screen successfully.
"""
print(json.dumps(search_results, indent=4))
return True
def write_file(
self,
results_list: Results,
file_format: str = "screen",
file_path: Optional[str] = None,
) -> bool:
"""
        This method selects which output format will be used to store
        the results of the query.
        Args:
            results_list (Results): A list of results from the API query.
            file_format (str, optional): The format of the output.
            file_path (str, optional): A path to write results to.
        Returns:
            bool: True if the output was written successfully.
"""
if file_format and isinstance(file_format, str):
file_format = file_format.lower()
if not file_path:
                # Generate a timestamped file name when no explicit path is given
file_name_ext = f"{time.time()}.{file_format}"
file_path = f"censys-query-output.{file_name_ext}"
if file_format == "json":
return self._write_json(file_path, results_list)
if file_format == "csv":
return self._write_csv(file_path, results_list, fields=self.csv_fields)
return self._write_screen(results_list)
def _combine_fields(
self, default_fields: Fields, user_fields: Fields, overwrite: bool = False,
) -> Fields:
"""
This method is used to specify which fields will be returned in the results.
Args:
default_fields (Fields): A list of fields that are returned by default.
user_fields (Fields): A list of user-specified fields. Max 20.
overwrite (bool, optional): Whether to overwrite or append default fields
with user fields. Defaults to False.
Raises:
CensysCLIException: Too many fields specified.
Returns:
Fields: A list of fields.
"""
field_list: Fields = default_fields
if user_fields:
if overwrite:
field_list = user_fields
else:
field_list = list(set(user_fields + default_fields))
# This is the hard limit for the number of fields that can be in a query.
if len(list(field_list)) > 20:
raise CensysCLIException(
"Too many fields specified. The maximum number of fields is 20."
)
self.csv_fields = list(field_list)
return list(field_list)
def _process_search(
self, query: str, search_index: Index, fields: Fields
) -> Results:
"""
This method provides a common way to process searches from the API.
Args:
query (str): The string to send to the API as a query.
search_index (Index): The data set to be queried.
fields (Fields): A list of fields to be returned for each result.
Returns:
Results: A list of results from the query.
"""
records = []
while True:
response = search_index.paged_search(
query=query, fields=fields, page=self.start_page
)
for record in response["results"]:
records.append(record)
            # Break out of the loop when the last page or the max page limit is reached
if (
response["metadata"]["page"] >= response["metadata"]["pages"]
or response["metadata"]["page"] >= self.max_pages
):
break
self.start_page += 1
return records
def search_ipv4(self, **kwargs) -> Results:
"""
A method to search the IPv4 data set via the API.
Args:
query (str): The string search query.
fields (list, optional): The fields that should be returned with a query.
overwrite (bool, optional): Whether to overwrite or append default fields
with user fields. Defaults to False.
Returns:
Results: A list of results from the query.
"""
default_fields = [
"updated_at",
"protocols",
"metadata.description",
"autonomous_system.name",
"23.telnet.banner.banner",
"80.http.get.title",
"80.http.get.metadata.description",
"8080.http.get.metadata.description",
"8888.http.get.metadata.description",
"443.https.get.metadata.description",
"443.https.get.title",
"443.https.tls.certificate.parsed.subject_dn",
"443.https.tls.certificate.parsed.names",
"443.https.tls.certificate.parsed.subject.common_name",
"443.https.tls.certificate.parsed.extensions.subject_alt_name.dns_names",
]
query = kwargs.get("query", "")
fields = kwargs.get("fields", [])
overwrite = kwargs.get("overwrite", False)
index = CensysIPv4(api_id=self.api_user, api_secret=self.api_pass)
return self._process_search(
query,
index,
self._combine_fields(default_fields, fields, overwrite=overwrite),
)
def search_certificates(self, **kwargs) -> Results:
"""
A method to search the Certificates data set via the API.
Args:
query (str): The string search query.
fields (list, optional): The fields that should be returned with a query.
overwrite (bool, optional): Whether to overwrite or append default fields
with user fields. Defaults to False.
Returns:
Results: A list of results from the query.
"""
default_fields = [
"metadata.updated_at",
"parsed.issuer.common_name",
"parsed.names",
"parsed.serial_number",
"parsed.self_signed",
"parsed.subject.common_name",
"parsed.validity.start",
"parsed.validity.end",
"parsed.validity.length",
"metadata.source",
"metadata.seen_in_scan",
"tags",
]
query = kwargs.get("query", "")
fields = kwargs.get("fields", [])
overwrite = kwargs.get("overwrite", False)
index = CensysCertificates(api_id=self.api_user, api_secret=self.api_pass)
return self._process_search(
query,
index,
self._combine_fields(default_fields, fields, overwrite=overwrite),
)
def search_websites(self, **kwargs) -> Results:
"""
A method to search the Websites (Alexa Top 1M) data set via the API.
Args:
query (str): The string search query.
fields (list, optional): The fields that should be returned with a query.
overwrite (bool, optional): Whether to overwrite or append default fields
with user fields. Defaults to False.
Returns:
Results: A list of results from the query.
"""
default_fields = [
"443.https.tls.version",
"alexa_rank",
"domain",
"ports",
"protocols",
"tags",
"updated_at",
]
query = kwargs.get("query", "")
fields = kwargs.get("fields", [])
overwrite = kwargs.get("overwrite", False)
index = CensysWebsites(api_id=self.api_user, api_secret=self.api_pass)
return self._process_search(
query,
index,
self._combine_fields(default_fields, fields, overwrite=overwrite),
)
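def _example_censys_api_search_usage():  # pragma: no cover
    """
    Illustrative sketch (not part of the original script): shows how the
    CensysAPISearch helper is driven. Writing a canned result set to the
    screen is purely local; the search_* methods page through the Censys API
    and therefore need valid credentials. The credential values below are
    placeholders.
    """
    censys = CensysAPISearch(
        api_id="YOUR_API_ID", api_secret="YOUR_API_SECRET", max_pages=2
    )
    canned_results = [{"ip": "192.0.2.1", "protocols": ["443/https"]}]
    # "screen" pretty-prints the results as JSON to stdout.
    censys.write_file(canned_results, file_format="screen")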
class CensysHNRI:
"""
    This class searches the Censys API, checking the user's current IP address for risks.
Args:
api_id (str, optional): The API ID provided by Censys.
api_secret (str, optional): The API secret provided by Censys.
"""
HIGH_RISK_DEFINITION: List[str] = ["telnet", "redis", "postgres", "vnc"]
MEDIUM_RISK_DEFINITION: List[str] = ["ssh", "http", "https"]
def __init__(self, api_id: str, api_secret: str):
self.index = CensysIPv4(api_id, api_secret)
@staticmethod
def get_current_ip() -> str:
"""
Uses ipify.org to get the current IP address.
Returns:
str: IP address.
"""
response = requests.get("https://api.ipify.org?format=json")
current_ip = response.json().get("ip")
return current_ip
def translate_risk(self, protocols: list) -> Tuple[list, list]:
"""
        Interpret protocols as risks.
        Args:
            protocols (list): List of port/protocol strings, e.g. "23/telnet".
Returns:
Tuple[list, list]: Lists of high and medium risks.
"""
high_risk = []
medium_risk = []
for protocol in protocols:
port, protocol = protocol.split("/")
string = f"{protocol} on {port}"
if protocol in self.HIGH_RISK_DEFINITION:
high_risk.append({"port": port, "protocol": protocol, "string": string})
elif protocol in self.MEDIUM_RISK_DEFINITION:
medium_risk.append(
{"port": port, "protocol": protocol, "string": string}
)
elif protocol == "banner":
medium_risk.append(
{"port": port, "protocol": "unknown protocol", "string": string}
)
else:
medium_risk.append(
{"port": port, "protocol": protocol, "string": string}
)
return high_risk, medium_risk
@staticmethod
def risks_to_string(high_risk: list, medium_risk: list) -> str:
"""
Risks to printable string.
Args:
            high_risk (list): List of high risks.
            medium_risk (list): List of medium risks.
Raises:
CensysCLIException: No information/risks found.
Returns:
str: Printable string for CLI.
"""
len_high_risk = len(high_risk)
len_medium_risk = len(medium_risk)
if len_high_risk + len_medium_risk == 0:
raise CensysCLIException
response = ""
if len_high_risk > 0:
response = (
response
+ "High Risks Found: \n"
+ "\n".join([risk.get("string") for risk in high_risk])
)
else:
response = response + "You don't have any High Risks in your network\n"
if len_medium_risk > 0:
response = (
response
+ "Medium Risks Found: \n"
+ "\n".join([risk.get("string") for risk in medium_risk])
)
else:
response = response + "You don't have any Medium Risks in your network\n"
return response
def view_current_ip_risks(self) -> str:
"""
Gets protocol information for the current IP and returns any risks.
Returns:
            str: Printable summary of any risks found.
"""
current_ip = self.get_current_ip()
try:
results = self.index.view(current_ip)
protocols = results.get("protocols", [])
high_risk, medium_risk = self.translate_risk(protocols)
return self.risks_to_string(high_risk, medium_risk)
except (CensysNotFoundException, CensysCLIException):
return "No Risks were found on your network"
def search(args):
"""
search subcommand.
Args:
args (Namespace): Argparse Namespace.
"""
censys_args = {}
if args.start_page:
censys_args["start_page"] = args.start_page
if args.max_pages:
censys_args["max_pages"] = args.max_pages
if args.api_id:
censys_args["api_id"] = args.api_id
if args.api_secret:
censys_args["api_secret"] = args.api_secret
censys = CensysAPISearch(**censys_args)
search_args = {"query": args.query}
if args.fields:
search_args["fields"] = args.fields
if args.overwrite:
search_args["overwrite"] = args.overwrite
indexes = {
"ipv4": censys.search_ipv4,
"certs": censys.search_certificates,
"websites": censys.search_websites,
}
index_type = args.index_type or args.query_type
index_func = indexes[index_type]
results = index_func(**search_args)
try:
censys.write_file(results, file_format=args.format, file_path=args.output)
except ValueError as error: # pragma: no cover
print(f"Error writing log file. Error: {error}")
def hnri(args):
"""
hnri subcommand.
Args:
args (Namespace): Argparse Namespace.
"""
client = CensysHNRI(args.api_id, args.api_secret)
risks = client.view_current_ip_risks()
print(risks)
def cli_config(_): # pragma: no cover
"""
config subcommand.
Args:
_: Argparse Namespace.
"""
api_id_prompt = "Censys API ID"
api_secret_prompt = "Censys API Secret"
config = get_config()
api_id = config.get(DEFAULT, "api_id")
api_secret = config.get(DEFAULT, "api_secret")
if api_id and api_secret:
redacted_id = api_id.replace(api_id[:32], 32 * "*")
redacted_secret = api_secret.replace(api_secret[:28], 28 * "*")
api_id_prompt = f"{api_id_prompt} [{redacted_id}]"
api_secret_prompt = f"{api_secret_prompt} [{redacted_secret}]"
api_id = input(api_id_prompt + ": ").strip() or api_id
api_secret = input(api_secret_prompt + ": ").strip() or api_secret
if not (api_id and api_secret):
print("Please enter valid credentials")
sys.exit(1)
try:
client = CensysAPIBase(api_id, api_secret)
account = client.account()
email = account.get("email")
        # Assumes that authentication was successful
config.set(DEFAULT, "api_id", api_id)
config.set(DEFAULT, "api_secret", api_secret)
write_config(config)
print(f"\nSuccessfully authenticated for {email}")
sys.exit(0)
except CensysUnauthorizedException:
print("Failed to authenticate")
sys.exit(1)
def get_parser() -> argparse.ArgumentParser:
"""
Gets ArgumentParser for CLI.
Returns:
argparse.ArgumentParser
"""
config = get_config()
auth = argparse.ArgumentParser(add_help=False)
auth.add_argument(
"--api-id",
default=os.getenv("CENSYS_API_ID") or config.get(DEFAULT, "api_id"),
required=False,
help="a Censys API ID \
(alternatively you can use the env variable CENSYS_API_ID)",
)
auth.add_argument(
"--api-secret",
default=os.getenv("CENSYS_API_SECRET") or config.get(DEFAULT, "api_secret"),
required=False,
help="a Censys API SECRET \
(alternatively you can use the env variable CENSYS_API_SECRET)",
)
parser = argparse.ArgumentParser()
parser.set_defaults()
subparsers = parser.add_subparsers()
# Search Specific Args
search_parser = subparsers.add_parser(
"search",
description="Query Censys Search for resource data by providing a query \
string, the resource index, and the fields to be returned",
help="query Censys search",
parents=[auth],
)
search_parser.add_argument(
"-q",
"--query",
type=str,
required=True,
help="a string written in Censys Search syntax",
)
index_types = ["ipv4", "certs", "websites"]
index_metavar = "ipv4|certs|websites"
index_default = "ipv4"
search_parser.add_argument(
"--index-type",
type=str,
default=index_default,
choices=index_types,
metavar=index_metavar,
help="which resource index to query",
)
# Backwards compatibility
search_parser.add_argument(
"--query_type",
type=str,
default=index_default,
choices=index_types,
metavar=index_metavar,
help=argparse.SUPPRESS,
)
search_parser.add_argument(
"--fields", nargs="+", help="list of index-specific fields"
)
search_parser.add_argument(
"--overwrite",
action="store_true",
default=False,
help="overwrite instead of append fields returned by default \
with fields provided in the fields argument",
)
search_parser.add_argument(
"-f",
"--format",
type=str,
default="screen",
metavar="json|csv|screen",
help="format of output",
)
search_parser.add_argument(
"-o", "--output", type=Path, help="output file path",
)
search_parser.add_argument(
"--start-page", default=1, type=int, help="page number to start from"
)
search_parser.add_argument(
"--max-pages",
default=1,
type=int,
help="maximum number of pages of results to return",
)
search_parser.set_defaults(func=search)
# HNRI Specific Args
hnri_parser = subparsers.add_parser(
"hnri",
description="Home Network Risk Identifier (H.N.R.I.)",
help="home network risk identifier",
parents=[auth],
)
hnri_parser.set_defaults(func=hnri)
# Config Specific Args
config_parser = subparsers.add_parser(
"config",
description="Configure Censys API Settings",
help="configure Censys API settings",
)
config_parser.set_defaults(func=cli_config)
return parser
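def _example_cli_invocation():  # pragma: no cover
    """
    Illustrative sketch (not part of the original script): drive the parser
    built by get_parser() directly instead of via the console script. The
    query string and output file name are placeholders; running this performs
    a real API search and therefore needs configured credentials.
    """
    parser = get_parser()
    args = parser.parse_args(
        ["search", "-q", "parsed.names: example.com", "--index-type", "certs",
         "-f", "json", "-o", "certs.json"]
    )
    args.func(args)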
def main():
"""main cli function"""
parser = get_parser()
# Executes by subcommand
args = parser.parse_args()
try:
args.func(args)
except AttributeError:
parser.print_help()
parser.exit()
except KeyboardInterrupt: # pragma: no cover
sys.exit(1)
if __name__ == "__main__": # pragma: no cover
main()
|
|
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_by_path
from django.utils import six
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p+1
p = template_source.find('\n', p+1)
yield len(template_source) + 1
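def _example_linebreak_iter_usage():
    """
    Illustrative sketch (not part of the original module): linebreak_iter
    yields the starting offset of each line in the template source, followed
    by a final sentinel offset one past the end of the string.
    """
    offsets = list(linebreak_iter("abc\ndef"))
    assert offsets == [0, 4, 8]
    return offsets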
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed
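def _example_cleanse_setting_usage():
    """
    Illustrative sketch (not part of the original module): keys matching
    HIDDEN_SETTINGS are replaced wholesale, and dictionary values are
    cleansed recursively, key by key.
    """
    assert cleanse_setting('SECRET_KEY', 'abc123') == CLEANSED_SUBSTITUTE
    cleansed = cleanse_setting('DATABASES', {'PASSWORD': 'hunter2', 'NAME': 'db'})
    assert cleansed == {'PASSWORD': CLEANSED_SUBSTITUTE, 'NAME': 'db'}
    return cleansed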
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponseServerError(text, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponseServerError(html, content_type='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
global default_exception_reporter_filter
if default_exception_reporter_filter is None:
# Load the default filter for the first time and cache it.
default_exception_reporter_filter = import_by_path(
settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
if request:
return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
else:
return default_exception_reporter_filter
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
elif isinstance(value, MultiValueDict):
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def format_path_status(self, path):
if not os.path.exists(path):
return "File does not exist"
if not os.path.isfile(path):
return "Not a file"
if not os.access(path, os.R_OK):
return "File is not readable"
return "File exists"
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
            # If template_source_loaders hasn't been populated yet, fall back to
            # an empty list so the loop below doesn't fail.
if template_source_loaders is None:
template_source_loaders = []
for loader in template_source_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{
'name': t,
'status': self.format_path_status(t),
} for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and
hasattr(self.exc_value, 'django_template_source')):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.django_template_source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next])
source_lines.append( (num, escape(template_source[upto:next])) )
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases, exc_value.args might be empty.
try:
message = self.exc_value.args[0]
except IndexError:
message = '(Could not get exception message)'
self.template_info = {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno+1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
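def _example_exception_reporter_usage(request=None):
    """
    Illustrative sketch (not part of the original module): render the debug
    500 page for a caught exception. This needs a configured settings module
    (DEBUG, TEMPLATE_*, DEFAULT_EXCEPTION_REPORTER_FILTER, ...) because the
    reporter loads the exception reporter filter and renders Django templates.
    """
    try:
        raise ValueError("boom")
    except ValueError:
        exc_type, exc_value, tb = sys.exc_info()
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    return reporter.get_traceback_html()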
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and tried[0][0].app_name == tried[0][0].namespace == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
c = Context({})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% load firstof from future %}{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>
Of course, you haven't actually done any work yet.
Next, start your first app by running <code>python manage.py startapp [appname]</code>.
</p>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
|
|
#!/usr/bin/env python
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
el = []
pp = []
names = ['Fixed_Mask', 'Fixed_Mask_M1A', 'M1A', 'M1A_Watchpoint', 'Watchpoint', 'M2A_VDM', 'M2A_VDM_Grating', 'Grating', 'Grating_Aperture', 'Aperture', 'Watchpoint2', 'M3A_HFM', 'M3A_HFM_Watchpoint3', 'Watchpoint3', 'Pinhole', 'Watchpoint4', 'Watchpoint4_Sample', 'Sample']
for el_name in names:
if el_name == 'Fixed_Mask':
# Fixed_Mask: aperture 26.2m
el.append(srwlib.SRWLOptA(
_shape=v.op_Fixed_Mask_shape,
_ap_or_ob='a',
_Dx=v.op_Fixed_Mask_Dx,
_Dy=v.op_Fixed_Mask_Dy,
_x=v.op_Fixed_Mask_x,
_y=v.op_Fixed_Mask_y,
))
pp.append(v.op_Fixed_Mask_pp)
elif el_name == 'Fixed_Mask_M1A':
# Fixed_Mask_M1A: drift 26.2m
el.append(srwlib.SRWLOptD(
_L=v.op_Fixed_Mask_M1A_L,
))
pp.append(v.op_Fixed_Mask_M1A_pp)
elif el_name == 'M1A':
# M1A: mirror 27.2m
mirror_file = v.op_M1A_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by M1A beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_M1A_dim,
_ang=abs(v.op_M1A_ang),
_amp_coef=v.op_M1A_amp_coef,
_size_x=v.op_M1A_size_x,
_size_y=v.op_M1A_size_y,
))
pp.append(v.op_M1A_pp)
elif el_name == 'M1A_Watchpoint':
# M1A_Watchpoint: drift 27.2m
el.append(srwlib.SRWLOptD(
_L=v.op_M1A_Watchpoint_L,
))
pp.append(v.op_M1A_Watchpoint_pp)
elif el_name == 'Watchpoint':
# Watchpoint: watch 40.4m
pass
elif el_name == 'M2A_VDM':
# M2A_VDM: mirror 40.4m
mirror_file = v.op_M2A_VDM_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by M2A_VDM beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_M2A_VDM_dim,
_ang=abs(v.op_M2A_VDM_ang),
_amp_coef=v.op_M2A_VDM_amp_coef,
_size_x=v.op_M2A_VDM_size_x,
_size_y=v.op_M2A_VDM_size_y,
))
pp.append(v.op_M2A_VDM_pp)
elif el_name == 'M2A_VDM_Grating':
# M2A_VDM_Grating: drift 40.4m
el.append(srwlib.SRWLOptD(
_L=v.op_M2A_VDM_Grating_L,
))
pp.append(v.op_M2A_VDM_Grating_pp)
elif el_name == 'Grating':
# Grating: grating 40.46m
mirror = srwlib.SRWLOptMirPl(
_size_tang=v.op_Grating_size_tang,
_size_sag=v.op_Grating_size_sag,
_nvx=v.op_Grating_nvx,
_nvy=v.op_Grating_nvy,
_nvz=v.op_Grating_nvz,
_tvx=v.op_Grating_tvx,
_tvy=v.op_Grating_tvy,
_x=v.op_Grating_x,
_y=v.op_Grating_y,
)
el.append(srwlib.SRWLOptG(
_mirSub=mirror,
_m=v.op_Grating_m,
_grDen=v.op_Grating_grDen,
_grDen1=v.op_Grating_grDen1,
_grDen2=v.op_Grating_grDen2,
_grDen3=v.op_Grating_grDen3,
_grDen4=v.op_Grating_grDen4,
))
pp.append(v.op_Grating_pp)
elif el_name == 'Grating_Aperture':
# Grating_Aperture: drift 40.46m
el.append(srwlib.SRWLOptD(
_L=v.op_Grating_Aperture_L,
))
pp.append(v.op_Grating_Aperture_pp)
elif el_name == 'Aperture':
# Aperture: aperture 42.46m
el.append(srwlib.SRWLOptA(
_shape=v.op_Aperture_shape,
_ap_or_ob='a',
_Dx=v.op_Aperture_Dx,
_Dy=v.op_Aperture_Dy,
_x=v.op_Aperture_x,
_y=v.op_Aperture_y,
))
pp.append(v.op_Aperture_pp)
elif el_name == 'Watchpoint2':
# Watchpoint2: watch 42.46m
pass
elif el_name == 'M3A_HFM':
# M3A_HFM: sphericalMirror 42.46m
el.append(srwlib.SRWLOptMirSph(
_r=v.op_M3A_HFM_r,
_size_tang=v.op_M3A_HFM_size_tang,
_size_sag=v.op_M3A_HFM_size_sag,
_nvx=v.op_M3A_HFM_nvx,
_nvy=v.op_M3A_HFM_nvy,
_nvz=v.op_M3A_HFM_nvz,
_tvx=v.op_M3A_HFM_tvx,
_tvy=v.op_M3A_HFM_tvy,
_x=v.op_M3A_HFM_x,
_y=v.op_M3A_HFM_y,
))
pp.append(v.op_M3A_HFM_pp)
elif el_name == 'M3A_HFM_Watchpoint3':
# M3A_HFM_Watchpoint3: drift 42.46m
el.append(srwlib.SRWLOptD(
_L=v.op_M3A_HFM_Watchpoint3_L,
))
pp.append(v.op_M3A_HFM_Watchpoint3_pp)
elif el_name == 'Watchpoint3':
# Watchpoint3: watch 54.36m
pass
elif el_name == 'Pinhole':
# Pinhole: aperture 54.36m
el.append(srwlib.SRWLOptA(
_shape=v.op_Pinhole_shape,
_ap_or_ob='a',
_Dx=v.op_Pinhole_Dx,
_Dy=v.op_Pinhole_Dy,
_x=v.op_Pinhole_x,
_y=v.op_Pinhole_y,
))
pp.append(v.op_Pinhole_pp)
elif el_name == 'Watchpoint4':
# Watchpoint4: watch 54.36m
pass
elif el_name == 'Watchpoint4_Sample':
# Watchpoint4_Sample: drift 54.36m
el.append(srwlib.SRWLOptD(
_L=v.op_Watchpoint4_Sample_L,
))
pp.append(v.op_Watchpoint4_Sample_pp)
elif el_name == 'Sample':
# Sample: watch 55.5m
pass
pp.append(v.op_fin_pp)
return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'NSLS-II CSX-1 beamline', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
    ['ebm_e', 'f', 3.0, 'electron beam average energy [GeV]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', 0.0, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'],
['ebm_emx', 'f', 7.6e-10, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
# Definition of the beam through Twiss:
['ebm_betax', 'f', 1.84, 'horizontal beta-function [m]'],
['ebm_betay', 'f', 1.17, 'vertical beta-function [m]'],
['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.3513, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.0492, 'undulator period [m]'],
['und_len', 'f', 1.85, 'undulator length [m]'],
['und_zc', 'f', 1.25, 'undulator center longitudinal position [m]'],
['und_sx', 'i', -1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', 1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', '', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 10.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 2000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 2000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 10.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 2000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 2000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.003, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.003, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
    ['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
    ['sm_pl', 's', '', 'plot the resulting multi-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.03, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.03, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 750.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.004, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.004, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 0.3, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 30000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#to add options
['op_r', 'f', 26.2, 'longitudinal position of the first optical element [m]'],
# Former appParam:
    ['rs_type', 's', 'u', 'source type: (u) idealized undulator, (t) tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# Fixed_Mask: aperture
['op_Fixed_Mask_shape', 's', 'r', 'shape'],
['op_Fixed_Mask_Dx', 'f', 0.005, 'horizontalSize'],
['op_Fixed_Mask_Dy', 'f', 0.005, 'verticalSize'],
['op_Fixed_Mask_x', 'f', 0.0, 'horizontalOffset'],
['op_Fixed_Mask_y', 'f', 0.0, 'verticalOffset'],
# Fixed_Mask_M1A: drift
['op_Fixed_Mask_M1A_L', 'f', 1.0, 'length'],
# M1A: mirror
['op_M1A_hfn', 's', 'mirror_1d.dat', 'heightProfileFile'],
['op_M1A_dim', 's', 'x', 'orientation'],
['op_M1A_ang', 'f', 0.0218166, 'grazingAngle'],
['op_M1A_amp_coef', 'f', 0.01, 'heightAmplification'],
['op_M1A_size_x', 'f', 0.00545, 'horizontalTransverseSize'],
['op_M1A_size_y', 'f', 0.025, 'verticalTransverseSize'],
# M1A_Watchpoint: drift
['op_M1A_Watchpoint_L', 'f', 13.2, 'length'],
# M2A_VDM: mirror
['op_M2A_VDM_hfn', 's', 'mirror_1d.dat', 'heightProfileFile'],
['op_M2A_VDM_dim', 's', 'y', 'orientation'],
['op_M2A_VDM_ang', 'f', 0.0290353, 'grazingAngle'],
['op_M2A_VDM_amp_coef', 'f', 0.01, 'heightAmplification'],
['op_M2A_VDM_size_x', 'f', 0.025, 'horizontalTransverseSize'],
['op_M2A_VDM_size_y', 'f', 0.1, 'verticalTransverseSize'],
# M2A_VDM_Grating: drift
['op_M2A_VDM_Grating_L', 'f', 0.06, 'length'],
# Grating: grating
['op_Grating_size_tang', 'f', 0.3, 'tangentialSize'],
['op_Grating_size_sag', 'f', 0.015, 'sagittalSize'],
['op_Grating_nvx', 'f', 0.0, 'normalVectorX'],
['op_Grating_nvy', 'f', 0.999657108688, 'normalVectorY'],
['op_Grating_nvz', 'f', -0.0261852066962, 'normalVectorZ'],
['op_Grating_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_Grating_tvy', 'f', 0.0261852066962, 'tangentialVectorY'],
['op_Grating_x', 'f', 0.0, 'horizontalOffset'],
['op_Grating_y', 'f', 0.0, 'verticalOffset'],
['op_Grating_m', 'f', 1.0, 'diffractionOrder'],
['op_Grating_grDen', 'f', 100.0, 'grooveDensity0'],
['op_Grating_grDen1', 'f', 0.0548, 'grooveDensity1'],
['op_Grating_grDen2', 'f', 3.9e-06, 'grooveDensity2'],
['op_Grating_grDen3', 'f', 0.0, 'grooveDensity3'],
['op_Grating_grDen4', 'f', 0.0, 'grooveDensity4'],
# Grating_Aperture: drift
['op_Grating_Aperture_L', 'f', 2.0, 'length'],
# Aperture: aperture
['op_Aperture_shape', 's', 'r', 'shape'],
['op_Aperture_Dx', 'f', 0.001, 'horizontalSize'],
['op_Aperture_Dy', 'f', 0.001, 'verticalSize'],
['op_Aperture_x', 'f', 0.0, 'horizontalOffset'],
['op_Aperture_y', 'f', 0.0, 'verticalOffset'],
# M3A_HFM: sphericalMirror
['op_M3A_HFM_hfn', 's', '', 'heightProfileFile'],
['op_M3A_HFM_dim', 's', 'x', 'orientation'],
['op_M3A_HFM_r', 'f', 846.5455704, 'radius'],
['op_M3A_HFM_size_tang', 'f', 0.3, 'tangentialSize'],
['op_M3A_HFM_size_sag', 'f', 0.1, 'sagittalSize'],
['op_M3A_HFM_ang', 'f', 0.0218166, 'grazingAngle'],
['op_M3A_HFM_nvx', 'f', 0.999762027421, 'normalVectorX'],
['op_M3A_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_M3A_HFM_nvz', 'f', -0.0218148693884, 'normalVectorZ'],
['op_M3A_HFM_tvx', 'f', 0.0218148693884, 'tangentialVectorX'],
['op_M3A_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_M3A_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_M3A_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_M3A_HFM_y', 'f', 0.0, 'verticalOffset'],
# M3A_HFM_Watchpoint3: drift
['op_M3A_HFM_Watchpoint3_L', 'f', 11.9, 'length'],
# Pinhole: aperture
['op_Pinhole_shape', 's', 'c', 'shape'],
['op_Pinhole_Dx', 'f', 1e-05, 'horizontalSize'],
['op_Pinhole_Dy', 'f', 1e-05, 'verticalSize'],
['op_Pinhole_x', 'f', 0.0, 'horizontalOffset'],
['op_Pinhole_y', 'f', 0.0, 'verticalOffset'],
# Watchpoint4_Sample: drift
['op_Watchpoint4_Sample_L', 'f', 1.14, 'length'],
#---Propagation parameters
['op_Fixed_Mask_pp', 'f', [0, 0, 1.0, 0, 0, 1.3, 1.0, 1.3, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Fixed_Mask'],
['op_Fixed_Mask_M1A_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Fixed_Mask_M1A'],
['op_M1A_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M1A'],
['op_M1A_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M1A_Watchpoint'],
['op_M2A_VDM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M2A_VDM'],
['op_M2A_VDM_Grating_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M2A_VDM_Grating'],
['op_Grating_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0580381, 0.998314, 1.0, 0.0], 'Grating'],
['op_Grating_Aperture_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Grating_Aperture'],
['op_Aperture_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture'],
['op_M3A_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M3A_HFM'],
['op_M3A_HFM_Watchpoint3_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 8.0, 1.0, 16.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M3A_HFM_Watchpoint3'],
['op_Pinhole_pp', 'f', [0, 0, 1.0, 0, 0, 0.1, 20.0, 0.1, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Pinhole'],
['op_Watchpoint4_Sample_pp', 'f', [0, 0, 1.0, 3, 0, 0.3, 1.0, 0.3, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Watchpoint4_Sample'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 0.2, 1.0, 0.35, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
])
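# varParam is consumed by srwl_bl.srwl_uti_parse_options() in main(); any entry can
# also be overridden from the command line, e.g. (illustrative only, flag names
# follow the option names defined above):
#   python <this_script>.py --w_e=750 --w_smpf=0.3 --ws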
def main():
v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
op = set_optics(v)
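    # Enable the single-/multi-e spectrum, power density, intensity, trajectory and
    # single-e propagation reports (and their plots) so one run produces all of them.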
v.ss = True
v.ss_pl = 'e'
v.sm = True
v.sm_pl = 'e'
v.pw = True
v.pw_pl = 'xy'
v.si = True
v.si_pl = 'xy'
v.tr = True
v.tr_pl = 'xz'
v.ws = True
v.ws_pl = 'xy'
mag = None
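    # An explicit multipole field container is only built for rs_type == 'm'; the
    # other source types are configured inside SRWLBeamline.calc_all() from the
    # options above.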
if v.rs_type == 'm':
mag = srwlib.SRWLMagFldC()
mag.arXc.append(0)
mag.arYc.append(0)
mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
mag.arZc.append(v.mp_zc)
srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
if __name__ == '__main__':
main()
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
from collections import deque
from subprocess import call
from optparse import OptionParser
from tempfile import mkstemp
import os
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import resource
FNULL = open('/dev/null', 'w')
base_path = os.path.dirname(sys.argv[0])[:-len('src/py/')]
file_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
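# Hard limit on open file descriptors; bin_coverage() keeps at most half this many
# per-bin FASTA files open at any one time.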
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def main():
(options, args) = get_options()
start_time = time.time()
fasta_file = options.fasta_file
error_files = []
reads_untrimmed_location = [options.first_mates, options.second_mates]
reads_trimmed_location = []
shell_file = options.output_dir + "/commands.sh"
sam_output_location_dir = options.output_dir + "/sam/"
sam_output_location = sam_output_location_dir + "library.sam"
singleton_output_dir = options.output_dir + "/singleton/"
singleton_output_location = singleton_output_dir + "singletons.csv"
ensure_dir(sam_output_location_dir)
ensure_dir(singleton_output_dir)
global shell_file_fp
shell_file_fp = open(shell_file, 'w')
setup_shell_file()
bins_dir = options.output_dir + "/bins/"
ensure_dir(bins_dir)
input_fasta_saved = options.fasta_file
output_dir_saved = options.output_dir
all_contig_lengths = {}
if options.min_contig_length > 0:
step("FILTERING ASSEMBLY CONTIGS LESS THAN " + str(options.min_contig_length) + ' BPs')
all_contig_lengths = filter_short_contigs(options)
results(options.fasta_file)
fasta_file = options.fasta_file
input_fasta_saved = options.fasta_file
step("ALIGNING READS")
unaligned_dir = run_bowtie2(options, sam_output_location)
contig_lengths = get_contig_lengths(sam_output_location)
step("RUNNING SAMTOOLS")
bam_location, sorted_bam_location, pileup_file = \
run_samtools(options, sam_output_location, index=True)
if options.coverage_file is None:
step("CALCULATING CONTIG COVERAGE")
options.coverage_file = calculate_contig_coverage(options, pileup_file)
results(options.coverage_file)
pileup_file = run_abundance_by_kmers(options)
results(options.coverage_file)
contig_abundances = get_contig_abundances(options.coverage_file)
step("CALCULATING ASSEMBLY PROBABILITY")
run_lap(options, sam_output_location, reads_trimmed_location)
if options.threads > 1:
step("PARTITIONING COVERAGE FILE")
run_split_pileup(options, pileup_file)
step("DEPTH OF COVERAGE")
error_files.append(run_depth_of_coverage(options, pileup_file))
contig_to_bin_map, bin_dir_dict = bin_coverage(options,bins_dir)
split_sam_by_bin(sam_output_location, contig_to_bin_map, bin_dir_dict)
outputBreakpointDir = options.output_dir + "/breakpoint/"
ouputBreakpointLocation = outputBreakpointDir + "errorsDetected.csv"
ensure_dir(outputBreakpointDir)
step("BREAKPOINT")
error_files.append(run_breakpoint_finder(options,\
unaligned_dir, outputBreakpointDir))
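    # Re-run samtools and REAPR separately on every coverage bin produced above.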
for bin_dir in os.listdir(bins_dir):
#if 'bin' in bin_dir:
coverages = bin_dir
options.fasta_file = os.path.abspath(output_dir_saved) + '/bins/'\
+ bin_dir + '/' + os.path.basename(input_fasta_saved)
options.output_dir = os.path.abspath(output_dir_saved) + '/bins/'\
+ bin_dir
bin_dir_infix = '/bins/' + bin_dir + '/'
bin_dir = os.path.abspath(options.output_dir) + '/bins/' + bin_dir + '/'
#warning("Bin dir is: %s" % bin_dir)
sam_output_location_dir = options.output_dir + '/sam/'
sam_output_location = sam_output_location_dir + 'library.sam'
step("RUNNING SAMTOOLS ON COVERAGE BIN " + coverages)
bam_location, sorted_bam_location, pileup_file = \
run_samtools(options, sam_output_location, with_pileup=False)
#step("DEPTH OF COVERAGE")
#error_files.append(run_depth_of_coverage(options, pileup_file))
step("MATE-PAIR HAPPINESS ON COVERAGE BIN " + coverages)
try:
error_files.append(run_reapr(options, sorted_bam_location))
except:
e = sys.exc_info()[0]
error("Reapr failed to run with: %s" % str(e))
options.output_dir = output_dir_saved
options.fasta_file = input_fasta_saved
step("SUMMARY")
summary_file = open(options.output_dir + "/summary.gff", 'w')
suspicious_file = open(options.output_dir + "/suspicious.gff", 'w')
summary_table_file = open(options.output_dir + "/summary.tsv", 'w')
#suspicious_table_file = open(options.output_dir + "/suspicious.tsv", 'w')
misassemblies = []
for error_file in error_files:
if error_file:
for line in open(error_file, 'r'):
misassemblies.append(line.strip().split('\t'))
# Sort misassemblies by start site.
misassemblies.sort(key = lambda misassembly: (misassembly[0], int(misassembly[3]), int(misassembly[4])))
final_misassemblies = []
for misassembly in misassemblies:
# Truncate starting/ending region if it is near the end of the contigs.
if int(misassembly[3]) <= options.ignore_end_distances and \
int(misassembly[4]) > options.ignore_end_distances:
misassembly[3] = str(options.ignore_end_distances + 1)
if int(misassembly[4]) >= (contig_lengths[misassembly[0]] - options.ignore_end_distances) and \
int(misassembly[3]) < (contig_lengths[misassembly[0]] - options.ignore_end_distances):
misassembly[4] = str(contig_lengths[misassembly[0]] - options.ignore_end_distances - 1)
# Don't print a flagged region if it occurs near the ends of the contig.
if int(misassembly[3]) > options.ignore_end_distances and \
int(misassembly[4]) < (contig_lengths[misassembly[0]] - options.ignore_end_distances):
summary_file.write('\t'.join(misassembly) + '\n')
final_misassemblies.append(misassembly)
summary_file.close()
results(options.output_dir + "/summary.gff")
#=====
    # Open Reading Frame (ORF) filtering
#===
orf_filtered_misassemblies = []
if options.orf_file:
call_arr = ["sort", "-T ./", "-k1,1", "-k4,4n", options.orf_file, "-o", options.output_dir + "/" + options.orf_file + "_sorted"]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr)
call_arr = ["sort", "-T ./", "-k1,1", "-k4,4n", summary_file.name, "-o", summary_file.name+"_sorted"]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr, stdout = FNULL, stderr = FNULL)
call_arr = ["mv" , summary_file.name + "_sorted", summary_file.name]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr, stdout = FNULL, stderr = FNULL)
call_arr = ["mv" , options.output_dir + "/" + options.orf_file+"_sorted", options.orf_file]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr, stdout = FNULL, stderr = FNULL)
        # An ORF file was provided; filter the flagged regions against its contents
orf_summary_file = open(options.output_dir + "/orf_filtered_summary.gff", 'w')
summary_file = open(summary_file.name, 'r')
orf_fp = open(options.orf_file, 'r')
cur_orf = orf_fp.readline()
split_cur_orf = cur_orf.split('\t')
split_cur_orf[3],split_cur_orf[4] = int(split_cur_orf[3]),int(split_cur_orf[4])
#Cycle through misassemblies
for cur_missassembly in summary_file:
split_mis = cur_missassembly.split('\t')
split_mis[3],split_mis[4] = int(split_mis[3]), int(split_mis[4])
while True:
                # No ORFs remain, or the ORF file is already past this misassembly's contig
if not cur_orf or ( split_cur_orf[0] > split_mis[0] ):
break
                # The current ORF is on an earlier contig; advance the ORF file
elif split_cur_orf[0] < split_mis[0]:
cur_orf = None
cur_orf = orf_fp.readline()
while cur_orf:
split_cur_orf = cur_orf.split('\t')
split_cur_orf[3],split_cur_orf[4] = int(split_cur_orf[3]),int(split_cur_orf[4])
if split_cur_orf[0] >= split_mis[0]:
break
cur_orf = orf_fp.readline()
if not cur_orf:
break
                # ORF and misassembly are on the same contig
else:
                    # Check each ORF on this contig for overlap with the misassembly
while True:
if not cur_orf:
break
                        # Misassembly ends before this ORF starts: no overlap
elif split_mis[4] < split_cur_orf[3]:
break
                        # Misassembly overlaps this ORF: record it and move to the next misassembly
elif ( split_mis[3] < split_cur_orf[3] and split_mis[4]<split_cur_orf[4] )\
or ( split_mis[3] >= split_cur_orf[3] and split_mis[4] <= split_cur_orf[4] )\
or ( split_mis[3] >= split_cur_orf[3] and split_mis[3] <= split_cur_orf[4] and split_mis[4] >= split_cur_orf[4] )\
or ( split_mis[3] < split_cur_orf[3] and split_mis[4] >= split_cur_orf[4] ):
orf_summary_file.write(cur_missassembly)
orf_filtered_misassemblies.append(cur_missassembly.strip().split('\t'))
break
                        # Misassembly starts after this ORF ends: advance to the next ORF
elif split_mis[3] > split_cur_orf[4]:
cur_orf = orf_fp.readline()
if cur_orf:
split_cur_orf = cur_orf.split('\t')
split_cur_orf[3],split_cur_orf[4] = int(split_cur_orf[3]),int(split_cur_orf[4])
#Advance orf file, reevaluate
else:
break
break
# Find regions with multiple misassembly signatures.
#suspicious_regions = find_suspicious_regions(misassemblies, options.min_suspicious_regions)
suspicious_regions = find_sliding_suspicious_regions(final_misassemblies, options.suspicious_window_size, options.min_suspicious_regions)
final_suspicious_misassemblies = []
for region in suspicious_regions:
#if int(region[3]) > options.ignore_end_distances and \
# int(region[4]) <= (contig_lengths[region[0]] - options.ignore_end_distances):
if int(region[4]) > (contig_lengths[region[0]] - options.ignore_end_distances):
region[4] = str(contig_lengths[region[0]] - options.ignore_end_distances)
suspicious_file.write('\t'.join(region) + '\n')
final_suspicious_misassemblies.append(region)
results(options.output_dir + "/suspicious.gff")
# Output summary table.
generate_summary_table(options.output_dir + "/summary.tsv", all_contig_lengths, \
contig_lengths, contig_abundances, final_misassemblies)
results(options.output_dir + "/summary.tsv")
if options.orf_file:
generate_summary_table(options.output_dir + "/orf_summary.tsv", \
all_contig_lengths, contig_lengths, contig_abundances, orf_filtered_misassemblies,orf=True)
joined_summary_fp = open(options.output_dir + "/joined_summary.tsv", 'w')
call_arr = ["join", "-a1" , "-o", "0", "1.2", "1.3", "1.4", "1.5", "1.6", "1.7", "1.8", "1.9", "1.10", "2.3","2.4","2.5","2.6","2.7","2.8","2.9","2.10", '-e', "0", '-1', '1', '-2', '1' , "-t", ' ', options.output_dir + "/summary.tsv", options.output_dir + "/orf_summary.tsv"]
out_cmd(joined_summary_fp.name, FNULL.name, call_arr)
call(call_arr, stdout = joined_summary_fp, stderr = FNULL)
# Output suspicious table.
#generate_summary_table(options.output_dir + "/suspicious.tsv", all_contig_lengths, \
# contig_lengths, final_suspicious_misassemblies)
#results(options.output_dir + "/suspicious.tsv")
if options.email:
notify_complete(options.email,time.time()-start_time)
def get_options():
parser = OptionParser()
parser.add_option("-a", "--assembly-fasta", dest="fasta_file", \
help="Candidate assembly file", metavar="FILE")
parser.add_option("-r", "--reads", dest="reads_filenames", \
help="First Read File", metavar="FILE")
parser.add_option("-1", "--1", dest="first_mates", \
help="Fastq filenames separated by commas that contain the first mates.")
parser.add_option("-2", "--2", dest="second_mates", \
help="Fastq filenames separated by commas that contain the second mates.")
parser.add_option("-c", "--coverage-file", dest="coverage_file", \
help="Assembly created per-contig coverage file")
parser.add_option("-o", "--output-dir", dest="output_dir", \
help = "Output directory", default="data/output/")
parser.add_option("-w", "--window-size", dest="window_size", \
help = "Sliding window size when determining misassemblies.", default = "201")
parser.add_option("-q", "--fastq", dest="fastq_file", \
default=False, action='store_true', \
help="if set, input reads are fastq format (fasta by default).")
parser.add_option("-p", "--threads", dest="threads", \
help = "Number of threads", default="8")
parser.add_option("-I", "--minins", dest="min_insert_size", \
help="Min insert sizes for mate pairs separated by commas.", default="0")
parser.add_option("-X", "--maxins", dest="max_insert_size", \
help="Max insert sizes for mate pairs separated by commas.", default="500")
parser.add_option("-n", "--orientation", dest="orientation", default="fr", \
help="Orientation of the mates.")
parser.add_option("-m", "--mu" , dest="mu", default = "180", \
help="average mate pair insert sizes.")
parser.add_option("-t", "--sigma" , dest="sigma", default = "18", \
help="standard deviation of mate pair insert sizes.")
parser.add_option("-x", "--max-alignments", dest="max_alignments", default = "10000", \
help="bowtie2 parameter to set the max number of alignments.")
parser.add_option("-e", "--email", dest="email", \
help="Email to notify when job completes")
parser.add_option("-g", "--min-coverage", dest="min_coverage", type="int", default=0, \
help="Minimum average coverage to run misassembly detection.")
parser.add_option("-l", "--coverage-multiplier", dest="coverage_multiplier", type=float, default=0.0, \
help="When binning by coverage, the new high = high + high * multiplier")
parser.add_option("-s", "--min-suspicious", dest="min_suspicious_regions", default=2, type=int, \
help="Minimum number of overlapping flagged miassemblies to mark region as suspicious.")
parser.add_option("-d", "--suspicious-window-size", dest="suspicious_window_size", default=2000, type=int, \
help="Mark region as suspicious if multiple signatures occur within this window size.")
parser.add_option('-z', "--min-contig-length", dest="min_contig_length", default=1000, type=int, \
help="Ignore contigs smaller than this length.")
parser.add_option('-b', "--ignore-ends", dest="ignore_end_distances", default=0, type=int, \
help="Ignore flagged regions within b bps from the ends of the contigs.")
parser.add_option('-k', "--breakpoint-bin", dest="breakpoints_bin", default="50", type=str, \
help="Bin sized used to find breakpoints.")
parser.add_option('-f', "--orf-file", dest="orf_file", help="gff formatted file containing orfs")
parser.add_option("--kmer", dest="kmer_length", help="kmer length used for abundance estimation", \
default = "15")
(options, args) = parser.parse_args()
should_err = False
if not options.fasta_file:
warning("You need to provide a fasta file with -a")
should_err = True
#if not options.read_file_1:
# warning("You need to provide the first read file with -r")
# should_err = True
#if not options.read_file_2:
# warning("You need to provide the second read file with -d")
# should_err = True
if not options.coverage_file:
warning("Coverage file not provided, will create one.")
#should_err = True
if should_err:
parser.print_help()
exit(-1)
return (options,args)
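# Typical invocation (illustrative only; file names are placeholders):
#   python <this_script>.py -a assembly.fasta -1 reads_1.fastq -2 reads_2.fastq \
#       -q -o output/ -p 8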
def ran_command(st, fp):
fp.write(st)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
assert os.path.exists(d)
def notify_complete(target_email,t):
call(['echo "Completed in %d" | mail -s "Job Completed" %s' % (t, target_email) ],shell=True)
def setup_shell_file():
if shell_file_fp:
shell_file_fp.write("#!/bin/bash\n")
def line(x):
print ("-"*x, file=sys.stderr)
def step(*objs):
line(75)
print(bcolors.HEADER + "STEP:\t" + bcolors.ENDC, *objs, file=sys.stderr)
def out_cmd(std_out = "", std_err = "", *objs):
#line(75)
if shell_file_fp:
if std_out != "":
std_out_sht = " 1>%s " % (std_out)
else:
std_out_sht = ""
if std_err != "":
std_err_sht = " 2>%s " % (std_err)
else:
std_err_sht = ""
shell_file_fp.write(' '.join(*objs) + std_out_sht + std_err_sht + "\n")
shell_file_fp.flush()
print(bcolors.OKBLUE + "COMMAND:\t" + bcolors.ENDC, ' '.join(*objs), file=sys.stderr)
def results(*objs):
print(bcolors.WARNING + "RESULTS:\t" + bcolors.ENDC,*objs, file=sys.stderr)
def warning(*objs):
print("INFO:\t",*objs, file=sys.stderr)
def error(*objs):
print(bcolors.WARNING + "ERROR:\t" + bcolors.ENDC, *objs, file=sys.stderr)
def filter_short_contigs(options):
"""
Filter out contigs less than a certain length.
"""
filtered_fasta_filename = options.output_dir + '/filtered_assembly.fasta'
filtered_assembly_file = open(filtered_fasta_filename, 'w')
all_contig_lengths = {}
curr_length = 0
with open(options.fasta_file,'r') as assembly:
for contig in contig_reader(assembly):
curr_length = len(''.join(contig['sequence']))
if curr_length >= options.min_contig_length:
filtered_assembly_file.write(contig['name'])
filtered_assembly_file.writelines(contig['sequence'])
filtered_assembly_file.write('\n')
all_contig_lengths[contig['name'].strip()[1:]] = curr_length
filtered_assembly_file.close()
options.fasta_file = filtered_fasta_filename
return all_contig_lengths
def get_contig_lengths(sam_filename):
"""
Return a dictionary of contig names => contig lengths from a SAM file.
"""
sam_file = open(sam_filename, 'r')
# Build dictionary of contig lengths.
contig_lengths = {}
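    # @SQ header lines look like "@SQ SN:contig00001 LN:12000"; capture name and length.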
    pattern = re.compile(r'SN:(?P<contig>[\w_\|\.\-]+)\s*LN:(?P<length>\d+)')
line = sam_file.readline()
while line.startswith("@"):
if line.startswith("@SQ"):
matches = pattern.search(line)
            if matches and len(matches.groups()) == 2:
contig_lengths[matches.group('contig')] = int(matches.group('length'))
line = sam_file.readline()
return contig_lengths
def get_contig_abundances(abundance_filename):
"""
    Return a dictionary of contig names => contig abundances from the coverage file (e.g. 'coverage/temp.cvg').
"""
abundance_file = open(abundance_filename, 'r')
# Build a dictionary of contig abundances.
contig_abundances = {}
for line in abundance_file:
contig_and_abundance = line.strip().split()
contig_abundances[contig_and_abundance[0]] = int(round(float(contig_and_abundance[1])))
return contig_abundances
def find_sliding_suspicious_regions(misassemblies, sliding_window = 2000, min_cutoff = 2):
"""
Output any region that has multiple misassembly signature types within the sliding window.
"""
regions =[]
for misassembly in misassemblies:
regions.append([misassembly[0], misassembly[3], 'START', misassembly[2]])
regions.append([misassembly[0], misassembly[4], 'END', misassembly[2]])
regions.sort(key = lambda region: (region[0], int(region[1])))
"""
Example:
relocref 36601 START Breakpoint_finder
relocref 36801 END Breakpoint_finder
relocref 67968 START REAPR
relocref 68054 START REAPR
relocref 69866 END REAPR
relocref 69867 START REAPR
relocref 71833 END REAPR
relocref 73001 START Breakpoint_finder
relocref 73201 END Breakpoint_finder
"""
# Store all the signatures, starting, and ending points within a given window.
#start_points = deque([])
#end_points = deque([])
#signatures = deque([])
signatures = []
curr_contig = None
count = 0
suspicious_regions = []
for index in xrange(0, len(misassemblies)):
curr_contig = misassemblies[index][0]
count = 0
second_index = index + 1
signatures = [misassemblies[index][2]]
start_point = int(misassemblies[index][3])
end_point = int(misassemblies[index][4]) + sliding_window
# While we are on the same contig, and still in the sliding window...
while second_index < len(misassemblies) and \
misassemblies[second_index][0] == curr_contig and \
int(misassemblies[second_index][3]) < (int(misassemblies[index][4]) + sliding_window):
if misassemblies[second_index][2] not in signatures:
signatures.append(misassemblies[second_index][2])
count += 1
if int(misassemblies[second_index][4]) > end_point:
end_point = int(misassemblies[second_index][4])
second_index += 1
if len(signatures) >= min_cutoff:
suspicious_regions.append([misassemblies[index][0], '.', 'SUSPICIOUS', str(start_point), str(end_point), '.', '.', '.', 'color=#181009;' + ','.join(signatures)])
# Hack to correct for overlapping suspicious regions.
compressed_suspicious_regions = []
prev_region = None
for region in suspicious_regions:
if prev_region is None:
prev_region = region
else:
if prev_region[0] == region[0] and int(prev_region[4]) >= int(region[3]):
prev_region[4] = region[4]
else:
compressed_suspicious_regions.append(prev_region)
prev_region = region
if prev_region:
compressed_suspicious_regions.append(prev_region)
return compressed_suspicious_regions
def find_suspicious_regions(misassemblies, min_cutoff = 2):
"""
    Given a list of misassemblies in GFF format, flag regions where at least min_cutoff signatures overlap.
"""
regions =[]
for misassembly in misassemblies:
regions.append([misassembly[0], misassembly[3], 'START', misassembly[2]])
regions.append([misassembly[0], misassembly[4], 'END', misassembly[2]])
regions.sort(key = lambda region: (region[0], int(region[1])))
"""
Example:
relocref 36601 START Breakpoint_finder
relocref 36801 END Breakpoint_finder
relocref 67968 START REAPR
relocref 68054 START REAPR
relocref 69866 END REAPR
relocref 69867 START REAPR
relocref 71833 END REAPR
relocref 73001 START Breakpoint_finder
relocref 73201 END Breakpoint_finder
"""
curr_contig = None
curr_index = 0
curr_length = -1
start_indexes = []
start_region = 0
end_index = 0
signatures = []
recording = False
signature_starts = defaultdict(list)
curr_coverage = 0
suspicious_regions = []
for region in regions:
if curr_contig is None:
curr_contig = region[0]
recording = False
signature_starts = defaultdict(list)
# We have found a new contig, process the previous contig results.
if region[0] != curr_contig:
curr_contig = region[0]
recording = False
if region[2] == 'START':
curr_coverage += 1
if region[3] not in signatures: signatures.append(region[3])
signature_starts[region[3]].append(region[1])
# Record start point.
if curr_coverage == min_cutoff:
start_region = region[1]
                recording = True
start_indexes.append(region[1])
else:
curr_coverage -= 1
end_index = region[1]
if region[3] in signatures: signatures.remove(region[3])
# If we were recording, and min signatures drop belows threshold,
# then we need to output our results
if curr_coverage < min_cutoff and recording:
min_start = None
suspicious_regions.append([region[0], '.', 'SUSPICIOUS', str(start_region), str(end_index), '.', '.', '.', 'color=#181009;' + ','.join(signatures)])
signatures = []
recording = False
if curr_coverage >= min_cutoff:
recording = True
# Hack to correct for overlapping suspicious regions.
compressed_suspicious_regions = []
prev_region = None
for region in suspicious_regions:
if prev_region is None:
prev_region = region
else:
if prev_region[0] == region[0] and int(prev_region[4]) >= int(region[3]):
prev_region[4] = region[4]
else:
compressed_suspicious_regions.append(prev_region)
prev_region = region
if prev_region:
compressed_suspicious_regions.append(prev_region)
return compressed_suspicious_regions
def generate_summary_table(table_filename, all_contig_lengths, filtered_contig_lengths, contig_abundances, misassemblies, orf=False):
"""
Output the misassemblies in a table format:
contig_name contig_length low_cov low_cov_bps high_cov high_cov_bps ...
CONTIG1 12000 1 100 0 0 ...
CONTIG2 100 NA NA NA ...
"""
table_file = open(table_filename, 'w')
if orf:
table_file.write("contig_name\tcontig_length\tabundance\torf_low_cov\torf_low_cov_bps\torf_high_cov\torf_high_cov_bps\torf_reapr\torf_reapr_bps\torf_breakpoints\torf_breakpoints_bps\n")
else:
table_file.write("contig_name\tcontig_length\tabundance\tlow_cov\tlow_cov_bps\thigh_cov\thigh_cov_bps\treapr\treapr_bps\tbreakpoints\tbreakpoints_bps\n")
prev_contig = None
curr_contig = None
# Misassembly signatures
low_coverage = 0
low_coverage_bps = 0
high_coverage = 0
high_coverage_bps = 0
reapr = 0
reapr_bps = 0
breakpoints = 0
breakpoints_bps = 0
processed_contigs = set()
for misassembly in misassemblies:
"""
contig00001 REAPR Read_orientation 88920 97033 . . . Note=Warning: Bad read orientation;colour=1
contig00001 REAPR FCD 89074 90927 0.546142 . . Note=Error: FCD failure;colour=17
contig00001 DEPTH_COV low_coverage 90818 95238 29.500000 . . low=30.000000;high=70.000000;color=#7800ef
"""
curr_contig = misassembly[0]
if prev_contig is None:
prev_contig = curr_contig
if curr_contig != prev_contig:
# Output previous contig stats.
table_file.write(prev_contig + '\t' + str(filtered_contig_lengths[prev_contig]) + '\t' + str(contig_abundances[prev_contig]) + '\t' + \
str(low_coverage) + '\t' + str(low_coverage_bps) + '\t' + str(high_coverage) + '\t' + \
str(high_coverage_bps) + '\t' + str(reapr) + '\t' + str(reapr_bps) + '\t' + str(breakpoints) + '\t' + \
str(breakpoints_bps) + '\n')
processed_contigs.add(prev_contig)
# Reset misassembly signature counts.
low_coverage = 0
low_coverage_bps = 0
high_coverage = 0
high_coverage_bps = 0
reapr = 0
reapr_bps = 0
breakpoints = 0
breakpoints_bps = 0
prev_contig = curr_contig
# Process the current contig misassembly.
if misassembly[1] == 'REAPR':
if 'Warning' not in misassembly[8]:
reapr += 1
reapr_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
elif misassembly[1] == 'DEPTH_COV':
if misassembly[2] == 'Low_coverage':
low_coverage += 1
low_coverage_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
else:
high_coverage += 1
high_coverage_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
elif misassembly[1] == 'Breakpoint_finder':
breakpoints += 1
breakpoints_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
else:
print("Unhandled error: " + misassembly[1])
if prev_contig:
# Output previous contig stats.
table_file.write(prev_contig + '\t' + str(filtered_contig_lengths[prev_contig]) + '\t' + str(contig_abundances[prev_contig]) + '\t' + \
str(low_coverage) + '\t' + str(low_coverage_bps) + '\t' + str(high_coverage) + '\t' + \
str(high_coverage_bps) + '\t' + str(reapr) + '\t' + str(reapr_bps) + '\t' + str(breakpoints) + '\t' + \
str(breakpoints_bps) + '\n')
processed_contigs.add(prev_contig)
# We need to add the remaining, error-free contigs.
for contig in filtered_contig_lengths:
if contig not in processed_contigs:
table_file.write(contig + '\t' + str(filtered_contig_lengths[contig]) + '\t' + str(contig_abundances[contig]) + '\t' + \
'0\t0\t0\t0\t0\t0\t0\t0\n')
processed_contigs.add(contig)
# Finally, add the contigs that were filtered out prior to evaluation.
for contig in all_contig_lengths:
if contig not in processed_contigs:
table_file.write(contig + '\t' + str(all_contig_lengths[contig]) + '\t' + 'NA\t' + \
'NA\tNA\tNA\tNA\tNA\tNA\tNA\tNA\n')
processed_contigs.add(contig)
def calculate_contig_coverage(options, pileup_file):
"""
Calculate contig coverage. The coverage of a contig is the mean per-bp coverage.
"""
coverage_filename = options.output_dir + '/coverage/temp.cvg'
coverage_file = open(coverage_filename, 'w')
prev_contig = None
curr_contig = None
length = 0
curr_coverage = 0
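    # Pileup columns are contig, position, reference base, depth, ...; sum the
    # per-base depth (field index 3) and divide by the contig length.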
for record in open(pileup_file, 'r'):
fields = record.strip().split()
if prev_contig != fields[0]:
if prev_contig:
coverage_file.write(prev_contig + '\t' + str(float(curr_coverage) / length) + '\n')
prev_contig = fields[0]
length = 0
curr_coverage = 0
curr_coverage += int(fields[3])
length += 1
if prev_contig:
coverage_file.write(prev_contig + '\t' + str(float(curr_coverage) / length) + '\n')
coverage_file.close()
return coverage_filename
def build_bowtie2_index(index_name, reads_file):
"""
Build a Bowtie2 index.
"""
command = os.path.join(base_path, "bin/bowtie2-2.2.2/bowtie2-build ") + os.path.abspath(reads_file) + " " + os.path.abspath(index_name)
# Bad workaround.
out_cmd(FNULL.name, FNULL.name, [command])
bowtie2_build_proc = subprocess.Popen(command, shell = True, stdout = FNULL, stderr = FNULL)
bowtie_output, err = bowtie2_build_proc.communicate()
bowtie2_build_proc.wait()
return index_name
def run_bowtie2(options = None, output_sam = 'temp.sam'):
"""
Run Bowtie2 with the given options and save the SAM file.
"""
# Using bowtie2.
# Create the bowtie2 index if it wasn't given as input.
#if not assembly_index:
if not os.path.exists(os.path.abspath(options.output_dir) + '/indexes'):
os.makedirs(os.path.abspath(options.output_dir) + '/indexes')
fd, index_path = mkstemp(prefix='temp_',\
dir=(os.path.abspath(options.output_dir) + '/indexes/'))
try:
        os.makedirs(os.path.dirname(index_path))
except:
pass
fasta_file = options.fasta_file
build_bowtie2_index(os.path.abspath(index_path), os.path.abspath(fasta_file))
assembly_index = os.path.abspath(index_path)
unaligned_dir = os.path.abspath(options.output_dir) + '/unaligned_reads/'
ensure_dir(unaligned_dir)
unaligned_file = unaligned_dir + 'unaligned.reads'
#input_sam_file = output_sam_file
read_type = " -f "
if options.fastq_file:
read_type = " -q "
bowtie2_args = ""
bowtie2_unaligned_check_args = ""
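    # For paired input two bowtie2 passes are made: the first aligns proper pairs to
    # produce the SAM used downstream, the second re-aligns all reads unpaired with
    # --un so unaligned reads can be collected for the breakpoint finder.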
if options.first_mates:
bowtie2_args = "-a -x " + assembly_index + " -1 " + options.first_mates\
+ " -2 " + options.second_mates + " -p " + options.threads\
+ " --very-sensitive -a " + " --reorder --"\
+ options.orientation + " -I " + options.min_insert_size\
+ " -X " + options.max_insert_size + " --no-mixed" #+ " --un-conc "\
#+ unaligned_file
bowtie2_unaligned_check_args = "-a -x " + assembly_index + read_type + " -U "\
+ options.first_mates + "," + options.second_mates + " --very-sensitive -a "\
+ " --reorder -p " + options.threads + " --un " + unaligned_file
else:
bowtie2_args = "-a -x " + assembly_index + read_type + " -U "\
+ options.reads_filenames + " --very-sensitive -a "\
+ " --reorder -p " + options.threads + " --un " + unaligned_file
if not options:
sys.stderr.write("[ERROR] No Bowtie2 options specified" + '\n')
return
# Using bowtie 2.
command = os.path.join(base_path, "bin/bowtie2-2.2.2/bowtie2 ") + bowtie2_args + " -S " + output_sam
out_cmd( FNULL.name, FNULL.name,[command])
#call(command.split())
args = shlex.split(command)
bowtie_proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=FNULL)
bowtie_output, err = bowtie_proc.communicate()
if bowtie2_unaligned_check_args != "":
command = os.path.join(base_path, "bin/bowtie2-2.2.2/bowtie2 ") + bowtie2_unaligned_check_args + " -S " + output_sam + "_2.sam"
out_cmd( FNULL.name, FNULL.name, [command])
args = shlex.split(command)
bowtie_proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=FNULL)
bowtie_output, err = bowtie_proc.communicate()
return unaligned_dir
def run_breakpoint_finder(options,unaligned,breakpoint_dir):
'''
attempts to find breakpoints
'''
std_err_file = open(breakpoint_dir + 'splitter_std_err.log', 'w')
call_arr = [os.path.join(base_path,'src/py/breakpoint_splitter.py'),\
'-u', unaligned,\
'-o', breakpoint_dir + 'split_reads/']
out_cmd( "", std_err_file.name, call_arr)
call(call_arr, stderr=std_err_file)
std_err_file.close()
std_err_file = open(breakpoint_dir + 'std_err.log','w')
call_arr = [os.path.join(base_path, 'src/py/breakpoint_finder.py'),\
'-a', options.fasta_file,\
'-r', breakpoint_dir + 'split_reads/',\
'-b', options.breakpoints_bin, '-o', breakpoint_dir,\
'-c', options.coverage_file,\
'-p', options.threads]
out_cmd( "", std_err_file.name,call_arr)
call(call_arr,stderr=std_err_file)
results(breakpoint_dir + 'interesting_bins.gff')
return breakpoint_dir + 'interesting_bins.gff'
def split_sam_by_bin(sam_output_location, contig_to_bin_map, bin_dir_dict):
common_header = ""
output_bin = {}
output_fp = {}
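    # Generic header lines (@HD/@PG/@CO/@RG) are copied to every bin's SAM; @SQ lines
    # and alignments are routed only to the bin that owns the referenced contig.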
for bin in set(contig_to_bin_map.values()):
output_bin[bin] = ""
bin_dir = bin_dir_dict[bin]
if os.path.exists(bin_dir):
ensure_dir(bin_dir + "sam/")
output_fp[bin] = open(bin_dir + "sam/"\
+ os.path.basename(sam_output_location), 'w')
else:
error("Bin dir did not exist")
error("%s" % (str(bin_dir_dict)))
with open(sam_output_location, 'r') as sam_file:
for line in sam_file:
if line.split()[0] == "@HD" or line.split()[0] == "@PG"\
or line.split()[0] == "@CO" or line.split()[0] == "@RG":
for fp in output_fp.values():
fp.write(line)
elif line.split()[0] == "@SQ":
# TODO: Clean up.
if line.split()[1].split(':')[1] in contig_to_bin_map:
bin = contig_to_bin_map[line.split()[1].split(':')[1]]
output_fp[bin].write(line)
else:
line_split = line.split('\t')
if line_split[2] == '*':
pass
else:
# TODO: Clean up.
if line_split[2] in contig_to_bin_map:
bin = contig_to_bin_map[line_split[2]]
output_fp[bin].write(line)
def increment_coverage_window(options, low, high):
""" Find new low/high boundaries for coverage bins. """
low = high
prev_high = high
high = int(high + high * options.coverage_multiplier)
if high == prev_high:
high = high + 1
#warning("Incremented coverage window to: %d -~- %d" % (low, high))
return low, high
def bin_coverage(options, bin_dir):
contig_to_coverage_map = {}
contig_to_bin_map = {}
bin_to_name_map = {}
with open(options.coverage_file,'r') as coverage_file:
for line in coverage_file:
split_line = line.split()
if float(split_line[1]) >= options.min_coverage:
# Only store contigs who are above minimum avg coverage.
contig_to_coverage_map[split_line[0]] = float(split_line[1])
else:
warning("Not binning contig: %s due to lower than minimum coverage %f"\
% (split_line[0], options.min_coverage))
max_cvg = max(contig_to_coverage_map.values())
high = int(options.min_coverage + options.min_coverage * .1)
if high <= options.min_coverage:
high = high + 1
low = options.min_coverage
curr_bin = 0
bins = []
while len(contig_to_bin_map) < len(contig_to_coverage_map):
slice_dict = {k: v for k,v in contig_to_coverage_map.iteritems() if low<=v and high>v}
for contig in slice_dict.keys():
contig_to_bin_map[contig] = curr_bin
bin_to_name_map[curr_bin] = (low, high)
low, high = increment_coverage_window(options, low, high)
curr_bin += 1
bin_set = set(contig_to_bin_map.values())
fp_dict = {}
bin_dir_dict = {}
open_fp_count = 0
unopened_fp = {}
processed_file_names = {}
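    # Stay under the OS file-descriptor limit: at most file_limit/2 per-bin FASTA
    # files are open at once; the remainder are queued in unopened_fp and written on
    # later passes over the assembly.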
for bin in bin_set:
#a_new_bin = bin_dir + "bin" + str(bin) + "/"
a_new_bin = bin_dir + str(bin_to_name_map[bin][0]) + "x-" + str(bin_to_name_map[bin][1]) + "x/"
bin_dir_dict[bin] = a_new_bin
ensure_dir(a_new_bin)
shutil.copy(options.coverage_file, a_new_bin +\
os.path.basename(options.coverage_file))
if open_fp_count < (file_limit/2):
fp_dict[bin] = open(a_new_bin + os.path.basename(options.fasta_file),'w')
open_fp_count += 1
else:
unopened_fp[bin] = a_new_bin + os.path.basename(options.fasta_file)
#fp_dict[bin].close()
#fp_dict[bin] = a_new_bin + os.path.basename(options.fasta_file)
warning("Contig to bin map is: %s" %(str(contig_to_bin_map)))
while True:
with open(options.fasta_file,'r') as assembly:
for contig in contig_reader(assembly):
# TODO: Clean up.
if contig['name'][1:].strip() in contig_to_bin_map:
bin = contig_to_bin_map[contig['name'][1:].strip()]
if bin in fp_dict.keys() and not fp_dict[bin].closed:
                        # Write through the still-open handle; a `with` block would
                        # close the file and silently drop later contigs in this bin.
                        bin_file = fp_dict[bin]
                        bin_file.write(contig['name'])
                        bin_file.writelines(contig['sequence'])
else:
warning("Throwing away contig: %s due to not being in contig_to_bin_map" % (contig['name'][1:].strip()))
temp_key_list = fp_dict.keys()[:]
for bin in temp_key_list:
fp_dict[bin].close()
open_fp_count -= 1
processed_file_names[bin] = fp_dict[bin]
del fp_dict[bin]
if len(unopened_fp.keys()) == 0:
break
temp_key_list = unopened_fp.keys()[:]
for bin in temp_key_list:
            if open_fp_count < (file_limit / 2):
fp_dict[bin] = open(unopened_fp[bin],'w')
del unopened_fp[bin]
                open_fp_count += 1
else:
break
for fp in processed_file_names.values():
name = fp.name
if os.stat(name).st_size <= 10:
warning("Would have removed tree: %s for file: %s" % (os.path.dirname(name), name))
shutil.rmtree(os.path.dirname(name))
return contig_to_bin_map,bin_dir_dict
def contig_reader(fasta_file):
save_line = ""
contig = {}
in_contig = False
for line in fasta_file:
if line[0] == '>' and in_contig:
save_line = line
ret_contig = contig
contig = {}
contig['sequence'] = []
contig['name'] = line.split()[0].strip() + "\n"
yield ret_contig
elif line[0] == '>':
contig['name'] = line.split()[0].strip() + "\n"
contig['sequence'] = []
in_contig = True
else:
contig['sequence'].append(line.strip())
yield contig
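# Illustrative sketch (not part of the pipeline): contig_reader only needs an
# iterable of FASTA lines, so a plain list of strings stands in for an open
# file handle here.
def _demo_contig_reader():
    fasta_lines = [">contig_1 sample\n", "ACGT\n", "TTGA\n", ">contig_2\n", "GGCC\n"]
    contigs = list(contig_reader(fasta_lines))
    # contigs[0] == {'name': '>contig_1\n', 'sequence': ['ACGT', 'TTGA']}
    # contigs[1] == {'name': '>contig_2\n', 'sequence': ['GGCC']}
    return contigs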
def run_lap(options, sam_output_location, reads_trimmed_location):
""" Calculate the LAP using the previously computed SAM file. """
output_probs_dir = options.output_dir + "/lap/"
ensure_dir(output_probs_dir)
output_probs_location = output_probs_dir + "output.prob"
fp = open(output_probs_location, "w")
reads = [options.reads_filenames]
if options.first_mates:
reads = [options.first_mates, options.second_mates]
call_arr = []
if options.first_mates:
call_arr = [os.path.join(base_path, "bin/lap/aligner/calc_prob.py"), "-a", options.fasta_file, "-s", sam_output_location, "-q", "-1", options.first_mates, "-2", options.second_mates, "-n", options.coverage_file, '-o', options.orientation, "-I", options.min_insert_size, "-X", options.max_insert_size, '-p', options.threads]
else:
call_arr = [os.path.join(base_path, "bin/lap/aligner/calc_prob.py"), "-a", options.fasta_file, "-s", sam_output_location, "-q", "-i", ','.join(reads), "-n", options.coverage_file, '-p', options.threads]
out_cmd(fp.name, "", call_arr)
#warning("That command outputs to: ", output_probs_location)
results(output_probs_location)
call(call_arr, stdout=fp)
output_sum_probs_location = output_probs_dir + "output.sum"
call_arr = [os.path.join(base_path, "bin/lap/aligner/sum_prob.py"), "-i", output_probs_location, "-t", "1e-80"]
out_cmd( output_sum_probs_location, "", call_arr)
call(call_arr, stdout=open(output_sum_probs_location,'w'))
results(output_sum_probs_location)
def run_samtools(options, sam_output_location, with_pileup = True, index=False):
""" Takes a sam file and runs samtools to create bam, sorted bam, and mpileup. """
bam_dir = options.output_dir + "/bam/"
ensure_dir(bam_dir)
bam_location = bam_dir + "library.bam"
sorted_bam_location = bam_dir + "sorted_library"
bam_fp = open(bam_location, 'w+')
error_file_location = bam_dir + "error.log"
error_fp = open(error_file_location, 'w+')
#warning("About to run samtools view to create bam")
call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "view", "-bS", sam_output_location]
out_cmd(bam_fp.name, error_fp.name, call_arr)
#warning("That command outputs to file: ", bam_location)
call(call_arr, stdout = bam_fp, stderr = error_fp)
#warning("About to attempt to sort bam")
call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "sort", bam_location, sorted_bam_location]
out_cmd( "", FNULL.name, call_arr)
call(call_arr, stderr = FNULL)
coverage_file_dir = options.output_dir + "/coverage/"
ensure_dir(coverage_file_dir)
pileup_file = coverage_file_dir + "mpileup_output.out"
p_fp = open(pileup_file, 'w')
if with_pileup:
call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "mpileup", "-A", "-f", options.fasta_file, sorted_bam_location + ".bam"]
out_cmd(p_fp.name, FNULL.name, call_arr)
results(pileup_file)
#warning("That command outputs to file: ", pileup_file)
call(call_arr, stdout = p_fp, stderr = FNULL)
if index:
call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "index", sorted_bam_location + ".bam"]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr, stdout = FNULL, stderr = FNULL)
return (bam_location, sorted_bam_location, pileup_file)
def run_split_pileup(options, pileup_file):
""" Split the pileup file into a number of chunks. """
call_arr = [os.path.join(base_path, "src/py/split_pileup.py"), "-p", pileup_file, "-c", options.threads]
out_cmd("","",call_arr)
call(call_arr)
def run_abundance_by_kmers(options):
""" Pileup based on k-mer abundances."""
coverage_filename = options.output_dir + '/coverage/temp_kmer.cvg'
coverage_file = open(coverage_filename, 'w')
options.kmer_pileup_file = options.output_dir + "/coverage/kmer_pileup"
options.coverage_file = options.output_dir + '/coverage/temp_kmer.cvg'
# ./src/py/abundance_by_kmers.py -a test/test_kmer_abun.fna -r test/test_kmers_abun_lib.fastq -k 15 -t 4 -e .98 -p tmp_kmer_abun_15_30 -m 30
call_arr = [os.path.join(base_path, "src/py/abundance_by_kmers.py"), \
"-a", options.fasta_file,\
"-r", options.reads_filenames,\
"-k", options.kmer_length,\
"-t", options.threads,\
"-e", ".98",
"-p", options.kmer_pileup_file]
out_cmd("","",call_arr)
call(call_arr, stdout=coverage_file)
return options.kmer_pileup_file
def run_depth_of_coverage(options, pileup_file):
""" Run depth of coverage. """
dp_fp = options.output_dir + "/coverage/errors_cov.gff"
abundance_file = options.coverage_file
#call_arr = ["src/py/depth_of_coverage.py", "-a", abundance_file, "-m", pileup_file, "-w", options.window_size, "-o", dp_fp, "-g", "-e"]
call_arr = [os.path.join(base_path, "src/py/depth_of_coverage.py"), "-m", pileup_file, "-w", options.window_size, "-o", dp_fp, "-g", "-e", "-c", options.threads]
out_cmd("","",call_arr)
call(call_arr)
results(dp_fp)
return dp_fp
def run_reapr(options, sorted_bam_location):
""" Run REAPR. """
reapr_command = os.path.join(base_path, "bin/Reapr_1.0.17/reapr")
#warning("About to run facheck")
call_arr = [reapr_command, "facheck", options.fasta_file ]
out_cmd("","",call_arr)
call(call_arr)
reapr_output_dir = options.output_dir + "/reapr"
reapr_perfect_prefix = options.output_dir + "/r_perfect_prefix"
#warning("About to run reapr pipeline")
call_arr = [reapr_command, "pipeline", options.fasta_file,\
sorted_bam_location + ".bam", reapr_output_dir]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr, stdout=FNULL, stderr=FNULL)
call_arr = ["gunzip", reapr_output_dir + "/03.score.errors.gff"]
out_cmd(FNULL.name, FNULL.name, call_arr)
call(call_arr, stdout=FNULL, stderr=FNULL)
if os.path.exists(reapr_output_dir + "/03.score.errors.gff"):
return reapr_output_dir + "/03.score.errors.gff"
else:
return None
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from unittest import TestCase
import mock
from preggy import expect
from thumbor.app import ThumborServiceApp
import thumbor.utils
from thumbor.config import Config
import thumbor.server
from thumbor.server import (
get_as_integer,
get_config,
configure_log,
get_importer,
validate_config,
get_context,
get_application,
run_server,
main,
)
from thumbor.engines import BaseEngine
from tests.fixtures.custom_error_handler import ErrorHandler as CustomErrorHandler
class ServerTestCase(TestCase):
def test_can_get_value_as_integer(self):
expect(get_as_integer("1")).to_equal(1)
expect(get_as_integer("a")).to_be_null()
expect(get_as_integer("")).to_be_null()
expect(get_as_integer(None)).to_be_null()
def test_can_get_config_from_path(self):
config = get_config('./tests/fixtures/thumbor_config_server_test.conf')
expect(config).not_to_be_null()
expect(config.ALLOWED_SOURCES).to_be_like(['mydomain.com'])
@mock.patch('logging.basicConfig')
def test_can_configure_log_from_config(self, basic_config_mock):
conf = Config()
configure_log(conf, 'DEBUG')
params = dict(
datefmt='%Y-%m-%d %H:%M:%S',
level=10,
format='%(asctime)s %(name)s:%(levelname)s %(message)s'
)
basic_config_mock.assert_called_with(**params)
@mock.patch('logging.config.dictConfig')
def test_can_configure_log_from_dict_config(self, dict_config_mock):
conf = Config(
THUMBOR_LOG_CONFIG={
"level": "INFO"
}
)
configure_log(conf, 'DEBUG')
params = dict(
level="INFO",
)
dict_config_mock.assert_called_with(params)
def test_can_import_default_modules(self):
conf = Config()
importer = get_importer(conf)
expect(importer).not_to_be_null()
expect(importer.filters).not_to_be_empty()
def test_can_import_with_custom_error_handler_class(self):
conf = Config(
USE_CUSTOM_ERROR_HANDLING=True,
ERROR_HANDLER_MODULE='tests.fixtures.custom_error_handler',
)
importer = get_importer(conf)
expect(importer).not_to_be_null()
expect(importer.error_handler_class).not_to_be_null()
expect(importer.error_handler_class).to_be_instance_of(CustomErrorHandler)
def test_validate_config_security_key(self):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY=None)
with expect.error_to_happen(
RuntimeError,
message="No security key was found for this instance of thumbor. "
"Please provide one using the conf file or a security key file."):
validate_config(conf, server_parameters)
def test_validate_config_security_key_from_config(self):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY='something')
validate_config(conf, server_parameters)
expect(server_parameters.security_key).to_equal('something')
@mock.patch.object(thumbor.server, 'which')
def test_validate_gifsicle_path(self, which_mock):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY='test', USE_GIFSICLE_ENGINE=True)
which_mock.return_value = '/usr/bin/gifsicle'
validate_config(conf, server_parameters)
expect(server_parameters.gifsicle_path).to_equal('/usr/bin/gifsicle')
@mock.patch.object(thumbor.server, 'which')
def test_validate_null_gifsicle_path(self, which_mock):
server_parameters = mock.Mock(security_key=None)
conf = Config(SECURITY_KEY='test', USE_GIFSICLE_ENGINE=True)
which_mock.return_value = None
with expect.error_to_happen(
RuntimeError,
message="If using USE_GIFSICLE_ENGINE configuration to True, "
"the `gifsicle` binary must be in the PATH and must be an executable."
):
validate_config(conf, server_parameters)
def test_get_context(self):
server_parameters = mock.Mock(security_key=None, app_class='thumbor.app.ThumborServiceApp')
conf = Config(SECURITY_KEY='test')
importer = get_importer(conf)
context = get_context(server_parameters, conf, importer)
expect(context).not_to_be_null()
def test_get_application(self):
server_parameters = mock.Mock(security_key=None, app_class='thumbor.app.ThumborServiceApp')
conf = Config(SECURITY_KEY='test')
importer = get_importer(conf)
context = get_context(server_parameters, conf, importer)
app = get_application(context)
expect(app).not_to_be_null()
expect(app).to_be_instance_of(ThumborServiceApp)
@mock.patch.object(thumbor.server, 'HTTPServer')
def test_can_run_server_with_default_params(self, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd=None, port=1234, ip='0.0.0.0')
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
run_server(application, context)
server_instance_mock.bind.assert_called_with(1234, '0.0.0.0')
server_instance_mock.start.assert_called_with(1)
@mock.patch.object(thumbor.server, 'HTTPServer')
@mock.patch.object(thumbor.server, 'socket')
def test_can_run_server_with_fd(self, socket_mock, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd=11, port=1234, ip='0.0.0.0')
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
socket_mock.fromfd.return_value = "socket mock"
run_server(application, context)
server_instance_mock.add_socket.assert_called_with("socket mock")
server_instance_mock.start.assert_called_with(1)
@mock.patch.object(thumbor.server, 'HTTPServer')
@mock.patch('__builtin__.open', create=True)
@mock.patch.object(thumbor.server, 'socket')
def test_can_run_server_with_null_fd(self, socket_mock, open_mock, server_mock):
application = mock.Mock()
context = mock.Mock()
context.server = mock.Mock(fd="/path/bin", port=1234, ip='0.0.0.0')
server_instance_mock = mock.Mock()
server_mock.return_value = server_instance_mock
socket_mock.fromfd.return_value = "socket mock"
socket_mock.fileno.return_value = 12
open_mock.return_value = socket_mock
run_server(application, context)
server_instance_mock.add_socket.assert_called_with("socket mock")
server_instance_mock.start.assert_called_with(1)
@mock.patch.object(thumbor.server, 'HTTPServer')
@mock.patch.object(thumbor.server, 'get_server_parameters')
@mock.patch('tornado.ioloop.IOLoop.instance', create=True)
def test_can_run_main(self, ioloop_mock, get_server_parameters_mock, server_mock):
server_parameters = mock.Mock(
config_path='./tests/fixtures/thumbor_config_server_test.conf',
log_level='DEBUG',
security_key='sec',
app_class='thumbor.app.ThumborServiceApp',
fd=None,
ip='0.0.0.0',
port=1234,
)
get_server_parameters_mock.return_value = server_parameters
ioloop_instance_mock = mock.Mock()
ioloop_mock.return_value = ioloop_instance_mock
main()
ioloop_instance_mock.start.assert_any_call()
def cleanup(self):
ServerTestCase.cleanup_called = True
@mock.patch.object(thumbor.server, 'HTTPServer')
@mock.patch.object(thumbor.server, 'get_server_parameters')
@mock.patch('tornado.ioloop.IOLoop.instance', create=True)
@mock.patch('sys.stdout')
def test_main_exits_on_keyboard_interrupt(self, stdout_mock, ioloop_mock, get_server_parameters_mock, server_mock):
server_parameters = mock.Mock(
config_path='./tests/fixtures/thumbor_config_server_test.conf',
log_level='DEBUG',
security_key='sec',
app_class='thumbor.app.ThumborServiceApp',
fd=None,
ip='0.0.0.0',
port=1234,
)
get_server_parameters_mock.return_value = server_parameters
old_cleanup = BaseEngine.cleanup
BaseEngine.cleanup = self.cleanup
ServerTestCase.cleanup_called = False
ioloop_instance_mock = mock.Mock()
ioloop_mock.return_value = ioloop_instance_mock
ioloop_instance_mock.start.side_effect = KeyboardInterrupt()
main()
stdout_mock.write.assert_called_with('-- thumbor closed by user interruption --\n')
self.assertTrue(ServerTestCase.cleanup_called)
BaseEngine.cleanup = old_cleanup
|
|
import os, signal, subprocess, sys
import StringIO
import ShUtil
import Test
import Util
import platform
import tempfile
import re
class InternalShellError(Exception):
def __init__(self, command, message):
self.command = command
self.message = message
kIsWindows = platform.system() == 'Windows'
# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
# Assume win32file is available on Windows; WinWaitReleased() clears this
# flag if the import fails.
kHaveWin32File = kIsWindows
def RemoveForce(f):
try:
os.remove(f)
except OSError:
pass
def WinWaitReleased(f):
global kHaveWin32File
if not kHaveWin32File:
return
try:
import time
import win32file, pywintypes
retry_cnt = 256
while True:
try:
h = win32file.CreateFile(
f,
win32file.GENERIC_READ,
0, # Exclusive
None,
win32file.OPEN_EXISTING,
win32file.FILE_ATTRIBUTE_NORMAL,
None)
h.close()
return
except WindowsError, (winerror, strerror):
retry_cnt = retry_cnt - 1
if retry_cnt <= 0:
raise
elif winerror == 32: # ERROR_SHARING_VIOLATION
pass
else:
raise
except pywintypes.error, e:
retry_cnt = retry_cnt - 1
if retry_cnt <= 0:
raise
elif e[0]== 32: # ERROR_SHARING_VIOLATION
pass
else:
raise
time.sleep(0.01)
except ImportError, e:
kHaveWin32File = False
return
def executeCommand(command, cwd=None, env=None):
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
out,err = p.communicate()
exitCode = p.wait()
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
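# Illustrative sketch (not part of the test runner): executeCommand returns the
# captured stdout/stderr plus the exit code. The ['echo', 'hello'] invocation is
# only an example and assumes a POSIX environment with `echo` on PATH.
def _demoExecuteCommand():
    out, err, exitCode = executeCommand(['echo', 'hello'])
    # On success: out == 'hello\n', err == '', exitCode == 0
    return out, err, exitCode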
def executeShCmd(cmd, cfg, cwd, results):
if isinstance(cmd, ShUtil.Seq):
if cmd.op == ';':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
return executeShCmd(cmd.rhs, cfg, cwd, results)
if cmd.op == '&':
raise NotImplementedError,"unsupported test command: '&'"
if cmd.op == '||':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
if res != 0:
res = executeShCmd(cmd.rhs, cfg, cwd, results)
return res
if cmd.op == '&&':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
if res is None:
return res
if res == 0:
res = executeShCmd(cmd.rhs, cfg, cwd, results)
return res
raise ValueError,'Unknown shell command: %r' % cmd.op
assert isinstance(cmd, ShUtil.Pipeline)
procs = []
input = subprocess.PIPE
stderrTempFiles = []
opened_files = []
named_temp_files = []
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
for i,j in enumerate(cmd.commands):
        # Apply the redirections; we use (N,) as a sentinel to indicate stdin,
# stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
# from a file are represented with a list [file, mode, file-object]
# where file-object is initially None.
redirects = [(0,), (1,), (2,)]
for r in j.redirects:
if r[0] == ('>',2):
redirects[2] = [r[1], 'w', None]
elif r[0] == ('>>',2):
redirects[2] = [r[1], 'a', None]
elif r[0] == ('>&',2) and r[1] in '012':
redirects[2] = redirects[int(r[1])]
elif r[0] == ('>&',) or r[0] == ('&>',):
redirects[1] = redirects[2] = [r[1], 'w', None]
elif r[0] == ('>',):
redirects[1] = [r[1], 'w', None]
elif r[0] == ('>>',):
redirects[1] = [r[1], 'a', None]
elif r[0] == ('<',):
redirects[0] = [r[1], 'r', None]
else:
raise NotImplementedError,"Unsupported redirect: %r" % (r,)
# Map from the final redirections to something subprocess can handle.
final_redirects = []
for index,r in enumerate(redirects):
if r == (0,):
result = input
elif r == (1,):
if index == 0:
raise NotImplementedError,"Unsupported redirect for stdin"
elif index == 1:
result = subprocess.PIPE
else:
result = subprocess.STDOUT
elif r == (2,):
if index != 2:
raise NotImplementedError,"Unsupported redirect on stdout"
result = subprocess.PIPE
else:
if r[2] is None:
if kAvoidDevNull and r[0] == '/dev/null':
r[0] = None
r[2] = tempfile.TemporaryFile(mode=r[1])
else:
r[2] = open(r[0], r[1])
# Workaround a Win32 and/or subprocess bug when appending.
#
# FIXME: Actually, this is probably an instance of PR6753.
if r[1] == 'a':
r[2].seek(0, 2)
opened_files.append(r)
result = r[2]
final_redirects.append(result)
stdin, stdout, stderr = final_redirects
# If stderr wants to come from stdout, but stdout isn't a pipe, then put
# stderr on a pipe and treat it as stdout.
if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
stderr = subprocess.PIPE
stderrIsStdout = True
else:
stderrIsStdout = False
# Don't allow stderr on a PIPE except for the last
# process, this could deadlock.
#
# FIXME: This is slow, but so is deadlock.
if stderr == subprocess.PIPE and j != cmd.commands[-1]:
stderr = tempfile.TemporaryFile(mode='w+b')
stderrTempFiles.append((i, stderr))
# Resolve the executable path ourselves.
args = list(j.args)
args[0] = Util.which(args[0], cfg.environment['PATH'])
if not args[0]:
raise InternalShellError(j, '%r: command not found' % j.args[0])
# Replace uses of /dev/null with temporary files.
if kAvoidDevNull:
for i,arg in enumerate(args):
if arg == "/dev/null":
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
named_temp_files.append(f.name)
args[i] = f.name
procs.append(subprocess.Popen(args, cwd=cwd,
stdin = stdin,
stdout = stdout,
stderr = stderr,
env = cfg.environment,
close_fds = kUseCloseFDs))
# Immediately close stdin for any process taking stdin from us.
if stdin == subprocess.PIPE:
procs[-1].stdin.close()
procs[-1].stdin = None
# Update the current stdin source.
if stdout == subprocess.PIPE:
input = procs[-1].stdout
elif stderrIsStdout:
input = procs[-1].stderr
else:
input = subprocess.PIPE
# Explicitly close any redirected files. We need to do this now because we
# need to release any handles we may have on the temporary files (important
# on Win32, for example). Since we have already spawned the subprocess, our
# handles have already been transferred so we do not need them anymore.
for f in opened_files:
f[2].close()
# FIXME: There is probably still deadlock potential here. Yawn.
procData = [None] * len(procs)
procData[-1] = procs[-1].communicate()
for i in range(len(procs) - 1):
if procs[i].stdout is not None:
out = procs[i].stdout.read()
else:
out = ''
if procs[i].stderr is not None:
err = procs[i].stderr.read()
else:
err = ''
procData[i] = (out,err)
# Read stderr out of the temp files.
for i,f in stderrTempFiles:
f.seek(0, 0)
procData[i] = (procData[i][0], f.read())
exitCode = None
for i,(out,err) in enumerate(procData):
res = procs[i].wait()
# Detect Ctrl-C in subprocess.
if res == -signal.SIGINT:
raise KeyboardInterrupt
results.append((cmd.commands[i], out, err, res))
if cmd.pipe_err:
# Python treats the exit code as a signed char.
if res < 0:
exitCode = min(exitCode, res)
else:
exitCode = max(exitCode, res)
else:
exitCode = res
# Make sure opened_files is released by other (child) processes.
if kIsWindows:
for f in opened_files:
if f[0] is not None:
WinWaitReleased(f[0])
# Remove any named temporary files we created.
for f in named_temp_files:
RemoveForce(f)
if cmd.negate:
exitCode = not exitCode
return exitCode
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
ln = ' &&\n'.join(commands)
try:
cmd = ShUtil.ShParser(ln, litConfig.isWindows).parse()
except:
return (Test.FAIL, "shell parser error on: %r" % ln)
results = []
try:
exitCode = executeShCmd(cmd, test.config, cwd, results)
    except InternalShellError,e:
        # Report the failing command instead of discarding the message below.
        results.append((e.command, '', e.message + '\n', 255))
        exitCode = 255
    out = err = ''
for i,(cmd, cmd_out,cmd_err,res) in enumerate(results):
out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
out += 'Command %d Result: %r\n' % (i, res)
out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)
return out, err, exitCode
def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
import TclUtil
cmds = []
for ln in commands:
        # Given the unfortunate way LLVM's tests are written, the line gets
# backslash substitution done twice.
ln = TclUtil.TclLexer(ln).lex_unquoted(process_all = True)
try:
tokens = list(TclUtil.TclLexer(ln).lex())
except:
return (Test.FAIL, "Tcl lexer error on: %r" % ln)
# Validate there are no control tokens.
for t in tokens:
if not isinstance(t, str):
return (Test.FAIL,
"Invalid test line: %r containing %r" % (ln, t))
try:
cmds.append(TclUtil.TclExecCommand(tokens).parse_pipeline())
except:
return (Test.FAIL, "Tcl 'exec' parse error on: %r" % ln)
if litConfig.useValgrind:
for pipeline in cmds:
if pipeline.commands:
# Only valgrind the first command in each pipeline, to avoid
# valgrinding things like grep, not, and FileCheck.
cmd = pipeline.commands[0]
cmd.args = litConfig.valgrindArgs + cmd.args
cmd = cmds[0]
for c in cmds[1:]:
cmd = ShUtil.Seq(cmd, '&&', c)
# FIXME: This is lame, we shouldn't need bash. See PR5240.
bashPath = litConfig.getBashPath()
if litConfig.useTclAsSh and bashPath:
script = tmpBase + '.script'
# Write script file
f = open(script,'w')
print >>f, 'set -o pipefail'
cmd.toShell(f, pipefail = True)
f.close()
if 0:
print >>sys.stdout, cmd
print >>sys.stdout, open(script).read()
print >>sys.stdout
return '', '', 0
command = [litConfig.getBashPath(), script]
out,err,exitCode = executeCommand(command, cwd=cwd,
env=test.config.environment)
return out,err,exitCode
else:
results = []
try:
exitCode = executeShCmd(cmd, test.config, cwd, results)
except InternalShellError,e:
results.append((e.command, '', e.message + '\n', 255))
exitCode = 255
out = err = ''
for i,(cmd, cmd_out, cmd_err, res) in enumerate(results):
out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
out += 'Command %d Result: %r\n' % (i, res)
out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)
return out, err, exitCode
def executeScript(test, litConfig, tmpBase, commands, cwd):
    bashPath = litConfig.getBashPath()
isWin32CMDEXE = (litConfig.isWindows and not bashPath)
script = tmpBase + '.script'
if isWin32CMDEXE:
script += '.bat'
# Write script file
f = open(script,'w')
if isWin32CMDEXE:
f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
f.write(' &&\n'.join(commands))
f.write('\n')
f.close()
if isWin32CMDEXE:
command = ['cmd','/c', script]
else:
if bashPath:
command = [bashPath, script]
else:
command = ['/bin/sh', script]
if litConfig.useValgrind:
# FIXME: Running valgrind on sh is overkill. We probably could just
# run on clang with no real loss.
command = litConfig.valgrindArgs + command
return executeCommand(command, cwd=cwd, env=test.config.environment)
def isExpectedFail(xfails, xtargets, target_triple):
# Check if any xfail matches this target.
for item in xfails:
if item == '*' or item in target_triple:
break
else:
return False
# If so, see if it is expected to pass on this target.
#
# FIXME: Rename XTARGET to something that makes sense, like XPASS.
for item in xtargets:
if item == '*' or item in target_triple:
return False
return True
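# Illustrative sketch (not part of lit): the XFAIL list marks target triples
# where a test is expected to fail, and XTARGET entries carve out triples where
# it is expected to pass again. The triples below are examples, not real configs.
def _demoIsExpectedFail():
    xfails = ['*']                      # expected to fail everywhere...
    xtargets = ['x86_64-unknown-linux'] # ...except on this target
    assert isExpectedFail(xfails, xtargets, 'arm-apple-darwin')
    assert not isExpectedFail(xfails, xtargets, 'x86_64-unknown-linux-gnu')
    assert not isExpectedFail([], [], 'arm-apple-darwin')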
def parseIntegratedTestScript(test, normalize_slashes=False,
extra_substitutions=[]):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET'
information. The RUN lines also will have variable substitution performed.
"""
# Get the temporary location, this is always relative to the test suite
# root, not test source root.
#
# FIXME: This should not be here?
sourcepath = test.getSourcePath()
sourcedir = os.path.dirname(sourcepath)
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpDir = os.path.join(execdir, 'Output')
tmpBase = os.path.join(tmpDir, execbase)
if test.index is not None:
tmpBase += '_%d' % test.index
# Normalize slashes, if requested.
if normalize_slashes:
sourcepath = sourcepath.replace('\\', '/')
sourcedir = sourcedir.replace('\\', '/')
tmpDir = tmpDir.replace('\\', '/')
tmpBase = tmpBase.replace('\\', '/')
# We use #_MARKER_# to hide %% while we do the other substitutions.
substitutions = list(extra_substitutions)
substitutions.extend([('%%', '#_MARKER_#')])
substitutions.extend(test.config.substitutions)
substitutions.extend([('%s', sourcepath),
('%S', sourcedir),
('%p', sourcedir),
('%{pathsep}', os.pathsep),
('%t', tmpBase + '.tmp'),
('%T', tmpDir),
# FIXME: Remove this once we kill DejaGNU.
('%abs_tmp', tmpBase + '.tmp'),
('#_MARKER_#', '%')])
# Collect the test lines from the script.
script = []
xfails = []
xtargets = []
requires = []
for ln in open(sourcepath):
if 'RUN:' in ln:
# Isolate the command to run.
index = ln.index('RUN:')
ln = ln[index+4:]
# Trim trailing whitespace.
ln = ln.rstrip()
# Collapse lines with trailing '\\'.
if script and script[-1][-1] == '\\':
script[-1] = script[-1][:-1] + ln
else:
script.append(ln)
elif 'XFAIL:' in ln:
items = ln[ln.index('XFAIL:') + 6:].split(',')
xfails.extend([s.strip() for s in items])
elif 'XTARGET:' in ln:
items = ln[ln.index('XTARGET:') + 8:].split(',')
xtargets.extend([s.strip() for s in items])
elif 'REQUIRES:' in ln:
items = ln[ln.index('REQUIRES:') + 9:].split(',')
requires.extend([s.strip() for s in items])
elif 'END.' in ln:
# Check for END. lines.
if ln[ln.index('END.'):].strip() == 'END.':
break
# Apply substitutions to the script. Allow full regular
# expression syntax. Replace each matching occurrence of regular
# expression pattern a with substitution b in line ln.
def processLine(ln):
# Apply substitutions
for a,b in substitutions:
if kIsWindows:
b = b.replace("\\","\\\\")
ln = re.sub(a, b, ln)
# Strip the trailing newline and any extra whitespace.
return ln.strip()
script = map(processLine, script)
# Verify the script contains a run line.
if not script:
return (Test.UNRESOLVED, "Test has no run line!")
# Check for unterminated run lines.
if script[-1][-1] == '\\':
return (Test.UNRESOLVED, "Test has unterminated run lines (with '\\')")
# Check that we have the required features:
missing_required_features = [f for f in requires
if f not in test.config.available_features]
if missing_required_features:
msg = ', '.join(missing_required_features)
return (Test.UNSUPPORTED,
"Test requires the following features: %s" % msg)
isXFail = isExpectedFail(xfails, xtargets, test.suite.config.target_triple)
return script,isXFail,tmpBase,execdir
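# Illustrative sketch (not part of lit): shows why '%%' is hidden behind
# '#_MARKER_#' before the other substitutions run. The substitution list and
# RUN line below are made up for this example.
def _demoSubstitutionOrder():
    line = 'printf "%%s" %s'
    substitutions = [('%%', '#_MARKER_#'),
                     ('%s', '/path/to/test.ll'),
                     ('#_MARKER_#', '%')]
    for a, b in substitutions:
        line = re.sub(a, b, line)
    # Without the marker, the '%s' pattern would also match inside the literal
    # '%%s' and mangle it; with it the result is:
    #   printf "%s" /path/to/test.ll
    return line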
def formatTestOutput(status, out, err, exitCode, failDueToStderr, script):
output = StringIO.StringIO()
print >>output, "Script:"
print >>output, "--"
print >>output, '\n'.join(script)
print >>output, "--"
print >>output, "Exit Code: %r" % exitCode,
if failDueToStderr:
print >>output, "(but there was output on stderr)"
else:
print >>output
if out:
print >>output, "Command Output (stdout):"
print >>output, "--"
output.write(out)
print >>output, "--"
if err:
print >>output, "Command Output (stderr):"
print >>output, "--"
output.write(err)
print >>output, "--"
return (status, output.getvalue())
def executeTclTest(test, litConfig):
if test.config.unsupported:
return (Test.UNSUPPORTED, 'Test is unsupported')
# Parse the test script, normalizing slashes in substitutions on Windows
# (since otherwise Tcl style lexing will treat them as escapes).
res = parseIntegratedTestScript(test, normalize_slashes=kIsWindows)
if len(res) == 2:
return res
script, isXFail, tmpBase, execdir = res
if litConfig.noExecute:
return (Test.PASS, '')
# Create the output directory if it does not already exist.
Util.mkdir_p(os.path.dirname(tmpBase))
res = executeTclScriptInternal(test, litConfig, tmpBase, script, execdir)
if len(res) == 2:
return res
# Test for failure. In addition to the exit code, Tcl commands are
# considered to fail if there is any standard error output.
out,err,exitCode = res
if isXFail:
ok = exitCode != 0 or err and not litConfig.ignoreStdErr
if ok:
status = Test.XFAIL
else:
status = Test.XPASS
else:
ok = exitCode == 0 and (not err or litConfig.ignoreStdErr)
if ok:
status = Test.PASS
else:
status = Test.FAIL
if ok:
return (status,'')
# Set a flag for formatTestOutput so it can explain why the test was
# considered to have failed, despite having an exit code of 0.
failDueToStderr = exitCode == 0 and err and not litConfig.ignoreStdErr
return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
def executeShTest(test, litConfig, useExternalSh,
extra_substitutions=[]):
if test.config.unsupported:
return (Test.UNSUPPORTED, 'Test is unsupported')
res = parseIntegratedTestScript(test, useExternalSh, extra_substitutions)
if len(res) == 2:
return res
script, isXFail, tmpBase, execdir = res
if litConfig.noExecute:
return (Test.PASS, '')
# Create the output directory if it does not already exist.
Util.mkdir_p(os.path.dirname(tmpBase))
if useExternalSh:
res = executeScript(test, litConfig, tmpBase, script, execdir)
else:
res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
if len(res) == 2:
return res
out,err,exitCode = res
if isXFail:
ok = exitCode != 0
if ok:
status = Test.XFAIL
else:
status = Test.XPASS
else:
ok = exitCode == 0
if ok:
status = Test.PASS
else:
status = Test.FAIL
if ok:
return (status,'')
# Sh tests are not considered to fail just from stderr output.
failDueToStderr = False
return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test simple_chrome_builder module."""
from __future__ import print_function
import os
import mock
from chromite.cros_bisect import simple_chrome_builder
from chromite.cbuildbot import commands
from chromite.lib import commandline
from chromite.lib import cros_logging as logging
from chromite.lib import cros_test_lib
from chromite.lib import gclient
from chromite.lib import git
from chromite.lib import osutils
class TestSimpleChromeBuilder(cros_test_lib.MockTempDirTestCase):
"""Tests AutotestEvaluator class."""
BOARD = 'samus'
DUT_IP = '192.168.1.1'
DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_IP)
def setUp(self):
self.default_chromium_dir = os.path.join(self.tempdir, 'chromium')
self.default_repo_dir = os.path.join(self.tempdir, 'chromium', 'src')
self.default_archive_base = os.path.join(self.tempdir, 'build')
self.gclient_path = os.path.join(self.tempdir, 'gclient')
self.log_output_args = {'log_output': True}
# The SimpleChromeBuilder class sets a 'verbose' setting based on the
# ambient logging level, so we must set the logger to the default setting
# of INFO for this test and then restore the logger to whatever it was
# originally set to when we clean the test up.
logger = logging.getLogger()
self._prev_logging_level = logger.getEffectiveLevel()
logger.setLevel(logging.INFO)
def tearDown(self):
logger = logging.getLogger()
logger.setLevel(self._prev_logging_level)
def GetBuilder(self, base_dir=None, board=None, reuse_repo=True,
chromium_dir=None, build_dir=None, archive_build=True,
reuse_build=True):
"""Obtains a SimpleChromeBuilder instance.
Args:
base_dir: Base directory. Default self.tempdir.
board: Board name. Default self.BOARD.
reuse_repo: True to reuse repo.
chromium_dir: Optional. If specified, use the chromium repo the path
points to.
build_dir: Optional. Store build result to it if specified.
archive_build: True to archive build.
reuse_build: True to reuse previous build.
Returns:
A SimpleChromeBuilder instance.
"""
if base_dir is None:
base_dir = self.tempdir
if board is None:
board = self.BOARD
options = cros_test_lib.EasyAttr(
base_dir=base_dir, board=board, reuse_repo=reuse_repo,
chromium_dir=chromium_dir, build_dir=build_dir,
archive_build=archive_build, reuse_build=reuse_build)
builder = simple_chrome_builder.SimpleChromeBuilder(options)
# Override gclient path.
builder.gclient = self.gclient_path
return builder
def testInit(self):
builder = self.GetBuilder()
base_dir = self.tempdir
self.assertEqual(base_dir, builder.base_dir)
self.assertEqual(self.default_chromium_dir, builder.chromium_dir)
self.assertEqual(self.default_repo_dir, builder.repo_dir)
self.assertTrue(builder.reuse_repo)
self.assertTrue(builder.reuse_build)
self.assertTrue(builder.archive_build)
self.assertEqual(self.default_archive_base, builder.archive_base)
self.assertTrue(os.path.isdir(builder.archive_base))
self.assertDictEqual(self.log_output_args, builder.log_output_args)
def testInitMissingRequiredArgs(self):
options = cros_test_lib.EasyAttr()
with self.assertRaises(Exception) as cm:
simple_chrome_builder.SimpleChromeBuilder(options)
exception_message = str(cm.exception)
self.assertIn('Missing command line', exception_message)
self.assertIn('SimpleChromeBuilder', exception_message)
for arg in simple_chrome_builder.SimpleChromeBuilder.REQUIRED_ARGS:
self.assertIn(arg, exception_message)
def testInitCustomizedDir(self):
base_dir = self.tempdir
chromium_dir = os.path.join(base_dir, 'another_chromium')
build_dir = os.path.join(base_dir, 'another_build')
builder = self.GetBuilder(chromium_dir=chromium_dir, build_dir=build_dir)
self.assertEqual(base_dir, builder.base_dir)
self.assertEqual(chromium_dir, builder.chromium_dir)
self.assertEqual(os.path.join(chromium_dir, 'src'), builder.repo_dir)
self.assertTrue(builder.reuse_repo)
self.assertTrue(builder.reuse_build)
self.assertTrue(builder.archive_build)
self.assertEqual(build_dir, builder.archive_base)
self.assertTrue(os.path.isdir(builder.archive_base))
self.assertDictEqual(self.log_output_args, builder.log_output_args)
def testInitFlipFlags(self):
builder = self.GetBuilder(reuse_repo=False, archive_build=False,
reuse_build=False)
base_dir = self.tempdir
self.assertEqual(base_dir, builder.base_dir)
self.assertEqual(self.default_chromium_dir, builder.chromium_dir)
self.assertEqual(self.default_repo_dir, builder.repo_dir)
self.assertFalse(builder.reuse_repo)
self.assertFalse(builder.reuse_build)
self.assertFalse(builder.archive_build)
self.assertEqual(self.default_archive_base, builder.archive_base)
self.assertFalse(os.path.isdir(builder.archive_base))
self.assertDictEqual(self.log_output_args, builder.log_output_args)
def testSetUp(self):
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
command_mock.AddCmdResult(['fetch', '--nohooks', 'chromium'])
write_config_mock = self.PatchObject(gclient, 'WriteConfigFile')
git_mock = self.PatchObject(git, 'RunGit')
gsync_mock = self.PatchObject(gclient, 'Sync')
builder = self.GetBuilder()
builder.SetUp()
write_config_mock.assert_called_with(
self.gclient_path, self.default_chromium_dir, True, None, managed=False)
git_mock.assert_called_with(self.default_repo_dir,
['pull', 'origin', 'master'])
gsync_mock.assert_called_with(
self.gclient_path, self.default_chromium_dir, reset=True, nohooks=True,
verbose=False, run_args=self.log_output_args)
def testSetUpSkip(self):
write_config_mock = self.PatchObject(gclient, 'WriteConfigFile')
git_mock = self.PatchObject(git, 'RunGit')
gsync_mock = self.PatchObject(gclient, 'Sync')
    # Make it look like a git repo.
osutils.SafeMakedirs(os.path.join(self.default_repo_dir, '.git'))
builder = self.GetBuilder()
builder.SetUp()
write_config_mock.assert_not_called()
git_mock.assert_not_called()
gsync_mock.assert_not_called()
def testSetUpExistingRepoException(self):
write_config_mock = self.PatchObject(gclient, 'WriteConfigFile')
git_mock = self.PatchObject(git, 'RunGit')
gsync_mock = self.PatchObject(gclient, 'Sync')
    # Make it look like a git repo.
osutils.SafeMakedirs(os.path.join(self.default_repo_dir, '.git'))
builder = self.GetBuilder(reuse_repo=False)
self.assertRaisesRegex(Exception, 'Chromium repo exists.*',
builder.SetUp)
write_config_mock.assert_not_called()
git_mock.assert_not_called()
gsync_mock.assert_not_called()
def testSyncToHead(self):
git_mock = self.PatchObject(git, 'CleanAndCheckoutUpstream')
builder = self.GetBuilder()
builder.SyncToHead()
git_mock.assert_called_with(self.default_repo_dir)
def testGclientSync(self):
gsync_mock = self.PatchObject(gclient, 'Sync')
builder = self.GetBuilder()
builder.GclientSync()
gsync_mock.assert_called_with(
self.gclient_path, self.default_chromium_dir, reset=False,
nohooks=False, verbose=False, run_args=self.log_output_args)
builder.GclientSync(reset=True, nohooks=True)
gsync_mock.assert_called_with(
self.gclient_path, self.default_chromium_dir, reset=True,
nohooks=True, verbose=False, run_args=self.log_output_args)
def testBuildReuse(self):
commit_label = 'test'
    # Pretend the build is already in the archive.
archive_path = os.path.join(
self.default_archive_base, 'out_%s_%s' % (self.BOARD, commit_label),
'Release')
osutils.SafeMakedirs(archive_path)
builder = self.GetBuilder()
build_to_deploy = builder.Build(commit_label)
self.assertEqual(archive_path, build_to_deploy)
def _ChromeSdkRunSideEffect(self, *args, **unused_kwargs):
if args and len(args[0]) == 3:
bash_command = args[0][2]
if 'gn gen' in bash_command:
build_dir = bash_command.split()[2]
osutils.SafeMakedirs(os.path.join(self.default_repo_dir, build_dir))
return mock.DEFAULT
def testBuild(self):
gsync_mock = self.PatchObject(simple_chrome_builder.SimpleChromeBuilder,
'GclientSync')
success_result = cros_test_lib.EasyAttr(returncode=0)
chrome_sdk_run_mock = self.PatchObject(
commands.ChromeSDK, 'Run', side_effect=self._ChromeSdkRunSideEffect,
return_value=success_result)
chrome_sdk_ninja_mock = self.PatchObject(
commands.ChromeSDK, 'Ninja', return_value=success_result)
commit_label = 'test'
archive_path = os.path.join(
self.default_archive_base, 'out_%s_%s' % (self.BOARD, commit_label),
'Release')
self.assertFalse(os.path.isdir(archive_path))
builder = self.GetBuilder()
build_to_deploy = builder.Build(commit_label)
self.assertEqual(archive_path, build_to_deploy)
# Check that build_to_deploy exists after builder.Build()
self.assertTrue(os.path.isdir(archive_path))
gsync_mock.assert_called()
chrome_sdk_run_mock.assert_called_with(
['bash', '-c', 'gn gen out_%s/Release --args="$GN_ARGS"' % self.BOARD],
run_args=self.log_output_args)
chrome_sdk_ninja_mock.assert_called_with(run_args=self.log_output_args)
def testBuildNoArchive(self):
gsync_mock = self.PatchObject(simple_chrome_builder.SimpleChromeBuilder,
'GclientSync')
success_result = cros_test_lib.EasyAttr(returncode=0)
chrome_sdk_run_mock = self.PatchObject(
commands.ChromeSDK, 'Run', side_effect=self._ChromeSdkRunSideEffect,
return_value=success_result)
chrome_sdk_ninja_mock = self.PatchObject(
commands.ChromeSDK, 'Ninja', return_value=success_result)
commit_label = 'test'
archive_path = os.path.join(
self.default_archive_base, 'out_%s_%s' % (self.BOARD, commit_label),
'Release')
self.assertFalse(os.path.isdir(archive_path))
builder = self.GetBuilder(archive_build=False)
build_to_deploy = builder.Build(commit_label)
# No archive. Check that archive_path is not created.
self.assertNotEqual(archive_path, build_to_deploy)
self.assertFalse(os.path.isdir(archive_path))
self.assertEqual(os.path.join('out_%s' % self.BOARD, 'Release'),
build_to_deploy)
self.assertTrue(os.path.isdir(
os.path.join(self.default_repo_dir, build_to_deploy)))
gsync_mock.assert_called()
chrome_sdk_run_mock.assert_called_with(
['bash', '-c', 'gn gen out_%s/Release --args="$GN_ARGS"' % self.BOARD],
run_args=self.log_output_args)
chrome_sdk_ninja_mock.assert_called_with(run_args=self.log_output_args)
def testDeploy(self):
chrome_sdk_run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
build_to_deploy = os.path.join('out_%s' % self.BOARD, 'Release')
commit_label = 'test'
builder = self.GetBuilder()
builder.Deploy(self.DUT, build_to_deploy, commit_label)
chrome_sdk_run_mock.assert_called_with(
['deploy_chrome', '--build-dir', build_to_deploy, '--to', self.DUT_IP,
'--force'],
run_args=self.log_output_args)
def testDeployWithPort(self):
port = '9999'
dut = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(
self.DUT_IP + ':' + port)
chrome_sdk_run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
build_to_deploy = os.path.join('out_%s' % self.BOARD, 'Release')
commit_label = 'test'
builder = self.GetBuilder()
builder.Deploy(dut, build_to_deploy, commit_label)
chrome_sdk_run_mock.assert_called_with(
['deploy_chrome', '--build-dir', build_to_deploy, '--to', self.DUT_IP,
'--force', '--port', port],
run_args=self.log_output_args)
|
|
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Target and sampling related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from utils import box_utils
from utils.object_detection import balanced_positive_negative_sampler
def box_matching(boxes, gt_boxes, gt_classes, gt_attributes):
"""Match boxes to groundtruth boxes.
Given the proposal boxes and the groundtruth boxes, classes and attributes,
perform the groundtruth matching by taking the argmax of the IoU between boxes
and groundtruth boxes.
Args:
boxes: a tensor of shape of [batch_size, N, 4] representing the box
      coordinates to be matched to groundtruth boxes.
    gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4] representing
the groundtruth box coordinates. It is padded with -1s to indicate the
invalid boxes.
    gt_classes: [batch_size, MAX_NUM_INSTANCES] representing the groundtruth box
classes. It is padded with -1s to indicate the invalid classes.
gt_attributes: [batch_size, MAX_NUM_INSTANCES, num_attributes] representing
the groundtruth attributes. It is padded with -1s to indicate the invalid
attributes.
Returns:
matched_gt_boxes: a tensor of shape of [batch_size, N, 4], representing
the matched groundtruth box coordinates for each input box. If the box
does not overlap with any groundtruth boxes, the matched boxes of it
will be set to all 0s.
matched_gt_classes: a tensor of shape of [batch_size, N], representing
the matched groundtruth classes for each input box. If the box does not
overlap with any groundtruth boxes, the matched box classes of it will
be set to 0, which corresponds to the background class.
matched_gt_attributes: a tensor of shape of [batch_size, N,
num_attributes], representing the matched groundtruth attributes for each
input box. If the box does not overlap with any groundtruth boxes, the
matched box attributes of it will be set to all 0s.
matched_gt_indices: a tensor of shape of [batch_size, N], representing
the indices of the matched groundtruth boxes in the original gt_boxes
tensor. If the box does not overlap with any groundtruth boxes, the
index of the matched groundtruth will be set to -1.
matched_iou: a tensor of shape of [batch_size, N], representing the IoU
between the box and its matched groundtruth box. The matched IoU is the
maximum IoU of the box and all the groundtruth boxes.
iou: a tensor of shape of [batch_size, N, K], representing the IoU matrix
between boxes and the groundtruth boxes. The IoU between a box and the
invalid groundtruth boxes whose coordinates are [-1, -1, -1, -1] is -1.
"""
# Compute IoU between boxes and gt_boxes.
# iou <- [batch_size, N, K]
iou = box_utils.bbox_overlap(boxes, gt_boxes)
# max_iou <- [batch_size, N]
  # 0.0 -> box overlaps no gt box; -1.0 -> only invalid (padded) gt boxes present
matched_iou = tf.reduce_max(iou, axis=-1)
# background_box_mask <- bool, [batch_size, N]
background_box_mask = tf.less_equal(matched_iou, 0.0)
argmax_iou_indices = tf.argmax(iou, axis=-1, output_type=tf.int32)
argmax_iou_indices_shape = tf.shape(argmax_iou_indices)
batch_indices = (
tf.expand_dims(tf.range(argmax_iou_indices_shape[0]), axis=-1) *
tf.ones([1, argmax_iou_indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack([batch_indices, argmax_iou_indices], axis=-1)
matched_gt_boxes = tf.gather_nd(gt_boxes, gather_nd_indices)
matched_gt_boxes = tf.where(
tf.tile(tf.expand_dims(background_box_mask, axis=-1), [1, 1, 4]),
tf.zeros_like(matched_gt_boxes, dtype=tf.float32),
matched_gt_boxes)
matched_gt_classes = tf.gather_nd(gt_classes, gather_nd_indices)
matched_gt_classes = tf.where(
background_box_mask,
tf.zeros_like(matched_gt_classes),
matched_gt_classes)
_, _, num_attributes = gt_attributes.get_shape().as_list()
matched_gt_attributes = tf.gather_nd(gt_attributes, gather_nd_indices)
matched_gt_attributes = tf.where(
tf.tile(
tf.expand_dims(background_box_mask, axis=-1), [1, 1, num_attributes]),
tf.zeros_like(matched_gt_attributes, dtype=tf.float32),
matched_gt_attributes)
matched_gt_indices = tf.where(
background_box_mask,
-tf.ones_like(argmax_iou_indices),
argmax_iou_indices)
return (matched_gt_boxes, matched_gt_classes, matched_gt_attributes,
matched_gt_indices, matched_iou, iou)
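# Illustrative sketch (not part of this module): a pure-Python, single-image
# version of the matching rule above, kept free of TF so it can run without
# box_utils. Boxes are [ymin, xmin, ymax, xmax]; a max IoU <= 0 maps a box to
# the background (class 0, index -1), mirroring box_matching().
def _demo_argmax_iou_matching(boxes, gt_boxes, gt_classes):
  # e.g. _demo_argmax_iou_matching([[0, 0, 1, 1]], [[0, 0, 1, 1], [5, 5, 6, 6]], [7, 9])
  #      -> ([7], [0])
  def _iou(a, b):
    inter_h = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    inter_w = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = inter_h * inter_w
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return float(inter) / union if union > 0 else -1.0
  matched_classes, matched_indices = [], []
  for box in boxes:
    ious = [_iou(box, gt) for gt in gt_boxes]
    best = max(range(len(gt_boxes)), key=lambda k: ious[k])
    if ious[best] <= 0.0:  # background: no positive overlap with any gt box
      matched_classes.append(0)
      matched_indices.append(-1)
    else:
      matched_classes.append(gt_classes[best])
      matched_indices.append(best)
  return matched_classes, matched_indices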
def assign_and_sample_proposals(proposed_boxes,
gt_boxes,
gt_classes,
gt_attributes,
num_samples_per_image=512,
mix_gt_boxes=True,
fg_fraction=0.25,
fg_iou_thresh=0.5,
bg_iou_thresh_hi=0.5,
bg_iou_thresh_lo=0.0):
"""Assigns the proposals with groundtruth classes and performs subsmpling.
Given `proposed_boxes`, `gt_boxes`, `gt_classes` and `gt_attributes`, the
function uses the following algorithm to generate the final
`num_samples_per_image` RoIs.
1. Calculates the IoU between each proposal box and each gt_boxes.
2. Assigns each proposed box with a groundtruth class and box by choosing
the largest IoU overlap.
3. Samples `num_samples_per_image` boxes from all proposed boxes, and
returns box_targets, class_targets, and RoIs.
Args:
proposed_boxes: a tensor of shape of [batch_size, N, 4]. N is the number
of proposals before groundtruth assignment. The last dimension is the
box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax]
format.
gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4].
The coordinates of gt_boxes are in the pixel coordinates of the scaled
image. This tensor might have padding of values -1 indicating the invalid
box coordinates.
gt_classes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
tensor might have paddings with values of -1 indicating the invalid
classes.
gt_attributes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES,
num_attributes]. This tensor might have paddings with values of -1
indicating the invalid attributes.
num_samples_per_image: an integer represents RoI minibatch size per image.
mix_gt_boxes: a bool indicating whether to mix the groundtruth boxes before
sampling proposals.
fg_fraction: a float represents the target fraction of RoI minibatch that
is labeled foreground (i.e., class > 0).
fg_iou_thresh: a float represents the IoU overlap threshold for an RoI to be
considered foreground (if >= fg_iou_thresh).
bg_iou_thresh_hi: a float represents the IoU overlap threshold for an RoI to
be considered background (class = 0 if overlap in [LO, HI)).
bg_iou_thresh_lo: a float represents the IoU overlap threshold for an RoI to
be considered background (class = 0 if overlap in [LO, HI)).
Returns:
sampled_rois: a tensor of shape of [batch_size, K, 4], representing the
coordinates of the sampled RoIs, where K is the number of the sampled
RoIs, i.e. K = num_samples_per_image.
sampled_gt_boxes: a tensor of shape of [batch_size, K, 4], storing the
box coordinates of the matched groundtruth boxes of the samples RoIs.
sampled_gt_classes: a tensor of shape of [batch_size, K], storing the
classes of the matched groundtruth boxes of the sampled RoIs.
sampled_gt_attributes: a tensor of shape of [batch_size, K,
num_attributes], storing the attributes of the matched groundtruth
attributes of the sampled RoIs.
sampled_gt_indices: a tensor of shape of [batch_size, K], storing the
      indices of the sampled groundtruth boxes in the original `gt_boxes`
tensor, i.e. gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i].
"""
with tf.name_scope('sample_proposals'):
if mix_gt_boxes:
boxes = tf.concat([proposed_boxes, gt_boxes], axis=1)
else:
boxes = proposed_boxes
(matched_gt_boxes, matched_gt_classes, matched_gt_attributes,
matched_gt_indices, matched_iou, _) = box_matching(
boxes, gt_boxes, gt_classes, gt_attributes)
positive_match = tf.greater(matched_iou, fg_iou_thresh)
negative_match = tf.logical_and(
tf.greater_equal(matched_iou, bg_iou_thresh_lo),
tf.less(matched_iou, bg_iou_thresh_hi))
ignored_match = tf.less(matched_iou, 0.0)
# re-assign negatively matched boxes to the background class.
matched_gt_classes = tf.where(
negative_match, tf.zeros_like(matched_gt_classes), matched_gt_classes)
matched_gt_indices = tf.where(
negative_match, tf.zeros_like(matched_gt_indices), matched_gt_indices)
sample_candidates = tf.logical_and(
tf.logical_or(positive_match, negative_match),
tf.logical_not(ignored_match))
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
positive_fraction=fg_fraction, is_static=True))
batch_size, _ = sample_candidates.get_shape().as_list()
sampled_indicators = []
for i in range(batch_size):
sampled_indicator = sampler.subsample(
sample_candidates[i], num_samples_per_image, positive_match[i])
sampled_indicators.append(sampled_indicator)
sampled_indicators = tf.stack(sampled_indicators)
_, sampled_indices = tf.nn.top_k(
tf.cast(sampled_indicators, dtype=tf.int32),
k=num_samples_per_image,
sorted=True)
sampled_indices_shape = tf.shape(sampled_indices)
batch_indices = (
tf.expand_dims(tf.range(sampled_indices_shape[0]), axis=-1) *
tf.ones([1, sampled_indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack([batch_indices, sampled_indices], axis=-1)
sampled_rois = tf.gather_nd(boxes, gather_nd_indices)
sampled_gt_boxes = tf.gather_nd(matched_gt_boxes, gather_nd_indices)
sampled_gt_classes = tf.gather_nd(
matched_gt_classes, gather_nd_indices)
sampled_gt_attributes = tf.gather_nd(
matched_gt_attributes, gather_nd_indices)
sampled_gt_indices = tf.gather_nd(
matched_gt_indices, gather_nd_indices)
return (sampled_rois, sampled_gt_boxes, sampled_gt_classes,
sampled_gt_attributes, sampled_gt_indices)
class ROISampler(object):
"""Samples RoIs and creates training targets."""
def __init__(self, params):
self._num_samples_per_image = params.num_samples_per_image
self._fg_fraction = params.fg_fraction
self._fg_iou_thresh = params.fg_iou_thresh
self._bg_iou_thresh_hi = params.bg_iou_thresh_hi
self._bg_iou_thresh_lo = params.bg_iou_thresh_lo
self._mix_gt_boxes = params.mix_gt_boxes
def __call__(self, rois, gt_boxes, gt_classes, gt_attributes):
"""Sample and assign RoIs for training.
Args:
rois: a tensor of shape of [batch_size, N, 4]. N is the number
of proposals before groundtruth assignment. The last dimension is the
box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax]
format.
gt_boxes: a tensor of shape of [batch_size, MAX_NUM_INSTANCES, 4].
The coordinates of gt_boxes are in the pixel coordinates of the scaled
image. This tensor might have padding of values -1 indicating the
invalid box coordinates.
gt_classes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
tensor might have paddings with values of -1 indicating the invalid
classes.
gt_attributes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES,
num_attributes]. This tensor might have paddings with values of -1
indicating the invalid attributes.
Returns:
sampled_rois: a tensor of shape of [batch_size, K, 4], representing the
coordinates of the sampled RoIs, where K is the number of the sampled
RoIs, i.e. K = num_samples_per_image.
sampled_gt_boxes: a tensor of shape of [batch_size, K, 4], storing the
box coordinates of the matched groundtruth boxes of the samples RoIs.
sampled_gt_classes: a tensor of shape of [batch_size, K], storing the
classes of the matched groundtruth boxes of the sampled RoIs.
sampled_gt_attributes: a tensor of shape of [batch_size, K,
num_attributes], storing the attributes of the matched groundtruth
attributes of the sampled RoIs.
sampled_gt_indices: a tensor of shape of [batch_size, K], storing the
        indices of the sampled groundtruth boxes in the original `gt_boxes`,
i.e. gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i].
"""
(sampled_rois, sampled_gt_boxes, sampled_gt_classes, sampled_gt_attributes,
sampled_gt_indices) = assign_and_sample_proposals(
rois,
gt_boxes,
gt_classes,
gt_attributes,
num_samples_per_image=self._num_samples_per_image,
mix_gt_boxes=self._mix_gt_boxes,
fg_fraction=self._fg_fraction,
fg_iou_thresh=self._fg_iou_thresh,
bg_iou_thresh_hi=self._bg_iou_thresh_hi,
bg_iou_thresh_lo=self._bg_iou_thresh_lo)
return (sampled_rois, sampled_gt_boxes, sampled_gt_classes,
sampled_gt_attributes, sampled_gt_indices)
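# Illustrative sketch (not part of this module): minimal construction of an
# ROISampler from a hypothetical params namespace; the attribute values below
# mirror the defaults of assign_and_sample_proposals and are assumptions.
def _demo_roi_sampler_params():
  class _Params(object):
    num_samples_per_image = 512
    fg_fraction = 0.25
    fg_iou_thresh = 0.5
    bg_iou_thresh_hi = 0.5
    bg_iou_thresh_lo = 0.0
    mix_gt_boxes = True
  return ROISampler(_Params())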
|
|
# SDT2PrintMarkdown.py
#
# Print SDT2 to markdown
from .SDT2Classes import *
hideDetails = False
tables = False
pageBreakBeforeMCandDevices = False
pageBreakToken = '\n<!--BREAK-->'
# tabulator level
tab = 0
def incTab():
global tab
tab += 1
def decTab():
global tab
if (tab > 0):
tab -= 1
def newLine():
global tab
result = '\n'
for i in range(tab):
result += '\t'
return result
# header level
headerLevel = 1
def incHeaderLevel():
global headerLevel
headerLevel += 1
def decHeaderLevel():
global headerLevel
headerLevel -= 1
def markdownHeader(text):
global headerLevel
result = '\n\n'
for i in range(headerLevel):
result += '#'
result += ' ' + text
return result
#
# Print functions
#
def print2DomainMarkdown(domain, options):
global hideDetails, pageBreakToken, pageBreakBeforeMCandDevices
hideDetails = options['hideDetails']
tables = options['markdowntables']
pageBreakBeforeMCandDevices = options['pageBreakBeforeMCandDevices']
if tables:
print('Tables are not supported for input format "sdt2"')
return ''
result = ''
result += markdownHeader('Domain "' + domain.id + '"')
if (len(domain.includes) > 0):
result += newLine() + '- **Includes**'
for include in domain.includes:
result += printInclude(include)
if (len(domain.modules) > 0):
incHeaderLevel()
result += markdownHeader('ModuleClasses')
for module in domain.modules:
if pageBreakBeforeMCandDevices:
result += newLine() + pageBreakToken
result += newLine() + printModuleClass(module)
decHeaderLevel()
if (len(domain.rootDevices) > 0):
incHeaderLevel()
result += markdownHeader('RootDevices')
for rootDevice in domain.rootDevices:
if pageBreakBeforeMCandDevices:
result += newLine() + pageBreakToken
result += newLine() + printRootDevice(rootDevice)
decHeaderLevel()
return result
def printInclude(include):
incTab()
result = newLine() + '- Parse: ' + include.parse
result += ', Href: ' + include.href
decTab()
return result
#
# RootDevice, Device
#
def printRootDevice(rootDevice):
global hideDetails
incHeaderLevel()
result = markdownHeader('RootDevice "' + rootDevice.id + '"')
if (rootDevice.doc and hideDetails == False):
result += newLine() + printDoc(rootDevice.doc)
if (rootDevice.deviceInfo != None and hideDetails == False):
result += newLine() + printDeviceInfo(rootDevice.deviceInfo)
if (len(rootDevice.modules) > 0):
incHeaderLevel()
result += markdownHeader('Modules')
for module in rootDevice.modules:
result += newLine() + printModule(module)
decHeaderLevel()
if (len(rootDevice.devices) > 0):
incHeaderLevel()
result += markdownHeader('Devices')
for device in rootDevice.devices:
result += printDevice(device)
decHeaderLevel()
decTab()
decHeaderLevel()
return result
def printDevice(device):
global hideDetails
incHeaderLevel()
result = markdownHeader('Device "' + device.id + '"')
if (device.doc):
result += newLine() + printDoc(device.doc)
if (device.deviceInfo != None and hideDetails == False):
result += newLine() + printDeviceInfo(device.deviceInfo)
if (len(device.modules) > 0):
incHeaderLevel()
result += newLine() + markdownHeader('Modules')
for module in device.modules:
result += newLine() + printModule(module)
decHeaderLevel()
decHeaderLevel()
return result
#
# DeviceInfo
#
def printDeviceInfo(deviceInfo):
incHeaderLevel()
result = markdownHeader('DeviceInfo')
if (deviceInfo.name != None):
result += newLine() + '- Name: ' + deviceInfo.name
if (deviceInfo.vendor != None):
result += newLine() + '- Vendor: ' + deviceInfo.vendor
if (deviceInfo.serialNumber != None):
result += newLine() + '- SerialNumber: ' + deviceInfo.serialNumber
if (deviceInfo.vendorURL != None):
result += newLine() + '- VendorURL: ' + deviceInfo.vendorURL
if (deviceInfo.firmwareVersion != None):
result += newLine() + '- FirmwareVersion: ' + deviceInfo.firmwareVersion
decHeaderLevel()
return result
#
# Print Module, ModuleClass
#
def printModule(module):
return printModuleDetails(module)
def printModuleClass(moduleClass):
return printModuleDetails(moduleClass)
def printModuleDetails(module):
global hideDetails
result = '- **' + module.name + '**'
if (hideDetails):
return result
incTab()
if (module.doc != None):
result += ' ' + newLine() + printDoc(module.doc)
if (module.extends != None):
result += printExtends(module.extends)
if (len(module.actions) > 0):
result += newLine() + '- Actions'
for action in module.actions:
result += printAction(action)
if (len(module.data) > 0):
result += newLine() + '- Data'
for data in module.data:
result += printDataPoint(data)
if (len(module.events) > 0):
result += newLine() + '- Events'
for event in module.events:
result += printEvent(event)
decTab()
return result
def printExtends(extends):
result = newLine() + '- Extends'
incTab()
result += newLine() + '- Domain: **' + extends.domain + '**, Class: **' + extends.clazz + '**'
decTab()
return result
#
# Action, Argument
#
def printAction(action):
incTab()
result = newLine() + '- **' + action.name + '**'
incTab()
if (action.doc != None):
result += ' ' + newLine() + printDoc(action.doc)
if (action.type != None):
result += newLine() + '- Return Type: ' + action.type
if (len(action.arg) > 0):
result += newLine() + '- Arguments'
for argument in action.arg:
result += printArgument(argument)
decTab()
decTab()
return result
def printArgument(action):
incTab()
result = newLine() + '- '
if (action.name != None):
result += '**' + action.name + '**'
if (action.type != None):
result += ' (' + action.type + ')'
decTab()
return result
#
# Event
#
def printEvent(event):
incTab()
result = newLine() + '- **' + event.name + '**'
incTab()
if (event.doc != None):
result += ' ' + newLine() + printDoc(event.doc)
if (len(event.data) > 0):
result += newLine() + '- **Data**'
for dataPoint in event.data:
result += printDataPoint(dataPoint)
decTab()
decTab()
return result
#
# DataPoint
#
def printDataPoint(datapoint):
incTab()
result = newLine() + '- **' + datapoint.name + '**'
if (datapoint.type != None):
result += ' (' + datapoint.type + ')'
incTab()
if (datapoint.doc != None):
result += ' ' + newLine() + printDoc(datapoint.doc)
if (datapoint.writable != None):
result += newLine() + '- Writable: ' + datapoint.writable
if (datapoint.readable != None):
result += newLine() + '- Readable: ' + datapoint.readable
if (datapoint.eventable != None):
result += newLine() + '- Eventable: ' + datapoint.eventable
decTab()
decTab()
return result
#
# Doc
#
def printDoc(doc):
result = doc.content.strip()
return result
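# A minimal usage sketch (hypothetical; `domain` is assumed to be an SDT2
# domain object parsed elsewhere). The option keys are exactly the ones read
# by print2DomainMarkdown above:
#
#   options = {
#       'hideDetails': False,
#       'markdowntables': False,
#       'pageBreakBeforeMCandDevices': False,
#   }
#   print(print2DomainMarkdown(domain, options))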
|
|
"""CherryPy Application and Tree objects."""
import os
import sys
import cherrypy
from cherrypy._cpcompat import ntou, py3k
from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools
from cherrypy.lib import httputil
class Application(object):
"""A CherryPy Application.
Servers and gateways should not instantiate Request objects directly.
Instead, they should ask an Application object for a request object.
An instance of this class may also be used as a WSGI callable
(WSGI application object) for itself.
"""
root = None
"""The top-most container of page handlers for this app. Handlers should
be arranged in a hierarchy of attributes, matching the expected URI
hierarchy; the default dispatcher then searches this hierarchy for a
matching handler. When using a dispatcher other than the default,
this value may be None."""
config = {}
"""A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
of {key: value} pairs."""
namespaces = _cpconfig.NamespaceSet()
toolboxes = {'tools': cherrypy.tools}
log = None
"""A LogManager instance. See _cplogging."""
wsgiapp = None
"""A CPWSGIApp instance. See _cpwsgi."""
request_class = _cprequest.Request
response_class = _cprequest.Response
relative_urls = False
def __init__(self, root, script_name="", config=None):
self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)
self.root = root
self.script_name = script_name
self.wsgiapp = _cpwsgi.CPWSGIApp(self)
self.namespaces = self.namespaces.copy()
self.namespaces["log"] = lambda k, v: setattr(self.log, k, v)
self.namespaces["wsgi"] = self.wsgiapp.namespace_handler
self.config = self.__class__.config.copy()
if config:
self.merge(config)
def __repr__(self):
return "%s.%s(%r, %r)" % (self.__module__, self.__class__.__name__,
self.root, self.script_name)
script_name_doc = """The URI "mount point" for this app. A mount point is that portion of
the URI which is constant for all URIs that are serviced by this
application; it does not include scheme, host, or proxy ("virtual host")
portions of the URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
def _get_script_name(self):
if self._script_name is None:
# None signals that the script name should be pulled from WSGI environ.
return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip("/")
return self._script_name
def _set_script_name(self, value):
if value:
value = value.rstrip("/")
self._script_name = value
script_name = property(fget=_get_script_name, fset=_set_script_name,
doc=script_name_doc)
def merge(self, config):
"""Merge the given config into self.config."""
_cpconfig.merge(self.config, config)
# Handle namespaces specified in config.
self.namespaces(self.config.get("/", {}))
def find_config(self, path, key, default=None):
"""Return the most-specific value for key along path, or default."""
trail = path or "/"
while trail:
nodeconf = self.config.get(trail, {})
if key in nodeconf:
return nodeconf[key]
lastslash = trail.rfind("/")
if lastslash == -1:
break
elif lastslash == 0 and trail != "/":
trail = "/"
else:
trail = trail[:lastslash]
return default
def get_serving(self, local, remote, scheme, sproto):
"""Create and return a Request and Response object."""
req = self.request_class(local, remote, scheme, sproto)
req.app = self
for name, toolbox in self.toolboxes.items():
req.namespaces[name] = toolbox
resp = self.response_class()
cherrypy.serving.load(req, resp)
cherrypy.engine.publish('acquire_thread')
cherrypy.engine.publish('before_request')
return req, resp
def release_serving(self):
"""Release the current serving (request and response)."""
req = cherrypy.serving.request
cherrypy.engine.publish('after_request')
try:
req.close()
except:
cherrypy.log(traceback=True, severity=40)
cherrypy.serving.clear()
def __call__(self, environ, start_response):
return self.wsgiapp(environ, start_response)
class Tree(object):
"""A registry of CherryPy applications, mounted at diverse points.
An instance of this class may also be used as a WSGI callable
(WSGI application object), in which case it dispatches to all
mounted apps.
"""
apps = {}
"""
A dict of the form {script name: application}, where "script name"
is a string declaring the URI mount point (no trailing slash), and
"application" is an instance of cherrypy.Application (or an arbitrary
WSGI callable if you happen to be using a WSGI server)."""
def __init__(self):
self.apps = {}
def mount(self, root, script_name="", config=None):
"""Mount a new app from a root object, script_name, and config.
root
An instance of a "controller class" (a collection of page
handler methods) which represents the root of the application.
This may also be an Application instance, or None if using
a dispatcher other than the default.
script_name
A string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the
URL at which to mount the given root. For example, if root.index()
will handle requests to "http://www.example.com:8080/dept/app1/",
then the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the
root of the URI, it MUST be an empty string (not "/").
config
A file or dict containing application config.
"""
if script_name is None:
raise TypeError(
"The 'script_name' argument may not be None. Application "
"objects may, however, possess a script_name of None (in "
"order to inpect the WSGI environ for SCRIPT_NAME upon each "
"request). You cannot mount such Applications on this Tree; "
"you must pass them to a WSGI server interface directly.")
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip("/")
if isinstance(root, Application):
app = root
if script_name != "" and script_name != app.script_name:
raise ValueError("Cannot specify a different script name and "
"pass an Application instance to cherrypy.mount")
script_name = app.script_name
else:
app = Application(root, script_name)
# If mounted at "", add favicon.ico
if (script_name == "" and root is not None
and not hasattr(root, "favicon_ico")):
favicon = os.path.join(os.getcwd(), os.path.dirname(__file__),
"favicon.ico")
root.favicon_ico = tools.staticfile.handler(favicon)
if config:
app.merge(config)
self.apps[script_name] = app
return app
def graft(self, wsgi_callable, script_name=""):
"""Mount a wsgi callable at the given script_name."""
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip("/")
self.apps[script_name] = wsgi_callable
def script_name(self, path=None):
"""The script_name of the app at the given path, or None.
If path is None, cherrypy.request is used.
"""
if path is None:
try:
request = cherrypy.serving.request
path = httputil.urljoin(request.script_name,
request.path_info)
except AttributeError:
return None
while True:
if path in self.apps:
return path
if path == "":
return None
# Move one node up the tree and try again.
path = path[:path.rfind("/")]
def __call__(self, environ, start_response):
# If you're calling this, then you're probably setting SCRIPT_NAME
# to '' (some WSGI servers always set SCRIPT_NAME to '').
# Try to look up the app using the full path.
env1x = environ
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
env1x = _cpwsgi.downgrade_wsgi_ux_to_1x(environ)
path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''),
env1x.get('PATH_INFO', ''))
sn = self.script_name(path or "/")
if sn is None:
start_response('404 Not Found', [])
return []
app = self.apps[sn]
# Correct the SCRIPT_NAME and PATH_INFO environ entries.
environ = environ.copy()
if not py3k:
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
# Python 2/WSGI u.0: all strings MUST be of type unicode
enc = environ[ntou('wsgi.url_encoding')]
environ[ntou('SCRIPT_NAME')] = sn.decode(enc)
environ[ntou('PATH_INFO')] = path[len(sn.rstrip("/")):].decode(enc)
else:
# Python 2/WSGI 1.x: all strings MUST be of type str
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
else:
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
# Python 3/WSGI u.0: all strings MUST be full unicode
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
else:
# Python 3/WSGI 1.x: all strings MUST be ISO-8859-1 str
environ['SCRIPT_NAME'] = sn.encode('utf-8').decode('ISO-8859-1')
environ['PATH_INFO'] = path[len(sn.rstrip("/")):].encode('utf-8').decode('ISO-8859-1')
return app(environ, start_response)
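# A minimal mounting sketch (hypothetical; follows the Tree.mount() docstring
# above and is not part of the original module):
#
#   class Root(object):
#       @cherrypy.expose
#       def index(self):
#           return "Hello"
#
#   app = cherrypy.tree.mount(Root(), "/dept/app1")
#   # Requests to http://host:port/dept/app1/ are then dispatched to
#   # Root.index via the mounted Application.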
|
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'0', # 0x50
'1', # 0x51
'2', # 0x52
'3', # 0x53
'4', # 0x54
'5', # 0x55
'6', # 0x56
'7', # 0x57
'8', # 0x58
'9', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.common.types import criteria
from google.ads.googleads.v9.common.types import custom_parameter
from google.ads.googleads.v9.enums.types import (
ad_group_criterion_approval_status,
)
from google.ads.googleads.v9.enums.types import ad_group_criterion_status
from google.ads.googleads.v9.enums.types import bidding_source
from google.ads.googleads.v9.enums.types import criterion_system_serving_status
from google.ads.googleads.v9.enums.types import criterion_type
from google.ads.googleads.v9.enums.types import quality_score_bucket
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"AdGroupCriterion",},
)
class AdGroupCriterion(proto.Message):
r"""An ad group criterion.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
resource_name (str):
Immutable. The resource name of the ad group criterion. Ad
group criterion resource names have the form:
``customers/{customer_id}/adGroupCriteria/{ad_group_id}~{criterion_id}``
criterion_id (int):
Output only. The ID of the criterion.
This field is ignored for mutates.
This field is a member of `oneof`_ ``_criterion_id``.
display_name (str):
Output only. The display name of the
criterion.
This field is ignored for mutates.
status (google.ads.googleads.v9.enums.types.AdGroupCriterionStatusEnum.AdGroupCriterionStatus):
The status of the criterion.
This is the status of the ad group criterion
entity, set by the client. Note: UI reports may
incorporate additional information that affects
whether a criterion is eligible to run. In some
cases a criterion that's REMOVED in the API can
still show as enabled in the UI. For example,
campaigns by default show to users of all age
ranges unless excluded. The UI will show each
age range as "enabled", since they're eligible
to see the ads; but AdGroupCriterion.status will
show "removed", since no positive criterion was
added.
quality_info (google.ads.googleads.v9.resources.types.AdGroupCriterion.QualityInfo):
Output only. Information regarding the
quality of the criterion.
ad_group (str):
Immutable. The ad group to which the
criterion belongs.
This field is a member of `oneof`_ ``_ad_group``.
type_ (google.ads.googleads.v9.enums.types.CriterionTypeEnum.CriterionType):
Output only. The type of the criterion.
negative (bool):
Immutable. Whether to target (``false``) or exclude
(``true``) the criterion.
This field is immutable. To switch a criterion from positive
to negative, remove then re-add it.
This field is a member of `oneof`_ ``_negative``.
system_serving_status (google.ads.googleads.v9.enums.types.CriterionSystemServingStatusEnum.CriterionSystemServingStatus):
Output only. Serving status of the criterion.
approval_status (google.ads.googleads.v9.enums.types.AdGroupCriterionApprovalStatusEnum.AdGroupCriterionApprovalStatus):
Output only. Approval status of the
criterion.
disapproval_reasons (Sequence[str]):
Output only. List of disapproval reasons of
the criterion.
The different reasons for disapproving a
criterion can be found here:
https://support.google.com/adspolicy/answer/6008942
This field is read-only.
labels (Sequence[str]):
Output only. The resource names of labels
attached to this ad group criterion.
bid_modifier (float):
The modifier for the bid when the criterion
matches. The modifier must be in the range: 0.1
- 10.0. Most targetable criteria types support
modifiers.
This field is a member of `oneof`_ ``_bid_modifier``.
cpc_bid_micros (int):
The CPC (cost-per-click) bid.
This field is a member of `oneof`_ ``_cpc_bid_micros``.
cpm_bid_micros (int):
The CPM (cost-per-thousand viewable
impressions) bid.
This field is a member of `oneof`_ ``_cpm_bid_micros``.
cpv_bid_micros (int):
The CPV (cost-per-view) bid.
This field is a member of `oneof`_ ``_cpv_bid_micros``.
percent_cpc_bid_micros (int):
The CPC bid amount, expressed as a fraction of the
advertised price for some good or service. The valid range
for the fraction is [0,1) and the value stored here is
1,000,000 \* [fraction].
This field is a member of `oneof`_ ``_percent_cpc_bid_micros``.
effective_cpc_bid_micros (int):
Output only. The effective CPC (cost-per-
click) bid.
This field is a member of `oneof`_ ``_effective_cpc_bid_micros``.
effective_cpm_bid_micros (int):
Output only. The effective CPM (cost-per-
thousand viewable impressions) bid.
This field is a member of `oneof`_ ``_effective_cpm_bid_micros``.
effective_cpv_bid_micros (int):
Output only. The effective CPV (cost-per-
view) bid.
This field is a member of `oneof`_ ``_effective_cpv_bid_micros``.
effective_percent_cpc_bid_micros (int):
Output only. The effective Percent CPC bid
amount.
This field is a member of `oneof`_ ``_effective_percent_cpc_bid_micros``.
effective_cpc_bid_source (google.ads.googleads.v9.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective CPC bid.
effective_cpm_bid_source (google.ads.googleads.v9.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective CPM bid.
effective_cpv_bid_source (google.ads.googleads.v9.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective CPV bid.
effective_percent_cpc_bid_source (google.ads.googleads.v9.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective Percent
CPC bid.
position_estimates (google.ads.googleads.v9.resources.types.AdGroupCriterion.PositionEstimates):
Output only. Estimates for criterion bids at
various positions.
final_urls (Sequence[str]):
The list of possible final URLs after all
cross-domain redirects for the ad.
final_mobile_urls (Sequence[str]):
The list of possible final mobile URLs after
all cross-domain redirects.
final_url_suffix (str):
URL template for appending params to final
URL.
This field is a member of `oneof`_ ``_final_url_suffix``.
tracking_url_template (str):
The URL template for constructing a tracking
URL.
This field is a member of `oneof`_ ``_tracking_url_template``.
url_custom_parameters (Sequence[google.ads.googleads.v9.common.types.CustomParameter]):
The list of mappings used to substitute custom parameter
tags in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
keyword (google.ads.googleads.v9.common.types.KeywordInfo):
Immutable. Keyword.
This field is a member of `oneof`_ ``criterion``.
placement (google.ads.googleads.v9.common.types.PlacementInfo):
Immutable. Placement.
This field is a member of `oneof`_ ``criterion``.
mobile_app_category (google.ads.googleads.v9.common.types.MobileAppCategoryInfo):
Immutable. Mobile app category.
This field is a member of `oneof`_ ``criterion``.
mobile_application (google.ads.googleads.v9.common.types.MobileApplicationInfo):
Immutable. Mobile application.
This field is a member of `oneof`_ ``criterion``.
listing_group (google.ads.googleads.v9.common.types.ListingGroupInfo):
Immutable. Listing group.
This field is a member of `oneof`_ ``criterion``.
age_range (google.ads.googleads.v9.common.types.AgeRangeInfo):
Immutable. Age range.
This field is a member of `oneof`_ ``criterion``.
gender (google.ads.googleads.v9.common.types.GenderInfo):
Immutable. Gender.
This field is a member of `oneof`_ ``criterion``.
income_range (google.ads.googleads.v9.common.types.IncomeRangeInfo):
Immutable. Income range.
This field is a member of `oneof`_ ``criterion``.
parental_status (google.ads.googleads.v9.common.types.ParentalStatusInfo):
Immutable. Parental status.
This field is a member of `oneof`_ ``criterion``.
user_list (google.ads.googleads.v9.common.types.UserListInfo):
Immutable. User List.
This field is a member of `oneof`_ ``criterion``.
youtube_video (google.ads.googleads.v9.common.types.YouTubeVideoInfo):
Immutable. YouTube Video.
This field is a member of `oneof`_ ``criterion``.
youtube_channel (google.ads.googleads.v9.common.types.YouTubeChannelInfo):
Immutable. YouTube Channel.
This field is a member of `oneof`_ ``criterion``.
topic (google.ads.googleads.v9.common.types.TopicInfo):
Immutable. Topic.
This field is a member of `oneof`_ ``criterion``.
user_interest (google.ads.googleads.v9.common.types.UserInterestInfo):
Immutable. User Interest.
This field is a member of `oneof`_ ``criterion``.
webpage (google.ads.googleads.v9.common.types.WebpageInfo):
Immutable. Webpage
This field is a member of `oneof`_ ``criterion``.
app_payment_model (google.ads.googleads.v9.common.types.AppPaymentModelInfo):
Immutable. App Payment Model.
This field is a member of `oneof`_ ``criterion``.
custom_affinity (google.ads.googleads.v9.common.types.CustomAffinityInfo):
Immutable. Custom Affinity.
This field is a member of `oneof`_ ``criterion``.
custom_intent (google.ads.googleads.v9.common.types.CustomIntentInfo):
Immutable. Custom Intent.
This field is a member of `oneof`_ ``criterion``.
custom_audience (google.ads.googleads.v9.common.types.CustomAudienceInfo):
Immutable. Custom Audience.
This field is a member of `oneof`_ ``criterion``.
combined_audience (google.ads.googleads.v9.common.types.CombinedAudienceInfo):
Immutable. Combined Audience.
This field is a member of `oneof`_ ``criterion``.
"""
class QualityInfo(proto.Message):
r"""A container for ad group criterion quality information.
Attributes:
quality_score (int):
Output only. The quality score.
This field may not be populated if Google does
not have enough information to determine a
value.
This field is a member of `oneof`_ ``_quality_score``.
creative_quality_score (google.ads.googleads.v9.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
Output only. The performance of the ad
compared to other advertisers.
post_click_quality_score (google.ads.googleads.v9.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
Output only. The quality score of the landing
page.
search_predicted_ctr (google.ads.googleads.v9.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
Output only. The click-through rate compared
to that of other advertisers.
"""
quality_score = proto.Field(proto.INT32, number=5, optional=True,)
creative_quality_score = proto.Field(
proto.ENUM,
number=2,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
post_click_quality_score = proto.Field(
proto.ENUM,
number=3,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
search_predicted_ctr = proto.Field(
proto.ENUM,
number=4,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
class PositionEstimates(proto.Message):
r"""Estimates for criterion bids at various positions.
Attributes:
first_page_cpc_micros (int):
Output only. The estimate of the CPC bid
required for ad to be shown on first page of
search results.
This field is a member of `oneof`_ ``_first_page_cpc_micros``.
first_position_cpc_micros (int):
Output only. The estimate of the CPC bid
required for ad to be displayed in first
position, at the top of the first page of search
results.
This field is a member of `oneof`_ ``_first_position_cpc_micros``.
top_of_page_cpc_micros (int):
Output only. The estimate of the CPC bid
required for ad to be displayed at the top of
the first page of search results.
This field is a member of `oneof`_ ``_top_of_page_cpc_micros``.
estimated_add_clicks_at_first_position_cpc (int):
Output only. Estimate of how many clicks per week you might
get by changing your keyword bid to the value in
first_position_cpc_micros.
This field is a member of `oneof`_ ``_estimated_add_clicks_at_first_position_cpc``.
estimated_add_cost_at_first_position_cpc (int):
Output only. Estimate of how your cost per week might change
when changing your keyword bid to the value in
first_position_cpc_micros.
This field is a member of `oneof`_ ``_estimated_add_cost_at_first_position_cpc``.
"""
first_page_cpc_micros = proto.Field(
proto.INT64, number=6, optional=True,
)
first_position_cpc_micros = proto.Field(
proto.INT64, number=7, optional=True,
)
top_of_page_cpc_micros = proto.Field(
proto.INT64, number=8, optional=True,
)
estimated_add_clicks_at_first_position_cpc = proto.Field(
proto.INT64, number=9, optional=True,
)
estimated_add_cost_at_first_position_cpc = proto.Field(
proto.INT64, number=10, optional=True,
)
resource_name = proto.Field(proto.STRING, number=1,)
criterion_id = proto.Field(proto.INT64, number=56, optional=True,)
display_name = proto.Field(proto.STRING, number=77,)
status = proto.Field(
proto.ENUM,
number=3,
enum=ad_group_criterion_status.AdGroupCriterionStatusEnum.AdGroupCriterionStatus,
)
quality_info = proto.Field(proto.MESSAGE, number=4, message=QualityInfo,)
ad_group = proto.Field(proto.STRING, number=57, optional=True,)
type_ = proto.Field(
proto.ENUM,
number=25,
enum=criterion_type.CriterionTypeEnum.CriterionType,
)
negative = proto.Field(proto.BOOL, number=58, optional=True,)
system_serving_status = proto.Field(
proto.ENUM,
number=52,
enum=criterion_system_serving_status.CriterionSystemServingStatusEnum.CriterionSystemServingStatus,
)
approval_status = proto.Field(
proto.ENUM,
number=53,
enum=ad_group_criterion_approval_status.AdGroupCriterionApprovalStatusEnum.AdGroupCriterionApprovalStatus,
)
disapproval_reasons = proto.RepeatedField(proto.STRING, number=59,)
labels = proto.RepeatedField(proto.STRING, number=60,)
bid_modifier = proto.Field(proto.DOUBLE, number=61, optional=True,)
cpc_bid_micros = proto.Field(proto.INT64, number=62, optional=True,)
cpm_bid_micros = proto.Field(proto.INT64, number=63, optional=True,)
cpv_bid_micros = proto.Field(proto.INT64, number=64, optional=True,)
percent_cpc_bid_micros = proto.Field(proto.INT64, number=65, optional=True,)
effective_cpc_bid_micros = proto.Field(
proto.INT64, number=66, optional=True,
)
effective_cpm_bid_micros = proto.Field(
proto.INT64, number=67, optional=True,
)
effective_cpv_bid_micros = proto.Field(
proto.INT64, number=68, optional=True,
)
effective_percent_cpc_bid_micros = proto.Field(
proto.INT64, number=69, optional=True,
)
effective_cpc_bid_source = proto.Field(
proto.ENUM,
number=21,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_cpm_bid_source = proto.Field(
proto.ENUM,
number=22,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_cpv_bid_source = proto.Field(
proto.ENUM,
number=23,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_percent_cpc_bid_source = proto.Field(
proto.ENUM,
number=35,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
position_estimates = proto.Field(
proto.MESSAGE, number=10, message=PositionEstimates,
)
final_urls = proto.RepeatedField(proto.STRING, number=70,)
final_mobile_urls = proto.RepeatedField(proto.STRING, number=71,)
final_url_suffix = proto.Field(proto.STRING, number=72, optional=True,)
tracking_url_template = proto.Field(proto.STRING, number=73, optional=True,)
url_custom_parameters = proto.RepeatedField(
proto.MESSAGE, number=14, message=custom_parameter.CustomParameter,
)
keyword = proto.Field(
proto.MESSAGE,
number=27,
oneof="criterion",
message=criteria.KeywordInfo,
)
placement = proto.Field(
proto.MESSAGE,
number=28,
oneof="criterion",
message=criteria.PlacementInfo,
)
mobile_app_category = proto.Field(
proto.MESSAGE,
number=29,
oneof="criterion",
message=criteria.MobileAppCategoryInfo,
)
mobile_application = proto.Field(
proto.MESSAGE,
number=30,
oneof="criterion",
message=criteria.MobileApplicationInfo,
)
listing_group = proto.Field(
proto.MESSAGE,
number=32,
oneof="criterion",
message=criteria.ListingGroupInfo,
)
age_range = proto.Field(
proto.MESSAGE,
number=36,
oneof="criterion",
message=criteria.AgeRangeInfo,
)
gender = proto.Field(
proto.MESSAGE,
number=37,
oneof="criterion",
message=criteria.GenderInfo,
)
income_range = proto.Field(
proto.MESSAGE,
number=38,
oneof="criterion",
message=criteria.IncomeRangeInfo,
)
parental_status = proto.Field(
proto.MESSAGE,
number=39,
oneof="criterion",
message=criteria.ParentalStatusInfo,
)
user_list = proto.Field(
proto.MESSAGE,
number=42,
oneof="criterion",
message=criteria.UserListInfo,
)
youtube_video = proto.Field(
proto.MESSAGE,
number=40,
oneof="criterion",
message=criteria.YouTubeVideoInfo,
)
youtube_channel = proto.Field(
proto.MESSAGE,
number=41,
oneof="criterion",
message=criteria.YouTubeChannelInfo,
)
topic = proto.Field(
proto.MESSAGE, number=43, oneof="criterion", message=criteria.TopicInfo,
)
user_interest = proto.Field(
proto.MESSAGE,
number=45,
oneof="criterion",
message=criteria.UserInterestInfo,
)
webpage = proto.Field(
proto.MESSAGE,
number=46,
oneof="criterion",
message=criteria.WebpageInfo,
)
app_payment_model = proto.Field(
proto.MESSAGE,
number=47,
oneof="criterion",
message=criteria.AppPaymentModelInfo,
)
custom_affinity = proto.Field(
proto.MESSAGE,
number=48,
oneof="criterion",
message=criteria.CustomAffinityInfo,
)
custom_intent = proto.Field(
proto.MESSAGE,
number=49,
oneof="criterion",
message=criteria.CustomIntentInfo,
)
custom_audience = proto.Field(
proto.MESSAGE,
number=74,
oneof="criterion",
message=criteria.CustomAudienceInfo,
)
combined_audience = proto.Field(
proto.MESSAGE,
number=75,
oneof="criterion",
message=criteria.CombinedAudienceInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
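# A minimal construction sketch (hypothetical, not part of the generated
# module). Per the class docstring, `criterion` is a oneof, so setting
# `keyword` clears any other criterion member that may have been set:
#
#   agc = AdGroupCriterion(
#       ad_group="customers/1234567890/adGroups/111",
#       status=(ad_group_criterion_status.AdGroupCriterionStatusEnum
#               .AdGroupCriterionStatus.ENABLED),
#       keyword=criteria.KeywordInfo(text="example keyword"),
#   )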
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for resource tracker claims."""
import uuid
import mock
from nova.compute import claims
from nova import context
from nova import exception
from nova import objects
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.pci import fakes as pci_fakes
class FakeResourceHandler(object):
test_called = False
usage_is_instance = False
def test_resources(self, usage, limits):
self.test_called = True
self.usage_is_itype = usage.get('name') == 'fakeitype'
return []
class DummyTracker(object):
icalled = False
rcalled = False
def __init__(self):
self.new_pci_tracker()
def abort_instance_claim(self, *args, **kwargs):
self.icalled = True
def drop_move_claim(self, *args, **kwargs):
self.rcalled = True
def new_pci_tracker(self):
ctxt = context.RequestContext('testuser', 'testproject')
self.pci_tracker = pci_manager.PciDevTracker(ctxt)
class ClaimTestCase(test.NoDBTestCase):
def setUp(self):
super(ClaimTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.instance = None
self.resources = self._fake_resources()
self.tracker = DummyTracker()
self.empty_requests = objects.InstancePCIRequests(
requests=[]
)
def _claim(self, limits=None, overhead=None, requests=None, **kwargs):
numa_topology = kwargs.pop('numa_topology', None)
instance = self._fake_instance(**kwargs)
instance.flavor = self._fake_instance_type(**kwargs)
if numa_topology:
db_numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance.uuid,
'numa_topology': numa_topology._to_json(),
'pci_requests': (requests or self.empty_requests).to_json()
}
else:
db_numa_topology = None
if overhead is None:
overhead = {'memory_mb': 0}
requests = requests or self.empty_requests
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value=db_numa_topology)
def get_claim(mock_extra_get):
return claims.Claim(self.context, instance, self.tracker,
self.resources, requests, overhead=overhead,
limits=limits)
return get_claim()
def _fake_instance(self, **kwargs):
instance = {
'uuid': str(uuid.uuid1()),
'memory_mb': 1024,
'root_gb': 10,
'ephemeral_gb': 5,
'vcpus': 1,
'system_metadata': {},
'numa_topology': None
}
instance.update(**kwargs)
return fake_instance.fake_instance_obj(self.context, **instance)
def _fake_instance_type(self, **kwargs):
instance_type = {
'id': 1,
'name': 'fakeitype',
'memory_mb': 1024,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 5
}
instance_type.update(**kwargs)
return objects.Flavor(**instance_type)
def _fake_resources(self, values=None):
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0,
'numa_topology': objects.NUMATopology(
cells=[objects.NUMACell(id=1, cpuset=set([1, 2]), memory=512,
memory_usage=0, cpu_usage=0,
mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([3, 4]), memory=512,
memory_usage=0, cpu_usage=0,
mempages=[], siblings=[],
pinned_cpus=set([]))]
)._to_json()
}
if values:
resources.update(values)
return objects.ComputeNode(**resources)
def test_memory_unlimited(self):
self._claim(memory_mb=99999999)
def test_disk_unlimited_root(self):
self._claim(root_gb=999999)
def test_disk_unlimited_ephemeral(self):
self._claim(ephemeral_gb=999999)
def test_memory_with_overhead(self):
overhead = {'memory_mb': 8}
limits = {'memory_mb': 2048}
self._claim(memory_mb=2040, limits=limits,
overhead=overhead)
def test_memory_with_overhead_insufficient(self):
overhead = {'memory_mb': 9}
limits = {'memory_mb': 2048}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, overhead=overhead,
memory_mb=2040)
def test_memory_oversubscription(self):
self._claim(memory_mb=4096)
def test_disk_with_overhead(self):
overhead = {'memory_mb': 0,
'disk_gb': 1}
limits = {'disk_gb': 100}
claim_obj = self._claim(root_gb=99, ephemeral_gb=0, limits=limits,
overhead=overhead)
self.assertEqual(100, claim_obj.disk_gb)
def test_disk_with_overhead_insufficient(self):
overhead = {'memory_mb': 0,
'disk_gb': 2}
limits = {'disk_gb': 100}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, overhead=overhead,
root_gb=99, ephemeral_gb=0)
def test_disk_with_overhead_insufficient_no_root(self):
overhead = {'memory_mb': 0,
'disk_gb': 2}
limits = {'disk_gb': 1}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, overhead=overhead,
root_gb=0, ephemeral_gb=0)
def test_memory_insufficient(self):
limits = {'memory_mb': 8192}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, memory_mb=16384)
def test_disk_oversubscription(self):
limits = {'disk_gb': 60}
self._claim(root_gb=10, ephemeral_gb=40,
limits=limits)
def test_disk_insufficient(self):
limits = {'disk_gb': 45}
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
"disk",
self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
def test_disk_and_memory_insufficient(self):
limits = {'disk_gb': 45, 'memory_mb': 8192}
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
"memory.*disk",
self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
memory_mb=16384)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=True)
def test_pci_pass(self, mock_pci_supports_requests):
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
mock_pci_supports_requests.assert_called_once_with([request])
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
def test_pci_fail(self, mock_pci_supports_requests):
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
mock_pci_supports_requests.assert_called_once_with([request])
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
self._claim()
self.assertFalse(mock_pci_supports_requests.called)
def test_numa_topology_no_limit(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self._claim(numa_topology=huge_instance)
def test_numa_topology_fails(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2, 3, 4, 5]), memory=2048)])
limit_topo = objects.NUMATopologyLimits(
cpu_allocation_ratio=1, ram_allocation_ratio=1)
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim,
limits={'numa_topology': limit_topo},
numa_topology=huge_instance)
def test_numa_topology_passes(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
limit_topo = objects.NUMATopologyLimits(
cpu_allocation_ratio=1, ram_allocation_ratio=1)
self._claim(limits={'numa_topology': limit_topo},
numa_topology=huge_instance)
@pci_fakes.patch_pci_whitelist
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_numa_topology_with_pci(self, mock_get_by_instance):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 1,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
self.tracker.new_pci_tracker()
self.tracker.pci_tracker._set_hvdevs([dev_dict])
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
mock_get_by_instance.return_value = requests
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self._claim(requests=requests, numa_topology=huge_instance)
@pci_fakes.patch_pci_whitelist
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_numa_topology_with_pci_fail(self, mock_get_by_instance):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 1,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
dev_dict2 = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 2,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
self.tracker.new_pci_tracker()
self.tracker.pci_tracker._set_hvdevs([dev_dict, dev_dict2])
request = objects.InstancePCIRequest(count=2,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
mock_get_by_instance.return_value = requests
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim,
requests=requests,
numa_topology=huge_instance)
@pci_fakes.patch_pci_whitelist
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_numa_topology_with_pci_no_numa_info(self, mock_get_by_instance):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': None,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
self.tracker.new_pci_tracker()
self.tracker.pci_tracker._set_hvdevs([dev_dict])
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
mock_get_by_instance.return_value = requests
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self._claim(requests=requests, numa_topology=huge_instance)
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.icalled)
def _abort(self):
claim = None
try:
with self._claim(memory_mb=4096) as claim:
raise test.TestingException("abort")
except test.TestingException:
pass
return claim
class MoveClaimTestCase(ClaimTestCase):
def _claim(self, limits=None, overhead=None, requests=None,
image_meta=None, **kwargs):
instance_type = self._fake_instance_type(**kwargs)
numa_topology = kwargs.pop('numa_topology', None)
image_meta = image_meta or {}
self.instance = self._fake_instance(**kwargs)
self.instance.numa_topology = None
if numa_topology:
self.db_numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': self.instance.uuid,
'numa_topology': numa_topology._to_json(),
'pci_requests': (requests or self.empty_requests).to_json()
}
else:
self.db_numa_topology = None
if overhead is None:
overhead = {'memory_mb': 0}
requests = requests or self.empty_requests
@mock.patch('nova.virt.hardware.numa_get_constraints',
return_value=numa_topology)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value=self.db_numa_topology)
def get_claim(mock_extra_get, mock_numa_get):
return claims.MoveClaim(self.context, self.instance, instance_type,
image_meta, self.tracker, self.resources,
requests, overhead=overhead,
limits=limits)
return get_claim()
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.rcalled)
def test_image_meta(self):
claim = self._claim()
self.assertIsInstance(claim.image_meta, objects.ImageMeta)
def test_image_meta_object_passed(self):
image_meta = objects.ImageMeta()
claim = self._claim(image_meta=image_meta)
self.assertIsInstance(claim.image_meta, objects.ImageMeta)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations:
"""PacketCapturesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> "_models.PacketCaptureResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PacketCapture')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureResult"]:
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture operation.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.PacketCapture
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.PacketCaptureResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
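# A minimal calling sketch (hypothetical; the client construction and the
# PacketCapture parameter values below are assumptions, not part of this
# module):
#
#   poller = await network_client.packet_captures.begin_create(
#       resource_group_name="my-rg",
#       network_watcher_name="my-watcher",
#       packet_capture_name="my-capture",
#       parameters=_models.PacketCapture(
#           target=vm_resource_id,
#           storage_location=_models.PacketCaptureStorageLocation(
#               storage_id=storage_account_id)))
#   capture = await poller.result()  # _models.PacketCaptureResult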
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureResult":
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCaptureResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.PacketCaptureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def _get_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureQueryStatusResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
async def begin_get_status(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.PacketCaptureQueryStatusResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
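    # Illustrative usage sketch (resource names are placeholders, and it is assumed the
    # operations group is exposed as `client.packet_captures` on the async client):
    #     poller = await client.packet_captures.begin_get_status(
    #         'my-resource-group', 'my-network-watcher', 'my-packet-capture')
    #     status = await poller.result()  # PacketCaptureQueryStatusResult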
def list(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PacketCaptureListResult"]:
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.PacketCaptureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
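    # Illustrative usage sketch (same assumptions as above): `list` returns an
    # AsyncItemPaged, so the call itself is not awaited; only the iteration is:
    #     async for capture in client.packet_captures.list('my-resource-group', 'my-network-watcher'):
    #         print(capture.name)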
#! /usr/bin/env python
'''
Andrew Till
Fall 2014
PhD Research
This clustering module clusters indicators and uses labels to compute a generalized energy mesh:
Create observations from subset of indicators, viz. indicators[strt:end]
Cluster observations and extract labels
Combine labels from each subset
Merge labels into existing group structure
Create generalized energy mesh using the dual of the input grid
'''
#STDLIB
import os
import time
#TPL
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve
from sklearn import cluster
from sklearn import neighbors
import matplotlib as mpl
#import matplotlib.ticker as mtick
import matplotlib.pyplot as plt
mpl.rcParams.update({'font.size': 18, 'lines.linewidth': 2})
#MINE
import plotutil as putil
from directories import get_common_directories
def define_defaults():
'''Specify default parameters'''
# Main parameters
verbosity = False
resolution = 9
#workOpt = 'amg'
#workOpt = 'tmg'
#workOpt = 'mg'
workOpt = 'har'
numElements = 64
# Misc parameters
showNumbers = False
condenseSubelements = True
plotOpt = 'half' # none, first, last, firstlast, all, half
energyPenalty = 3.6E-1
dpi = 100
# Specify range of interest (for output fluxes and plotting)
numCoarseGroups = 0
#coarseBdrs = [6E-1, 3E0, 54.7, 2E3, 2.5E4]
coarseBdrs = [3.0, 54.7, 1.06E3]
# How to assign the number of elements per coarse group
apportionOpt = 'equal'
# Specify how many elements to use in each coarse group
listNumElements = []
# Specify which set of materials to use
#materialOpt = '4'
#materialOpt = 'c5g7'
materialOpt = 'manual'
# If materialOpt is 'manual', list of materials to use
materialsList = ['deb']
importancesLst = []
    return {'verbosity': verbosity, 'numelements': numElements,
            'apportionopt': apportionOpt, 'workopt': workOpt,
            'resolution': resolution, 'coarsebdrs': coarseBdrs,
            'numcoarsegroups': numCoarseGroups, 'materialopt': materialOpt,
            'shownumbers': showNumbers, 'condensesubelements': condenseSubelements,
            'plotopt': plotOpt, 'energypenalty': energyPenalty, 'dpi': dpi,
            'listnumelements': listNumElements, 'listmaterials': materialsList}
def do_all(inputDict):
    '''Driver function to read indicators, compute observations, perform clustering, and write the energy mesh.
    NB: see compute_map() for the externally callable interface.'''
# Create timeDict to house timing results
timeDict = {}
# Create dataDict to house problem parameters
# Future functions should be assumed to modify dataDict
dataDict = {}
# Initialize dataDict
copy_inputDict_to_dataDict(inputDict, dataDict)
populate_directories(dataDict)
# Determine work options
parse_work_opt(dataDict)
# Read in energy mesh and indicators
indicators = read_energy_and_indicators(dataDict)
    # Define observations from the indicators and rescale the energy dimension
observations = compute_observations(dataDict, indicators)
# Determine coarse group structure and the number of energy elements per coarse group
globalLabels, thermalOffset = apportion_elements(dataDict, observations)
# Determine number of neighbors for the clustering
find_num_neighbors(dataDict, timeDict, observations)
# Loop over each coarse group
offset = thermalOffset
print_timing_header()
for coarseGroup in range(len(dataDict['coarseBdrs'])-1):
# Cluster within each coarse group and plot
offset = cluster_one_coarse_group(
dataDict, timeDict, observations, globalLabels, offset, coarseGroup)
# Create one plot over all energy
plot_summary(dataDict, observations, globalLabels)
# Write out generalized energy mesh
write_mesh(dataDict, globalLabels)
###############################################################################
def copy_inputDict_to_dataDict(inputDict, dataDict):
    '''Copy entries from inputDict into dataDict, renaming the keys to camelCase'''
# Inputs / outputs
dataDict['verbosity'] = inputDict['verbosity']
dataDict['workOpt'] = inputDict['workopt']
dataDict['useSigt'] = inputDict['sigt']
dataDict['resolution'] = inputDict['resolution']
dataDict['materialOpt'] = inputDict['materialopt']
dataDict['materialsList'] = inputDict['listmaterials']
dataDict['importancesList'] = inputDict['listimportances']
dataDict['numElements'] = inputDict['numelements']
dataDict['numElementsIsTotal'] = inputDict['numelementsistotal']
dataDict['numElementsList'] = inputDict['listnumelements']
dataDict['apportionOpt'] = inputDict['apportionopt']
dataDict['coarseBdrs'] = inputDict['coarsebdrs']
dataDict['numCoarseGroups'] = inputDict['numcoarsegroups']
dataDict['showNumbers'] = inputDict['shownumbers']
dataDict['condenseSubelements'] = inputDict['condensesubelements']
dataDict['plotOpt'] = inputDict['plotopt']
dataDict['energyPenalty'] = inputDict['energypenalty']
dataDict['dpi'] = inputDict['dpi']
def populate_directories(dataDict):
'''Add directory entries to dataDict'''
# Inputs
dirDict = get_common_directories()
# Outputs
dataDict['indicatorsDatDirr'] = dirDict['dat/indicators']
dataDict['energyDatDirr'] = dirDict['dat/energy_groups']
dataDict['plotDirr'] = dirDict['figures/clustering']
def parse_work_opt(dataDict):
'''Populate work option parameters'''
# Inputs
workOpt = dataDict['workOpt']
forceContiguity = False
useEqualMGSpacing = False
useEqualIndexSpacing = False
if workOpt == 'amg':
forceContiguity = True
elif workOpt == 'tmg':
forceContiguity = True
useEqualIndexSpacing = True
elif workOpt == 'mg':
forceContiguity = True
useEqualMGSpacing = True
if not forceContiguity:
clusterName = 'har'
elif useEqualMGSpacing:
clusterName = 'mg'
elif useEqualIndexSpacing:
clusterName = 'tmg'
else:
clusterName = 'amg'
# Outputs
dataDict['forceContiguity'] = forceContiguity
dataDict['useEqualMGSpacing'] = useEqualMGSpacing
dataDict['useEqualIndexSpacing'] = useEqualIndexSpacing
dataDict['clusterName'] = clusterName
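# For reference, the mapping implemented above is:
#   'har' -> no contiguity constraint (hierarchical agglomerative clustering)
#   'amg' -> forceContiguity only (squared-error splitting within each coarse group)
#   'tmg' -> forceContiguity + equal index (topological) spacing
#   'mg'  -> forceContiguity + equal lethargy (multigroup) spacing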
def read_energy_and_indicators(dataDict):
'''Read input fine energy mesh and indicators that live on that mesh'''
# Inputs
resolution = dataDict['resolution']
materialOpt = dataDict['materialOpt']
materialsList = dataDict['materialsList']
importancesList = dataDict['importancesList']
useSigt = dataDict['useSigt']
energyDatDirr = dataDict['energyDatDirr']
indicatorsDatDirr = dataDict['indicatorsDatDirr']
# Read in hyperfine MG energy mesh
energyName = 'res-{0}.txt'.format(resolution)
energyPath = os.path.join(energyDatDirr, energyName)
groupBdrs = load_txt(energyPath, skiprows=2, usecols=[1])
numGroups = len(groupBdrs) - 1
dE = - np.diff(groupBdrs)
groupBdrs[groupBdrs == 0] = 1E-5
# Read indicator relative importances
if materialOpt == 'c5g7':
#importanceDict = {'cUO2': 4, 'clowMOX': 2, 'cmedMOX': 2, 'chighMOX': 2, 'cCR': 1}
importanceDict = {'cUO2': 1, 'chighMOX': 1}
elif materialOpt == 'graphite':
importanceDict = {'graphite': 1}
elif materialOpt == 'iron':
importanceDict = {'iron': 1}
elif materialOpt == 'kpin':
importanceDict = {'kFUEL': 1}
elif materialOpt == 'kenrichedpin':
importanceDict = {'kEFUEL': 4, 'kFUEL': 1}
elif materialOpt == 'kcladpin':
importanceDict = {'kEFUEL': 10, 'kFUEL': 2.5, 'kZR': 1}
elif materialOpt == 'kpin2d':
importanceDict = {'kRFUEL': 1}
elif materialOpt == 'kenrichedpin2d':
importanceDict = {'kREFUEL': 4, 'kRFUEL': 1}
elif materialOpt == 'kmoxpin2d':
importanceDict = {'kRMFUEL': 4, 'kRFUEL': 1}
elif materialOpt == 'kmoxenrichedpin2d':
importanceDict = {'kRMFUEL': 4, 'kREFUEL': 4, 'kRFUEL': 1}
elif materialOpt == 'trigafuel':
importanceDict = {'tFUEL': 1}
elif materialOpt == 'ctrigafuel':
importanceDict = {'tcFUEL': 1}
elif materialOpt == 'ctrigafuel_0':
importanceDict = {'tdFUEL_0': 1}
elif materialOpt == 'trigamore':
importanceDict = {'tFUEL': 10, 'tCLAD': 2, 'tZIRC': 2, 'tIRRADIATIONTUBE': 1}
elif materialOpt == 'CASL':
importanceDict = {'CASLfuel_P4_211': 1, 'CASLfuel_P4_262': 1, 'CASLfuel_P5_31': 1}
elif materialOpt == 'manual':
if not importancesList:
importancesList = [1]*len(materialsList)
importanceDict = {material:importance for material,importance in zip(
materialsList, importancesList)}
# Appending to materialNames does not alias onto importanceDict
materialNames = importanceDict.keys()
numMaterials = len(materialNames)
# Use infinite medium flux, infinite medium flux with escape xs, and energy itself
numIndicators = 2 * numMaterials + 1
# Read indicators
indicators = np.zeros((numIndicators, numGroups))
i = 0
for materialName in materialNames:
weight = np.power(10, importanceDict[materialName])
fluxName = 'inf_flux_{0}_{1}.txt'.format(materialName, resolution)
fluxPath = os.path.join(indicatorsDatDirr, fluxName)
indicators[i, :] = weight * load_txt(fluxPath) / dE
if useSigt:
fluxName = 'tot_xs_{0}_{1}.txt'.format(materialName, resolution)
fluxPath = os.path.join(indicatorsDatDirr, fluxName)
indicators[i+1, :] = weight * load_txt(fluxPath)
else:
fluxName = 'inf_flux_{0}_e_{1}.txt'.format(materialName, resolution)
fluxPath = os.path.join(indicatorsDatDirr, fluxName)
indicators[i+1, :] = weight * load_txt(fluxPath) / dE
i += 2
energyGrid = np.sqrt(groupBdrs[1:] * groupBdrs[:-1])
indicators[-1, :] = energyGrid
    materialNames.append('E')  # label for the energy indicator (last row of indicators)
# Outputs
dataDict['dE'] = dE
dataDict['groupBdrs'] = groupBdrs
dataDict['energyGrid'] = energyGrid
dataDict['materialNames'] = materialNames
dataDict['numGroups'] = numGroups
dataDict['numIndicators'] = numIndicators
return indicators
def compute_observations(dataDict, indicators):
'''Compute observations from indicators.
Only uses first and last values of coarseBdrs, which are not changed in apportion_elements()'''
# Inputs
energyPenalty = dataDict['energyPenalty']
groupBdrs = dataDict['groupBdrs']
coarseBdrs = dataDict['coarseBdrs']
numIndicators, numGroups = indicators.shape
observations = np.log10(indicators)
medians = np.median(observations, axis=1)
observations -= medians[:, np.newaxis]
#
obsRange = np.max(observations[:-1,:]) - np.min(observations[:-1,:])
strt = np.argmin(np.abs(groupBdrs - coarseBdrs[-1]))
end = np.argmin(np.abs(groupBdrs - coarseBdrs[0]))
energyRange = np.max(observations[-1,strt:end]) - np.min(observations[-1,strt:end])
observations[-1,:] *= np.sqrt(numIndicators) * energyPenalty * (obsRange / energyRange)
# Transpose to be [numGroups, numPoints]. Copy for later speed
observations = observations.transpose().copy()
# Outputs
return observations
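# In formula form (matching the code above): for indicator i and fine group g,
#     obs[g, i] = log10(indicators[i, g]) - median_g(log10(indicators[i, :]))
# after the transpose, and the energy coordinate (last indicator) is further scaled by
#     sqrt(numIndicators) * energyPenalty * (obsRange / energyRange)
# so energyPenalty sets the relative weight of energy in the clustering distance.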
def apportion_elements(dataDict, observations):
'''Determine the coarse group boundaries and the number of elements in each coarse group.
The first and last coarse group boundaries determine the extent of the RRR.
coarseBdrs are replaced with an equal lethargy grid if numCoarseGroups is nonzero.
numElementsList has precedence over numElements/apportionOpt if both are given.
numElements refers to the elements in the RRR unless numElementsIsTotal is True.
'''
# Inputs
verbosity = dataDict['verbosity']
coarseBdrs = dataDict['coarseBdrs']
numCoarseGroups = dataDict['numCoarseGroups']
numElementsList = dataDict['numElementsList']
groupBdrs = dataDict['groupBdrs']
apportionOpt = dataDict['apportionOpt']
numElements = dataDict['numElements']
numElementsIsTotal = dataDict['numElementsIsTotal']
numGroups = dataDict['numGroups']
indicatorsDatDirr = dataDict['indicatorsDatDirr']
# Determine the coarse group boundaries
if numCoarseGroups != 0:
# Use an equal lethargy grid with numCoarseGroups groups
coarseBdrs = np.logspace(np.log10(coarseBdrs[0]), np.log10(coarseBdrs[-1]), numCoarseGroups+1)
else:
numCoarseGroups = len(coarseBdrs) - 1
# Determine the total number of elements
if numElementsList:
numElementsIsTotal = False
numElements = np.sum(numElementsList)
numElementsIsRRR = not(numElementsIsTotal)
# Determine the RRR bounding indices on the fine group structure
strt = np.argmin(np.abs(groupBdrs - coarseBdrs[-1]))
end = np.argmin(np.abs(groupBdrs - coarseBdrs[0]))
# Initialize the global labels, which map from subelement index to element index
globalLabels = np.zeros(numGroups, dtype=np.int)
numThermal = numGroups - end
numFast = strt
numFastAndThermal = numThermal + numFast
# On the old group structure,
# [:strt) are fast, [end:) are thermal, and [strt:end) are resonance
# On the new element structure,
    # [:newStrt) are fast, [newEnd:) are thermal, and [newStrt:newEnd) are resonance
newStrt = strt
if numElementsIsRRR:
# numElements is just the number of resonance elements
newEnd = newStrt + numElements
else:
# numElements is the total number of elements:
newEnd = end + (numElements - numGroups)
thermalOffset = newEnd
globalLabels[:strt] = np.arange(numFast)
globalLabels[end:] = np.arange(numThermal) + thermalOffset
# Determine the total number of elements
numElementsTot = numElements
if numElementsIsRRR:
numElementsTot += numFastAndThermal
numElementsRRR = numElementsTot - numFastAndThermal
# Determine the number of elements per coarse group and store in numClustersList
if numElementsList:
numClustersList = numElementsList
apportionOpt = 'manual'
elif apportionOpt in ['var', 'max', 'L1', 'birch']:
numClustersList = auto_apportion(observations, numElementsRRR, groupBdrs, coarseBdrs, apportionOpt, verbosity)
else: # apportionOpt == 'equal'
# Apportion as equally as possible. Give high-energy coarse groups the remainder
numElementsPerCoarseGroup = numElementsRRR // numCoarseGroups
remainder = numElementsRRR % numCoarseGroups
numClustersList = numElementsPerCoarseGroup * np.ones(numCoarseGroups, dtype=np.int)
if remainder:
numClustersList[-remainder:] += 1
# Check for validity of the number of clusters list
if np.any(numClustersList <= 0):
minNumElements = numFastAndThermal + len(coarseBdrs) - 1
raise ValueError('{0} clusters specified, but at least {1} should have been used'.format(numElementsTot, minNumElements))
# Output 0
# Print the number of elements per coarse group
print 'final elements per coarse group ({0}):\n'.format(apportionOpt), numClustersList
# Output 1
# Save the number of elements per coarse group
baseName = 'aptn_{0}'.format(apportionOpt)
#filename = '{0}_{1}_{2}.txt'.format(baseName, numElements, resolution)
filename = '{0}_e{1}_g{2}.txt'.format(baseName, numElements, numCoarseGroups)
filePath = os.path.join(indicatorsDatDirr, filename)
# Output
with open(filePath, 'w') as fid:
fid.write('# Energy mesh with {0} coarse groups and {1} resonance elements\n'.format(numCoarseGroups, numElementsRRR))
fid.write('# coarse_group upper_bound(eV) num_elements\n')
for coarseGroup in range(numCoarseGroups):
energy = coarseBdrs[coarseGroup]
numElem = numClustersList[coarseGroup]
            # Write the same number of digits as NJOY. The energies written are coarseBdrs, not groupBdrs.
fid.write('{0:g} {1:.6e} {2}\n'.format(coarseGroup, energy, numElem))
fid.write('{0:g} {1:.6e} {2}\n'.format(-1, coarseBdrs[-1], 0))
# Output 2
dataDict['apportionOpt'] = apportionOpt
dataDict['numCoarseGroups'] = numCoarseGroups
dataDict['coarseBdrs'] = coarseBdrs
dataDict['numElementsIsTotal'] = numElementsIsTotal
dataDict['numElementsList'] = numElementsList
dataDict['numElements'] = numElements
dataDict['numElementsTot'] = numElementsTot
dataDict['numClustersList'] = numClustersList
return globalLabels, thermalOffset
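# Illustrative example of the 'equal' apportioning branch above (hypothetical numbers):
# with numElementsRRR = 10 and numCoarseGroups = 3, numClustersList becomes [3, 3, 4],
# i.e. the remainder element is given to the last (highest-energy) coarse group.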
def auto_apportion(observations, numElementsRRR, groupBdrs, coarseBdrs, apportionOpt, verbosity):
'''Assign the number of clusters / elements proportional to the relative variance within a coarse group'''
numCoarseGroups = len(coarseBdrs) - 1
metric = np.zeros(numCoarseGroups)
numFineGroupsPerCoarseGroup = np.zeros(numCoarseGroups, dtype=np.int)
timeBirch = 0.0
if numCoarseGroups == 1:
return np.array([numElementsRRR])
for coarseGroup in range(len(coarseBdrs)-1):
# Only look at the fine groups within the current coarse group
strt = np.argmin(np.abs(groupBdrs - coarseBdrs[coarseGroup+1]))
end = np.argmin(np.abs(groupBdrs - coarseBdrs[coarseGroup]))
obs = observations[strt:end, :]
# Points are groups and dim are materials
numPoints, numDim = obs.shape
numFineGroupsPerCoarseGroup[coarseGroup] = numPoints
if apportionOpt == 'var':
# For each dimension, compute the average over the points
means = np.mean(obs, axis=0)
# The variance metric is the sum of the square errors from the means
# var is actually a standard deviation
var = np.sqrt(np.sum(np.square(obs - means[np.newaxis,:])) / (numPoints * numDim))
metric[coarseGroup] = var
elif apportionOpt == 'max':
            # The max error metric is the maximum obs range over all dimensions,
# where each obs range is the difference between the largest and smallest
# point values for that dim
maxErr = np.max(np.max(obs, axis=0) - np.min(obs, axis=0))
metric[coarseGroup] = maxErr
elif apportionOpt == 'L1':
# The L1 error metric is the unnormalized sum of the absolute pointwise differences,
# where each pointwise difference is a maximum over all dimensions
L1Err = np.sum(np.max(np.abs(obs[:-1,:] - obs[1:,:]), axis=1))
metric[coarseGroup] = L1Err
elif apportionOpt == 'birch':
# The BIRCH error metric uses the number of clusters required to reduce the variance of an
# energy element to threshold. A value of 1.0 for threshold would produce clusters
# that span a factor of 10 in flux / xs space (based on how obs are defined).
# Do not include energy penalty when looking at clusters.
threshold = 0.05
clusterer = cluster.Birch(n_clusters=None, threshold=threshold)
t0 = time.time()
numClusters = len(np.unique(clusterer.fit_predict(obs[:,:-1])))
timeBirch += time.time() - t0
# Converted to float implicitly
metric[coarseGroup] = numClusters
# Normalize the metric
metric /= np.sum(metric)
# Each coarse group needs at least one energy element and the number of energy elements
# per coarse group should be proportional to the normalized metric (relative variance)
desiredElementsPerCoarseGroup = np.minimum(numFineGroupsPerCoarseGroup,
np.maximum(1, metric * numElementsRRR))
sumDesiredElements = np.sum(desiredElementsPerCoarseGroup)
# Compute a new metric that takes into account the maximum(1,:)
newMetric = (desiredElementsPerCoarseGroup - 1) / (sumDesiredElements - numCoarseGroups)
fractions, floored = np.modf(newMetric * (numElementsRRR - numCoarseGroups))
elementsPerCoarseGroup = np.array(floored, dtype=np.int) + 1
    # Add the remaining elements to the coarse groups with the largest fractional counts
remainder = numElementsRRR - np.sum(elementsPerCoarseGroup)
if remainder:
locLargestFrac = np.argsort(fractions)[-remainder:]
elementsPerCoarseGroup[locLargestFrac] += 1
if verbosity and apportionOpt == 'birch':
print 'Birch clustering took {0} s'.format(timeBirch)
if verbosity:
print 'desired elements per coarse group:\n', desiredElementsPerCoarseGroup
if remainder:
print 'remainder: {0}; smallest large fractional: {1:.3f}'.format(
remainder, fractions[locLargestFrac[0]])
# Outputs
return elementsPerCoarseGroup
def find_num_neighbors(dataDict, timeDict, observations):
'''Use connected components to determine the number of neighbors'''
# (no inputs from dataDict)
t0 = time.time()
numMinNeighbors = find_minimum_num_neighbors(observations[:,:-1])
numNeighbors = max(15+numMinNeighbors, 200) # Changed from 100
timeInitialNeighbors = time.time() - t0
# Outputs
dataDict['numNeighbors'] = numNeighbors
timeDict['timeInitialNeighbors'] = timeInitialNeighbors
def cluster_one_coarse_group(dataDict, timeDict, observations, globalLabels, offset, coarseGroup):
'''Apply clustering to one coarse group and return the labels of the clustering'''
# Inputs
groupBdrs = dataDict['groupBdrs']
coarseBdrs = dataDict['coarseBdrs']
energyGrid = dataDict['energyGrid']
numClustersList = dataDict['numClustersList']
numNeighbors = dataDict['numNeighbors']
forceContiguity = dataDict['forceContiguity']
useEqualMGSpacing = dataDict['useEqualMGSpacing']
useEqualIndexSpacing = dataDict['useEqualIndexSpacing']
plotOpt = dataDict['plotOpt']
# Slice arrays for current coarse group
strt = np.argmin(np.abs(groupBdrs - coarseBdrs[coarseGroup+1]))
end = np.argmin(np.abs(groupBdrs - coarseBdrs[coarseGroup]))
obs = observations[strt:end,:]
eGrid = energyGrid[strt:end]
gBdr = groupBdrs[strt:end+1]
numGroups, numPoints = obs.shape
numClusters = numClustersList[coarseGroup]
# Create connectivity graph based on number of neighbors
t0 = time.time()
useNeighbors = min(numGroups, numNeighbors)
knnGraph = neighbors.kneighbors_graph(obs[:,:-1], useNeighbors, include_self=True)
timeNeighbors = time.time() - t0
connectivityGraph = knnGraph
# Perform appropriate clustering for the current coarse group
t0 = time.time()
if not forceContiguity:
labels = cluster_using_hierarchical_agglomeration(obs, numClusters, connectivityGraph)
elif useEqualMGSpacing:
labels = cluster_using_equal_energy_spacing(gBdr, numClusters)
elif useEqualIndexSpacing:
labels = cluster_using_equal_topological_spacing(obs, numClusters)
else:
labels = cluster_using_mg_squared_error(obs, numClusters)
timeCluster = time.time() - t0
    # Reorder labels for maximal downscattering (labels sorted by descending energy)
temp = np.zeros((numClusters, 1))
labels, temp = reorder_codebook(labels, temp, eGrid)
# Output 0: Set global labels based on local labels and global offset (aliased)
offset -= numClusters
globalLabels[strt:end] = labels + offset
uniqueLabels = np.unique(labels)
# Output 1: Print number of neighbors and required times
timeDict['timeNeighbors'] = timeNeighbors
timeDict['timeCluster'] = timeCluster
print_timing(dataDict, timeDict, numGroups, coarseGroup)
# Output 2: Plot observations colored by label / cluster
if plotOpt not in ['none', 'sum']:
plot_clustering(dataDict, coarseGroup, uniqueLabels, labels, eGrid, obs, numClusters, numPoints, offset)
    # Output 3: updated offset
return offset
###############################################################################
def print_timing_header():
'''Print the header for print_timing()'''
# Output
print 'coarseGroup, numNeighbors, fracNeighbors, numGroups, timeInitialNeighbors, timeNeighbors, timeCluster'
def print_timing(dataDict, timeDict, numGroups, coarseGroup):
'''Print interesting size and time information'''
# Inputs
numNeighbors = dataDict['numNeighbors']
timeInitialNeighbors = timeDict['timeInitialNeighbors']
timeNeighbors = timeDict['timeNeighbors']
timeCluster = timeDict['timeCluster']
# Output
print coarseGroup, numNeighbors, float(numNeighbors) / numGroups, numGroups, timeInitialNeighbors, timeNeighbors, timeCluster
def plot_summary(dataDict, observations, globalLabels):
    '''Plot the entire energy range'''
# Inputs
groupBdrs = dataDict['groupBdrs']
coarseBdrs = dataDict['coarseBdrs']
energyGrid = dataDict['energyGrid']
numElements = dataDict['numElements']
# Slice arrays for the RRR
strt = np.argmin(np.abs(groupBdrs - coarseBdrs[-1]))
end = np.argmin(np.abs(groupBdrs - coarseBdrs[0]))
obs = observations[strt:end,:]
eGrid = energyGrid[strt:end]
gBdr = groupBdrs[strt:end+1]
numGroups, numPoints = obs.shape
labels = globalLabels[strt:end].copy()
offset = np.min(labels)
labels -= offset
uniqueLabels = np.unique(labels)
numClusters = numElements
coarseGroup = 'sum'
# Output
plot_clustering(dataDict, coarseGroup, uniqueLabels, labels, eGrid, obs, numClusters, numPoints, offset)
def plot_clustering(dataDict, coarseGroup, uniqueLabels, labels, eGrid, obs, numClusters, numPoints, offset):
'''Plot observations in coarse group'''
# Inputs
forceContiguity = dataDict['forceContiguity']
clusterName = dataDict['clusterName']
materialNames = dataDict['materialNames']
numElements = dataDict['numElements']
numCoarseGroups = dataDict['numCoarseGroups']
plotOpt = dataDict['plotOpt']
plotDirr = dataDict['plotDirr']
showNumbers = dataDict['showNumbers']
dpi = dataDict['dpi']
if plotOpt != 'none':
colors = putil.get_colors(max(uniqueLabels)-min(uniqueLabels)+1)
#if forceContiguity and not showNumbers:
if True or numClusters < 1000:
colors = colors[np.argsort(np.random.random(colors.shape[0]))]
if plotOpt == 'first':
pointsToPlot = [0]
elif plotOpt == 'last':
pointsToPlot = [numPoints-1]
elif plotOpt == 'firstlast':
pointsToPlot = [0, numPoints-1]
elif plotOpt == 'half':
pointsToPlot = range(0, numPoints-1, 2)
else:
pointsToPlot = range(numPoints)
avgLabels, avgEGrid, avgObs = average_observations(labels, eGrid, obs)
for ip in pointsToPlot:
material = materialNames[ip/2]
if ip % 2 == 1:
material += '_e'
print 'Saving plot for {0}, {1}, {2}'.format(material, numElements, coarseGroup)
plt.figure(3)
plt.clf()
labelColors = [colors[label] for label in labels]
#plt.semilogx(eGrid, obs[:,ip], '-', color=[0.2, 0.2, 0.2], rasterized=True)
plt.scatter(eGrid, obs[:,ip], c=labelColors, edgecolor='none', rasterized=True)
plt.xscale('log')
if showNumbers:
for label in uniqueLabels:
mask = (avgLabels == label)
color = colors[label]
marker = r'${0}$'.format(label + offset)
sz = 10
if len(marker) == 5:
sz = 12
white = [1, 1, 1, 1.0]
plt.semilogx(avgEGrid[mask], avgObs[mask,ip], linestyle='', markersize=sz, color=white, markeredgecolor=white, rasterized=False, marker='o')
plt.semilogx(avgEGrid[mask], avgObs[mask,ip], linestyle='', markersize=sz, color=color, markeredgecolor=color, rasterized=False, marker=marker)
if (eGrid[0] / eGrid[-1]) < 5:
plt.xscale('linear')
plt.xlabel('Energy (eV)')
plt.ylabel('Observation (arb.)')
plt.title('{0} elements'.format(len(uniqueLabels)))
plt.xlim(np.min(eGrid), np.max(eGrid))
# Hack the zoom
#plt.xlim([1E3,1E7])
#plt.xlim([7.E6, 1.E7])
baseName = 'p_obs'
if forceContiguity:
baseName += '_{0}'.format(clusterName)
effCoarseGroups = 'of_{0}'.format(numCoarseGroups - 1)
if coarseGroup == 'sum':
effCoarseGroups = '{0}'.format(numCoarseGroups)
plotName = '{0}_{1}_{2}_{3}_{4}.pdf'.format(
baseName, numElements, coarseGroup, effCoarseGroups, material)
plotPath = os.path.join(plotDirr, plotName)
# Output
plt.tight_layout()
plt.savefig(plotPath, dpi=dpi)
def write_mesh(dataDict, globalLabels):
'''Write the energy mesh by writing subelements'''
# Inputs
forceContiguity = dataDict['forceContiguity']
condenseSubelements = dataDict['condenseSubelements']
resolution = dataDict['resolution']
groupBdrs = dataDict['groupBdrs']
clusterName = dataDict['clusterName']
numElements = dataDict['numElements']
numElementsTot = dataDict['numElementsTot']
energyDatDirr = dataDict['energyDatDirr']
if condenseSubelements:
globalLabels, groupBdrs = condense_subelements(globalLabels, groupBdrs)
numSubelements = len(globalLabels)
baseName = 'clust'
if forceContiguity:
baseName += '-{0}'.format(clusterName)
filename = '{0}-{1}-{2}.txt'.format(baseName, numElements, resolution)
filePath = os.path.join(energyDatDirr, filename)
# Output
with open(filePath, 'w') as fid:
fid.write('# Energy mesh with {0} elements and {1} subelements\n'.format(numElementsTot, numSubelements))
fid.write('# element upper bound region\n')
for label, energy in zip(globalLabels, groupBdrs[:-1]):
if energy > 2.5E4:
energyType = 'fast'
elif energy <= 3.0:
energyType = 'thermal'
else:
energyType = 'resonance'
fid.write('{0:g} {1:.8e} {2}\n'.format(label, energy, energyType))
fid.write('-1 {0:.8e} thermal\n'.format(groupBdrs[-1]))
###############################################################################
def compute_map(inputDict):
'''Callable interface to use these clustering methods from another script.
Returns the uncondensed labels'''
# Create timeDict to house timing results
timeDict = {}
# Create dataDict to house problem parameters. Future functions should be assumed to modify dataDict
dataDict = {}
# Initialize dataDict, assuming inputDict has the observations
# indicators sets: dE, groupBdrs, energyGrid (just energyAvg), materialNames, numGroups, numIndicators
observations = extract_external_inputDict(inputDict, dataDict) #(not complete)
# Determine work options
parse_work_opt(dataDict)
# Determine coarse group structure and the number of energy elements per coarse group
globalLabels, thermalOffset = apportion_elements(dataDict, observations)
# Determine number of neighbors for the clustering
find_num_neighbors(dataDict, timeDict, observations)
# Loop over each coarse group
offset = thermalOffset
print_timing_header()
for coarseGroup in range(len(dataDict['coarseBdrs'])-1):
# Cluster within each coarse group and plot
offset = cluster_one_coarse_group(
dataDict, timeDict, observations, globalLabels, offset, coarseGroup)
return globalLabels
###############################################################################
def reorder_codebook(codebook, centroids, energyAvg, positionFunc=np.mean):
    '''Sort so the cluster with the highest value of positionFunc(energyAvg) becomes cluster 0'''
masks = get_masks(codebook)
numGroups, numClusters = masks.shape
energyCentroids = np.zeros(numClusters)
newCodebook = np.zeros(codebook.size, dtype=np.int64)
newCentroids = np.zeros(centroids.shape)
for i in range(numClusters):
energyCentroids[i] = positionFunc(energyAvg[masks[:,i]])
clusterOrder = np.argsort(-energyCentroids)
for ic, cluster in enumerate(clusterOrder):
newCodebook[codebook==cluster] = ic
newCentroids[ic,:] = centroids[cluster,:]
return newCodebook, newCentroids
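# Illustrative example (hypothetical values): the cluster whose groups have the highest
# mean energy is relabeled 0, the next highest 1, and so on:
#     >>> codebook = np.array([0, 0, 1, 1, 2])
#     >>> energyAvg = np.array([1., 2., 10., 20., 100.])
#     >>> newCodebook, _ = reorder_codebook(codebook, np.zeros((3, 1)), energyAvg)
#     >>> newCodebook
#     array([2, 2, 1, 1, 0])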
def get_masks(codebook):
minCode = min(codebook)
maxCode = max(codebook)
numCodes = maxCode - minCode + 1
numGroups = len(codebook)
masks = np.zeros([numGroups, numCodes], dtype=bool)
for i in range(minCode, maxCode+1):
masks[:,i] = codebook==i
return masks
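# Note: get_masks indexes columns by label value, so it assumes the labels in codebook
# start at 0 (true for the sklearn cluster labels used here). Illustrative example:
#     >>> get_masks(np.array([0, 0, 1]))
#     array([[ True, False],
#            [ True, False],
#            [False,  True]])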
###############################################################################
def cluster_using_mg_squared_error(observations, numGroups):
    '''This may return fewer groups than numGroups when numGroups is large.
    If this fails to produce the desired number of groups, split up existing groups starting with LOW energies and going to HIGH.'''
# Calculate the max difference in obs over all space/angle points between neighboring energy points
numPoints, numDim = observations.shape
obsErr = np.zeros(numPoints)
obsErr[1:] = np.max(np.abs(observations[:-1,:] - observations[1:,:]) , axis=1)
# Sum this error
cumErr = np.cumsum(np.square(obsErr))
totErr = cumErr[-1]
# Evenly divide the total error into numGroups groups
errPerGroup = totErr / numGroups
labels = np.zeros(numPoints, dtype=np.int)
groupBdrLower = 0
for g in range(numGroups):
desiredErr = (g + 1) * errPerGroup
groupBdrUpper = np.argmin(np.abs(cumErr - desiredErr)) + 1
if groupBdrLower >= groupBdrUpper:
groupBdrUpper = groupBdrLower + 1
labels[groupBdrLower:groupBdrUpper] = g
groupBdrLower = groupBdrUpper
return labels
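# Illustrative example (hypothetical values): neighboring-point differences are squared
# and accumulated, and the cumulative error is divided as evenly as possible:
#     >>> obs = np.array([[0.], [1.], [1.], [3.]])
#     >>> cluster_using_mg_squared_error(obs, 2)
#     array([0, 0, 1, 1])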
def cluster_using_equal_topological_spacing(observations, numGroups):
'''Split observations into numGroups pieces so that an even number of points go into each piece'''
numPoints, numDim = observations.shape
pointsPerGroup = numPoints // numGroups
remainder = numPoints % numGroups
labels = np.zeros(numPoints, dtype=np.int)
groupBdrLower = 0
for g in range(numGroups):
groupBdrUpper = groupBdrLower + pointsPerGroup
if g < remainder:
groupBdrUpper += 1
labels[groupBdrLower:groupBdrUpper] = g
groupBdrLower = groupBdrUpper
return labels
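# Illustrative example (hypothetical values): 7 points split into 3 groups, with the
# remainder point going to the first group:
#     >>> cluster_using_equal_topological_spacing(np.zeros((7, 2)), 3)
#     array([0, 0, 0, 1, 1, 2, 2])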
def cluster_using_equal_energy_spacing(fineGroupBdrs, numGroups):
    '''This may return fewer groups than numGroups when fineGroupBdrs has unequal spacing and numGroups is large.
    If this fails to produce the desired number of groups, split up existing groups starting with HIGH energies and going to LOW.'''
# Calculate an energy mesh using equal log spacing with numGroups groups
numFineGroups = len(fineGroupBdrs) - 1
desiredGroupBdrs = np.logspace(np.log10(fineGroupBdrs[0]), np.log10(fineGroupBdrs[-1]), numGroups+1)
# Get as close as possible to this mesh using the fineGroupBdrs
labels = np.zeros(numFineGroups, dtype=np.int)
groupBdrLower = 0
for g in range(numGroups):
groupBdrUpper = np.argmin(np.abs(fineGroupBdrs - desiredGroupBdrs[g+1]))
if groupBdrLower >= groupBdrUpper:
groupBdrUpper = groupBdrLower + 1
labels[groupBdrLower:groupBdrUpper] = g
groupBdrLower = groupBdrUpper
return labels
def cluster_using_hierarchical_agglomeration(obs, numClusters, connectivityGraph):
    '''Use hierarchical agglomerative clustering with a connectivity graph'''
har = cluster.AgglomerativeClustering(n_clusters=numClusters, connectivity=connectivityGraph)
har.fit(obs)
return har.labels_
# Hack to use Birch:
#birch = cluster.Birch(n_clusters=numClusters, threshold=0.05)
#return birch.fit_predict(obs)
###############################################################################
def condense_subelements(labels, energies):
'''Combine all energy points that have the same labels. If dual, combine all energy groups that have the same labels, and interpret energies as group boundaries. Output a dual mesh'''
isDual = True
if len(energies) == len(labels):
isDual = False
energies = energies.copy()
small = 1E-5
energies[energies == 0] = small
if isDual:
numBdrs = len(energies)
toKeep = np.ones(numBdrs, dtype=bool)
toKeep[1:-1] = (labels[1:] != labels[:-1])
labelsOut = labels[toKeep[:-1]]
energiesOut = energies[toKeep]
else:
numEnergies = len(energies)
toKeep = np.ones(numEnergies, dtype=bool)
toKeep = (labels[1:] != labels[:-1])
groupsToKeep = np.sum(toKeep) + 1
labelsOut = np.zeros(groupsToKeep, dtype=np.int)
labelsOut[:-1] = labels[toKeep]
labelsOut[-1] = labels[-1]
energiesOut = np.zeros(groupsToKeep+1)
energiesOut[0] = energies[0]
toKeepIndices = np.where(toKeep)[0]
energiesOut[1:-1] = np.sqrt(energies[toKeepIndices] * energies[toKeepIndices+1])
energiesOut[-1] = energies[-1]
return labelsOut, energiesOut
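# Illustrative example (hypothetical values), dual-mesh branch: energies are group
# boundaries (one more entry than labels) and repeated labels are merged, e.g.
#     labels = np.array([0, 0, 1, 1, 2]); bdrs = np.array([100., 50., 20., 10., 5., 1.])
#     condense_subelements(labels, bdrs)  # -> labels [0, 1, 2], boundaries [100., 20., 5., 1.]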
def average_observations(labels, energies, observations):
'''Currently only works for primal meshes'''
energies = energies.copy()
small = 1E-5
energies[energies == 0] = small
#
numGroups, numPoints = observations.shape
#
toKeep = np.ones(numGroups+1, dtype=bool)
toKeep[1:-1] = (labels[1:] != labels[:-1])
subelementBdrs = np.where(toKeep)[0]
numSubelements = len(subelementBdrs) - 1
observationsOut = np.zeros((numSubelements, numPoints))
energiesOut = np.zeros(numSubelements)
labelsOut = np.zeros(numSubelements, dtype=int)
for i in range(numSubelements):
strt, end = subelementBdrs[i], subelementBdrs[i+1]
observationsOut[i, :] = np.mean(observations[strt:end, :], axis=0)
energiesOut[i] = np.exp(np.mean(np.log(energies[strt:end])))
labelsOut[i] = labels[strt]
return labelsOut, energiesOut, observationsOut
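# Illustrative example (hypothetical values): observations are arithmetically averaged
# and energies geometrically averaged within each run of equal labels, e.g.
#     labels = np.array([0, 0, 1]); eGrid = np.array([1., 4., 9.]); obs = np.array([[1.], [3.], [5.]])
#     average_observations(labels, eGrid, obs)  # -> ([0, 1], [2., 9.], [[2.], [5.]])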
###############################################################################
def find_minimum_neighbors_radius(observations):
'''Find the minimum radius such that the number of connected components is 1.
First, find a bounding interval. Then use bisection within the interval.'''
#
radiusNeighbors = 0.10
bounds = [0, np.inf]
foundBounds = [False, False]
while not np.all(foundBounds):
numRadComponents, radLabels = sparse.csgraph.connected_components(
neighbors.radius_neighbors_graph(observations, radiusNeighbors))
if numRadComponents > 1:
bounds[0] = radiusNeighbors
foundBounds[0] = True
radiusNeighbors *= 2
else:
bounds[1] = radiusNeighbors
foundBounds[1] = True
radiusNeighbors /= 2
#
converged = False
tol = 1E-2
its = 0
maxIts = 10
while (its < maxIts and not converged):
its += 1
radiusNeighbors = 0.5 * (bounds[0] + bounds[1])
numRadComponents, radLabels = sparse.csgraph.connected_components(
neighbors.radius_neighbors_graph(observations, radiusNeighbors))
if numRadComponents > 1:
bounds[0] = radiusNeighbors
else:
bounds[1] = radiusNeighbors
sz = (bounds[1] - bounds[0]) / radiusNeighbors
if sz <= tol:
converged = True
radiusNeighbors = bounds[1]
return radiusNeighbors
def get_index_neighbors(length):
'''Return a neighbors graph that is tridiagonal, viz., the neighbors are determined by index'''
arr = np.ones(length)
return sparse.spdiags([arr, arr, arr], [-1, 0, 1], length, length)
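# Illustrative example: get_index_neighbors(4).toarray() gives the tridiagonal pattern
#     [[1., 1., 0., 0.],
#      [1., 1., 1., 0.],
#      [0., 1., 1., 1.],
#      [0., 0., 1., 1.]]
# so each point is connected only to itself and its immediate index neighbors.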
def find_minimum_num_neighbors(observations):
'''Find the minimum number of neighbors such that the number of connected components is 1.
First, find a bounding interval. Then use bisection within the interval.'''
#
numNeighbors = 10
numPoints = observations.shape[0]
bounds = [0, numPoints]
foundBounds = [False, False]
while not np.all(foundBounds):
numKnnComponents, knnLabels = sparse.csgraph.connected_components(
neighbors.kneighbors_graph(observations, numNeighbors, include_self=True))
if numKnnComponents > 1:
bounds[0] = numNeighbors
foundBounds[0] = True
numNeighbors *= 2
else:
bounds[1] = numNeighbors
foundBounds[1] = True
numNeighbors /= 2
if numNeighbors == 0:
bounds[0] = 0
foundBounds[0] = True
#
converged = False
minSz = 1
its = 0
maxIts = 10
while (its < maxIts and not converged):
its += 1
numNeighbors = int(np.round(0.5 * (bounds[0] + bounds[1])))
numKnnComponents, knnLabels = sparse.csgraph.connected_components(
neighbors.kneighbors_graph(observations, numNeighbors, include_self=True))
if numKnnComponents > 1:
bounds[0] = numNeighbors
else:
bounds[1] = numNeighbors
sz = bounds[1] - bounds[0]
if sz <= minSz:
converged = True
numNeighbors = bounds[1]
if not converged:
print "Not converged!"
return numNeighbors
###############################################################################
def load_txt(filePath, skiprows=1, usecols=[1]):
return np.loadtxt(filePath, skiprows=skiprows, usecols=usecols)
###############################################################################
def define_input_parser():
import argparse
#
parser = argparse.ArgumentParser(description='Minimizer of observation errors and creator of generalized energy meshes.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
defaults = define_defaults()
# If nothing is specified, verbosity is False. If -v or --verbose is specified, verbosity is 1. If -v N is specified, verbosity is N.
parser.add_argument('-v', '--verbose', dest='verbosity', nargs='?', const=1, default=defaults['verbosity'], choices=[0,1,2,3,4], type=int)
    parser.add_argument('-e', '--elements', dest='numelements', help="Number of energy elements to use for the energy mesh. Ignored if the 'listnumelements' option is used.", type=int, default=defaults['numelements'])
    parser.add_argument('-T', '--totalnumelements', dest='numelementsistotal', help="If specified, the number of energy elements is taken to be the total number of elements. If not specified, the number of energy elements is taken to be the number of elements in the resolved resonance region. Ignored if the 'listnumelements' option is used.", action='store_true', default=False)
parser.add_argument('-a', '--apportionopt', help="Specify how to assign the number of energy elements per coarse group, if not explicitly specified using 'listnumelements'. 'equal' means use an equal number of elements per coarse group. 'var', 'max', 'birch', and 'L1' are four ways that assign elements in proportion to the relative variance within a coarse group. 'L1' is not normalized to the number of fine points per coarse group and is more useful for 'amg' and 'tmg' work options. 'birch' uses the number of Birch clusters, and is approximate.", choices=['equal', 'var', 'max', 'birch', 'L1'], default=defaults['apportionopt'])
    parser.add_argument('-S', '--sigt', help='If specified, use Sigma_t as the other indicator. If not specified, use phi calculated with an escape cross section as the other indicator. Always use the infinite-medium flux as the first indicator. Fluxes are normalized in shape to be more constant: the Maxwellian-1/E-fission source shape is divided out.', action='store_true', default=False)
parser.add_argument('-w', '--workopt', help='What to do. har means do hierarchical agglomerative clustering (with restricted connectivity based on a set number of nearest neighbors). mg means do even spacing in lethargy (or as close as possible given the input energy mesh). amg means minimize squared error within each group. tmg means evenly divide in index (topology) space from input grid', choices=['amg','tmg','mg','har'], default=defaults['workopt'])
parser.add_argument('-c', '--coarsebdrs', help='The resolved resonance range and how it is to be split into coarse groups (one clustering calculation per coarse group).', type=float, nargs='+', default=defaults['coarsebdrs'])
parser.add_argument('-n', '--numcoarsegroups', help="The number of coarse groups to be used. If nonzero, overwrites the internal members of 'coarsebdrs'", type=int, default=defaults['numcoarsegroups'])
parser.add_argument('-l', '--listnumelements', help='Number of elements to be put in each coarse boundary. Number of arguments should be one less than the number of coarse boundaries. Takes priority over "elements" if set', type=int, nargs='+', default=defaults['listnumelements'])
parser.add_argument('-r', '--resolution', help='Resolution to use for the pointwise flux calculations', type=int, choices=range(11), default=defaults['resolution'])
parser.add_argument('-m', '--materialopt', help="Unless 'manual' is used, specifies a set of materials to use. If 'manual' is used, give a space-separated list of material names in 'listmaterials'.", choices=['4','5','c5g7', 'graphite', 'iron', 'kpin', 'kenrichedpin', 'kcladpin', 'kpin2d', 'kenrichedpin2d', 'kmoxpin2d', 'kmoxenrichedpin2d', 'trigafuel', 'ctrigafuel', 'ctrigafuel_0', 'trigamore', 'CASL', 'manual'], default=defaults['materialopt'])
parser.add_argument('-i', '--indicatormaterials', dest='listmaterials', help="When manual 'materialopt' is used, specify the materials to use.", nargs='+', default=defaults['listmaterials'])
parser.add_argument('-I', '--importances', dest='listimportances', help="When manual 'materialopt' is used, specify the weightings (importances) to use when clustering.", nargs='+', type=int, default=[])
parser.add_argument('-p', '--plotopt', help='Which observations to plot', choices=['none', 'first', 'last', 'firstlast', 'half', 'all', 'sum'], default=defaults['plotopt'])
parser.add_argument('-s', '--shownumbers', help='If true, show element numbers on the plots', type=int, default=defaults['shownumbers'])
parser.add_argument('-E', '--energypenalty', help='The energy variable is added to the observations to encourage contiguity for high numbers of elements. A value of 0 will not penalize in energy at all. A very large value will yield equal-lethargy-spaced MG', type=float, default=defaults['energypenalty'])
parser.add_argument('-C', '--condensesubelements', help='If true, condense contiguous energy ranges before outputting', type=int, default=defaults['condensesubelements'])
parser.add_argument('-d', '--dpi', help='Resolution to use for output plots', type=int, default=defaults['dpi'])
return parser
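# Hedged example (the option values are made up): the parser returned by
# define_input_parser can be exercised programmatically, which is handy for
# testing option handling without going through the command line.
#
#     parser = define_input_parser()
#     inputDict = vars(parser.parse_args(['-e', '64', '-w', 'har', '-v', '2']))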
###############################################################################
if __name__ == '__main__':
parser = define_input_parser()
inputDict = vars(parser.parse_args())
if inputDict['verbosity'] > 1:
        print('Summary of inputs:')
        print(inputDict)
do_all(inputDict)
|
|
import tensorflow as tf
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from library.utils import file_utils
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import math
import os, glob, time
from os.path import basename
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
from library.preprocessing import ZCA
# Resources
# https://www.youtube.com/watch?v=3VEXX73tnw4
class TFMLPClassifier:
def __init__(self, logs=True, log_dir='./logs/', learning_rate=0.01, activation_fn='softmax', restore=True,
num_iterations=100, device='cpu', session_type='default', descent_method='adam',
init_weights='random', display_step=10, reg_const=0.01, regularize=False,
learning_rate_type='constant', model_name='./model/mlp_classifier_model.ckpt',
save_model=False, transform=True, test_log=True, transform_method='StandardScaler',
tolerance=1e-7, train_validate_split=None, separate_writer=False, batch_size=100,
nodes_in_layers=[5, 5], activation_req=False, verbose=False):
# Docs
self.tensorboard_logs = logs
self.verbose = verbose
self.tensorboard_log_dir = log_dir
self.merged_summary_op = None
self.summary_writer = None
self.train_writer = None
self.validate_writer = None
self.test_writer = None
self.model = None
self.model_name = model_name
self.save_model = save_model
self.train_loss_summary = None
self.train_acc_summary = None
self.validate_acc_summary = None
self.test_acc_summary = None
self.w_hist = None
self.w_im = None
self.b_hist = None
self.restore = restore
self.separate_writer = separate_writer
#
self.session = None
self.device = device
self.session_type = session_type
# Parameters
self.learning_rate = learning_rate
self.max_iterations = num_iterations
self.display_step = display_step
self.tolerance = tolerance
self.descent_method = descent_method
self.init_weights = init_weights
self.reg_const = reg_const
self.regularize = regularize
self.activation = activation_fn
self.learning_rate_type = learning_rate_type
self.batch_size = batch_size
self.hidden_layers = nodes_in_layers
# Data transform methods
self.transform = transform
        self.transform_method = transform_method
        self.scaler = None  # fitted during optimize() and reused for later transforms
# Graph inputs
self.x = None
self.y_true = None
self.y_true_cls = None
self.num_features = None
self.num_classes = None
# Validation and testing
self.y_pred = None
self.y_pred_cls = None
#
self.init_var = None
self.last_epoch = 0
self.global_step = 0
self.optimizer = None
self.train_accuracy = None
self.validate_accuracy = None
self.test_log = test_log
self.test_accuracy = None
self.activation_req = activation_req
self.weights = {}
self.biases = {}
self.layers = {}
self.output_layer = None
self.loss = None
self.correct_prediction = None
self.cross_entropy = None
#
self.train_validate_split = train_validate_split
def print_parameters(self):
        print('MLP Classifier')
def make_placeholders_for_inputs(self, num_features, num_classes):
with tf.device('/cpu:0'):
with tf.name_scope('Inputs'):
with tf.name_scope('Data'):
self.x = tf.placeholder(tf.float32, [None, num_features], name='X')
with tf.name_scope('Train_Labels'):
self.y_true = tf.placeholder(tf.float32, [None, num_classes], name='y_label')
self.y_true_cls = tf.placeholder(tf.int64, [None], name='y_class')
with tf.name_scope('Input_Image'):
image_shaped_input = tf.reshape(self.x, [-1, 32, 32, 3])
tf.summary.image('Training_Images', image_shaped_input, 1)
def make_weights(self, num_features, num_classes, number_of_layers=1):
prev_layer_weights = num_features
for layer_no in range(number_of_layers):
weight = None
weight_key = 'weight_' + str(layer_no)
if self.init_weights == 'zeros':
weight = tf.Variable(tf.zeros([prev_layer_weights, self.hidden_layers[layer_no]]),
name='W_'+str(layer_no)+'_zeros')
elif self.init_weights == 'random':
weight = tf.Variable(tf.random_normal([prev_layer_weights, self.hidden_layers[layer_no]]),
name='W_'+str(layer_no)+'_random_normal')
else:
weight = tf.Variable(tf.random_normal([prev_layer_weights, self.hidden_layers[layer_no]]),
name='W_'+str(layer_no)+'_random_normal')
self.weights[weight_key] = weight
prev_layer_weights = self.hidden_layers[layer_no]
def make_bias(self, num_features, num_classes, number_of_layers=1):
for layer_no in range(number_of_layers):
bias = None
bias_key = 'bias_' + str(layer_no)
bias = tf.Variable(tf.random_normal([self.hidden_layers[layer_no]]), name='b_'+str(layer_no))
self.biases[bias_key] = bias
def make_layers(self, number_of_layers):
prev_layer = self.x
for layer_no in range(number_of_layers):
layer = None
weight_key = 'weight_' + str(layer_no)
bias_key = 'bias_' + str(layer_no)
layer_key = 'layer_' + str(layer_no)
if self.activation == 'sigmoid':
layer = tf.nn.sigmoid(tf.add(tf.matmul(prev_layer, self.weights[weight_key]), self.biases[bias_key]),
name='Layer_'+str(layer_no)+'_sigmoid')
elif self.activation == 'softmax':
layer = tf.nn.softmax(tf.add(tf.matmul(prev_layer, self.weights[weight_key]), self.biases[bias_key]),
name='Layer_' + str(layer_no)+'_softmax')
elif self.activation == 'relu':
layer = tf.nn.relu(tf.add(tf.matmul(prev_layer, self.weights[weight_key]), self.biases[bias_key]),
name='Layer_' + str(layer_no)+'_relu')
else:
layer = tf.nn.relu(tf.add(tf.matmul(prev_layer, self.weights[weight_key]), self.biases[bias_key]),
name='Layer_' + str(layer_no)+'_relu')
self.layers[layer_key] = layer
prev_layer = self.layers[layer_key]
    def make_output_layer(self):
        # Affine output layer (logits) on top of the last hidden layer.
        layer_key = 'layer_' + str(len(self.hidden_layers) - 1)
        if self.verbose is True:
            print('Building output layer on top of %s' % layer_key)
        output_weights = tf.Variable(tf.random_normal([self.hidden_layers[-1], self.num_classes]))
        output_bias = tf.Variable(tf.random_normal([self.num_classes]))
        self.output_layer = tf.add(tf.matmul(self.layers[layer_key], output_weights), output_bias,
                                   name='out_layer')
def make_parameters(self, num_features, num_classes):
with tf.device('/cpu:0'):
number_of_layers = len(self.hidden_layers)
with tf.name_scope('Parameters'):
with tf.name_scope('Weights'):
self.make_weights(num_features, num_classes, number_of_layers)
with tf.name_scope('Bias'):
self.make_bias(num_features, num_classes, number_of_layers)
with tf.name_scope('Hidden_Layers'):
self.make_layers(number_of_layers)
with tf.name_scope('Output_Layer'):
self.make_output_layer()
def make_predictions(self):
with tf.device('/cpu:0'):
with tf.name_scope('Predictions'):
if self.activation_req is True:
if self.activation == 'softmax':
self.y_pred = tf.nn.softmax(self.output_layer)
elif self.activation == 'relu':
self.y_pred = tf.nn.relu(self.output_layer)
elif self.activation == 'sigmoid':
self.y_pred = tf.nn.sigmoid(self.output_layer)
else:
self.y_pred = self.output_layer
self.y_pred_cls = tf.argmax(self.y_pred, dimension=1)
def make_optimization(self):
with tf.device('/cpu:0'):
with tf.name_scope('Cross_Entropy'):
self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer,
labels=self.y_true)
with tf.name_scope('Loss_Function'):
if self.regularize is True:
ridge_param = tf.cast(tf.constant(self.reg_const), dtype=tf.float32)
                    # self.weights is a dict of per-layer variables, so accumulate the
                    # L2 penalty over its values rather than over the dict itself.
                    ridge_loss = tf.add_n([tf.reduce_mean(tf.square(w)) for w in self.weights.values()])
self.loss = tf.add(tf.reduce_mean(self.cross_entropy), tf.multiply(ridge_param, ridge_loss))
else:
self.loss = tf.reduce_mean(self.cross_entropy)
self.train_loss_summary = tf.summary.scalar('Training_Error', self.loss)
with tf.name_scope('Optimizer'):
if self.learning_rate_type == 'exponential':
learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,
self.display_step, 0.96, staircase=True)
else:
learning_rate = self.learning_rate
if self.descent_method == 'gradient':
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
.minimize(self.loss)
elif self.descent_method == 'adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\
.minimize(self.loss)
else:
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
.minimize(self.loss)
self.correct_prediction = tf.equal(self.y_pred_cls, self.y_true_cls)
def make_accuracy(self):
with tf.device('/cpu:0'):
with tf.name_scope('Train_Accuracy'):
self.train_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
                # Both writer configurations record the same scalar summary.
                self.train_acc_summary = tf.summary.scalar('Train_Accuracy', self.train_accuracy)
if self.train_validate_split is not None:
with tf.name_scope('Validate_Accuracy'):
self.validate_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
if self.separate_writer is True:
self.validate_acc_summary = tf.summary.scalar('Validate_Accuracy', self.validate_accuracy)
else:
self.validate_acc_summary = tf.summary.scalar('Validation_Accuracy', self.validate_accuracy)
if self.test_log is True:
with tf.name_scope('Test_Accuracy'):
self.test_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
                    # Both writer configurations record the same scalar summary.
                    self.test_acc_summary = tf.summary.scalar('Test_Accuracy', self.test_accuracy)
def create_graph(self, num_features, num_classes):
self.num_features = num_features
self.num_classes = num_classes
self.global_step = tf.Variable(0, name='last_successful_epoch', trainable=False, dtype=tf.int32)
self.last_epoch = tf.assign(self.global_step, self.global_step + 1, name='assign_updated_epoch')
# Step 1: Creating placeholders for inputs
self.make_placeholders_for_inputs(num_features, num_classes)
# Step 2: Creating initial parameters for the variables
self.make_parameters(num_features, num_classes)
# Step 3: Make predictions for the data
self.make_predictions()
# Step 4: Perform optimization operation
self.make_optimization()
# Step 5: Calculate accuracies
self.make_accuracy()
# Step 6: Initialize all the required variables
with tf.device('/cpu:0'):
self.init_var = tf.global_variables_initializer()
if self.verbose is True:
print('X : ' + str(self.x))
print('Y_true : ' + str(self.y_true))
print('Y_true_cls : ' + str(self.y_true_cls))
print('W : ' + str(self.weights))
for i in range(len(self.hidden_layers)):
weight_key = 'weight_' + str(i)
print(' weight_%d : %s' % (i,str(self.weights[weight_key])))
print('b : ' + str(self.biases))
for i in range(len(self.hidden_layers)):
bias_key = 'bias_' + str(i)
print(' bias_%d : %s' % (i,str(self.biases[bias_key])))
print('Layers : ' + str(self.layers))
for i in range(len(self.hidden_layers)):
layer_key = 'layer_' + str(i)
print(' layer_%d : %s' % (i,str(self.layers[layer_key])))
print('Output layer : ' + str(self.output_layer))
print('Y_pred : ' + str(self.y_pred))
print('Y_pred_cls : ' + str(self.y_pred_cls))
print('cross_entropy : ' + str(self.cross_entropy))
print('train_loss : ' + str(self.loss))
print('optimizer : ' + str(self.optimizer))
print('correct_prediction : ' + str(self.correct_prediction))
print('Train Accuracy : ' + str(self.train_accuracy))
print('Validate Accuracy : ' + str(self.validate_accuracy))
print('Test Accuracy : ' + str(self.test_accuracy))
return True
def fit(self, data, labels, classes, test_data=None, test_labels=None, test_classes=None):
if self.device == 'cpu':
print('Using CPU')
config = tf.ConfigProto(
log_device_placement=True,
allow_soft_placement=True,
#allow_growth=True,
#device_count={'CPU': 0}
)
else:
print('Using GPU')
config = tf.ConfigProto(
log_device_placement=True,
allow_soft_placement=True,
#allow_growth=True,
#device_count={'GPU': 0}
)
if self.session_type == 'default':
self.session = tf.Session(config=config)
if self.session_type == 'interactive':
self.session = tf.InteractiveSession(config=config)
print('Session: ' + str(self.session))
self.session.run(self.init_var)
if self.tensorboard_logs is True:
file_utils.mkdir_p(self.tensorboard_log_dir)
self.merged_summary_op = tf.summary.merge_all()
if self.restore is False:
file_utils.delete_all_files_in_dir(self.tensorboard_log_dir)
if self.separate_writer is False:
self.summary_writer = tf.summary.FileWriter(self.tensorboard_log_dir, graph=self.session.graph)
else:
self.train_writer = tf.summary.FileWriter(self.tensorboard_log_dir + 'train',
graph=self.session.graph)
if self.train_validate_split is not None:
self.validate_writer = tf.summary.FileWriter(self.tensorboard_log_dir + 'validate',
graph=self.session.graph)
if self.test_log is True:
self.test_writer = tf.summary.FileWriter(self.tensorboard_log_dir + 'test',
graph=self.session.graph)
if self.save_model is True:
self.model = tf.train.Saver(max_to_keep=5)
if self.train_validate_split is not None:
train_data, validate_data, train_labels, validate_labels, train_classes, validate_classes = \
train_test_split(data, labels, classes, train_size=self.train_validate_split)
if self.verbose is True:
print('Data shape: ' + str(data.shape))
print('Labels shape: ' + str(labels.shape))
print('Classes shape: ' + str(classes.shape))
print('Train Data shape: ' + str(train_data.shape))
print('Train Labels shape: ' + str(train_labels.shape))
print('Train Classes shape: ' + str(train_classes.shape))
print('Validate Data shape: ' + str(validate_data.shape))
print('Validate Labels shape: ' + str(validate_labels.shape))
print('Validate Classes shape: ' + str(validate_classes.shape))
if self.test_log is False:
self.optimize(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes)
else:
self.optimize(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes, test_data=test_data,
test_labels=test_labels, test_classes=test_classes)
else:
if self.test_log is False:
self.optimize(data, labels, classes)
else:
self.optimize(data, labels, classes, test_data=test_data,
test_labels=test_labels, test_classes=test_classes)
def optimize(self, train_data, train_labels, train_classes,
validate_data=None, validate_labels=None, validate_classes=None,
test_data = None, test_labels = None, test_classes = None):
        if self.transform is True:
            self.scaler = None
            if self.transform_method == 'StandardScaler':
                self.scaler = StandardScaler()
            if self.transform_method == 'MinMaxScaler':
                self.scaler = MinMaxScaler()
            if self.scaler is not None:
                # Fit the scaler on the training data only and reuse it for the
                # validation/test splits (and later predictions) so their
                # statistics do not leak into the transform.
                train_data = self.scaler.fit_transform(train_data)
                if self.train_validate_split is not None:
                    validate_data = self.scaler.transform(validate_data)
                if self.test_log is True:
                    test_data = self.scaler.transform(test_data)
file_name = os.path.splitext(os.path.abspath(self.model_name))[0]
num_files = len(sorted(glob.glob(os.path.abspath(file_name + '*.meta'))))
if num_files > 0:
checkpoint_file = os.path.abspath(sorted(glob.glob(file_name + '*.data-00000-of-00001'), reverse=True)[0])
if os.path.exists(checkpoint_file):
print('Restoring model from %s' % checkpoint_file)
meta_file = os.path.abspath(sorted(glob.glob(file_name + '*.meta'), reverse=True)[0])
print('Loading: %s' %meta_file)
saver = tf.train.import_meta_graph(meta_file)
print('Loading: %s' %os.path.abspath(checkpoint_file))
cpk = tf.train.latest_checkpoint(os.path.dirname(meta_file))
print('Checkpoint: ' + str(cpk))
print('Tensors')
print(print_tensors_in_checkpoint_file(file_name=cpk, all_tensors='', tensor_name=''))
saver.restore(self.session, tf.train.latest_checkpoint(os.path.dirname(meta_file)))
print('Last epoch to restore: ' + str(self.session.run(self.global_step)))
if self.train_validate_split is not None:
if self.test_log is False:
self.run(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes)
else:
self.run(train_data, train_labels, train_classes,
validate_data=validate_data, validate_labels=validate_labels,
validate_classes=validate_classes, test_data=test_data,
test_labels=test_labels, test_classes=test_classes)
else:
if self.test_log is False:
self.run(train_data, train_labels, train_classes)
else:
self.run(train_data, train_labels, train_classes,
test_data=test_data, test_labels=test_labels, test_classes=test_classes)
def run(self, train_data, train_labels, train_classes,
validate_data=None, validate_labels=None, validate_classes=None,
test_data=None, test_labels=None, test_classes=None):
if self.train_validate_split is not None:
feed_dict_validate = {self.x: validate_data,
self.y_true: validate_labels,
self.y_true_cls: validate_classes}
if self.test_log is True:
feed_dict_test = {self.x: test_data,
self.y_true: test_labels,
self.y_true_cls: test_classes}
epoch = self.session.run(self.global_step)
print('Last successful epoch: ' + str(epoch))
converged = False
prev_cost = 0
start = time.time()
end_batch_index = 0
num_batches = int(train_data.shape[0] / self.batch_size)
while (epoch != self.max_iterations) and converged is False:
start_batch_index = 0
for batch in range(num_batches):
# print('Training on batch %d' %batch)
end_batch_index = start_batch_index + self.batch_size
if end_batch_index < train_data.shape[0]:
train_batch_data = train_data[start_batch_index:end_batch_index, :]
train_batch_labels = train_labels[start_batch_index:end_batch_index, :]
train_batch_classes = train_classes[start_batch_index:end_batch_index]
else:
train_batch_data = train_data[start_batch_index:, :]
train_batch_labels = train_labels[start_batch_index:, :]
train_batch_classes = train_classes[start_batch_index:]
feed_dict_train = {self.x: train_batch_data,
self.y_true: train_batch_labels,
self.y_true_cls: train_batch_classes}
_, cost, train_acc, curr_epoch = self.session.run([self.optimizer, self.loss, self.train_accuracy,
self.last_epoch], feed_dict=feed_dict_train)
train_loss_summary = self.session.run(self.train_loss_summary, feed_dict=feed_dict_train)
train_acc_summary = self.session.run(self.train_acc_summary, feed_dict=feed_dict_train)
start_batch_index += self.batch_size
if self.train_validate_split is not None:
validate_acc, validate_summary = \
self.session.run([self.validate_accuracy, self.validate_acc_summary],
feed_dict=feed_dict_validate)
if self.test_log is True:
test_acc, test_summary = \
self.session.run([self.test_accuracy, self.test_acc_summary],
feed_dict=feed_dict_test)
            if self.separate_writer is False:
                self.summary_writer.add_summary(train_loss_summary, epoch)
                self.summary_writer.add_summary(train_acc_summary, epoch)
                # Only write the validate/test summaries when they were actually
                # computed above; otherwise the variables are undefined.
                if self.train_validate_split is not None:
                    self.summary_writer.add_summary(validate_summary, epoch)
                if self.test_log is True:
                    self.summary_writer.add_summary(test_summary, epoch)
            else:
                self.train_writer.add_summary(train_loss_summary, epoch)
                self.train_writer.add_summary(train_acc_summary, epoch)
                if self.train_validate_split is not None:
                    self.validate_writer.add_summary(validate_summary, epoch)
                if self.test_log is True:
                    self.test_writer.add_summary(test_summary, epoch)
if epoch % self.display_step == 0:
duration = time.time() - start
if self.train_validate_split is not None and self.test_log is False:
print('>>> Epoch [%*d/%*d] | Error: %.4f | Train Acc.: %.4f | Validate Acc.: %.4f | '
'Duration: %.4f seconds'
%(int(len(str(self.max_iterations))), epoch, int(len(str(self.max_iterations))),
self.max_iterations, cost, train_acc, validate_acc, duration))
elif self.train_validate_split is not None and self.test_log is True:
print('>>> Epoch [%*d/%*d] | Error: %.4f | Train Acc.: %.4f | Validate Acc.: %.4f | '
'Test Acc.: %.4f | Duration: %.4f seconds'
%(int(len(str(self.max_iterations))), epoch, int(len(str(self.max_iterations))),
self.max_iterations, cost, train_acc, validate_acc, test_acc, duration))
elif self.train_validate_split is None and self.test_log is True:
print('>>> Epoch [%*d/%*d] | Error: %.4f | Train Acc.: %.4f | '
'Test Acc.: %.4f | Duration: %.4f seconds'
%(int(len(str(self.max_iterations))), epoch, int(len(str(self.max_iterations))),
self.max_iterations, cost, train_acc, test_acc, duration))
else:
print('>>> Epoch [%*d/%*d] | Error: %.4f | Train Acc.: %.4f | Duration of run: %.4f seconds'
% (int(len(str(self.max_iterations))), epoch, int(len(str(self.max_iterations))),
self.max_iterations, cost, train_acc))
start = time.time()
if self.save_model is True:
model_directory = os.path.dirname(self.model_name)
file_utils.mkdir_p(model_directory)
self.model.save(self.session, self.model_name, global_step=epoch)
            if epoch == 0:
                prev_cost = cost
            else:
                # Declare convergence once the loss change drops below tolerance.
                if math.fabs(cost - prev_cost) < self.tolerance:
                    converged = True
                prev_cost = cost
            epoch += 1
# print('Current success step: ' + str(self.session.run(self.global_step)))
def fit_and_test(self, data, labels, classes, test_data, test_labels, test_classes):
        self.fit(data, labels, classes, test_data=test_data,
                 test_labels=test_labels, test_classes=test_classes)
def predict(self, data):
        if self.transform is True and self.scaler is not None:
            # Reuse the scaler fitted on the training data rather than fitting a
            # new one on the prediction inputs.
            data = self.scaler.transform(data)
feed_dict_data = {self.x: data}
predictions = self.session.run(self.y_pred_cls, feed_dict=feed_dict_data)
predictions = np.array(predictions)
return predictions
def load_model(self, model_name):
self.model.restore(self.session, model_name)
def close(self):
self.session.close()
def print_accuracy(self, test_data, test_labels, test_classes):
predict_classes = self.predict(test_data)
return accuracy_score(test_classes, predict_classes, normalize=True)
def print_classification_results(self, test_data, test_labels, test_classes):
        if self.transform is True and self.scaler is not None:
            # Apply the scaler fitted on the training data to the test inputs.
            test_data = self.scaler.transform(test_data)
feed_dict_test = {self.x: test_data,
self.y_true: test_labels,
self.y_true_cls: test_classes}
cls_true = test_classes
cls_pred = self.session.run(self.y_pred_cls, feed_dict=feed_dict_test)
cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred)
print('Confusion matrix')
print(cm)
print('Detailed classification report')
print(classification_report(y_true=cls_true, y_pred=cls_pred))
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
if self.separate_writer is False:
self.summary_writer.close()
else:
self.train_writer.close()
if self.train_validate_split is not None:
self.validate_writer.close()
if self.test_log is True:
self.test_writer.close()
def __del__(self):
self.session.close()
if self.separate_writer is False:
self.summary_writer.close()
else:
self.train_writer.close()
if self.train_validate_split is not None:
self.validate_writer.close()
if self.test_log is True:
self.test_writer.close()
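# Hedged usage sketch (not from the original source; `train_x`, `train_y_one_hot`,
# `train_y_cls`, and `test_x` are assumed arrays with shapes
# (n_samples, n_features), (n_samples, n_classes), (n_samples,), and
# (m_samples, n_features) respectively). The graph has to be built with
# create_graph() before fit() is called:
#
#     clf = TFMLPClassifier(nodes_in_layers=[64, 32], num_iterations=50,
#                           batch_size=128, test_log=False, save_model=False)
#     clf.create_graph(num_features=train_x.shape[1], num_classes=10)
#     clf.fit(train_x, train_y_one_hot, train_y_cls)
#     predictions = clf.predict(test_x)
#     clf.close()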
|
|
# -*- coding: utf-8 -*-
# This file is part of MediaFile.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Automatically-generated blanket testing for the MediaFile metadata
layer.
"""
from __future__ import division, absolute_import, print_function
import os
import shutil
import datetime
import time
import unittest
from six import assertCountEqual
from test import _common
from mediafile import MediaFile, Image, \
ImageType, CoverArtField, UnreadableFileError
import mutagen
class ArtTestMixin(object):
"""Test reads and writes of the ``art`` property.
"""
@property
def png_data(self):
if not self._png_data:
image_file = os.path.join(_common.RSRC, b'image-2x3.png')
with open(image_file, 'rb') as f:
self._png_data = f.read()
return self._png_data
_png_data = None
@property
def jpg_data(self):
if not self._jpg_data:
image_file = os.path.join(_common.RSRC, b'image-2x3.jpg')
with open(image_file, 'rb') as f:
self._jpg_data = f.read()
return self._jpg_data
_jpg_data = None
@property
def tiff_data(self):
        if not self._tiff_data:
            image_file = os.path.join(_common.RSRC, b'image-2x3.tiff')
            with open(image_file, 'rb') as f:
                self._tiff_data = f.read()
        return self._tiff_data
    _tiff_data = None
def test_set_png_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.art, self.png_data)
def test_set_jpg_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.art, self.jpg_data)
def test_delete_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNotNone(mediafile.art)
del mediafile.art
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNone(mediafile.art)
class ImageStructureTestMixin(ArtTestMixin):
"""Test reading and writing multiple image tags.
The tests use the `image` media file fixture. The tags of these files
    include two images, one in PNG format and the other in JPEG format. If
    the tag format supports it, they also include additional metadata.
"""
def test_read_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = next(i for i in mediafile.images
if i.mime_type == 'image/png')
self.assertEqual(image.data, self.png_data)
self.assertExtendedImageAttributes(image, desc=u'album cover',
type=ImageType.front)
image = next(i for i in mediafile.images
if i.mime_type == 'image/jpeg')
self.assertEqual(image.data, self.jpg_data)
self.assertExtendedImageAttributes(image, desc=u'the artist',
type=ImageType.artist)
def test_set_image_structure(self):
mediafile = self._mediafile_fixture('empty')
image = Image(data=self.png_data, desc=u'album cover',
type=ImageType.front)
mediafile.images = [image]
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 1)
image = mediafile.images[0]
self.assertEqual(image.data, self.png_data)
self.assertEqual(image.mime_type, 'image/png')
self.assertExtendedImageAttributes(image, desc=u'album cover',
type=ImageType.front)
def test_add_image_structure(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.png_data, desc=u'the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 3)
images = (i for i in mediafile.images if i.desc == u'the composer')
image = next(images, None)
self.assertExtendedImageAttributes(
image, desc=u'the composer', type=ImageType.composer
)
def test_delete_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
del mediafile.images
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 0)
def test_guess_cover(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
cover = CoverArtField.guess_cover_image(mediafile.images)
self.assertEqual(cover.desc, u'album cover')
self.assertEqual(mediafile.art, cover.data)
def assertExtendedImageAttributes(self, image, **kwargs): # noqa
"""Ignore extended image attributes in the base tests.
"""
pass
class ExtendedImageStructureTestMixin(ImageStructureTestMixin):
"""Checks for additional attributes in the image structure.
Like the base `ImageStructureTestMixin`, per-format test classes
should include this mixin to add image-related tests.
"""
def assertExtendedImageAttributes(self, image, desc=None, type=None): # noqa
self.assertEqual(image.desc, desc)
self.assertEqual(image.type, type)
def test_add_tiff_image(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.tiff_data, desc=u'the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 3)
# WMA does not preserve the order, so we have to work around this
image = list(filter(lambda i: i.mime_type == 'image/tiff',
mediafile.images))[0]
self.assertExtendedImageAttributes(
image, desc=u'the composer', type=ImageType.composer)
class LazySaveTestMixin(object):
"""Mediafile should only write changes when tags have changed
"""
@unittest.skip(u'not yet implemented')
def test_unmodified(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.filename)
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
mediafile.save()
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
@unittest.skip(u'not yet implemented')
def test_same_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.filename)
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
mediafile.title = mediafile.title
mediafile.save()
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
def test_update_same_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.filename)
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
mediafile.update({'title': mediafile.title})
mediafile.save()
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
@unittest.skip(u'not yet implemented')
def test_tag_value_change(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.filename)
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
mediafile.title = mediafile.title
mediafile.album = u'another'
mediafile.save()
self.assertNotEqual(os.stat(mediafile.filename).st_mtime, mtime)
def test_update_changed_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.filename)
self.assertEqual(os.stat(mediafile.filename).st_mtime, mtime)
mediafile.update({'title': mediafile.title, 'album': u'another'})
mediafile.save()
self.assertNotEqual(os.stat(mediafile.filename).st_mtime, mtime)
def _set_past_mtime(self, path):
mtime = round(time.time() - 10000)
os.utime(path, (mtime, mtime))
return mtime
class GenreListTestMixin(object):
"""Tests access to the ``genres`` property as a list.
"""
def test_read_genre_list(self):
mediafile = self._mediafile_fixture('full')
assertCountEqual(self, mediafile.genres, ['the genre'])
def test_write_genre_list(self):
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.filename)
assertCountEqual(self, mediafile.genres, [u'one', u'two'])
def test_write_genre_list_get_first(self):
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.genre, u'one')
def test_append_genre_list(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.genre, u'the genre')
mediafile.genres += [u'another']
mediafile.save()
mediafile = MediaFile(mediafile.filename)
assertCountEqual(self, mediafile.genres, [u'the genre', u'another'])
class ReadWriteTestBase(ArtTestMixin, GenreListTestMixin,
_common.TempDirMixin):
"""Test writing and reading tags. Subclasses must set ``extension``
and ``audio_properties``.
The basic tests for all audio formats encompass three files provided
in our `rsrc` folder: `full.*`, `empty.*`, and `unparseable.*`.
Respectively, they should contain a full slate of common fields
    listed in `full_initial_tags` below; no field contents at all; and
an unparseable release date field.
To add support for a new file format to MediaFile, add these three
    files and then create a `ReadWriteTestBase` subclass by copying and
    adapting one of the existing subclasses below (a sketch follows this
    docstring). You will want to
update the `format` field in that subclass, and you will probably
need to fiddle with the `bitrate` and other format-specific fields.
You can also add image tests (using an additional `image.*` fixture
file) by including one of the image-related mixins.
"""
full_initial_tags = {
'title': u'full',
'artist': u'the artist',
'album': u'the album',
'genre': u'the genre',
'composer': u'the composer',
'grouping': u'the grouping',
'year': 2001,
'month': None,
'day': None,
'date': datetime.date(2001, 1, 1),
'track': 2,
'tracktotal': 3,
'disc': 4,
'disctotal': 5,
'lyrics': u'the lyrics',
'comments': u'the comments',
'bpm': 6,
'comp': True,
'mb_trackid': '8b882575-08a5-4452-a7a7-cbb8a1531f9e',
'mb_releasetrackid': 'c29f3a57-b439-46fd-a2e2-93776b1371e0',
'mb_albumid': '9e873859-8aa4-4790-b985-5a953e8ef628',
'mb_artistid': '7cf0ea9d-86b9-4dad-ba9e-2355a64899ea',
'art': None,
'label': u'the label',
}
tag_fields = [
'title',
'artist',
'album',
'genre',
'lyricist',
'composer',
'composer_sort',
'arranger',
'grouping',
'year',
'month',
'day',
'date',
'track',
'tracktotal',
'disc',
'disctotal',
'lyrics',
'comments',
'copyright',
'bpm',
'comp',
'mb_trackid',
'mb_releasetrackid',
'mb_workid',
'mb_albumid',
'mb_artistid',
'art',
'label',
'rg_track_peak',
'rg_track_gain',
'rg_album_peak',
'rg_album_gain',
'r128_track_gain',
'r128_album_gain',
'albumartist',
'mb_albumartistid',
'artist_sort',
'albumartist_sort',
'acoustid_fingerprint',
'acoustid_id',
'mb_releasegroupid',
'asin',
'catalognum',
'barcode',
'isrc',
'disctitle',
'script',
'language',
'country',
'albumstatus',
'media',
'albumdisambig',
'artist_credit',
'albumartist_credit',
'original_year',
'original_month',
'original_day',
'original_date',
'initial_key',
]
def setUp(self):
self.create_temp_dir()
def tearDown(self):
self.remove_temp_dir()
def test_read_nonexisting(self):
mediafile = self._mediafile_fixture('full')
os.remove(mediafile.filename)
self.assertRaises(UnreadableFileError, MediaFile, mediafile.filename)
def test_save_nonexisting(self):
mediafile = self._mediafile_fixture('full')
os.remove(mediafile.filename)
try:
mediafile.save()
except UnreadableFileError:
pass
def test_delete_nonexisting(self):
mediafile = self._mediafile_fixture('full')
os.remove(mediafile.filename)
try:
mediafile.delete()
except UnreadableFileError:
pass
def test_read_audio_properties(self):
mediafile = self._mediafile_fixture('full')
for key, value in self.audio_properties.items():
if isinstance(value, float):
self.assertAlmostEqual(getattr(mediafile, key), value,
delta=0.1)
else:
self.assertEqual(getattr(mediafile, key), value)
def test_read_full(self):
mediafile = self._mediafile_fixture('full')
self.assertTags(mediafile, self.full_initial_tags)
def test_read_empty(self):
mediafile = self._mediafile_fixture('empty')
for field in self.tag_fields:
value = getattr(mediafile, field)
if isinstance(value, list):
assert not value
else:
self.assertIsNone(value)
def test_write_empty(self):
mediafile = self._mediafile_fixture('empty')
tags = self._generate_tags()
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertTags(mediafile, tags)
def test_update_empty(self):
mediafile = self._mediafile_fixture('empty')
tags = self._generate_tags()
mediafile.update(tags)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertTags(mediafile, tags)
def test_overwrite_full(self):
mediafile = self._mediafile_fixture('full')
tags = self._generate_tags()
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
# Make sure the tags are already set when writing a second time
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertTags(mediafile, tags)
def test_update_full(self):
mediafile = self._mediafile_fixture('full')
tags = self._generate_tags()
mediafile.update(tags)
mediafile.save()
# Make sure the tags are already set when writing a second time
mediafile.update(tags)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertTags(mediafile, tags)
def test_write_date_components(self):
mediafile = self._mediafile_fixture('full')
mediafile.year = 2001
mediafile.month = 1
mediafile.day = 2
mediafile.original_year = 1999
mediafile.original_month = 12
mediafile.original_day = 30
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.year, 2001)
self.assertEqual(mediafile.month, 1)
self.assertEqual(mediafile.day, 2)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
self.assertEqual(mediafile.original_year, 1999)
self.assertEqual(mediafile.original_month, 12)
self.assertEqual(mediafile.original_day, 30)
self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))
def test_write_incomplete_date_components(self):
mediafile = self._mediafile_fixture('empty')
mediafile.year = 2001
mediafile.month = None
mediafile.day = 2
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.year, 2001)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 1))
def test_write_dates(self):
mediafile = self._mediafile_fixture('full')
mediafile.date = datetime.date(2001, 1, 2)
mediafile.original_date = datetime.date(1999, 12, 30)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.year, 2001)
self.assertEqual(mediafile.month, 1)
self.assertEqual(mediafile.day, 2)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
self.assertEqual(mediafile.original_year, 1999)
self.assertEqual(mediafile.original_month, 12)
self.assertEqual(mediafile.original_day, 30)
self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))
def test_r128_gain_stored_as_q8_number(self):
def round_trip(x):
q_num = round(x * pow(2, 8))
return q_num / pow(2, 8)
mediafile = self._mediafile_fixture('full')
track = -1.1
self.assertNotEqual(track, round_trip(track))
mediafile.r128_track_gain = track
self.assertEqual(mediafile.r128_track_gain, round_trip(track))
album = 4.2
self.assertNotEqual(album, round_trip(album))
mediafile.r128_album_gain = album
self.assertEqual(mediafile.r128_album_gain, round_trip(album))
def test_write_packed(self):
mediafile = self._mediafile_fixture('empty')
mediafile.tracktotal = 2
mediafile.track = 1
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.track, 1)
self.assertEqual(mediafile.tracktotal, 2)
def test_write_counters_without_total(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.track, 2)
self.assertEqual(mediafile.tracktotal, 3)
self.assertEqual(mediafile.disc, 4)
self.assertEqual(mediafile.disctotal, 5)
mediafile.track = 10
delattr(mediafile, 'tracktotal')
mediafile.disc = 10
delattr(mediafile, 'disctotal')
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.track, 10)
self.assertEqual(mediafile.tracktotal, None)
self.assertEqual(mediafile.disc, 10)
self.assertEqual(mediafile.disctotal, None)
def test_unparseable_date(self):
"""The `unparseable.*` fixture should not crash but should return None
for all parts of the release date.
"""
mediafile = self._mediafile_fixture('unparseable')
self.assertIsNone(mediafile.date)
self.assertIsNone(mediafile.year)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
def test_delete_tag(self):
mediafile = self._mediafile_fixture('full')
keys = self.full_initial_tags.keys()
for key in set(keys) - set(['art', 'month', 'day']):
self.assertIsNotNone(getattr(mediafile, key))
for key in keys:
delattr(mediafile, key)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
for key in keys:
value = getattr(mediafile, key)
if isinstance(value, list):
assert not value
else:
self.assertIsNone(value)
def test_delete_packed_total(self):
mediafile = self._mediafile_fixture('full')
delattr(mediafile, 'tracktotal')
delattr(mediafile, 'disctotal')
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.track, self.full_initial_tags['track'])
self.assertEqual(mediafile.disc, self.full_initial_tags['disc'])
def test_delete_partial_date(self):
mediafile = self._mediafile_fixture('empty')
mediafile.date = datetime.date(2001, 12, 3)
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
self.assertIsNotNone(mediafile.month)
self.assertIsNotNone(mediafile.day)
delattr(mediafile, 'month')
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
def test_delete_year(self):
mediafile = self._mediafile_fixture('full')
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
delattr(mediafile, 'year')
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNone(mediafile.date)
self.assertIsNone(mediafile.year)
def assertTags(self, mediafile, tags): # noqa
errors = []
for key, value in tags.items():
try:
value2 = getattr(mediafile, key)
except AttributeError:
errors.append(u'Tag %s does not exist' % key)
else:
if value2 != value:
errors.append(u'Tag %s: %r != %r' % (key, value2, value))
if any(errors):
errors = [u'Tags did not match'] + errors
self.fail('\n '.join(errors))
def _mediafile_fixture(self, name):
name = name + '.' + self.extension
if not isinstance(name, bytes):
name = name.encode('utf8')
src = os.path.join(_common.RSRC, name)
target = os.path.join(self.temp_dir, name)
shutil.copy(src, target)
return MediaFile(target)
def _generate_tags(self, base=None):
"""Return dictionary of tags, mapping tag names to values.
"""
tags = {}
for key in self.tag_fields:
if key.startswith('rg_'):
# ReplayGain is float
tags[key] = 1.0
elif key.startswith('r128_'):
# R128 is int
tags[key] = -1
else:
tags[key] = 'value\u2010%s' % key
for key in ['disc', 'disctotal', 'track', 'tracktotal', 'bpm']:
tags[key] = 1
for key in ['artists', 'albumartists']:
tags[key] = ['multival', 'test']
tags['art'] = self.jpg_data
tags['comp'] = True
tags['url'] = "https://example.com/"
date = datetime.date(2001, 4, 3)
tags['date'] = date
tags['year'] = date.year
tags['month'] = date.month
tags['day'] = date.day
original_date = datetime.date(1999, 5, 6)
tags['original_date'] = original_date
tags['original_year'] = original_date.year
tags['original_month'] = original_date.month
tags['original_day'] = original_date.day
return tags
class PartialTestMixin(object):
tags_without_total = {
'track': 2,
'tracktotal': 0,
'disc': 4,
'disctotal': 0,
}
def test_read_track_without_total(self):
mediafile = self._mediafile_fixture('partial')
self.assertEqual(mediafile.track, 2)
self.assertIsNone(mediafile.tracktotal)
self.assertEqual(mediafile.disc, 4)
self.assertIsNone(mediafile.disctotal)
class MP3Test(ReadWriteTestBase, PartialTestMixin,
ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'mp3'
audio_properties = {
'length': 1.0,
'bitrate': 80000,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': 'MP3',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_unknown_apic_type(self):
mediafile = self._mediafile_fixture('image_unknown_type')
self.assertEqual(mediafile.images[0].type, ImageType.other)
def test_bitrate_mode(self):
mediafile = self._mediafile_fixture('cbr')
self.assertEqual(mediafile.bitrate_mode, 'CBR')
def test_encoder_info(self):
mediafile = self._mediafile_fixture('cbr')
self.assertEqual(mediafile.encoder_info, 'LAME 3.100.0+')
def test_encoder_settings(self):
mediafile = self._mediafile_fixture('cbr')
self.assertEqual(mediafile.encoder_settings, '-b 80')
class MP4Test(ReadWriteTestBase, PartialTestMixin,
ImageStructureTestMixin, unittest.TestCase):
extension = 'm4a'
audio_properties = {
'length': 1.0,
'bitrate': 64000,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': 'AAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 2,
}
def test_add_tiff_image_fails(self):
mediafile = self._mediafile_fixture('empty')
with self.assertRaises(ValueError):
mediafile.images = [Image(data=self.tiff_data)]
def test_guess_cover(self):
        # There is no metadata associated with the images, so we pick one at random.
pass
class AlacTest(ReadWriteTestBase, unittest.TestCase):
extension = 'alac.m4a'
audio_properties = {
'length': 1.0,
'bitrate': 21830,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
# 'format': 'ALAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class MusepackTest(ReadWriteTestBase, unittest.TestCase):
extension = 'mpc'
audio_properties = {
'length': 1.0,
'bitrate': 24023,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'Musepack',
'samplerate': 44100,
'bitdepth': 0,
'channels': 2,
}
class WMATest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'wma'
audio_properties = {
'length': 1.0,
'bitrate': 128000,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'Windows Media',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_write_genre_list_get_first(self):
# WMA does not preserve list order
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIn(mediafile.genre, [u'one', u'two'])
def test_read_pure_tags(self):
mediafile = self._mediafile_fixture('pure')
self.assertEqual(mediafile.comments, u'the comments')
self.assertEqual(mediafile.title, u'the title')
self.assertEqual(mediafile.artist, u'the artist')
class OggTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'ogg'
audio_properties = {
'length': 1.0,
'bitrate': 48000,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'OGG',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_read_date_from_year_tag(self):
mediafile = self._mediafile_fixture('year')
self.assertEqual(mediafile.year, 2000)
self.assertEqual(mediafile.date, datetime.date(2000, 1, 1))
def test_write_date_to_year_tag(self):
mediafile = self._mediafile_fixture('empty')
mediafile.year = 2000
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.mgfile['YEAR'], [u'2000'])
def test_legacy_coverart_tag(self):
mediafile = self._mediafile_fixture('coverart')
self.assertTrue('coverart' in mediafile.mgfile)
self.assertEqual(mediafile.art, self.png_data)
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertFalse('coverart' in mediafile.mgfile)
def test_date_tag_with_slashes(self):
mediafile = self._mediafile_fixture('date_with_slashes')
self.assertEqual(mediafile.year, 2005)
self.assertEqual(mediafile.month, 6)
self.assertEqual(mediafile.day, 5)
class FlacTest(ReadWriteTestBase, PartialTestMixin,
ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'flac'
audio_properties = {
'length': 1.0,
'bitrate': 108688,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'FLAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class ApeTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'ape'
audio_properties = {
'length': 1.0,
'bitrate': 112608,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'APE',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class WavpackTest(ReadWriteTestBase, unittest.TestCase):
extension = 'wv'
audio_properties = {
'length': 1.0,
'bitrate': 109312,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'WavPack',
'samplerate': 44100,
'bitdepth': 16 if mutagen.version >= (1, 45, 0) else 0,
'channels': 1,
}
class OpusTest(ReadWriteTestBase, unittest.TestCase):
extension = 'opus'
audio_properties = {
'length': 1.0,
'bitrate': 66792,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'Opus',
'samplerate': 48000,
'bitdepth': 0,
'channels': 1,
}
class AIFFTest(ReadWriteTestBase, unittest.TestCase):
extension = 'aiff'
audio_properties = {
'length': 1.0,
'bitrate': 705600,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'AIFF',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class WAVETest(ReadWriteTestBase, unittest.TestCase):
extension = 'wav'
audio_properties = {
'length': 1.0,
'bitrate': 88200,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'WAVE',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
full_initial_tags = {
'title': u'full',
'artist': u'the artist',
'album': u'the album',
'genre': u'the genre',
'track': 2,
'tracktotal': 3,
}
tag_fields = [
'title',
'artist',
'album',
'genre',
'track',
'original_year',
'original_month',
'original_day',
'original_date',
]
# Only a small subset of fields are supported by LIST/INFO
# metadata format in WAVE, so some fields have been removed
# from the inherited test cases below. Concerned fields are
# commented above each test case.
# Missing fields: disc, disctotal
def test_write_counters_without_total(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.track, 2)
self.assertEqual(mediafile.tracktotal, 3)
# Missing fields: date, year
def test_delete_year(self):
mediafile = self._mediafile_fixture('full')
self.assertIsNotNone(mediafile.original_year)
delattr(mediafile, 'original_year')
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNone(mediafile.original_year)
# Missing fields: disctotal
def test_delete_packed_total(self):
mediafile = self._mediafile_fixture('full')
delattr(mediafile, 'tracktotal')
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.track, self.full_initial_tags['track'])
# Check whether we have a Mutagen version with DSF support. We can
# remove this once we require a version that includes the feature.
try:
import mutagen.dsf # noqa
except ImportError:
HAVE_DSF = False
else:
HAVE_DSF = True
@unittest.skipIf(not HAVE_DSF, "Mutagen does not have DSF support")
class DSFTest(ReadWriteTestBase, unittest.TestCase):
extension = 'dsf'
audio_properties = {
'length': 0.01,
'bitrate': 11289600,
'bitrate_mode': '',
'encoder_info': '',
'encoder_settings': '',
'format': u'DSD Stream File',
'samplerate': 5644800,
'bitdepth': 1,
'channels': 2,
}
class MediaFieldTest(unittest.TestCase):
def test_properties_from_fields(self):
path = os.path.join(_common.RSRC, b'full.mp3')
mediafile = MediaFile(path)
for field in MediaFile.fields():
self.assertTrue(hasattr(mediafile, field))
def test_properties_from_readable_fields(self):
path = os.path.join(_common.RSRC, b'full.mp3')
mediafile = MediaFile(path)
for field in MediaFile.readable_fields():
self.assertTrue(hasattr(mediafile, field))
def test_known_fields(self):
fields = list(ReadWriteTestBase.tag_fields)
fields.extend(
('encoder', 'images', 'genres', 'albumtype', 'artists',
'albumartists', 'url', 'mb_artistids', 'mb_albumartistids')
)
assertCountEqual(self, MediaFile.fields(), fields)
def test_fields_in_readable_fields(self):
readable = MediaFile.readable_fields()
for field in MediaFile.fields():
self.assertIn(field, readable)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
#! /usr/bin/env python
"""DBF accessing helpers.
FIXME: more documentation needed
Examples:
Create new table, setup structure, add records:
dbf = Dbf(filename, new=True)
dbf.addField(
("NAME", "C", 15),
("SURNAME", "C", 25),
("INITIALS", "C", 10),
("BIRTHDATE", "D"),
)
for (n, s, i, b) in (
("John", "Miller", "YC", (1980, 10, 11)),
("Andy", "Larkin", "", (1980, 4, 11)),
):
rec = dbf.newRecord()
rec["NAME"] = n
rec["SURNAME"] = s
rec["INITIALS"] = i
rec["BIRTHDATE"] = b
rec.store()
dbf.close()
    Open an existing dbf, read some data:
dbf = Dbf(filename, True)
for rec in dbf:
for fldName in dbf.fieldNames:
print '%s:\t %s (%s)' % (fldName, rec[fldName],
type(rec[fldName]))
print
dbf.close()
"""
"""History (most recent first):
11-feb-2007 [als] export INVALID_VALUE;
Dbf: added .ignoreErrors, .INVALID_VALUE
04-jul-2006 [als] added export declaration
20-dec-2005 [yc] removed fromStream and newDbf methods:
the argument of the __init__ call must be used instead;
added class fields pointing to the header and
record classes.
17-dec-2005 [yc] split to several modules; reimplemented
13-dec-2005 [yc] adapted to the changes of the `strutil` module.
13-sep-2002 [als] support FoxPro Timestamp datatype
15-nov-1999 [jjk] documentation updates, add demo
24-aug-1998 [jjk] add some encodeValue methods (not tested), other tweaks
08-jun-1998 [jjk] fix problems, add more features
20-feb-1998 [jjk] fix problems, add more features
19-feb-1998 [jjk] add create/write capabilities
18-feb-1998 [jjk] from dbfload.py
"""
__version__ = "$Revision: 1.7 $"[11:-2]
__date__ = "$Date: 2007/02/11 09:23:13 $"[7:-2]
__author__ = "Jeff Kunce <[email protected]>"
__all__ = ["Dbf"]
from . import header
from . import record
from .utils import INVALID_VALUE
class Dbf(object):
"""DBF accessor.
FIXME:
docs and examples needed (don't forget to tell
about problems adding new fields on the fly)
Implementation notes:
``_new`` field is used to indicate whether this is
a new data table. `addField` can be used only on
new tables! Once at least one record has been appended
to the table, its structure can't be changed.
"""
__slots__ = ("name", "header", "stream",
"_changed", "_new", "_ignore_errors")
HeaderClass = header.DbfHeader
RecordClass = record.DbfRecord
INVALID_VALUE = INVALID_VALUE
# initialization and creation helpers
def __init__(self, f, readOnly=False, new=False, ignoreErrors=False):
"""Initialize instance.
Arguments:
f:
Filename or file-like object.
new:
True if new data table must be created. Assume
data table exists if this argument is False.
readOnly:
if the ``f`` argument is a string, the file will
be opened in read-only mode; in other cases
this argument is ignored. It is also ignored
when the ``new`` argument is True.
ignoreErrors:
if set, failing field value conversion will return
``INVALID_VALUE`` instead of raising conversion error.
"""
if isinstance(f, basestring):
# a filename
self.name = f
if new:
# new table (table file must be
# created or opened and truncated)
self.stream = file(f, "w+b")
else:
# table file must exist
self.stream = file(f, ("r+b", "rb")[bool(readOnly)])
else:
# a stream
self.name = getattr(f, "name", "")
self.stream = f
if new:
# if this is a new table, header will be empty
self.header = self.HeaderClass()
else:
# or instantiated using stream
self.header = self.HeaderClass.fromStream(self.stream)
self.ignoreErrors = ignoreErrors
self._new = bool(new)
self._changed = False
# properties
closed = property(lambda self: self.stream.closed)
recordCount = property(lambda self: self.header.recordCount)
fieldNames = property(
lambda self: [_fld.name for _fld in self.header.fields])
fieldDefs = property(lambda self: self.header.fields)
changed = property(lambda self: self._changed or self.header.changed)
def ignoreErrors(self, value):
"""Update `ignoreErrors` flag on the header object and self"""
self.header.ignoreErrors = self._ignore_errors = bool(value)
ignoreErrors = property(
lambda self: self._ignore_errors,
ignoreErrors,
doc="""Error processing mode for DBF field value conversion
if set, failing field value conversion will return
``INVALID_VALUE`` instead of raising conversion error.
""")
# protected methods
def _fixIndex(self, index):
"""Return fixed index.
This method fails if index isn't a numeric object
(long or int), or if it isn't in the valid range
(less than the number of records in the db).
If ``index`` is a negative number, it will be
treated as a negative index for list objects.
Return:
Return value is a numeric object meaning a valid index.
"""
if not isinstance(index, (int, long)):
raise TypeError("Index must be a numeric object")
if index < 0:
# index from the right side
# fix it to the left-side index
index += len(self) + 1
if index >= len(self):
raise IndexError("Record index out of range")
return index
# interface methods
def close(self):
self.flush()
self.stream.close()
def flush(self):
"""Flush data to the associated stream."""
if self.changed:
self.header.setCurrentDate()
self.header.write(self.stream)
self.stream.flush()
self._changed = False
def indexOfFieldName(self, name):
"""Index of field named ``name``."""
# FIXME: move this to header class
return self.header.fields.index(name)
def newRecord(self):
"""Return new record, which belong to this table."""
return self.RecordClass(self)
def append(self, record):
"""Append ``record`` to the database."""
record.index = self.header.recordCount
record._write()
self.header.recordCount += 1
self._changed = True
self._new = False
def addField(self, *defs):
"""Add field definitions.
For more information see `header.DbfHeader.addField`.
"""
if self._new:
self.header.addField(*defs)
else:
raise TypeError("At least one record was added, "
"structure can't be changed")
# 'magic' methods (representation and sequence interface)
def __repr__(self):
return "Dbf stream '%s'\n" % self.stream + repr(self.header)
def __len__(self):
"""Return number of records."""
return self.recordCount
def __getitem__(self, index):
"""Return `DbfRecord` instance."""
return self.RecordClass.fromStream(self, self._fixIndex(index))
def __setitem__(self, index, record):
"""Write `DbfRecord` instance to the stream."""
record.index = self._fixIndex(index)
record._write()
self._changed = True
self._new = False
# def __del__(self):
# """Flush stream upon deletion of the object."""
# self.flush()
def demo_read(filename):
_dbf = Dbf(filename, True)
for _rec in _dbf:
print
print(repr(_rec))
_dbf.close()
def demo_create(filename):
_dbf = Dbf(filename, new=True)
_dbf.addField(
("NAME", "C", 15),
("SURNAME", "C", 25),
("INITIALS", "C", 10),
("BIRTHDATE", "D"),
)
for (_n, _s, _i, _b) in (
("John", "Miller", "YC", (1981, 1, 2)),
("Andy", "Larkin", "AL", (1982, 3, 4)),
("Bill", "Clinth", "", (1983, 5, 6)),
("Bobb", "McNail", "", (1984, 7, 8)),
):
_rec = _dbf.newRecord()
_rec["NAME"] = _n
_rec["SURNAME"] = _s
_rec["INITIALS"] = _i
_rec["BIRTHDATE"] = _b
_rec.store()
print(repr(_dbf))
_dbf.close()
if __name__ == '__main__':
import sys
_name = len(sys.argv) > 1 and sys.argv[1] or "county.dbf"
demo_create(_name)
demo_read(_name)
# vim: set et sw=4 sts=4 :
|
|
"""Helper objects for maintaining PLM state and interfaces."""
import logging
import binascii
import insteonplm.utils
__all__ = "Address"
_LOGGER = logging.getLogger(__name__)
class Address:
"""Datatype definition for INSTEON device address handling."""
def __init__(self, addr):
"""Create an Address object."""
self._is_x10 = False
self.addr = self._normalize(addr)
def __repr__(self):
"""Representation of the Address object."""
return self.id
def __str__(self):
"""Return the Address object as a string."""
return self.id
def __eq__(self, other):
"""Test for equality."""
equals = False
if hasattr(other, "addr"):
equals = self.addr == other.addr
return equals
def __ne__(self, other):
"""Test for not equals."""
not_equals = True
if hasattr(other, "addr"):
not_equals = self.addr != other.addr
return not_equals
def __lt__(self, other):
"""Test for less than."""
if isinstance(other, Address):
return str(self) < str(other)
raise TypeError
def __gt__(self, other):
"""Test for greater than."""
if isinstance(other, Address):
return str(self) > str(other)
raise TypeError
def __hash__(self):
"""Create a hash code for the Address object."""
return hash(self.id)
def matches_pattern(self, other):
"""Test Address object matches the pattern of another object."""
matches = False
if hasattr(other, "addr"):
if self.addr is None or other.addr is None:
matches = True
else:
matches = self.addr == other.addr
return matches
def _normalize(self, addr):
"""Take any format of address and turn it into a hex string."""
normalize = None
if isinstance(addr, Address):
normalize = addr.addr
self._is_x10 = addr.is_x10
elif isinstance(addr, bytearray):
normalize = binascii.unhexlify(binascii.hexlify(addr).decode())
elif isinstance(addr, bytes):
normalize = addr
elif isinstance(addr, str):
addr = addr.replace(".", "")
addr = addr[0:6]
if addr[0:3].lower() == "x10":
x10_addr = Address.x10(addr[3:4], int(addr[4:6]))
normalize = x10_addr.addr
self._is_x10 = True
else:
normalize = binascii.unhexlify(addr.lower())
elif addr is None:
normalize = None
else:
_LOGGER.warning(
"Address class init with unknown type %s: %r", type(addr), addr
)
return normalize
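# Descriptive note (not part of the original source): _normalize accepts
# another Address instance, a 3-byte bytes/bytearray value, a hex string such
# as "1a.2b.3c" or "1a2b3c", an X10 string such as "x10a05" (housecode letter
# plus two-digit unitcode), or None.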
@property
def human(self):
"""Emit the address in human-readible format (AA.BB.CC)."""
addrstr = "00.00.00"
if self.addr:
if self._is_x10:
housecode_byte = self.addr[1]
housecode = insteonplm.utils.byte_to_housecode(housecode_byte)
unitcode_byte = self.addr[2]
unitcode = insteonplm.utils.byte_to_unitcode(unitcode_byte)
addrstr = "X10.{}.{:02d}".format(housecode.upper(), unitcode)
else:
addrstr = "{}.{}.{}".format(
self.hex[0:2], self.hex[2:4], self.hex[4:6]
).upper()
return addrstr
@property
def hex(self):
"""Emit the address in bare hex format (aabbcc)."""
addrstr = "000000"
if self.addr is not None:
addrstr = binascii.hexlify(self.addr).decode()
return addrstr
@property
def bytes(self):
"""Emit the address in bytes format."""
addrbyte = b"\x00\x00\x00"
if self.addr is not None:
addrbyte = self.addr
return addrbyte
@property
def id(self):
"""Return the ID of the device address."""
dev_id = ""
if self._is_x10:
dev_id = "x10{}{:02d}".format(self.x10_housecode, self.x10_unitcode)
else:
dev_id = self.hex
return dev_id
@property
def is_x10(self):
"""Test if this is an X10 address."""
return self._is_x10
@is_x10.setter
def is_x10(self, val: bool):
"""Set if this is an X10 address."""
self._is_x10 = val
@property
def x10_housecode_byte(self):
"""Emit the X10 house code byte value."""
housecode = None
if self.is_x10:
housecode = self.addr[1]
return housecode
@property
def x10_unitcode_byte(self):
"""Emit the X10 unit code byte value."""
unitcode = None
if self.is_x10:
unitcode = self.addr[2]
return unitcode
@property
def x10_housecode(self):
"""Emit the X10 house code."""
housecode = None
if self.is_x10:
housecode = insteonplm.utils.byte_to_housecode(self.addr[1])
return housecode
@property
def x10_unitcode(self):
"""Emit the X10 unit code."""
unitcode = None
if self.is_x10:
unitcode = insteonplm.utils.byte_to_unitcode(self.addr[2])
return unitcode
@classmethod
def x10(cls, housecode, unitcode):
"""Create an X10 device address."""
if housecode.lower() in [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
]:
byte_housecode = insteonplm.utils.housecode_to_byte(housecode)
else:
if isinstance(housecode, str):
_LOGGER.error("X10 house code error: %s", housecode)
else:
_LOGGER.error("X10 house code is not a string")
raise ValueError
# 20, 21 and 22 for All Units Off, All Lights On and All Lights Off
# 'fake' units
if unitcode in range(1, 17) or unitcode in range(20, 23):
byte_unitcode = insteonplm.utils.unitcode_to_byte(unitcode)
else:
if isinstance(unitcode, int):
_LOGGER.error("X10 unit code error: %d", unitcode)
else:
_LOGGER.error("X10 unit code is not an integer 1 - 16")
raise ValueError
addr = Address(bytearray([0x00, byte_housecode, byte_unitcode]))
addr.is_x10 = True
return addr
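# Illustrative usage sketch (assumption: not part of the original module);
# runs only when this file is executed directly.
if __name__ == "__main__":
    addr = Address("1a.2b.3c")
    print(addr.human)       # 1A.2B.3C
    print(addr.hex)         # 1a2b3c
    x10_addr = Address.x10("a", 5)
    print(x10_addr.is_x10)  # True
    print(x10_addr.id)      # e.g. "x10a05" (exact case depends on insteonplm.utils)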
|
|
"""
visicon
copyright info:
name='visicon',
version='0.1',
description='IP address visualisation',
author='Antisense',
author_email='[email protected]',
url='http://code.google.com/p/visicon',
=======
An attempt at visualising a user based on their IP address. Visicon
would optimally be a direct port of Visiglyph [1]_ (a variation on
Identicon [2]_), but may not ever be, hence the different name, a
portmanteau of the two implementations.
Prerequisites: ::
- Python Imaging Library (PIL) >= 1.1.6
.. [1] http://digitalconsumption.com/forum/Visiglyphs-for-IP-visualisation
.. [2] http://www.docuverse.com/blog/donpark/2007/01/18/visual-security-9-block-ip-identification
"""
from hashlib import md5
# import os
# import random
# import sys
# Try to import PIL in either of the two ways it can be installed.
try:
from PIL import Image, ImageDraw
except ImportError:
import Image
import ImageDraw
T = TRANSPARENT = -1
class Visicon(object):
"""
Visicon
=======
The Visicon class itself. This is versatile in that it isn't
restricted to IP addresses, but could technically be used with
any sort of string.
The class uses md5 (or an md5-like algorithm) to hash the
string, and then generates a unique image from the leading
bytes of the hash when prompted to (upon calling
``Visicon.draw_image``).
"""
resize = 0
min_size = 24
def __init__(self, string, seed, size=24, background=0xffffff):
"""
Visicon.__init__(self, string, seed, size=24, background=0xffffff) -> Visicon
Initialises the Visicon, storing everything that could
possibly be interpreted from the supplied ``string`` and
``seed``.
"""
self.string = string
self.seed = seed
self.hash = md5(self.string + self.seed).hexdigest()
self.size = size
dec = lambda hex: int(hex, 16)
self.block_one = dec(self.hash[0])
self.block_two = dec(self.hash[1])
self.block_centre = dec(self.hash[2]) & 7
self.rotate_one = dec(self.hash[3]) & 3
self.rotate_two = dec(self.hash[4]) & 3
self.fg_colour = (dec(self.hash[5:7]) & 239, dec(
self.hash[7:9]) & 239, dec(self.hash[9:11]) & 239)
self.fg_colour2 = (dec(self.hash[11:13]) & 239, dec(
self.hash[13:15]) & 239, dec(self.hash[15:17]) & 239)
self.background = background
if self.size < self.min_size:
self.resize = self.size
self.size = self.min_size
self.img_size = self.size * 3
self.quarter = self.size / 4
self.quarter3 = self.quarter * 3
self.half = self.size / 2
self.third = self.size / 3
self.double = self.size * 2
self.centre = self.img_size / 2
if self.background != TRANSPARENT:
self.image = Image.new(
'RGB', (self.img_size,) * 2, color=self.background)
else:
self.image = Image.new('RGBA', (self.img_size,) * 2)
def draw_image(self):
"""
draw_image(self) -> Image.Image
Draws the Visicon, returning the result as an
``Image.Image`` instance.
"""
self.draw = ImageDraw.Draw(self.image)
self.draw_corners()
self.draw_sides()
self.draw_centre()
return self.image.resize((self.size,) * 2, Image.ANTIALIAS)
def draw_corners(self):
"""
draw_corners(self) -> None
Draws the corners of the image.
"""
corners = (
{'x': 0, 'y': 0},
{'x': 0, 'y': self.double},
{'x': self.double, 'y': self.double},
{'x': self.double, 'y': 0}
)
for n, corner in enumerate(corners):
rotation = self.rotate_one + n
self.draw_glyph(self.block_one, rotation, corner, self.fg_colour)
def draw_centre(self):
"""
draw_centre(self) -> None
Draws the centre part of the image.
"""
self.draw_glyph(self.block_centre, 0, {'x': self.size, 'y':
self.size}, self.fg_colour, False)
def draw_sides(self):
"""
draw_sides(self) -> None
Draws the sides of the image.
"""
sides = (
{'x': self.size, 'y': 0},
{'x': 0, 'y': self.size},
{'x': self.size, 'y': self.double},
{'x': self.double, 'y': self.size}
)
for n, side in enumerate(sides):
rotation = self.rotate_two + n
self.draw_glyph(self.block_two, rotation, side, self.fg_colour2)
def draw_glyph(self, block, rotation, modifier, colour, outer=True):
"""
draw_glyph(self, block, rotation, modifier, colour,\
outer=True) -> None
Draws a glyph on the image, based on the far-too-many
arguments.
"""
if outer:
if block == 1: # mountains
points = [
0, 0,
self.quarter, self.size,
self.half, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
points = [
self.half, 0,
self.quarter3, self.size,
self.size, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 2: # half triangle
points = [
0, 0,
self.size, 0,
0, self.size
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 3: # centre triangle
points = [
0, 0,
self.half, self.size,
self.size, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 4: # half block
points = [
0, 0,
0, self.size,
self.half, self.size,
self.half, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 5: # half diamond
points = [
self.quarter, 0,
0, self.half,
self.quarter, self.size,
self.half, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 6: # spike
points = [
0, 0,
self.size, self.half,
self.size, self.size,
self.half, self.size
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 7: # quarter triangle
points = [
0, 0,
self.half, self.size,
0, self.size
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 8: # diag triangle
points = [
0, 0,
self.size, self.half,
self.half, self.size
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 9: # centered mini triangle
points = [
self.quarter, self.quarter,
self.quarter3, self.quarter,
self.quarter, self.quarter3
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 10: # diag mountains
points = [
0, 0,
self.half, 0,
self.half, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
points = [
self.half, self.half,
self.size, self.half,
self.size, self.size
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 11: # quarter block
points = [
0, 0,
0, self.half,
self.half, self.half,
self.half, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 12: # point out triangle
points = [
0, self.half,
self.half, self.size,
self.size, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 13: # point in triangle
points = [
0, 0,
self.half, self.half,
self.size, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 14: # diag point in
points = [
self.half, self.half,
0, self.half,
self.half, self.size
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 15: # diag point out
points = [
0, 0,
self.half, 0,
0, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
else: # diag side point out
points = [
0, 0,
self.half, 0,
self.half, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
else:
if block == 1: # circle
self.draw.ellipse((
(self.centre - self.quarter3, self.centre - self.quarter3),
(self.centre + self.quarter3, self.centre + self.quarter3)
), fill=colour)
elif block == 2: # quarter square
points = [
self.quarter, self.quarter,
self.quarter, self.quarter3,
self.quarter3, self.quarter3,
self.quarter3, self.quarter
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 3: # full square
points = [
0, 0,
0, self.size,
self.size, self.size,
self.size, 0
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 4: # quarter diamond
points = [
self.half, self.quarter,
self.quarter3, self.half,
self.half, self.quarter3,
self.quarter, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
elif block == 5: # diamond
points = [
self.half, 0,
0, self.half,
self.half, self.size,
self.size, self.half
]
points = self.rotate_points(points, rotation, modifier)
self.draw.polygon(points, fill=colour)
def rotate_points(self, points, rotation, modifier):
"""
rotate_points(self, points, rotation, modifier) -> list
Rotate a set of points and offset them by the given modifier.
"""
rotation = rotation % 4
if rotation == 1:
n = 0
while n < len(points):
tmp1 = n
val1 = points[tmp1]
tmp2 = n + 1
val2 = points[tmp2]
points[tmp1] = val2 + modifier['x']
points[tmp2] = self.size - val1 + modifier['y']
n += 2
elif rotation == 2:
n = 0
while n < len(points):
tmp1 = n
val1 = points[tmp1]
tmp2 = n + 1
val2 = points[tmp2]
points[tmp1] = self.size - val1 + modifier['x']
points[tmp2] = self.size - val2 + modifier['y']
n += 2
elif rotation == 3:
n = 0
while n < len(points):
tmp1 = n
val1 = points[tmp1]
tmp2 = n + 1
val2 = points[tmp2]
points[tmp1] = self.size - val2 + modifier['x']
points[tmp2] = val1 + modifier['y']
n += 2
else:
n = 0
while n < len(points):
tmp1 = n
val1 = points[tmp1]
tmp2 = n + 1
val2 = points[tmp2]
points[tmp1] = val1 + modifier['x']
points[tmp2] = val2 + modifier['y']
n += 2
return points
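# Illustrative usage sketch (assumption: not part of the original module); it
# follows the module's Python 2 conventions (md5 of a plain str) and requires
# PIL. Runs only when this file is executed directly.
if __name__ == '__main__':
    icon = Visicon('192.0.2.1', 'example-seed', size=48)
    img = icon.draw_image()
    img.save('visicon_example.png')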
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import MySQLdb
from MySQLdb.times import DateTimeDeltaType
from markdown2 import UnicodeWithAttrs
import warnings
import datetime
import frappe
import frappe.defaults
import frappe.async
import re
import frappe.model.meta
from frappe.utils import now, get_datetime, cstr
from frappe import _
from types import StringType, UnicodeType
class Database:
"""
Open a database connection with the given parameters. If use_default is True, use the
login details from `conf.py`. This is called by the request handler and is accessible using
the `db` global variable. The `sql` method is also global, to run queries.
"""
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default = 0):
self.host = host or frappe.conf.db_host or 'localhost'
self.user = user or frappe.conf.db_name
self._conn = None
if ac_name:
self.user = self.get_db_login(ac_name) or frappe.conf.db_name
if use_default:
self.user = frappe.conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or frappe.conf.db_password
self.value_cache = {}
def get_db_login(self, ac_name):
return ac_name
def connect(self):
"""Connects to a database as set in `site_config.json`."""
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
self._conn = MySQLdb.connect(user=self.user, host=self.host, passwd=self.password,
use_unicode=True, charset='utf8')
self._conn.converter[246]=float
self._conn.converter[12]=get_datetime
self._conn.encoders[UnicodeWithAttrs] = self._conn.encoders[UnicodeType]
self._conn.encoders[DateTimeDeltaType] = self._conn.encoders[StringType]
MYSQL_OPTION_MULTI_STATEMENTS_OFF = 1
self._conn.set_server_option(MYSQL_OPTION_MULTI_STATEMENTS_OFF)
self._cursor = self._conn.cursor()
if self.user != 'root':
self.use(self.user)
frappe.local.rollback_observers = []
def use(self, db_name):
"""`USE` db_name."""
self._conn.select_db(db_name)
self.cur_db_name = db_name
def validate_query(self, q):
"""Throw exception for dangerous queries: `ALTER`, `DROP`, `TRUNCATE` if not `Administrator`."""
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and frappe.session.user != 'Administrator':
frappe.throw(_("Not permitted"), frappe.PermissionError)
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
"""Execute a SQL query and fetch all rows.
:param query: SQL query.
:param values: List / dict of values to be escaped and substituted in the query.
:param as_dict: Return as a dictionary.
:param as_list: Always return as a list.
:param formatted: Format values like date etc.
:param debug: Print query and `EXPLAIN` in debug log.
:param ignore_ddl: Catch exception if table, column missing.
:param as_utf8: Encode values as UTF 8.
:param auto_commit: Commit after executing the query.
:param update: Update this dict to all rows (if returned `as_dict`).
Examples:
# return customer names as dicts
frappe.db.sql("select name from tabCustomer", as_dict=True)
# return names beginning with a
frappe.db.sql("select name from tabCustomer where name like %s", "a%")
# values as dict
frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
{"name": "a%", "owner":"[email protected]"})
"""
if not self._conn:
self.connect()
# in transaction validations
self.check_transaction_status(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if values!=():
if isinstance(values, dict):
values = dict(values)
# MySQL-python==1.2.5 hack!
if not isinstance(values, (dict, tuple, list)):
values = (values,)
if debug:
try:
self.explain_query(query, values)
frappe.errprint(query % values)
except TypeError:
frappe.errprint([query, values])
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log("with values:")
frappe.log(values)
frappe.log(">>>>")
self._cursor.execute(query, values)
else:
if debug:
self.explain_query(query)
frappe.errprint(query)
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log(">>>>")
self._cursor.execute(query)
except Exception, e:
# ignore data definition errors
if ignore_ddl and e.args[0] in (1146,1054,1091):
pass
# NOTE: causes deadlock
# elif e.args[0]==2006:
# # mysql has gone away
# self.connect()
# return self.sql(query=query, values=values,
# as_dict=as_dict, as_list=as_list, formatted=formatted,
# debug=debug, ignore_ddl=ignore_ddl, as_utf8=as_utf8,
# auto_commit=auto_commit, update=update)
else:
raise
if auto_commit: self.commit()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def explain_query(self, query, values=None):
"""Print `EXPLAIN` in error log."""
try:
frappe.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
frappe.errprint("--- query explain end ---")
except:
frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
"""Return data as list of single elements (first column).
Example:
# doctypes = ["DocType", "DocField", "User", ...]
doctypes = frappe.db.sql_list("select name from DocType")
"""
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=(), debug=False):
"""Commit and execute a query. DDL (Data Definition Language) queries that alter schema
autocommit in MariaDB."""
self.commit()
self.sql(query, debug=debug)
def check_transaction_status(self, query):
"""Raises exception if more than 20,000 `INSERT`, `UPDATE` queries are
executed in one transaction. This is to ensure that writes are always flushed otherwise this
could cause the system to hang."""
if self.transaction_writes and \
query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
raise Exception, 'This statement can cause implicit commit'
if query and query.strip().lower() in ('commit', 'rollback'):
self.transaction_writes = 0
if query[:6].lower() in ('update', 'insert', 'delete'):
self.transaction_writes += 1
if self.transaction_writes > 200000:
if self.auto_commit_on_many_writes:
frappe.db.commit()
else:
frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
"""Internal. Converts results to dict."""
result = self._cursor.fetchall()
ret = []
needs_formatting = self.needs_formatting(result, formatted)
for r in result:
row_dict = frappe._dict({})
for i in range(len(r)):
if needs_formatting:
val = self.convert_to_simple_type(r[i], formatted)
else:
val = r[i]
if as_utf8 and type(val) is unicode:
val = val.encode('utf-8')
row_dict[self._cursor.description[i][0]] = val
ret.append(row_dict)
return ret
def needs_formatting(self, result, formatted):
"""Returns true if the first row in the result has a Date, Datetime, Long Int."""
if result and result[0]:
for v in result[0]:
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
return True
if formatted and isinstance(v, (int, float)):
return True
return False
def get_description(self):
"""Returns result metadata."""
return self._cursor.description
def convert_to_simple_type(self, v, formatted=0):
"""Format date, time, longint values."""
return v
from frappe.utils import formatdate, fmt_money
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
if isinstance(v, datetime.date):
v = unicode(v)
if formatted:
v = formatdate(v)
# time
elif isinstance(v, (datetime.timedelta, datetime.datetime)):
v = unicode(v)
# long
elif isinstance(v, long):
v=int(v)
# convert to strings... (if formatted)
if formatted:
if isinstance(v, float):
v=fmt_money(v)
elif isinstance(v, int):
v = unicode(v)
return v
def convert_to_lists(self, res, formatted=0, as_utf8=0):
"""Convert tuple output to lists (internal)."""
nres = []
needs_formatting = self.needs_formatting(res, formatted)
for r in res:
nr = []
for c in r:
if needs_formatting:
val = self.convert_to_simple_type(c, formatted)
else:
val = c
if as_utf8 and type(val) is unicode:
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
def convert_to_utf8(self, res, formatted=0):
"""Encode result as UTF-8."""
nres = []
for r in res:
nr = []
for c in r:
if type(c) is unicode:
c = c.encode('utf-8')
nr.append(self.convert_to_simple_type(c, formatted))
nres.append(nr)
return nres
def build_conditions(self, filters):
"""Convert filters sent as dict, lists to SQL conditions. filter's key
is passed by map function, build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
conditions = []
values = {}
def _build_condition(key):
"""
filter's key is passed by map function
build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
_operator = "="
_rhs = " %(" + key + ")s"
value = filters.get(key)
values[key] = value
if isinstance(value, (list, tuple)):
# value is a tuple like ("!=", 0)
_operator = value[0]
values[key] = value[1]
if isinstance(value[1], (tuple, list)):
# value is a list in tuple ("in", ("A", "B"))
inner_list = []
for i, v in enumerate(value[1]):
inner_key = "{0}_{1}".format(key, i)
values[inner_key] = v
inner_list.append("%({0})s".format(inner_key))
_rhs = " ({0})".format(", ".join(inner_list))
del values[key]
if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
_operator = "="
if "[" in key:
split_key = key.split("[")
condition = "ifnull(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
+ _operator + _rhs
else:
condition = "`" + key + "` " + _operator + _rhs
conditions.append(condition)
if isinstance(filters, basestring):
filters = { "name": filters }
for f in filters:
_build_condition(f)
return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
"""Returns `get_value` with fieldname='*'"""
return self.get_value(doctype, filters, "*", as_dict=as_dict, cache=cache)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, cache=False):
"""Returns a document property or list of properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
frappe.db.get_value("Customer", {"name": ("like a%")})
# return last login of **User** `[email protected]`
frappe.db.get_value("User", "[email protected]", "last_login")
last_login, last_ip = frappe.db.get_value("User", "[email protected]",
["last_login", "last_ip"])
# returns default date_format
frappe.db.get_value("System Settings", None, "date_format")
"""
ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug, order_by, cache=cache)
return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, update=None, cache=False):
"""Returns multiple document properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
customers = frappe.db.get_values("Customer", {"name": ("like a%")})
# return last login of **User** `[email protected]`
user = frappe.db.get_values("User", "[email protected]", "*")[0]
"""
out = None
if cache and isinstance(filters, basestring) and \
(doctype, filters, fieldname) in self.value_cache:
return self.value_cache[(doctype, filters, fieldname)]
if isinstance(filters, list):
out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)
else:
fields = fieldname
if fieldname!="*":
if isinstance(fieldname, basestring):
fields = [fieldname]
else:
fields = fieldname
if (filters is not None) and (filters!=doctype or doctype=="DocType"):
try:
out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update)
except Exception, e:
if ignore and e.args[0] in (1146, 1054):
# table or column not found, return None
out = None
elif (not ignore) and e.args[0]==1146:
# table not found, look in singles
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
else:
raise
else:
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
if cache and isinstance(filters, basestring):
self.value_cache[(doctype, filters, fieldname)] = out
return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
"""Get values from `tabSingles` (Single DocTypes) (internal).
:param fields: List of fields,
:param filters: Filters (dict).
:param doctype: DocType name.
"""
# TODO
# if not frappe.model.meta.is_single(doctype):
# raise frappe.DoesNotExistError("DocType", doctype)
if fields=="*" or isinstance(filters, dict):
# check if single doc matches with filters
values = self.get_singles_dict(doctype)
if isinstance(filters, dict):
for key, value in filters.items():
if values.get(key) != value:
return []
if as_dict:
return values and [values] or []
if isinstance(fields, list):
return [map(lambda d: values.get(d), fields)]
else:
r = self.sql("""select field, value
from tabSingles where field in (%s) and doctype=%s""" \
% (', '.join(['%s'] * len(fields)), '%s'),
tuple(fields) + (doctype,), as_dict=False, debug=debug)
if as_dict:
if r:
r = frappe._dict(r)
if update:
r.update(update)
return [r]
else:
return []
else:
return r and [[i[1] for i in r]] or []
def get_singles_dict(self, doctype):
"""Get Single DocType as dict.
:param doctype: DocType of the single object whose value is requested
Example:
# Get column and value of the single doctype Accounts Settings
account_settings = frappe.db.get_singles_dict("Accounts Settings")
"""
return frappe._dict(self.sql("""select field, value from
tabSingles where doctype=%s""", doctype))
def get_all(self, *args, **kwargs):
return frappe.get_all(*args, **kwargs)
def get_list(self, *args, **kwargs):
return frappe.get_list(*args, **kwargs)
def get_single_value(self, doctype, fieldname, cache=False):
"""Get property of Single DocType. Cache locally by default
:param doctype: DocType of the single object whose value is requested
:param fieldname: `fieldname` of the property whose value is requested
Example:
# Get the default value of the company from the Global Defaults doctype.
company = frappe.db.get_single_value('Global Defaults', 'default_company')
"""
value = self.value_cache.setdefault(doctype, {}).get(fieldname)
if value:
return value
val = self.sql("""select value from
tabSingles where doctype=%s and field=%s""", (doctype, fieldname))
val = val[0][0] if val else None
if val=="0" or val=="1":
# check type
val = int(val)
self.value_cache[doctype][fieldname] = val
return val
def get_singles_value(self, *args, **kwargs):
"""Alias for get_single_value"""
return self.get_single_value(*args, **kwargs)
def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None):
fl = []
if isinstance(fields, (list, tuple)):
for f in fields:
if "(" in f or " as " in f: # function
fl.append(f)
else:
fl.append("`" + f + "`")
fl = ", ".join(fl)
else:
fl = fields
if fields=="*":
as_dict = True
conditions, values = self.build_conditions(filters)
order_by = ("order by " + order_by) if order_by else ""
r = self.sql("select {0} from `tab{1}` where {2} {3}".format(fl, doctype,
conditions, order_by), values, as_dict=as_dict, debug=debug, update=update)
return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
names = filter(None, names)
if names:
return dict(self.sql("select name, `%s` from `tab%s` where name in (%s)" \
% (field, doctype, ", ".join(["%s"]*len(names))), names, debug=debug))
else:
return {}
def update(self, *args, **kwargs):
"""Update multiple values. Alias for `set_value`."""
return self.set_value(*args, **kwargs)
def set_value(self, dt, dn, field, val, modified=None, modified_by=None,
update_modified=True, debug=False):
"""Set a single value in the database, do not call the ORM triggers
but update the modified timestamp (unless specified not to).
**Warning:** this function will not call Document events and should be avoided in normal cases.
:param dt: DocType name.
:param dn: Document name.
:param field: Property / field name or dictionary of values to be updated
:param val: Value to be updated.
:param modified: Use this as the `modified` timestamp.
:param modified_by: Set this user as `modified_by`.
:param update_modified: default True. Set as false, if you don't want to update the timestamp.
:param debug: Print the query in the developer / js console.
"""
if not modified:
modified = now()
if not modified_by:
modified_by = frappe.session.user
to_update = {}
if update_modified:
to_update = {"modified": modified, "modified_by": modified_by}
if isinstance(field, dict):
to_update.update(field)
else:
to_update.update({field: val})
if dn and dt!=dn:
# with table
conditions, values = self.build_conditions(dn)
values.update(to_update)
set_values = []
for key in to_update:
set_values.append('`{0}`=%({0})s'.format(key))
self.sql("""update `tab{0}`
set {1} where {2}""".format(dt, ', '.join(set_values), conditions),
values, debug=debug)
else:
# for singles
keys = to_update.keys()
self.sql('''
delete from tabSingles
where field in ({0}) and
doctype=%s'''.format(', '.join(['%s']*len(keys))),
keys + [dt], debug=debug)
for key, value in to_update.iteritems():
self.sql('''insert into tabSingles(doctype, field, value) values (%s, %s, %s)''',
(dt, key, value), debug=debug)
if dt in self.value_cache:
del self.value_cache[dt]
def set(self, doc, field, val):
"""Set value in document. **Avoid**"""
doc.db_set(field, val)
def touch(self, doctype, docname):
"""Update the modified timestamp of this document."""
from frappe.utils import now
modified = now()
frappe.db.sql("""update `tab{doctype}` set `modified`=%s
where name=%s""".format(doctype=doctype), (modified, docname))
return modified
def set_temp(self, value):
"""Set a temperory value and return a key."""
key = frappe.generate_hash()
frappe.cache().hset("temp", key, value)
return key
def get_temp(self, key):
"""Return the temperory value and delete it."""
return frappe.cache().hget("temp", key)
def set_global(self, key, val, user='__global'):
"""Save a global key value. Global values will be automatically set if they match fieldname."""
self.set_default(key, val, user)
def get_global(self, key, user='__global'):
"""Returns a global key value."""
return self.get_default(key, user)
def set_default(self, key, val, parent="__default", parenttype=None):
"""Sets a global / user default value."""
frappe.defaults.set_default(key, val, parent, parenttype)
def add_default(self, key, val, parent="__default", parenttype=None):
"""Append a default value for a key, there can be multiple default values for a particular key."""
frappe.defaults.add_default(key, val, parent, parenttype)
def get_default(self, key, parent="__default"):
"""Returns default value as a list if multiple or single"""
d = self.get_defaults(key, parent)
return isinstance(d, list) and d[0] or d
def get_defaults(self, key=None, parent="__default"):
"""Get all defaults"""
if key:
defaults = frappe.defaults.get_defaults(parent)
d = defaults.get(key, None)
if(not d and key != frappe.scrub(key)):
d = defaults.get(frappe.scrub(key), None)
return d
else:
return frappe.defaults.get_defaults(parent)
def begin(self):
self.sql("start transaction")
def commit(self):
"""Commit current transaction. Calls SQL `COMMIT`."""
self.sql("commit")
frappe.local.rollback_observers = []
self.flush_realtime_log()
def flush_realtime_log(self):
for args in frappe.local.realtime_log:
frappe.async.emit_via_redis(*args)
frappe.local.realtime_log = []
def rollback(self):
"""`ROLLBACK` current transaction."""
self.sql("rollback")
self.begin()
for obj in frappe.local.rollback_observers:
if hasattr(obj, "on_rollback"):
obj.on_rollback()
frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
"""Return true of field exists."""
return self.sql("select name from tabDocField where fieldname=%s and parent=%s", (dt, fn))
def table_exists(self, tablename):
"""Returns True if table exists."""
return ("tab" + tablename) in self.get_tables()
def get_tables(self):
return [d[0] for d in self.sql("show tables")]
def a_row_exists(self, doctype):
"""Returns True if atleast one row exists."""
return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None):
"""Returns true if document exists.
:param dt: DocType name.
:param dn: Document name or filter dict."""
if isinstance(dt, basestring):
if dt!="DocType" and dt==dn:
return True # single always exists (!)
try:
return self.get_value(dt, dn, "name")
except:
return None
elif isinstance(dt, dict) and dt.get('doctype'):
try:
conditions = []
for d in dt:
if d == 'doctype': continue
conditions.append('`%s` = "%s"' % (d, cstr(dt[d]).replace('"', '\\"')))
return self.sql('select name from `tab%s` where %s' % \
(dt['doctype'], " and ".join(conditions)))
except:
return None
def count(self, dt, filters=None, debug=False):
"""Returns `COUNT(*)` for given DocType and filters."""
if filters:
conditions, filters = self.build_conditions(filters)
return frappe.db.sql("""select count(*)
from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
else:
return frappe.db.sql("""select count(*)
from `tab%s`""" % (dt,))[0][0]
def get_creation_count(self, doctype, minutes):
"""Get count of records created in the last x minutes"""
from frappe.utils import now_datetime
from dateutil.relativedelta import relativedelta
return frappe.db.sql("""select count(name) from `tab{doctype}`
where creation >= %s""".format(doctype=doctype),
now_datetime() - relativedelta(minutes=minutes))[0][0]
def get_table_columns(self, doctype):
"""Returns list of column names from given doctype."""
return [r[0] for r in self.sql("DESC `tab%s`" % doctype)]
def has_column(self, doctype, column):
"""Returns True if column exists in database."""
return column in self.get_table_columns(doctype)
def add_index(self, doctype, fields, index_name=None):
"""Creates an index with given fields if not already created.
Index name will be `fieldname1_fieldname2_index`"""
if not index_name:
index_name = "_".join(fields) + "_index"
# remove index length if present e.g. (10) from index name
index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
if not frappe.db.sql("""show index from `tab%s` where Key_name="%s" """ % (doctype, index_name)):
frappe.db.commit()
frappe.db.sql("""alter table `tab%s`
add index `%s`(%s)""" % (doctype, index_name, ", ".join(fields)))
def add_unique(self, doctype, fields, constraint_name=None):
if isinstance(fields, basestring):
fields = [fields]
if not constraint_name:
constraint_name = "unique_" + "_".join(fields)
if not frappe.db.sql("""select CONSTRAINT_NAME from information_schema.TABLE_CONSTRAINTS
where table_name=%s and constraint_type='UNIQUE' and CONSTRAINT_NAME=%s""",
('tab' + doctype, constraint_name)):
frappe.db.commit()
frappe.db.sql("""alter table `tab%s`
add unique `%s`(%s)""" % (doctype, constraint_name, ", ".join(fields)))
def get_system_setting(self, key):
def _load_system_settings():
return self.get_singles_dict("System Settings")
return frappe.cache().get_value("system_settings", _load_system_settings).get(key)
def close(self):
"""Close database connection."""
if self._conn:
self._cursor.close()
self._conn.close()
self._cursor = None
self._conn = None
def escape(self, s, percent=True):
"""Excape quotes and percent in given string."""
if isinstance(s, unicode):
s = (s or "").encode("utf-8")
s = unicode(MySQLdb.escape_string(s), "utf-8").replace("`", "\\`")
# NOTE separating % escape, because % escape should only be done when using LIKE operator
# or when you use python format string to generate query that already has a %s
# for example: sql("select name from `tabUser` where name=%s and {0}".format(conditions), something)
# defaulting it to True, as this is the most frequent use case
# ideally we shouldn't have to use ESCAPE and strive to pass values via the values argument of sql
if percent:
s = s.replace("%", "%%")
return s
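# Illustrative note (not part of the original source): escape() is meant for
# values spliced directly into a query string rather than passed through the
# `values` argument of sql().
#
#     safe = frappe.db.escape('50% "off"')  # quotes backslash-escaped, "%" doubled
#
# As the comments above say, passing values via sql(query, values) is preferred.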
|
|
"""vobject module for reading vCard and vCalendar files."""
from __future__ import print_function
import copy
import codecs
import logging
import re
import six
import sys
# ------------------------------------ Python 2/3 compatibility challenges ----
# Python 3 no longer has a basestring type, so....
try:
basestring = basestring
except NameError:
basestring = (str, bytes)
# One more problem ... in python2 the str operator breaks on unicode
# objects containing non-ascii characters
try:
unicode
def str_(s):
"""
Return byte string with correct encoding
"""
if type(s) == unicode:
return s.encode('utf-8')
else:
return str(s)
except NameError:
def str_(s):
"""
Return string
"""
return s
if not isinstance(b'', type('')):
unicode_type = str
else:
unicode_type = unicode # noqa
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, unicode_type):
return value
return value.decode('utf-8')
def to_basestring(s):
"""Converts a string argument to a byte string.
If the argument is already a byte string, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(s, bytes):
return s
return s.encode('utf-8')
# ------------------------------------ Logging ---------------------------------
logger = logging.getLogger(__name__)
if not logging.getLogger().handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR) # Log errors
DEBUG = False # Don't waste time on debug calls
# ----------------------------------- Constants --------------------------------
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB
# --------------------------------- Main classes -------------------------------
class VBase(object):
"""
Base class for ContentLine and Component.
@ivar behavior:
The Behavior class associated with this object, which controls
validation, transformations, and encoding.
@ivar parentBehavior:
The object's parent's behavior, or None if no behaviored parent exists.
@ivar isNative:
Boolean describing whether this component is a Native instance.
@ivar group:
An optional group prefix, should be used only to indicate sort order in
vCards, according to spec.
Current spec: 4.0 (http://tools.ietf.org/html/rfc6350)
"""
def __init__(self, group=None, *args, **kwds):
super(VBase, self).__init__(*args, **kwds)
self.group = group
self.behavior = None
self.parentBehavior = None
self.isNative = False
def copy(self, copyit):
self.group = copyit.group
self.behavior = copyit.behavior
self.parentBehavior = copyit.parentBehavior
self.isNative = copyit.isNative
def validate(self, *args, **kwds):
"""
Call the behavior's validate method, or return True.
"""
if self.behavior:
return self.behavior.validate(self, *args, **kwds)
return True
def getChildren(self):
"""
Return an iterable containing the contents of the object.
"""
return []
def clearBehavior(self, cascade=True):
"""
Set behavior to None. Do for all descendants if cascading.
"""
self.behavior = None
if cascade:
self.transformChildrenFromNative()
def autoBehavior(self, cascade=False):
"""
Set behavior if name is in self.parentBehavior.knownChildren.
If cascade is True, unset behavior and parentBehavior for all
descendants, then recalculate behavior and parentBehavior.
"""
parentBehavior = self.parentBehavior
if parentBehavior is not None:
knownChildTup = parentBehavior.knownChildren.get(self.name, None)
if knownChildTup is not None:
behavior = getBehavior(self.name, knownChildTup[2])
if behavior is not None:
self.setBehavior(behavior, cascade)
if isinstance(self, ContentLine) and self.encoded:
self.behavior.decode(self)
elif isinstance(self, ContentLine):
self.behavior = parentBehavior.defaultBehavior
if self.encoded and self.behavior:
self.behavior.decode(self)
def setBehavior(self, behavior, cascade=True):
"""
Set behavior. If cascade is True, autoBehavior all descendants.
"""
self.behavior = behavior
if cascade:
for obj in self.getChildren():
obj.parentBehavior = behavior
obj.autoBehavior(True)
def transformToNative(self):
"""
Transform this object into a custom VBase subclass.
transformToNative should always return a representation of this object.
It may do so by modifying self in place then returning self, or by
creating a new object.
"""
if self.isNative or not self.behavior or not self.behavior.hasNative:
return self
else:
self_orig = copy.copy(self)
try:
return self.behavior.transformToNative(self)
except Exception as e:
# wrap errors in transformation in a ParseError
lineNumber = getattr(self, 'lineNumber', None)
if isinstance(e, ParseError):
if lineNumber is not None:
e.lineNumber = lineNumber
raise
else:
msg = "In transformToNative, unhandled exception on line {0}: {1}: {2}"
msg = msg.format(lineNumber, sys.exc_info()[0], sys.exc_info()[1])
msg = msg + " (" + str(self_orig) + ")"
raise ParseError(msg, lineNumber)
def transformFromNative(self):
"""
Return self transformed into a ContentLine or Component if needed.
May have side effects. If it does, transformFromNative and
transformToNative MUST have perfectly inverse side effects. Allowing
such side effects is convenient for objects whose transformations only
change a few attributes.
Note that it isn't always possible for transformFromNative to be a
perfect inverse of transformToNative, in such cases transformFromNative
should return a new object, not self after modifications.
"""
if self.isNative and self.behavior and self.behavior.hasNative:
try:
return self.behavior.transformFromNative(self)
except Exception as e:
# wrap errors in transformation in a NativeError
lineNumber = getattr(self, 'lineNumber', None)
if isinstance(e, NativeError):
if lineNumber is not None:
e.lineNumber = lineNumber
raise
else:
msg = "In transformFromNative, unhandled exception on line {0} {1}: {2}"
msg = msg.format(lineNumber, sys.exc_info()[0], sys.exc_info()[1])
raise NativeError(msg, lineNumber)
else:
return self
def transformChildrenToNative(self):
"""
Recursively replace children with their native representation.
"""
pass
def transformChildrenFromNative(self, clearBehavior=True):
"""
Recursively transform native children to vanilla representations.
"""
pass
def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
"""
Serialize to buf if it exists, otherwise return a string.
Use self.behavior.serialize if behavior exists.
"""
if not behavior:
behavior = self.behavior
if behavior:
if DEBUG:
logger.debug("serializing {0!s} with behavior {1!s}".format(self.name, behavior))
return behavior.serialize(self, buf, lineLength, validate)
else:
if DEBUG:
logger.debug("serializing {0!s} without behavior".format(self.name))
return defaultSerialize(self, buf, lineLength)
def toVName(name, stripNum=0, upper=False):
"""
Turn a Python name into an iCalendar style name,
optionally uppercase and with characters stripped off.
"""
if upper:
name = name.upper()
if stripNum != 0:
name = name[:-stripNum]
return name.replace('_', '-')
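# Examples (descriptive note, not part of the original source):
#     toVName('related_to')           -> 'related-to'
#     toVName('tzid_param', 6, True)  -> 'TZID'  (uppercased, '_param' stripped)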
class ContentLine(VBase):
"""
Holds one content line for formats like vCard and vCalendar.
For example::
<SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>
@ivar name:
The uppercased name of the contentline.
@ivar params:
A dictionary of parameters and associated lists of values (the list may
be empty for empty parameters).
@ivar value:
The value of the contentline.
@ivar singletonparams:
A list of parameters for which it's unclear if the string represents the
parameter name or the parameter value. In vCard 2.1, "The value string
can be specified alone in those cases where the value is unambiguous".
This is crazy, but we have to deal with it.
@ivar encoded:
A boolean describing whether the data in the content line is encoded.
Generally, text read from a serialized vCard or vCalendar should be
considered encoded. Data added programmatically should not be encoded.
@ivar lineNumber:
An optional line number associated with the contentline.
"""
def __init__(self, name, params, value, group=None, encoded=False,
isNative=False, lineNumber=None, *args, **kwds):
"""
Take output from parseLine, convert params list to dictionary.
Group is used as a positional argument to match parseLine's return
"""
super(ContentLine, self).__init__(group, *args, **kwds)
self.name = name.upper()
self.encoded = encoded
self.params = {}
self.singletonparams = []
self.isNative = isNative
self.lineNumber = lineNumber
self.value = value
def updateTable(x):
if len(x) == 1:
self.singletonparams += x
else:
paramlist = self.params.setdefault(x[0].upper(), [])
paramlist.extend(x[1:])
list(map(updateTable, params))
qp = False
if 'ENCODING' in self.params:
if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
qp = True
self.params['ENCODING'].remove('QUOTED-PRINTABLE')
if len(self.params['ENCODING']) == 0:
del self.params['ENCODING']
if 'QUOTED-PRINTABLE' in self.singletonparams:
qp = True
self.singletonparams.remove('QUOTED-PRINTABLE')
if qp:
if 'ENCODING' in self.params:
self.value = codecs.decode(self.value.encode("utf-8"), "quoted-printable").decode(self.params['ENCODING'])
else:
if 'CHARSET' in self.params:
self.value = codecs.decode(self.value.encode("utf-8"), "quoted-printable").decode(self.params['CHARSET'][0])
else:
self.value = codecs.decode(self.value.encode("utf-8"), "quoted-printable").decode('utf-8')
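# Descriptive note (not part of the original source): the block above
# transparently decodes vCard 2.1 quoted-printable values at parse time, e.g.
#
#     ContentLine('NOTE', [['ENCODING', 'QUOTED-PRINTABLE']], 'caf=C3=A9')
#
# ends up with value u'café' and with QUOTED-PRINTABLE removed from its params.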
@classmethod
def duplicate(clz, copyit):
newcopy = clz('', {}, '')
newcopy.copy(copyit)
return newcopy
def copy(self, copyit):
super(ContentLine, self).copy(copyit)
self.name = copyit.name
self.value = copy.copy(copyit.value)
self.encoded = copyit.encoded
self.params = copy.copy(copyit.params)
for k, v in self.params.items():
self.params[k] = copy.copy(v)
self.singletonparams = copy.copy(copyit.singletonparams)
self.lineNumber = copyit.lineNumber
def __eq__(self, other):
try:
return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
except Exception:
return False
def __getattr__(self, name):
"""
Make params accessible via self.foo_param or self.foo_paramlist.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
try:
if name.endswith('_param'):
return self.params[toVName(name, 6, True)][0]
elif name.endswith('_paramlist'):
return self.params[toVName(name, 10, True)]
else:
raise AttributeError(name)
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
"""
Make params accessible via self.foo_param or self.foo_paramlist.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
if name.endswith('_param'):
if type(value) == list:
self.params[toVName(name, 6, True)] = value
else:
self.params[toVName(name, 6, True)] = [value]
elif name.endswith('_paramlist'):
if type(value) == list:
self.params[toVName(name, 10, True)] = value
else:
raise VObjectError("Parameter list set to a non-list")
else:
prop = getattr(self.__class__, name, None)
if isinstance(prop, property):
prop.fset(self, value)
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
try:
if name.endswith('_param'):
del self.params[toVName(name, 6, True)]
elif name.endswith('_paramlist'):
del self.params[toVName(name, 10, True)]
else:
object.__delattr__(self, name)
except KeyError:
raise AttributeError(name)
def valueRepr(self):
"""
Transform the representation of the value
according to the behavior, if any.
"""
v = self.value
if self.behavior:
v = self.behavior.valueRepr(self)
return v
def __str__(self):
try:
return "<{0}{1}{2}>".format(self.name, self.params, self.valueRepr())
except UnicodeEncodeError as e:
return "<{0}{1}{2}>".format(self.name, self.params, self.valueRepr().encode('utf-8'))
def __repr__(self):
return self.__str__()
def __unicode__(self):
return u"<{0}{1}{2}>".format(self.name, self.params, self.valueRepr())
def prettyPrint(self, level=0, tabwidth=3):
pre = ' ' * level * tabwidth
print(pre, self.name + ":", self.valueRepr())
if self.params:
print(pre, "params for ", self.name + ':')
for k in self.params.keys():
print(pre + ' ' * tabwidth, k, self.params[k])
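# A hedged usage sketch for ContentLine (the parameter name and value below are
# made up for illustration):
#   cl = ContentLine('summary', [['LANGUAGE', 'en']], 'Bastille Day Party')
#   cl.name            -> 'SUMMARY'
#   cl.params          -> {'LANGUAGE': ['en']}
#   cl.language_param  -> 'en'     (single value, via __getattr__)
#   cl.language_param = 'fr'       # stored back as ['fr'] via __setattr__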
class Component(VBase):
"""
A complex property that can contain multiple ContentLines.
For our purposes, a component must start with a BEGIN:xxxx line and end with
END:xxxx, or have a PROFILE:xxx line if a top-level component.
@ivar contents:
A dictionary of lists of Component or ContentLine instances. The keys
are the lowercased names of child ContentLines or Components.
Note that BEGIN and END ContentLines are not included in contents.
@ivar name:
        Uppercase string used to represent this Component, i.e. VCARD if the
serialized object starts with BEGIN:VCARD.
@ivar useBegin:
A boolean flag determining whether BEGIN: and END: lines should
be serialized.
"""
def __init__(self, name=None, *args, **kwds):
super(Component, self).__init__(*args, **kwds)
self.contents = {}
if name:
self.name = name.upper()
self.useBegin = True
else:
self.name = ''
self.useBegin = False
self.autoBehavior()
@classmethod
def duplicate(cls, copyit):
newcopy = cls()
newcopy.copy(copyit)
return newcopy
def copy(self, copyit):
super(Component, self).copy(copyit)
# deep copy of contents
self.contents = {}
for key, lvalue in copyit.contents.items():
newvalue = []
for value in lvalue:
newitem = value.duplicate(value)
newvalue.append(newitem)
self.contents[key] = newvalue
self.name = copyit.name
self.useBegin = copyit.useBegin
def setProfile(self, name):
"""
Assign a PROFILE to this unnamed component.
Used by vCard, not by vCalendar.
"""
if self.name or self.useBegin:
if self.name == name:
return
raise VObjectError("This component already has a PROFILE or "
"uses BEGIN.")
self.name = name.upper()
def __getattr__(self, name):
"""
For convenience, make self.contents directly accessible.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
# if the object is being re-created by pickle, self.contents may not
# be set, don't get into an infinite loop over the issue
if name == 'contents':
return object.__getattribute__(self, name)
try:
if name.endswith('_list'):
return self.contents[toVName(name, 5)]
else:
return self.contents[toVName(name)][0]
except KeyError:
raise AttributeError(name)
normal_attributes = ['contents', 'name', 'behavior', 'parentBehavior', 'group']
def __setattr__(self, name, value):
"""
For convenience, make self.contents directly accessible.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
if name not in self.normal_attributes and name.lower() == name:
if type(value) == list:
if name.endswith('_list'):
name = name[:-5]
self.contents[toVName(name)] = value
elif name.endswith('_list'):
raise VObjectError("Component list set to a non-list")
else:
self.contents[toVName(name)] = [value]
else:
prop = getattr(self.__class__, name, None)
if isinstance(prop, property):
prop.fset(self, value)
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
try:
if name not in self.normal_attributes and name.lower() == name:
if name.endswith('_list'):
del self.contents[toVName(name, 5)]
else:
del self.contents[toVName(name)]
else:
object.__delattr__(self, name)
except KeyError:
raise AttributeError(name)
def getChildValue(self, childName, default=None, childNumber=0):
"""
Return a child's value (the first, by default), or None.
"""
child = self.contents.get(toVName(childName))
if child is None:
return default
else:
return child[childNumber].value
def add(self, objOrName, group=None):
"""
Add objOrName to contents, set behavior if it can be inferred.
If objOrName is a string, create an empty component or line based on
behavior. If no behavior is found for the object, add a ContentLine.
group is an optional prefix to the name of the object (see RFC 2425).
"""
if isinstance(objOrName, VBase):
obj = objOrName
if self.behavior:
obj.parentBehavior = self.behavior
obj.autoBehavior(True)
else:
name = objOrName.upper()
try:
id = self.behavior.knownChildren[name][2]
behavior = getBehavior(name, id)
if behavior.isComponent:
obj = Component(name)
else:
obj = ContentLine(name, [], '', group)
obj.parentBehavior = self.behavior
obj.behavior = behavior
obj = obj.transformToNative()
except (KeyError, AttributeError):
obj = ContentLine(objOrName, [], '', group)
if obj.behavior is None and self.behavior is not None:
if isinstance(obj, ContentLine):
obj.behavior = self.behavior.defaultBehavior
self.contents.setdefault(obj.name.lower(), []).append(obj)
return obj
def remove(self, obj):
"""
Remove obj from contents.
"""
named = self.contents.get(obj.name.lower())
if named:
try:
named.remove(obj)
if len(named) == 0:
del self.contents[obj.name.lower()]
except ValueError:
pass
def getChildren(self):
"""
Return an iterable of all children.
"""
for objList in self.contents.values():
for obj in objList:
yield obj
def components(self):
"""
Return an iterable of all Component children.
"""
return (i for i in self.getChildren() if isinstance(i, Component))
def lines(self):
"""
Return an iterable of all ContentLine children.
"""
return (i for i in self.getChildren() if isinstance(i, ContentLine))
def sortChildKeys(self):
try:
first = [s for s in self.behavior.sortFirst if s in self.contents]
except Exception:
first = []
return first + sorted(k for k in self.contents.keys() if k not in first)
def getSortedChildren(self):
return [obj for k in self.sortChildKeys() for obj in self.contents[k]]
def setBehaviorFromVersionLine(self, versionLine):
"""
Set behavior if one matches name, versionLine.value.
"""
v = getBehavior(self.name, versionLine.value)
if v:
self.setBehavior(v)
def transformChildrenToNative(self):
"""
Recursively replace children with their native representation.
Sort to get dependency order right, like vtimezone before vevent.
"""
for childArray in (self.contents[k] for k in self.sortChildKeys()):
for child in childArray:
child = child.transformToNative()
child.transformChildrenToNative()
def transformChildrenFromNative(self, clearBehavior=True):
"""
Recursively transform native children to vanilla representations.
"""
for childArray in self.contents.values():
for child in childArray:
child = child.transformFromNative()
child.transformChildrenFromNative(clearBehavior)
if clearBehavior:
child.behavior = None
child.parentBehavior = None
def __str__(self):
if self.name:
return "<{0}| {1}>".format(self.name, self.getSortedChildren())
else:
return u'<*unnamed*| {0}>'.format(self.getSortedChildren())
def __repr__(self):
return self.__str__()
def prettyPrint(self, level=0, tabwidth=3):
pre = ' ' * level * tabwidth
print(pre, self.name)
if isinstance(self, Component):
for line in self.getChildren():
line.prettyPrint(level + 1, tabwidth)
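# A hedged usage sketch for Component, using only the plain classes above:
#   card = Component('VCARD')
#   card.add(ContentLine('FN', [], 'John Doe'))
#   card.fn.value               -> 'John Doe'
#   card.getChildValue('fn')    -> 'John Doe'
#   list(card.contents)         -> ['fn']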
class VObjectError(Exception):
def __init__(self, msg, lineNumber=None):
self.msg = msg
if lineNumber is not None:
self.lineNumber = lineNumber
def __str__(self):
if hasattr(self, 'lineNumber'):
return "At line {0!s}: {1!s}".format(self.lineNumber, self.msg)
else:
return repr(self.msg)
class ParseError(VObjectError):
pass
class ValidateError(VObjectError):
pass
class NativeError(VObjectError):
pass
# --------- Parsing functions and parseLine regular expressions ----------------
patterns = {}
# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = r'[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'
# the combined Python string replacement and regex syntax is a little confusing;
# remember that {foobar} is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsafe_chars surrounded
# by double quotes.
patterns['param_value'] = ' "{qsafe_char!s} * " | {safe_char!s} * '.format(**patterns)
# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( {qsafe_char!s} * )" | ( {safe_char!s} + )
""".format(**patterns)
# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: {name!s} ) # parameter name
(?:
(?: = (?: {param_value!s} ) )? # 0 or more parameter values, multiple
(?: , (?: {param_value!s} ) )* # parameters are comma separated
)*
""".format(**patterns)
# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( {name!s} )
(?: =
(
(?: (?: {param_value!s} ) )? # 0 or more parameter values, multiple
(?: , (?: {param_value!s} ) )* # parameters are comma separated
)
)?
""".format(**patterns)
# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> {name!s})\.)?(?P<name> {name!s}) # name group
(?P<params> ;?(?: {param!s} )* ) # params group (may be empty)
: (?P<value> .* )$ # value group
""".format(**patterns)
# Leftover %-style formulation of param_value; this bare string literal was
# never assigned to anything and has no effect:
# ' "%(qsafe_char)s*" | %(safe_char)s* '
param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
"""
Parse parameters
"""
all = params_re.findall(string)
allParameters = []
for tup in all:
paramList = [tup[0]] # tup looks like (name, valuesString)
for pair in param_values_re.findall(tup[1]):
# pair looks like ('', value) or (value, '')
if pair[0] != '':
paramList.append(pair[0])
else:
paramList.append(pair[1])
allParameters.append(paramList)
return allParameters
def parseLine(line, lineNumber=None):
"""
Parse line
"""
match = line_re.match(line)
if match is None:
raise ParseError("Failed to parse line: {0!s}".format(line), lineNumber)
# Underscores are replaced with dash to work around Lotus Notes
return (match.group('name').replace('_', '-'),
parseParams(match.group('params')),
match.group('value'), match.group('group'))
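# For example, parseLine('TEL;TYPE=cell:+1-919-555-1234') returns
#   ('TEL', [['TYPE', 'cell']], '+1-919-555-1234', None)
# where the final element is the (absent) group prefix.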
# logical line regular expressions
patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'{lineend!s} [\t ]'.format(**patterns)
patterns['logicallines'] = r"""
(
(?: [^\r\n] | {wrap!s} )*
{lineend!s}
)
""".format(**patterns)
patterns['wraporend'] = r'({wrap!s} | {lineend!s} )'.format(**patterns)
wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)
testLines = """
Line 0 text
, Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True):
"""
Iterate through a stream, yielding one logical line at a time.
Because many applications still use vCard 2.1, we have to deal with the
quoted-printable encoding for long lines, as well as the vCard 3.0 and
vCalendar line folding technique, a whitespace character at the start
of the line.
Quoted-printable data will be decoded in the Behavior decoding phase.
# We're leaving this test in for awhile, because the unittest was ugly and dumb.
>>> from six import StringIO
>>> f=StringIO(testLines)
>>> for n, l in enumerate(getLogicalLines(f)):
... print("Line %s: %s" % (n, l[0]))
...
Line 0: Line 0 text, Line 0 continued.
Line 1: Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2: Line 2 is a new line, it does not start with whitespace.
"""
if not allowQP:
val = fp.read(-1)
lineNumber = 1
for match in logical_lines_re.finditer(val):
line, n = wrap_re.subn('', match.group())
if line != '':
yield line, lineNumber
lineNumber += n
else:
quotedPrintable = False
newbuffer = six.StringIO
logicalLine = newbuffer()
lineNumber = 0
lineStartNumber = 0
while True:
line = fp.readline()
if line == '':
break
else:
line = line.rstrip(CRLF)
lineNumber += 1
if line.rstrip() == '':
if logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
lineStartNumber = lineNumber
logicalLine = newbuffer()
quotedPrintable = False
continue
if quotedPrintable and allowQP:
logicalLine.write('\n')
logicalLine.write(line)
quotedPrintable = False
elif line[0] in SPACEORTAB:
logicalLine.write(line[1:])
elif logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
lineStartNumber = lineNumber
logicalLine = newbuffer()
logicalLine.write(line)
else:
logicalLine = newbuffer()
logicalLine.write(line)
# vCard 2.1 allows parameters to be encoded without a parameter name
# False positives are unlikely, but possible.
val = logicalLine.getvalue()
if val[-1] == '=' and val.lower().find('quoted-printable') >= 0:
quotedPrintable = True
if logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
return ContentLine(*parseLine(text, n), **{'encoded': True,
'lineNumber': n})
def dquoteEscape(param):
"""
Return param, or "param" if ',' or ';' or ':' is in param.
"""
if param.find('"') >= 0:
raise VObjectError("Double quotes aren't allowed in parameter values.")
for char in ',;:':
if param.find(char) >= 0:
return '"' + param + '"'
return param
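# e.g. dquoteEscape('WORK') -> 'WORK', while dquoteEscape('a,b') -> '"a,b"'.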
def foldOneLine(outbuf, input, lineLength=75):
"""
Folding line procedure that ensures multi-byte utf-8 sequences are not
broken across lines
TO-DO: This all seems odd. Is it still needed, especially in python3?
"""
if len(input) < lineLength:
# Optimize for unfolded line case
try:
outbuf.write(bytes(input, 'UTF-8'))
except Exception:
# fall back on py2 syntax
outbuf.write(input)
else:
# Look for valid utf8 range and write that out
start = 0
written = 0
counter = 0 # counts line size in bytes
decoded = to_unicode(input)
length = len(to_basestring(input))
while written < length:
s = decoded[start] # take one char
            size = len(to_basestring(s))  # calculate its size in bytes
if counter + size > lineLength:
try:
outbuf.write(bytes("\r\n ", 'UTF-8'))
except Exception:
# fall back on py2 syntax
outbuf.write("\r\n ")
counter = 1 # one for space
if str is unicode_type:
outbuf.write(to_unicode(s))
else:
# fall back on py2 syntax
outbuf.write(s.encode('utf-8'))
written += size
counter += size
start += 1
try:
outbuf.write(bytes("\r\n", 'UTF-8'))
except Exception:
# fall back on py2 syntax
outbuf.write("\r\n")
def defaultSerialize(obj, buf, lineLength):
"""
Encode and fold obj and its children, write to buf or return a string.
"""
outbuf = buf or six.StringIO()
if isinstance(obj, Component):
if obj.group is None:
groupString = ''
else:
groupString = obj.group + '.'
if obj.useBegin:
foldOneLine(outbuf, "{0}BEGIN:{1}".format(groupString, obj.name),
lineLength)
for child in obj.getSortedChildren():
# validate is recursive, we only need to validate once
child.serialize(outbuf, lineLength, validate=False)
if obj.useBegin:
foldOneLine(outbuf, "{0}END:{1}".format(groupString, obj.name),
lineLength)
elif isinstance(obj, ContentLine):
startedEncoded = obj.encoded
if obj.behavior and not startedEncoded:
obj.behavior.encode(obj)
s = six.StringIO()
if obj.group is not None:
s.write(obj.group + '.')
s.write(str_(obj.name.upper()))
keys = sorted(obj.params.keys())
for key in keys:
paramstr = ','.join(dquoteEscape(p) for p in obj.params[key])
try:
s.write(";{0}={1}".format(key, paramstr))
except (UnicodeDecodeError, UnicodeEncodeError):
s.write(";{0}={1}".format(key, paramstr.encode('utf-8')))
try:
s.write(":{0}".format(obj.value))
except (UnicodeDecodeError, UnicodeEncodeError):
s.write(":{0}".format(obj.value.encode('utf-8')))
if obj.behavior and not startedEncoded:
obj.behavior.decode(obj)
foldOneLine(outbuf, s.getvalue(), lineLength)
return buf or outbuf.getvalue()
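# A minimal sketch of defaultSerialize on a bare, behavior-less line:
#   cl = ContentLine('SUMMARY', [], 'Bastille Day Party', encoded=True)
#   defaultSerialize(cl, None, 75)   -> 'SUMMARY:Bastille Day Party\r\n'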
class Stack:
def __init__(self):
self.stack = []
def __len__(self):
return len(self.stack)
def top(self):
if len(self) == 0:
return None
else:
return self.stack[-1]
def topName(self):
if len(self) == 0:
return None
else:
return self.stack[-1].name
def modifyTop(self, item):
top = self.top()
if top:
top.add(item)
else:
new = Component()
self.push(new)
new.add(item) # add sets behavior for item and children
def push(self, obj):
self.stack.append(obj)
def pop(self):
return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
ignoreUnreadable=False, allowQP=False):
"""
Generate one Component at a time from a stream.
"""
if isinstance(streamOrString, basestring):
stream = six.StringIO(streamOrString)
else:
stream = streamOrString
try:
stack = Stack()
versionLine = None
n = 0
for line, n in getLogicalLines(stream, allowQP):
if ignoreUnreadable:
try:
vline = textLineToContentLine(line, n)
except VObjectError as e:
if e.lineNumber is not None:
msg = "Skipped line {lineNumber}, message: {msg}"
else:
msg = "Skipped a line, message: {msg}"
logger.error(msg.format(**{'lineNumber': e.lineNumber, 'msg': str(e)}))
continue
else:
vline = textLineToContentLine(line, n)
if vline.name == "VERSION":
versionLine = vline
stack.modifyTop(vline)
elif vline.name == "BEGIN":
stack.push(Component(vline.value, group=vline.group))
elif vline.name == "PROFILE":
if not stack.top():
stack.push(Component())
stack.top().setProfile(vline.value)
elif vline.name == "END":
if len(stack) == 0:
err = "Attempted to end the {0} component but it was never opened"
raise ParseError(err.format(vline.value), n)
if vline.value.upper() == stack.topName(): # START matches END
if len(stack) == 1:
component = stack.pop()
if versionLine is not None:
component.setBehaviorFromVersionLine(versionLine)
else:
behavior = getBehavior(component.name)
if behavior:
component.setBehavior(behavior)
if validate:
component.validate(raiseException=True)
if transform:
component.transformChildrenToNative()
yield component # EXIT POINT
else:
stack.modifyTop(stack.pop())
else:
err = "{0} component wasn't closed"
raise ParseError(err.format(stack.topName()), n)
else:
stack.modifyTop(vline) # not a START or END line
if stack.top():
if stack.topName() is None:
logger.warning("Top level component was never named")
elif stack.top().useBegin:
raise ParseError("Component {0!s} was never closed".format(
(stack.topName())), n)
yield stack.pop()
except ParseError as e:
e.input = streamOrString
raise
def readOne(stream, validate=False, transform=True, ignoreUnreadable=False,
allowQP=False):
"""
Return the first component from stream.
"""
return next(readComponents(stream, validate, transform, ignoreUnreadable,
allowQP))
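# A hedged end-to-end sketch of the reading pipeline (registered behaviors, if
# any, may additionally transform children to native objects):
#   cal = readOne("BEGIN:VCALENDAR\r\nVERSION:2.0\r\nEND:VCALENDAR\r\n")
#   cal.name             -> 'VCALENDAR'
#   cal.version.value    -> '2.0'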
# --------------------------- version registry ---------------------------------
__behaviorRegistry = {}
def registerBehavior(behavior, name=None, default=False, id=None):
"""
Register the given behavior.
If default is True (or if this is the first version registered with this
name), the version will be the default if no id is given.
"""
if not name:
name = behavior.name.upper()
if id is None:
id = behavior.versionString
if name in __behaviorRegistry:
if default:
__behaviorRegistry[name].insert(0, (id, behavior))
else:
__behaviorRegistry[name].append((id, behavior))
else:
__behaviorRegistry[name] = [(id, behavior)]
def getBehavior(name, id=None):
"""
Return a matching behavior if it exists, or None.
If id is None, return the default for name.
"""
name = name.upper()
if name in __behaviorRegistry:
if id:
for n, behavior in __behaviorRegistry[name]:
if n == id:
return behavior
return __behaviorRegistry[name][0][1]
return None
def newFromBehavior(name, id=None):
"""
Given a name, return a behaviored ContentLine or Component.
"""
name = name.upper()
behavior = getBehavior(name, id)
if behavior is None:
raise VObjectError("No behavior found named {0!s}".format(name))
if behavior.isComponent:
obj = Component(name)
else:
obj = ContentLine(name, [], '')
obj.behavior = behavior
obj.isNative = False
return obj
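# Registry sketch (MyNoteBehavior stands in for a hypothetical Behavior
# subclass with name = 'X-NOTE', versionString = '1.0', isComponent = False):
#   registerBehavior(MyNoteBehavior)
#   getBehavior('x-note')      -> MyNoteBehavior    (default id for the name)
#   newFromBehavior('x-note')  -> ContentLine named 'X-NOTE' with that behavior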
# --------------------------- Helper function ----------------------------------
def backslashEscape(s):
    s = s.replace("\\", "\\\\").replace(";", "\\;").replace(",", "\\,")
return s.replace("\r\n", "\\n").replace("\n", "\\n").replace("\r", "\\n")
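# e.g. backslashEscape('a;b,c') -> 'a\\;b\\,c', and any newline in the input
# becomes the two-character sequence backslash + 'n'.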
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Sub programs for operating some Keithley instruments
author : Eoin O'Farrell
email : [email protected]
last edited : July 2013
Classes for:
Keithley 6221
InitializeInstruments
ScanInstruments
InitializeDataFile
WriteDataFile
CloseDataFile
GraphData
"""
import rpyc
import visa as visa
import VisaSubs as VisaSubs
import string as string
import re as re
from collections import namedtuple
import time
import math
import numpy as np
import threading
import Queue
######################################################
# At the moment each of the instruments we use is a
# separate class
#####################################################
class k6221:
def __init__(self, address, compliance = 0.1, analogFilter = True, autorange = True, setupOption = "SAV0", doSetup = False, mode = "Wave", wave = "SIN", frequency = 9.2, amplitude = 10e-8):
# The setup option sets the setup that we use if doSetup is True
self.Address = address
self.Visa = VisaSubs.InitializeGPIB(address,0,term_chars = "\\n")
# Other 6430 properties
self.Compliance = compliance
self.AnalogFilter = analogFilter
self.AutoRange = autorange
        self.CurrentRange = None  # set by Initialize() when autorange is disabled
self.Mode = mode
self.Wave = wave
self.DoSetup = doSetup
        self.SetupOption = setupOption
self.Output = False
self.Frequency = frequency
self.Amplitude = amplitude
if doSetup:
self.Visa.write("*RST")
self.Visa.write("".join(("SYST:POS ",setupOption)))
######################################
# Initialization i.e. writing a load of SCPI
#######################################
def Initialize(self):
# Assume that the source is in Sine mode and that there is no
# offset
# Determine if the output is on or off
Reply = self.Visa.ask("OUTP:STAT?")
        self.Output = bool(float(Reply))  # cast: bool('0') would otherwise be True
time.sleep(.1)
# if the output is on we now determine the parameters
# amplitude, frequency, compliance ...
if self.Output:
Reply = self.Visa.ask("SOUR:CURR:COMP?")
self.Compliance = float(Reply)
Reply = self.Visa.ask("SOUR:CURR:FILT?")
            self.AnalogFilter = bool(float(Reply))
Reply = self.Visa.ask("SOUR:CURR:RANG:AUTO?")
            self.AutoRange = bool(float(Reply))
if not self.AutoRange:
Reply = self.Visa.ask("SOUR:CURR:RANG?")
self.CurrentRange = float(Reply)
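        # The remainder of this routine mirrors a DC-SMU style setup and relies
        # on attributes (Source, Sense, Integration, Repetition, Median, Delay,
        # Trigger) and the SkipCompliance/SkipMath flags that are expected to be
        # provided elsewhere; k6221 itself does not define them.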
self.Visa.write("".join((":SOUR:FUNC:MODE ",self.Source)))
# Configure the auto zero (reference)
self.Visa.write(":SYST:AZER:STAT ON")
self.Visa.write(":SYST:AZER:CACH:STAT 1")
self.Visa.write(":SYST:AZER:CACH:RES")
        # Enable concurrent mode so both I and V (not R) can be measured
self.Visa.write(":SENS:FUNC:CONC 1")
if self.Source == "VOLT":
self.Sense = "CURR"
elif self.Source == "CURR":
self.Sense = "VOLT"
self.Visa.write("".join((":SENS:FUNC:ON ","\"%s\"," % self.Source,"\"%s\"" % self.Sense)))
self.Visa.write("".join((":FORM:ELEM ","%s," % self.Source,"%s" % self.Sense)))
self.Visa.write("".join((":SENS:",self.Sense,":RANG:AUTO 0")))
        # Set the compliance
if not SkipCompliance:
self.Visa.write("".join((":SENS:",self.Sense,":RANG 105e-9")))
self.Visa.write("".join((":SENS:",self.Sense,":PROT:LEV %.3e" % self.Compliance)))
# # Set some filters
self.Visa.write("".join((":SENS:",self.Sense,":NPLC %.2f" % self.Integration)))
if not SkipMath:
self.Visa.write(":SENS:AVER:REP:COUN %d" % self.Repetition)
self.Visa.write(":SENS:MED:RANK %d" % self.Median)
self.Visa.write(":SOUR:DEL %.4f" % self.Delay)
self.Visa.write(":TRIG:DEL %.4f" % self.Trigger)
pass
###########################################
# Set the range and compliance
#######################################
def SetRangeCompliance(self, Range = 105, Compliance = 105):
self.Compliance = Compliance
self.Visa.write("".join((":SENS:",self.Sense,":PROT:LEV %.3e" % self.Compliance)))
if Range:
self.Visa.write("".join((":SENS:",self.Sense,":RANG ","%.2e" % Range)))
else:
self.Visa.write("".join((":SENS:",self.Sense,":RANG:AUTO 1")))
pass
##################################################
# Read data
################################################
def ReadWave(self):
Reply = self.Visa.ask("SOUR:WAVE:FREQ?")
self.Frequency = float(Reply)
Reply = self.Visa.ask("SOUR:WAVE:AMPL?")
self.Amplitude = float(Reply)
pass
##################################################
# Set source
##################################################
def SetWave(self,Amp,Freq):
self.Visa.write("SOUR:WAVE:CURR %.4e" % Amp)
        self.Visa.write("SOUR:WAVE:FREQ %.4e" % Freq)
pass
#################################################
# Switch the output
###############################################
def SwitchOutput(self):
self.Output = not self.Output
self.Visa.write("".join((":OUTP:STAT ","%d" % self.Output)))
pass
#################################################
# Switch the wave
###############################################
def SwitchWave(self):
self.Output = not self.Output
if self.Output:
self.Visa.write("SOUR:WAVE:ARM")
self.Visa.write("SOUR:WAVE:INIT")
else:
self.Visa.write("SOUR:WAVE:ABOR")
pass
######################################################
# Manual sweep, this sweep will be run as a separate process
# so it doesn't block the program
##################################################
def RunSweep(self,Start,Stop,Step,Wait,Mode = "linear",**kwargs):
#self.Visa.write("".join((":SOUR:",self.Source,":MODE FIX")))
Targets = [Start, Stop]
for kw in kwargs.keys():
if kw == "mid":
Mid = kwargs[kw]
for i in Mid:
Targets.insert(len(Targets)-1,i)
Voltage = [Start]
for i in range(1,len(Targets)):
Points = int(1+abs(Targets[i]-Targets[i-1])/Step)
if Mode == "linear":
Voltage = np.hstack([Voltage,np.linspace(Targets[i-1],Targets[i],num = Points)[1:Points]])
            if Mode == "log":
                # NOTE: "log" mode currently uses the same linear spacing as
                # above; a true logarithmic sweep is not implemented here.
                Voltage = np.hstack([Voltage,np.linspace(Targets[i-1],Targets[i],num = Points)[1:Points]])
# self.Visa.write("".join((":SOUR:",self.Source," %.4e" % Voltage[0])))
return Voltage
###################################################
# Print a description string
################################################
def Description(self):
DescriptionString = "Keithley6221"
for item in vars(self).items():
if item[0] == "Frequency" or item[0] == "Amplitude" or item[0] == "Address":
DescriptionString = ", ".join((DescriptionString,"%s = %.3f" % item))
DescriptionString = "".join((DescriptionString,"\n"))
return DescriptionString
############################################
######### Ramp the source to a final value
#########################################
def Ramp(self,Finish):
if self.Output:
self.ReadData()
VStart = self.Data[0]
N = max(100,int(abs(Finish-VStart)/0.1))
VSweep = np.linspace(VStart,Finish,num=N+1)
if not self.Output:
self.SwitchOutput()
for i in range(len(VSweep)):
self.SetSource(VSweep[i])
time.sleep(0.05)
self.ReadData()
return
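# A hedged usage sketch for the wave mode implemented above (the GPIB address
# and values are placeholders; assumes VisaSubs.InitializeGPIB can reach the
# instrument):
#   source = k6221(12, doSetup=False)
#   source.SetWave(1e-7, 13.7)   # 100 nA amplitude at 13.7 Hz
#   source.SwitchWave()          # arm and start the waveform output
#   source.ReadWave()            # read amplitude/frequency back from the unit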
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python import summary
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params):
"""Deep Neural Net model_fn.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `_Head` instance.
* hidden_units: List of hidden units per layer.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use the Adagrad
optimizer with a default learning rate of 0.05.
* activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
* dropout: When not `None`, the probability we will drop out a given
coordinate.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* num_ps_replicas: The number of parameter server replicas.
* embedding_lr_multipliers: Optional. A dictionary from
`EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
multiply with learning rate for the embedding variables.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
head = params["head"]
hidden_units = params["hidden_units"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or "Adagrad"
activation_fn = params.get("activation_fn")
dropout = params.get("dropout")
gradient_clip_norm = params.get("gradient_clip_norm")
num_ps_replicas = params.get("num_ps_replicas", 0)
embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
features = _get_feature_dict(features)
parent_scope = "dnn"
input_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
input_layer_scope = parent_scope + "/input_from_feature_columns"
with variable_scope.variable_scope(
input_layer_scope,
values=list(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
weight_collections=[parent_scope],
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
parent_scope + "/hiddenlayer_%d" % layer_id,
values=[net],
partitioner=hidden_layer_partitioner) as scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=activation_fn,
variables_collections=[parent_scope],
scope=scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = layers.dropout(
net,
keep_prob=(1.0 - dropout))
_add_hidden_layer_summary(net, scope.name)
with variable_scope.variable_scope(
parent_scope + "/logits",
values=[net],
partitioner=hidden_layer_partitioner) as scope:
logits = layers.fully_connected(
net,
head.logits_dimension,
activation_fn=None,
variables_collections=[parent_scope],
scope=scope)
_add_hidden_layer_summary(logits, scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=contrib_variables.get_global_step(),
learning_rate=_LEARNING_RATE,
optimizer=_get_optimizer(optimizer),
gradient_multipliers=(
dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access
embedding_lr_multipliers, parent_scope, input_layer_scope)),
clip_gradients=gradient_clip_norm,
name=parent_scope,
# Empty summaries to prevent optimizers from logging the training_loss.
summaries=[])
return head.head_ops(features, labels, mode, _train_op_fn, logits)
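# An illustrative sketch of the params dict _dnn_model_fn expects; "head",
# "hidden_units" and "feature_columns" are required, the rest fall back through
# params.get(...) as above (see DNNClassifier below for the real wiring):
#   params = {
#       "head": head_lib._multi_class_head(2),  # pylint: disable=protected-access
#       "hidden_units": [64, 32],
#       "feature_columns": my_feature_columns,  # placeholder iterable of columns
#       "optimizer": "Adagrad",
#   }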
class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
"""A classifier for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train(): # returns x, y (where y represents label's class index).
pass
estimator.fit(input_fn=input_fn_train)
  def input_fn_eval(): # returns x, y (where y represents label's class index).
pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
embedding_lr_multipliers=None):
"""Initializes a DNNClassifier instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
It must be greater than 1. Note: Class labels are integers representing
the class index (i.e. values from 0 to n_classes-1). For arbitrary
label values (e.g. string labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
Returns:
A `DNNClassifier` estimator.
Raises:
ValueError: If `n_classes` < 2.
"""
self._hidden_units = hidden_units
self._feature_columns = feature_columns
self._enable_centered_bias = enable_centered_bias
self._estimator = estimator.Estimator(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._multi_class_head( # pylint: disable=protected-access
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units":
hidden_units,
"feature_columns":
feature_columns,
"optimizer":
optimizer,
"activation_fn":
activation_fn,
"dropout":
dropout,
"gradient_clip_norm":
gradient_clip_norm,
"num_ps_replicas":
config.num_ps_replicas if config else 0,
"embedding_lr_multipliers":
embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable. Note: Labels must be integer class indices."""
# TODO(roumposg): Remove when deprecated monitors are removed.
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
self._estimator.fit(x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=hooks,
max_steps=max_steps)
return self
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=None, steps=None, metrics=None, name=None):
"""See evaluable.Evaluable. Note: Labels must be integer class indices."""
return self._estimator.evaluate(
x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size,
steps=steps, metrics=metrics, name=name)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes (or an iterable of predicted classes if
as_iterable is True). Each predicted class is represented by its class
index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = self._estimator.predict(x=x, input_fn=input_fn,
batch_size=batch_size, outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key].reshape(-1)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(
self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns prediction probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
      Numpy array of predicted probabilities (or an iterable of predicted
      probabilities if as_iterable is True). Columns are ordered by class
      index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = self._estimator.predict(x=x, input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def _get_predict_ops(self, features):
"""See `Estimator` class."""
# This method exists to support some models that use the legacy interface.
# pylint: disable=protected-access
return self._estimator._get_predict_ops(features)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return self._estimator.get_variable_names()
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
`Tensor` object.
"""
return self._estimator.get_variable_value(name)
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return self._estimator.export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(
signature_fn or export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@experimental
def export_savedmodel(self,
export_dir_base,
input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=None):
return self._estimator.export_savedmodel(
export_dir_base,
input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
as_text=as_text,
exports_to_keep=exports_to_keep)
@property
def model_dir(self):
return self._estimator.model_dir
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
hiddenlayer_weights = [
self.get_variable_value("dnn/hiddenlayer_%d/weights" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_weights = [self.get_variable_value("dnn/logits/weights")]
return hiddenlayer_weights + logits_weights
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
hiddenlayer_bias = [
self.get_variable_value("dnn/hiddenlayer_%d/biases" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_bias = [self.get_variable_value("dnn/logits/biases")]
if self._enable_centered_bias:
centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]
else:
centered_bias = []
return hiddenlayer_bias + logits_bias + centered_bias
@property
def config(self):
return self._estimator.config
class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.fit(input_fn=input_fn_train)
  def input_fn_eval(): # returns x, y
pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
label_dimension=1):
"""Initializes a `DNNRegressor` instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_dimension: Dimension of the label for multilabels. Defaults to 1.
Returns:
A `DNNRegressor` estimator.
"""
super(DNNRegressor, self).__init__(
model_dir=model_dir,
weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
config=config,
feature_engineering_fn=feature_engineering_fn,
label_dimension=label_dimension)
self.feature_columns = feature_columns
self.optimizer = optimizer
self.activation_fn = activation_fn
self.dropout = dropout
self.hidden_units = hidden_units
self._feature_columns_inferred = False
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
return self.dnn_weights_
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
return self.dnn_bias_
|
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ConfigParser import RawConfigParser
from ovs.extensions.generic.sshclient import SSHClient
from ovs.extensions.generic.system import System
from StringIO import StringIO
import os
import tempfile
import time
class ClusterNode(object):
"""
cluster node config parameters
"""
def __init__(self, name=None, ip=None, client_port=None, messaging_port=None):
self.name = name
self.ip = ip
self.client_port = client_port
self.messaging_port = messaging_port
def __hash__(self):
"""
Defines a hashing equivalent for a given ClusterNode
"""
return hash('{0}_{1}_{2}_{3}'.format(self.name, self.ip, self.client_port, self.messaging_port))
def __eq__(self, other):
"""
Checks whether two objects are the same.
"""
if not isinstance(other, ClusterNode):
return False
return self.__hash__() == other.__hash__()
def __ne__(self, other):
"""
        Checks whether two objects are not the same.
"""
if not isinstance(other, ClusterNode):
return True
return not self.__eq__(other)
class ClusterConfig():
"""
contains cluster config parameters
"""
def __init__(self, base_dir, cluster_name, log_level, plugins=None):
self.base_dir = base_dir
self.cluster_name = cluster_name
self.log_level = log_level
self.log_dir = "/var/log/arakoon/" + cluster_name
self.home_dir = "/".join([self.base_dir, 'arakoon', cluster_name])
self.tlog_dir = "/".join([self.base_dir, 'tlogs', cluster_name])
self.target_ip = '127.0.0.1'
if plugins is None:
self.plugins = ""
else:
self.plugins = plugins
self.nodes = []
self.fsync = True
def set_base_dir(self, base_dir):
self.home_dir = base_dir + '/arakoon/' + self.cluster_name
self.tlog_dir = base_dir + '/tlogs/' + self.cluster_name
self.base_dir = base_dir
def set_cluster_name(self, cluster_name):
self.log_dir = "/var/log/arakoon/" + cluster_name
self.home_dir = "/".join([self.base_dir, 'arakoon', cluster_name])
self.tlog_dir = "/".join([self.base_dir, 'tlogs', cluster_name])
self.cluster_name = cluster_name
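# For example, ClusterConfig('/mnt/db', 'ovsdb', 'info') derives
#   home_dir = '/mnt/db/arakoon/ovsdb', tlog_dir = '/mnt/db/tlogs/ovsdb',
#   log_dir  = '/var/log/arakoon/ovsdb', with an empty plugins string.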
class ArakoonInstaller():
"""
class to dynamically install/(re)configure arakoon cluster
"""
ARAKOON_BIN = '/usr/bin/arakoon'
ARAKOON_CONFIG_DIR = '/opt/OpenvStorage/config/arakoon'
ARAKOON_CONFIG_FILE = '/opt/OpenvStorage/config/arakoon/{0}/{0}.cfg'
ARAKOON_UPSTART = """
description "Arakoon upstart"
start on (local-filesystems and started networking)
stop on runlevel [016]
kill timeout 60
respawn
respawn limit 10 5
console log
setuid root
setgid root
env PYTHONPATH=/opt/OpenvStorage
{0}
chdir /opt/OpenvStorage
exec /usr/bin/python2 /opt/OpenvStorage/ovs/extensions/db/arakoon/ArakoonManagement.py --start --cluster {1}
"""
def __init__(self):
self.config = None
def get_config_file(self, suffix=None):
config_dir = '/'.join([ArakoonInstaller.ARAKOON_CONFIG_DIR, self.config.cluster_name])
filename = '/'.join([config_dir, self.config.cluster_name + suffix])
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return filename
def create_config(self, cluster_name, ip, client_port, messaging_port, plugins=None):
"""
Creates initial config object causing this host to be master
:param cluster_name: unique name for this arakoon cluster used in paths
:param ip: ip on which service should listen
        :param client_port: tcp port on which clients connect to this node
        :param messaging_port: tcp port used for node-to-node messaging
:param plugins: optional arakoon plugins
:return:
"""
client = SSHClient.load(ip)
node_name = System.get_my_machine_id(client)
base_dir = System.read_remote_config(client, 'ovs.core.db.arakoon.location')
self.clear_config()
self.config = ClusterConfig(base_dir, cluster_name, 'info', plugins)
self.config.nodes.append(ClusterNode(node_name, ip, client_port, messaging_port))
self.config.target_ip = ip
@staticmethod
def get_config_from(cluster_name, master_ip, master_password=None):
"""
Gets a config object representation for the cluster on master
"""
client = SSHClient.load(master_ip, master_password)
cfg_file = client.file_read(ArakoonInstaller.ARAKOON_CONFIG_FILE.format(cluster_name))
cfg = RawConfigParser()
cfg.readfp(StringIO(cfg_file))
return cfg
def load_config_from(self, cluster_name, master_ip):
"""
Reads actual config from master node
Assumes this node is up-to-date and is considered valid
        :param cluster_name: name of the arakoon cluster to load
        :param master_ip: ip of the master node to read the config from;
                          the base_dir is expected to be identical across nodes
"""
cfg = ArakoonInstaller.get_config_from(cluster_name, master_ip)
global_section = dict(cfg.items('global'))
nodes = cfg.sections()
nodes.remove('global')
# validate config
if not nodes:
raise ValueError('Expected at least one node in cfg file')
first = True
for node_id in nodes:
node_config = dict(cfg.items(node_id))
if first is True:
self.create_config(cluster_name, node_config['ip'],
node_config['client_port'], node_config['messaging_port'],
plugins=global_section['plugins'])
first = False
else:
self.add_node_to_config(node_id, node_config['ip'],
node_config['client_port'], node_config['messaging_port'])
def upload_config_for(self, cluster_name):
if self.config.cluster_name != cluster_name:
raise RuntimeError('Configuration is not setup for: {0} '.format(cluster_name))
cluster_ips = list()
for node in self.config.nodes:
cluster_ips.append(node.ip)
for ip in cluster_ips:
client = SSHClient.load(ip)
self.generate_config(client)
self.generate_upstart_config(client)
def clear_config(self):
self.config = None
def get_config(self):
return self.config
def add_node_to_config(self, node_id, ip, client_port, messaging_port):
node = ClusterNode(node_id, ip, client_port, messaging_port)
self.config.nodes.append(node)
def remove_node_from_config(self, node_id):
for node in self.config.nodes:
if node.name == node_id:
self.config.nodes.remove(node)
break
def generate_config(self, client=None):
(temp_handle, temp_filename) = tempfile.mkstemp()
config_file = self.get_config_file('.cfg')
contents = RawConfigParser()
contents.add_section('global')
contents.set('global', 'cluster_id', self.config.cluster_name)
contents.set('global', 'cluster', '')
contents.set('global', 'plugins', self.config.plugins)
for node in self.config.nodes:
if not contents.has_section(node.name):
contents.add_section(node.name)
contents.set(node.name, 'name', node.name)
contents.set(node.name, 'ip', node.ip)
contents.set(node.name, 'client_port', node.client_port)
contents.set(node.name, 'messaging_port', node.messaging_port)
contents.set(node.name, 'tlog_compression', 'snappy')
contents.set(node.name, 'log_level', self.config.log_level)
contents.set(node.name, 'log_dir', self.config.log_dir)
contents.set(node.name, 'home', self.config.home_dir)
contents.set(node.name, 'tlog_dir', self.config.tlog_dir)
contents.set(node.name, 'fsync', str(self.config.fsync).lower())
if contents.get('global', 'cluster'):
contents.set('global', 'cluster', ','.join([contents.get('global', 'cluster'), node.name]))
else:
contents.set('global', 'cluster', node.name)
if client is None:
with open(config_file, 'wb') as f:
contents.write(f)
else:
with open(temp_filename, 'wb') as f:
contents.write(f)
client.dir_ensure(os.path.dirname(config_file))
client.file_upload(config_file, temp_filename)
os.remove(temp_filename)
def generate_upstart_config(self, client=None):
(temp_handle, temp_filename) = tempfile.mkstemp()
config_file = '/etc/init/ovs-arakoon-{0}.conf'.format(self.config.cluster_name)
ld_config = 'env LD_LIBRARY_PATH=/usr/lib/alba'
contents = ArakoonInstaller.ARAKOON_UPSTART.format(ld_config, self.config.cluster_name)
if client is None:
with open(config_file, 'wb') as f:
f.write(contents)
else:
with open(temp_filename, 'wb') as f:
f.write(contents)
client.dir_ensure(os.path.dirname(config_file))
client.file_upload(config_file, temp_filename)
os.remove(temp_filename)
def create_dir_structure(self, client=None, cluster_name=None):
if cluster_name is None:
cluster_name = self.config.cluster_name
cmd = """
mkdir -p {0}/arakoon/{1}
mkdir -p {0}/tlogs/{1}
mkdir -p /var/log/arakoon/{1}
""".format(self.config.base_dir, cluster_name)
System.run(cmd, client)
def delete_dir_structure(self, client=None, cluster_name=None):
if cluster_name is None:
cluster_name = self.config.cluster_name
cmd = """
rm -rf {0}/arakoon/{1}
rm -rf {0}/tlogs/{1}
rm -rf /var/log/arakoon/{1}
""".format(self.config.base_dir, cluster_name)
System.run(cmd, client)
def generate_configs(self, client=None):
self.generate_config(client)
self.generate_upstart_config(client)
@staticmethod
    def create_cluster(cluster_name, ip, exclude_ports, plugins=None):
        """
        Creates a new single-node arakoon cluster on the given ip and returns the ports it uses
        """
ai = ArakoonInstaller()
ai.clear_config()
client = SSHClient.load(ip)
port_range = System.read_remote_config(client, 'ovs.ports.arakoon')
free_ports = System.get_free_ports(port_range, exclude_ports, 2, client)
ai.create_config(cluster_name, ip, free_ports[0], free_ports[1], plugins)
ai.generate_configs(client)
ai.create_dir_structure(client)
return {'client_port': free_ports[0],
'messaging_port': free_ports[1]}
@staticmethod
def start(cluster_name, ip):
client = SSHClient.load(ip)
cmd = """
from ovs.plugin.provider.service import Service
print Service.start_service('arakoon-{0}')
""".format(cluster_name)
System.exec_remote_python(client, cmd)
@staticmethod
def stop(cluster_name, ip):
client = SSHClient.load(ip)
cmd = """
from ovs.plugin.provider.service import Service
print Service.stop_service('arakoon-{0}')
""".format(cluster_name)
System.exec_remote_python(client, cmd)
@staticmethod
def status(cluster_name, ip):
client = SSHClient.load(ip)
cmd = """
from ovs.plugin.provider.service import Service
print Service.get_service_status('arakoon-{0}')
""".format(cluster_name)
System.exec_remote_python(client, cmd)
@staticmethod
def catchup_cluster_node(cluster_name, ip):
client = SSHClient.load(ip)
cmd = """
from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx
cluster = ArakoonManagementEx().getCluster('{0}')
cluster.catchup_node()
""".format(cluster_name)
System.exec_remote_python(client, cmd)
@staticmethod
    def extend_cluster(src_ip, tgt_ip, cluster_name, exclude_ports):
        """
        Adds the node on tgt_ip to an existing cluster, reading the current config from src_ip
        """
ai = ArakoonInstaller()
ai.load_config_from(cluster_name, src_ip)
client = SSHClient.load(tgt_ip)
tgt_id = System.get_my_machine_id(client)
port_range = System.read_remote_config(client, 'ovs.ports.arakoon')
free_ports = System.get_free_ports(port_range, exclude_ports, 2, client)
ai.create_dir_structure(client)
ai.add_node_to_config(tgt_id, tgt_ip, free_ports[0], free_ports[1])
ai.upload_config_for(cluster_name)
return {'client_port': free_ports[0],
'messaging_port': free_ports[1]}
@staticmethod
    def shrink_cluster(remaining_node_ip, deleted_node_ip, cluster_name):
        """
        Removes the node on deleted_node_ip from the cluster config and uploads the updated config to the remaining nodes
        """
ai = ArakoonInstaller()
ai.load_config_from(cluster_name, remaining_node_ip)
client = SSHClient.load(deleted_node_ip)
deleted_node_id = System.get_my_machine_id(client)
ai.delete_dir_structure(client)
ai.remove_node_from_config(deleted_node_id)
ai.upload_config_for(cluster_name)
@staticmethod
def deploy_config(from_ip, to_ip, cluster_name):
ai = ArakoonInstaller()
ai.load_config_from(cluster_name, from_ip)
client = SSHClient.load(to_ip)
ai.generate_config(client)
@staticmethod
def wait_for_cluster(cluster_name):
"""
Waits for an Arakoon cluster to be available (by sending a nop)
"""
from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx
from ovs.extensions.db.arakoon.arakoon.ArakoonExceptions import ArakoonSockReadNoBytes
last_exception = None
tries = 3
while tries > 0:
try:
cluster_object = ArakoonManagementEx().getCluster(str(cluster_name))
client = cluster_object.getClient()
client.nop()
return True
except ArakoonSockReadNoBytes as exception:
last_exception = exception
tries -= 1
time.sleep(1)
raise last_exception
@staticmethod
def restart_cluster_add(cluster_name, current_ips, new_ip):
"""
Execute a (re)start sequence after adding a new node to a cluster.
"""
# Make sure all nodes are correctly (re)started
loglevel = logging.root.manager.disable # Workaround for disabling Arakoon logging
logging.disable('WARNING')
ArakoonInstaller.catchup_cluster_node(cluster_name, new_ip)
threshold = 2 if new_ip in current_ips else 1
for ip in current_ips:
if ip == new_ip:
continue
ArakoonInstaller.stop(cluster_name, ip)
ArakoonInstaller.start(cluster_name, ip)
if len(current_ips) > threshold: # A two node cluster needs all nodes running
ArakoonInstaller.wait_for_cluster(cluster_name)
ArakoonInstaller.start(cluster_name, new_ip)
ArakoonInstaller.wait_for_cluster(cluster_name)
logging.disable(loglevel) # Restore workaround
@staticmethod
def restart_cluster_remove(cluster_name, remaining_ips):
"""
Execute a restart sequence after removing a node from a cluster
"""
loglevel = logging.root.manager.disable # Workaround for disabling Arakoon logging
logging.disable('WARNING')
for ip in remaining_ips:
ArakoonInstaller.stop(cluster_name, ip)
ArakoonInstaller.start(cluster_name, ip)
if len(remaining_ips) > 2: # A two node cluster needs all nodes running
ArakoonInstaller.wait_for_cluster(cluster_name)
logging.disable(loglevel) # Restore workaround
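# A minimal usage sketch (not executed anywhere in this module): it assumes two reachable
# hosts at the hypothetical ips below, an 'ovs.ports.arakoon' port range configured on
# them and working SSHClient access; only the ArakoonInstaller methods defined above are
# used, so treat it as an illustration rather than a tested code path.
def _arakoon_installer_usage_sketch():
    master_ip = '10.100.1.1'   # hypothetical first node
    extra_ip = '10.100.1.2'    # hypothetical node to extend the cluster to
    cluster = 'demo'           # hypothetical cluster name
    ports = ArakoonInstaller.create_cluster(cluster, master_ip, exclude_ports=[])
    ArakoonInstaller.start(cluster, master_ip)
    ArakoonInstaller.wait_for_cluster(cluster)
    new_ports = ArakoonInstaller.extend_cluster(master_ip, extra_ip, cluster,
                                                exclude_ports=list(ports.values()))
    ArakoonInstaller.restart_cluster_add(cluster, [master_ip], extra_ip)
    return new_ports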
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras import testing_utils
from tensorflow.python.platform import test
class Convolution1DTest(test.TestCase):
def test_dilated_conv1d(self):
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv1D,
input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
kwargs={
'filters': 1,
'kernel_size': 2,
'dilation_rate': 1,
'padding': 'valid',
'kernel_initializer': 'ones',
'use_bias': False,
},
expected_output=[[[1], [3], [5]]])
def test_conv_1d(self):
batch_size = 2
steps = 8
input_dim = 2
kernel_size = 3
filters = 3
for padding in ['valid', 'same']:
for strides in [1, 2]:
if padding == 'same' and strides != 1:
continue
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv1D,
kwargs={
'filters': filters,
'kernel_size': kernel_size,
'padding': padding,
'strides': strides
},
input_shape=(batch_size, steps, input_dim))
def test_conv_1d_regularization(self):
# regularizers
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 3)
# constraints
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.constraints), 2)
class Conv2DTest(test.TestCase):
def test_convolution_2d(self):
num_samples = 2
filters = 2
stack_size = 3
kernel_size = (3, 2)
num_row = 7
num_col = 6
for padding in ['valid', 'same']:
for strides in [(1, 1), (2, 2)]:
if padding == 'same' and strides != (1, 1):
continue
with self.test_session(use_gpu=True):
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.Conv2D,
kwargs={
'filters': filters,
'kernel_size': kernel_size,
'padding': padding,
'strides': strides,
'data_format': 'channels_first'
},
input_shape=(num_samples, stack_size, num_row, num_col))
def test_convolution_2d_regularization(self):
# regularizers
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
# constraints
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.constraints), 2)
def test_dilated_conv_2d(self):
num_samples = 2
filters = 2
stack_size = 3
kernel_size = (3, 2)
num_row = 7
num_col = 6
# Test dilation
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv2D,
kwargs={
'filters': filters,
'kernel_size': kernel_size,
'dilation_rate': (2, 2)
},
input_shape=(num_samples, num_row, num_col, stack_size))
class Conv2DTransposeTest(test.TestCase):
def test_conv2d_transpose(self):
num_samples = 2
filters = 2
stack_size = 3
num_row = 5
num_col = 6
for padding in ['valid', 'same']:
for strides in [(1, 1), (2, 2)]:
if padding == 'same' and strides != (1, 1):
continue
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv2DTranspose,
kwargs={
'filters': filters,
'kernel_size': 3,
'padding': padding,
'strides': strides,
'data_format': 'channels_last'
},
input_shape=(num_samples, num_row, num_col, stack_size))
def test_conv2dtranspose_regularization(self):
# regularizers
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv2DTranspose(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
# constraints
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv2DTranspose(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.constraints), 2)
class Conv3DTransposeTest(test.TestCase):
def test_conv3d_transpose(self):
num_samples = 2
filters = 2
stack_size = 3
num_row = 5
num_col = 6
depth = 4
for padding in ['valid', 'same']:
for strides in [(1, 1, 1), (2, 2, 2)]:
if padding == 'same' and strides != (1, 1, 1):
continue
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs={
'filters': filters,
'kernel_size': 3,
'padding': padding,
'strides': strides,
'data_format': 'channels_last'
},
input_shape=(num_samples, depth, num_row, num_col, stack_size))
def test_conv3dtranspose_regularization(self):
# regularizers
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv3DTranspose(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
# constraints
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv3DTranspose(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.constraints), 2)
class SeparableConv2DTest(test.TestCase):
def test_separable_conv_2d(self):
num_samples = 2
filters = 6
stack_size = 3
num_row = 7
num_col = 6
for padding in ['valid', 'same']:
for strides in [(1, 1), (2, 2)]:
for multiplier in [1, 2]:
if padding == 'same' and strides != (1, 1):
continue
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.SeparableConv2D,
kwargs={
'filters': filters,
'kernel_size': (3, 3),
'padding': padding,
'strides': strides,
'depth_multiplier': multiplier
},
input_shape=(num_samples, num_row, num_col, stack_size))
def test_separable_conv2d_regularization(self):
# regularizers
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'depthwise_regularizer': 'l2',
'pointwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 4)
# constraints
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'pointwise_constraint': 'unit_norm',
'depthwise_constraint': 'unit_norm',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.constraints), 2)
class Conv3DTest(test.TestCase):
def test_convolution_3d(self):
num_samples = 2
filters = 2
stack_size = 3
input_len_dim1 = 9
input_len_dim2 = 8
input_len_dim3 = 8
for padding in ['valid', 'same']:
for strides in [(1, 1, 1), (2, 2, 2)]:
if padding == 'same' and strides != (1, 1, 1):
continue
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Convolution3D,
kwargs={
'filters': filters,
'kernel_size': 3,
'padding': padding,
'strides': strides
},
input_shape=(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size))
def test_convolution_3d_regularization(self):
# regularizers
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
      self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
# constraints
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'strides': 1
}
with self.test_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.constraints), 2)
class ZeroPaddingTest(test.TestCase):
def test_zero_padding_1d(self):
num_samples = 2
input_dim = 2
num_steps = 5
shape = (num_samples, num_steps, input_dim)
inputs = np.ones(shape)
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': 2},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': (1, 2)},
input_shape=inputs.shape)
# correctness test
with self.test_session(use_gpu=True):
layer = keras.layers.ZeroPadding1D(padding=2)
layer.build(shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)
layer = keras.layers.ZeroPadding1D(padding=(1, 2))
layer.build(shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
for left_offset in [0]:
np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
for right_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
layer.get_config()
def test_zero_padding_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 4
input_num_col = 5
for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))
      else:
        inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={'padding': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={'padding': ((1, 2), (3, 4)),
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
with self.test_session(use_gpu=True):
layer = keras.layers.ZeroPadding2D(
padding=(2, 2), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
elif data_format == 'channels_first':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
          np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)
layer = keras.layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
elif data_format == 'channels_first':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
def test_zero_padding_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 4
input_len_dim2 = 5
input_len_dim3 = 3
inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size))
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.ZeroPadding3D,
kwargs={'padding': (2, 2, 2)},
input_shape=inputs.shape)
# correctness test
with self.test_session(use_gpu=True):
layer = keras.layers.ZeroPadding3D(padding=(2, 2, 2))
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
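# A numpy-only reference (an added illustration, not part of the original test file) for
# the ZeroPadding1D correctness checks above: padding=(1, 2) on the steps axis is the
# same as np.pad with widths (1, 2) on axis 1 of a (batch, steps, features) array.
def _zero_padding_1d_reference():
  x = np.ones((2, 5, 2))
  padded = np.pad(x, ((0, 0), (1, 2), (0, 0)), mode='constant')
  assert padded.shape == (2, 8, 2)
  assert (padded[:, 0, :] == 0.).all() and (padded[:, -2:, :] == 0.).all()
  assert (padded[:, 1:-2, :] == 1.).all()
  return padded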
class UpSamplingTest(test.TestCase):
def test_upsampling_1d(self):
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
def test_upsampling_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_row * input_num_row
assert np_output.shape[3] == length_col * input_num_col
else: # tf
assert np_output.shape[1] == length_row * input_num_row
assert np_output.shape[2] == length_col * input_num_col
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_row, axis=2)
expected_out = np.repeat(expected_out, length_col, axis=3)
else: # tf
expected_out = np.repeat(inputs, length_row, axis=1)
expected_out = np.repeat(expected_out, length_col, axis=2)
np.testing.assert_allclose(np_output, expected_out)
def test_upsampling_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling3D,
kwargs={'size': (2, 2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_dim1 in [2, 3]:
for length_dim2 in [2]:
for length_dim3 in [3]:
layer = keras.layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else: # tf
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # tf
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
np.testing.assert_allclose(np_output, expected_out)
class CroppingTest(test.TestCase):
def test_cropping_1d(self):
num_samples = 2
time_length = 4
input_len_dim1 = 2
inputs = np.random.rand(num_samples, time_length, input_len_dim1)
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping1D,
kwargs={'cropping': (2, 2)},
input_shape=inputs.shape)
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 9
input_len_dim2 = 9
cropping = ((2, 2), (3, 3))
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping2D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
with self.test_session(use_gpu=True):
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[
1][0]:-cropping[1][1]]
else:
expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][
0]:-cropping[1][1], :]
np.testing.assert_allclose(np_output, expected_out)
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# another correctness test (no cropping)
with self.test_session(use_gpu=True):
cropping = ((0, 0), (0, 0))
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
# compare with input
np.testing.assert_allclose(np_output, inputs)
def test_cropping_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
input_len_dim3 = 8
cropping = ((2, 2), (1, 1), (2, 3))
for data_format in ['channels_last', 'channels_first']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping3D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
with self.test_session(use_gpu=True):
layer = keras.layers.Cropping3D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1]]
else:
expected_out = inputs[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1], :]
np.testing.assert_allclose(np_output, expected_out)
if __name__ == '__main__':
test.main()
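# A numpy-only cross-check (added for illustration; never called by the test runner) of
# the expected_output in Convolution1DTest.test_dilated_conv1d above: a size-2 kernel of
# ones with stride 1, dilation 1, 'valid' padding and no bias over the input [0, 1, 2, 3]
# produces the sliding-window sums [1, 3, 5].
def _dilated_conv1d_expected_output():
  x = np.arange(4, dtype='float32')       # the test's input sequence
  kernel = np.ones(2, dtype='float32')    # kernel_size=2 with the 'ones' initializer
  out = np.array([np.dot(x[i:i + 2], kernel) for i in range(len(x) - 1)])
  assert out.tolist() == [1.0, 3.0, 5.0]  # matches expected_output=[[[1], [3], [5]]]
  return out.reshape(1, 3, 1)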
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
import pytest
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex, MultiIndex,
to_datetime, date_range, period_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_index_equal,
assert_raises_regex)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
assert rs.s[1] == 1
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis0(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
result = df.diff(axis=0)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', '1 days']),
1: pd.TimedeltaIndex(['NaT', '1 days'])})
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis1(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
if tz is None:
result = df.diff(axis=1)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']),
1: pd.TimedeltaIndex(['0 days',
'0 days'])})
assert_frame_equal(result, expected)
else:
with pytest.raises(NotImplementedError):
result = df.diff(axis=1)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
assert result[0].dtype == np.float64
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs,
(filled / filled.shift(freq='5D') - 1)
.reindex_like(filled))
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0., 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
@pytest.mark.parametrize("freq, periods, fill_method, limit",
[('5B', 5, None, None),
('3B', 3, None, None),
('3B', 3, 'bfill', None),
('7B', 7, 'pad', 1),
('7B', 7, 'bfill', 3),
('14B', 14, None, None)])
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
# GH 7292
rs_freq = self.tsframe.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = self.tsframe.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=self.tsframe.index,
columns=self.tsframe.columns)
rs_freq = empty_ts.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = empty_ts.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert (tmp['dates'].values == ex_vals).all()
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(self.tsframe)
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
tm.assert_raises_regex(ValueError,
'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH 9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
assert_series_equal(nulls, Series(range(1, 6), dtype='int64'))
# check all answers are the same
assert_frame_equal(shifted[0], shifted[1])
assert_frame_equal(shifted[0], shifted[2])
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
tm.assert_raises_regex(
ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
pytest.raises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
assert not (self.tsframe.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self):
# GH 17935
df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e']},
index=[5, 3, 2, 9, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
df.truncate(before=3, after=9)
rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
ts = pd.DataFrame({'A': np.random.randn(len(rng)),
'B': np.random.randn(len(rng))},
index=rng)
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
ts.sort_values('A', ascending=False).truncate(before='2011-11',
after='2011-12')
df = pd.DataFrame({3: np.random.randn(5),
20: np.random.randn(5),
2: np.random.randn(5),
0: np.random.randn(5)},
columns=[3, 20, 2, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
df.truncate(before=2, after=20, axis=1)
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
assert isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize("data,idx,expected_first,expected_last", [
({'A': [1, 2, 3]}, [1, 1, 2], 1, 2),
({'A': [1, 2, 3]}, [1, 2, 2], 1, 2),
({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'),
({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)])
def test_first_last_valid(self, data, idx,
expected_first, expected_last):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: its preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq='D')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_frame_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_frame_equal(result, expected)
result = ts[:0].first('3M')
assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first('1D')
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq='D')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['2000-01-10':]
assert_frame_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_frame_equal(result, expected)
result = ts[:0].last('3M')
assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last('1D')
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time('00:00')
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time='00:00', end_time='12:00')
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, 'nat'], dtype='datetime64[ns]')
result = pd.Series(data_ns).to_frame()
result['new'] = data_ns
expected = pd.DataFrame({0: [1, None],
'new': [1, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, 'nat'], dtype='datetime64[s]')
result['new'] = data_s
expected = pd.DataFrame({0: [1, None],
'new': [1e9, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
def test_frame_to_period(self):
K = 5
from pandas.core.indexes.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
pytest.raises(ValueError, df.to_period, axis=2)
@pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert'])
def test_tz_convert_and_localize(self, fn):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with pytest.raises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with pytest.raises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
assert not df3.index.levels[0].equals(l0)
assert_index_equal(df3.index.levels[0], l0_expected)
assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)('US/Pacific', level=1)
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
# Bad Inputs
# Not DatetimeIndex / PeriodIndex
with assert_raises_regex(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with assert_raises_regex(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with assert_raises_regex(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
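# A small self-contained sketch (an added illustration, not one of the pandas test cases)
# of the identity the pct_change tests above exercise: with the default 'pad' fill method,
# DataFrame.pct_change(n) equals the padded frame divided by itself shifted n periods,
# minus one.
def _pct_change_vs_shift_sketch():
    df = pd.DataFrame({'a': [1., 2., 4., np.nan, 8.]})
    filled = df.fillna(method='pad')
    expected = filled / filled.shift(1) - 1
    assert_frame_equal(df.pct_change(), expected)
    return expected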
|
|
#! /usr/bin/python
import selenium, selenium.webdriver, getpass
from sys import exit, stderr, argv, stdout, stdin
def safeFindElementByID(driver, theID, failOnError=True):
    # Keeps retrying the lookup; if failOnError is True it gives up and returns None on
    # the first error instead of retrying. The other safeFind* helpers below work the same way.
while True:
try:
return driver.find_element_by_id(theID)
except:
#errorFile.write("Could not find {0} by id\n".format(theID))
if failOnError:
return None
def safeFindElementsByTagName(driver, tagName, failOnError=True):
while True:
try:
return driver.find_elements_by_tag_name(tagName)
except:
#errorFile.write("Could not find {0} by tag name\n".format(tagName))
if failOnError:
return None
def safeFindElementsByClassName(driver, className, failOnError=True):
while True:
try:
return driver.find_elements_by_class_name(className)
except BaseException as e:
if failOnError:
return None
def safeFindElementsByXPath(driver, theXPath, failOnError=True):
while True:
try:
return driver.find_elements_by_xpath(theXPath)
except BaseException as e:
if failOnError:
return None
def safeFindElementByName(driver, theName, failOnError=True):
while True:
try:
return driver.find_element_by_name(theName)
except:
#errorFile.write("Could not find {0} by name\n".format(theName))
if failOnError:
return None
def clickCloseButton(driver):
notClicked = True
while notClicked:
closeButtons = safeFindElementsByXPath(driver, "//a[@class='enterCodeSuccessClose enterCodeToolTipClose']", False)
#outputFile.write("Found {0} button(s)\n".format(len(closeButtons)))
for closeButton in closeButtons:
try:
closeButton.click()
#outputFile.write("Clicked close button\n")
notClicked = False
break
except BaseException as e:
#print(e)
pass
def enterCokeCardCode(driver, code, outputFile, errorFile):
#Enter coke codes.
codeField = safeFindElementByName(driver, "enterCodeField", False)
submitButton = safeFindElementsByClassName(driver, "enterCodeSubmit", False)
#print(submitButton)
codeField.clear()
codeField.send_keys(code)
submitButton[0].click()
message = getCodeStatusMessage(driver)
if "try again" in message.lower():
errorFile.write(code + " received an error message: " + message + "\n")
else:
outputFile.write(code + " received a success message: " + message + "\n")
#Returns the status message for the most recently submitted code:
#either the error message or the success message, whichever
#appears first on the page.
def getCodeStatusMessage(driver):
while True:
errorMessages = safeFindElementsByClassName(driver, "enterCodeErrorMessage", True)
if errorMessages is not None:
errorMessage = "\n".join([msg.text for msg in errorMessages if len(msg.text.strip()) != 0])
if errorMessage != "":
return errorMessage
successMessages = safeFindElementsByClassName(driver, "enterCodeSuccessMessage", True)
if successMessages is not None:
successMessage = "\n".join([msg.text for msg in successMessages if len(msg.text.strip()) != 0])
if successMessage != "":
return successMessage
def enterCokeCapCode(driver, code, outputFile, errorFile):
#Enter coke codes.
codeField = safeFindElementByName(driver, "enterCodeField", False)
submitButton = safeFindElementsByClassName(driver, "enterCodeSubmit", False)
#print(submitButton)
codeField.clear()
codeField.send_keys(code)
submitButton[0].click()
foundBrand = False
while not foundBrand:
brandButtons = safeFindElementsByTagName(driver, "a", False)
errorMessages = safeFindElementsByClassName(driver, "enterCodeErrorMessage", False)
if errorMessages is not None:
errorMessage = "\n".join([msg.text for msg in errorMessages if len(msg.text.strip()) != 0])
if errorMessage != "":
errorFile.write("{0} received error messages: \n\"{1}\"\n".format(code, errorMessage))
foundBrand = True
break
for button in brandButtons:
if button is not None and button.get_attribute("brand-id") is not None:
try:
button.click()
#print("Clicking button "+button.get_attribute("brand-id") + " succeeded")
outputFile.write(code + " entered successfully.\n")
clickCloseButton(driver)
foundBrand = True
break
except:
pass
#print("Clicking button " + button.get_attribute("brand-id") + " failed")
def logout(driver):
signOutButton = safeFindElementByID(driver, "h-profilePhoto-id", False)
signOutButton.click()
notClicked = True
while notClicked:
try:
realSignOutButton = safeFindElementByID(driver, "h-signOutLink", False)
if realSignOutButton is not None:
realSignOutButton.click()
notClicked = False
else:
errorFile.write("Couldn't find the sign out button!\n")
except:
pass
def main():
if len(argv) != 5:
errorFile.write("Usage: {0} <codes_caps_file> <codes_cardboard_file> <output_file> <error_file>\n".format(argv[0]))
exit(-1)
try:
outFile = open(argv[3], "a")
except:
stderr.write("Could not open {0}\n".format(argv[3]))
exit(-1)
try:
errFile = open(argv[4], "a")
except:
outFile.close()
stderr.write("Could not open {0}\n".format(argv[4]))
exit(-1)
stdout.write("Please enter your email address: ")
emailAddr = stdin.readline().strip()
passwd = getpass.getpass("Please enter your password: ")
#Go to mycokerewards.com
driver = selenium.webdriver.Firefox()
driver.get("http://www.mycokerewards.com")
#Sign in to mycokerewards.com
signInButton = safeFindElementByID(driver, "h-signInJoinLink", False)
signInButton.click()
email = safeFindElementByID(driver, "capture_signIn_traditionalSignIn_emailAddress", False)
password = safeFindElementByID(driver, "capture_signIn_traditionalSignIn_password", False)
email.send_keys(emailAddr)
password.send_keys(passwd)
signInButton = safeFindElementByID(driver, "capture_signIn_traditionalSignIn_signInButton", False)
signInButton.click()
#Get past the "Connect with Facebook and Twitter" garbage
for i in range(3):
succeeded = False
while not succeeded:
skipButtons = driver.find_elements_by_class_name("connect-with-provider-skip-for-now")
for button in skipButtons:
try:
button.click()
#print("Yay it worked!")
succeeded = True
break
except BaseException as ex:
pass
#print(type(ex))
#Enter code from bottlecaps
with open(argv[1], 'r') as codeCapsFile:
for code in codeCapsFile:
enterCokeCapCode(driver, code.strip(), outFile, errFile)
#Enter code from big cardboard boxes
with open(argv[2], 'r') as codeCardFile:
for code in codeCardFile:
enterCokeCardCode(driver, code.strip(), outFile, errFile)
outFile.close()
errFile.close()
logout(driver)
driver.close()
if __name__ == "__main__":
main()
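# Example invocation (the script and file names below are placeholders, not taken from
# the original source):
#
#   python enter_coke_codes.py cap_codes.txt cardboard_codes.txt success.log errors.log
#
# cap_codes.txt and cardboard_codes.txt hold one promotional code per line; the last two
# arguments are the success and error log files appended to by the functions above.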
|
|
from os.path import dirname
from shutil import rmtree
try: from scipy.misc import imread
#try: from cv2 import IMREAD_UNCHANGED, imread
except ImportError: imread = None
from pylab import imshow, show
from numpy import (array, arccos, arctan2, cos, cross, dot, hstack, load, pi,
sin, square, sqrt)
from _core import BlenderModule
from _scene import Prop
__name__ = 'fauxton'
__all__ = ['Camera', 'DepthSensor', 'SurfaceNormalSensor', 'VelocitySensor']
#===============================================================================
# Private Symbols
#===============================================================================
bl_camera = BlenderModule('''
from contextlib import contextmanager
from os.path import join
from tempfile import mkdtemp
from numpy import array, reshape, save
DEFAULT_RESOLUTION = (256, 256)
materials = {}
def create_material(source):
script_text = bpy.data.texts.new('')
script_text.write(source)
material = bpy.data.materials.new('')
material.use_nodes = True
nodes = material.node_tree.nodes
nodes.clear()
script = nodes.new('ShaderNodeScript')
emittor = nodes.new('ShaderNodeEmission')
output = nodes.new('ShaderNodeOutputMaterial')
bpy.context.scene.render.engine = 'CYCLES'
script.script = script_text
if len(script.outputs) == 0:
raise ValueError('A camera\\'s OSL shader must '
'provide at least 1 output.')
links = material.node_tree.links
links.new(script.outputs[0], emittor.inputs[0])
links.new(emittor.outputs[0], output.inputs[0])
material.use_fake_user = True
return material
def get_material_name(source):
    if source not in materials:
materials[source] = create_material(source)
return materials[source].name
@contextmanager
def use_material(scene, material_name):
if material_name is not None:
old_horizon_color = scene.world.horizon_color
scene.render.engine = 'CYCLES'
scene.world.horizon_color = (0, 0, 0)
new_material = bpy.data.materials[material_name]
old_materials = {}
for obj in scene.objects:
if hasattr(obj.data, 'materials'):
old_materials[obj.name] = list(obj.data.materials)
obj.data.materials.clear()
obj.data.materials.append(new_material)
yield
if material_name is not None:
scene.world.horizon_color = old_horizon_color
for obj in scene.objects:
if hasattr(obj.data, 'materials'):
obj.data.materials.clear()
for material in old_materials[obj.name]:
obj.data.materials.append(material)
def save_links(links):
src = lambda l: (l.from_node, l.from_socket.name)
snk = lambda l: (l.to_node, l.to_socket.name)
return [src(l) + snk(l) for l in links]
def load_links(links, link_info):
for src_n, src_s, snk_n, snk_s in link_info:
src = src_n.outputs[src_s]
snk = snk_n.inputs[snk_s]
links.new(src, snk)
@contextmanager
def use_render_pass(scene, l_render_pass_name):
if type(l_render_pass_name) == list:
if len(l_render_pass_name) != 0:
scene_use_nodes = scene.use_nodes
scene.use_nodes = True
nodes = scene.node_tree.nodes
links = scene.node_tree.links
layer = scene.render.layers[0]
passes = [a for a in dir(layer) if a.startswith('use_pass_')]
scene_enabled_passes = [p for p in passes if getattr(layer, p)]
scene_node_links = save_links(links)
for p in passes: setattr(layer, p, False)
for render_pass in l_render_pass_name:
setattr(layer, 'use_pass_' + render_pass, True)
links.clear()
is_composite = lambda n: n.bl_idname == 'CompositorNodeComposite'
src_node = nodes.new('CompositorNodeRLayers')
snk_node = next(filter(is_composite, nodes), None)
snk_node = snk_node or nodes.new('CompositorNodeComposite')
src_socket = next(s for s in src_node.outputs if s.enabled)
snk_socket = snk_node.inputs['Image']
links.new(src_socket, snk_socket)
else:
if l_render_pass_name is not None:
scene_use_nodes = scene.use_nodes
scene.use_nodes = True
nodes = scene.node_tree.nodes
links = scene.node_tree.links
layer = scene.render.layers[0]
passes = [a for a in dir(layer) if a.startswith('use_pass_')]
scene_enabled_passes = [p for p in passes if getattr(layer, p)]
scene_node_links = save_links(links)
for p in passes: setattr(layer, p, False)
setattr(layer, 'use_pass_' + l_render_pass_name, True)
links.clear()
is_composite = lambda n: n.bl_idname == 'CompositorNodeComposite'
src_node = nodes.new('CompositorNodeRLayers')
snk_node = next(filter(is_composite, nodes), None)
snk_node = snk_node or nodes.new('CompositorNodeComposite')
src_socket = next(s for s in src_node.outputs if s.enabled)
snk_socket = snk_node.inputs['Image']
links.new(src_socket, snk_socket)
yield
if type(l_render_pass_name) == list:
        if len(l_render_pass_name) != 0:
nodes.remove(src_node)
for render_pass in l_render_pass_name:
setattr(layer, 'use_pass_' + render_pass, False)
for p in scene_enabled_passes: setattr(layer, p, True)
load_links(links, scene_node_links)
scene.use_nodes = scene_use_nodes
else:
if l_render_pass_name is not None:
nodes.remove(src_node)
setattr(layer, 'use_pass_' + l_render_pass_name, False)
for p in scene_enabled_passes: setattr(layer, p, True)
load_links(links, scene_node_links)
scene.use_nodes = scene_use_nodes
@contextmanager
def use_render_engine(scene, render_engine_name):
if render_engine_name is not None:
scene_render_engine = scene.render.engine
scene.render.engine = render_engine_name
yield
if render_engine_name is not None:
scene.render.engine = scene_render_engine
def create(type_):
camera = bpy.data.objects.new('', bpy.data.cameras.new(''))
camera['__type__'] = type_
return camera
def get_field_of_view(camera):
return [camera.data.angle_y, camera.data.angle_x]
def set_field_of_view(camera, field_of_view):
camera.data.angle_y, camera.data.angle_x = field_of_view
def get_resolution(camera):
return camera.get('resolution', DEFAULT_RESOLUTION)
def set_resolution(camera, resolution):
camera['resolution'] = resolution
def get_source(camera):
return camera.get('source', None)
def set_source(camera, source):
if 'source' in camera:
del camera['source']
del camera['material_name']
if source is not None:
camera['source'] = source
camera['material_name'] = get_material_name(source)
def get_render_pass(camera):
return camera.get('render_pass', None)
def set_render_pass(camera, render_pass):
camera['render_pass'] = render_pass
def get_render_engine(camera):
return camera.get('render_engine', None)
def set_render_engine(camera, render_engine):
camera['render_engine'] = render_engine
def preset_scene(scene, res, tile):
scene.render.engine = 'CYCLES'
scene.world.use_nodes = True
scene.render.resolution_x = res[0]
scene.render.resolution_y = res[1]
scene.render.resolution_percentage = 100
scene.render.tile_x = tile[0]
scene.render.tile_y = tile[1]
return scene
def render(camera, filepath, i_gpu=0, preset=True,
           l_passes=['combined'], fileformat='OPEN_EXR_MULTILAYER'):
scene = camera.users_scene[0]
scene.render.engine = 'CYCLES'
# making sure that the object pass index == 1 (for the object_index pass)
#scene.objects["object"].pass_index = 1
try:
scene.objects["object"].pass_index = 1
except:
pass
scene.camera = camera
for ppp in l_passes:
print(ppp)
if ppp == 'vector':
scene.render.layers[0].use_pass_vector = True
elif ppp == 'normal':
scene.render.layers[0].use_pass_normal = True
elif ppp == 'z':
scene.render.layers[0].use_pass_z = True
elif ppp == 'object_index':
scene.render.layers[0].use_pass_object_index = True
res = [0,0]
res[0] = get_resolution(camera)[0]
res[1] = get_resolution(camera)[1]
#if preset:
# tile = res
# preset_scene(scene, res, tile)
    C = bpy.context
C.user_preferences.system.compute_device = "CUDA_" + str(i_gpu)
scene.cycles.device = 'GPU'
scene.render.filepath = filepath
scene.render.image_settings.file_format = fileformat
scene.render.image_settings.color_mode = 'RGBA'
scene.render.resolution_y = res[0]
scene.render.resolution_x = res[1]
bpy.context.screen.scene = scene
with use_render_engine(scene, get_render_engine(camera)):
with use_render_pass(scene, get_render_pass(camera)):
with use_material(scene, camera.get('material_name', None)):
bpy.ops.render.render(write_still=True)
    if fileformat in ('OPEN_EXR_MULTILAYER', 'PNG'):
        return filepath
''')
#===============================================================================
# Public Symbols
#===============================================================================
class Camera(Prop):
'''
A prop that can take snapshots of its surroundings.
:param dict \**properties: Initial values of instance variables.
:var numpy.ndarray field_of_view: *y* and *x* viewing angles, in radians.
:var numpy.ndarray resolution: *y* and *x* resolution, in pixels.
:var str source: OSL source to use as an emissive material when rendering.
:var str render_pass: Blender render pass to use (e.g. "z" or "color").
:var str render_engine: Blender render engine to use (e.g. "CYCLES").
'''
resource_type = 'CAMERA'
def __new__(cls, **properties):
result = bl_camera.create(cls.resource_type)
[setattr(result, k, v) for k, v in properties.items()]
return result
@property
def field_of_view(self):
return array(bl_camera.get_field_of_view(self))
@field_of_view.setter
def field_of_view(self, field_of_view):
bl_camera.set_field_of_view(self, list(map(float, field_of_view)))
@property
def resolution(self):
return array(bl_camera.get_resolution(self))
@resolution.setter
def resolution(self, resolution):
bl_camera.set_resolution(self, list(map(float, resolution)))
@property
def source(self):
return bl_camera.get_source(self)
@source.setter
def source(self, source):
bl_camera.set_source(self, source)
@property
def render_pass(self):
return bl_camera.get_render_pass(self)
@render_pass.setter
def render_pass(self, render_pass):
bl_camera.set_render_pass(self, render_pass)
@property
def render_engine(self):
return bl_camera.get_render_engine(self)
@render_engine.setter
def render_engine(self, render_engine):
bl_camera.set_render_engine(self, render_engine)
def render(self, filepath, i_gpu = 0, l_passes = ['combined'], fileformat = 'OPEN_EXR_MULTILAYER'):
'''
Return a snapshot of the camera's containing scene.
:rtype: numpy.ndarray
'''
preset = True
path = bl_camera.render(self, filepath, i_gpu, preset, l_passes, fileformat)
return path
def look_at(self, target, roll=0):
'''
Orient the camera towards a point in space.
:param numpy.ndarray target: 3D spatial location to look at.
:param float roll: Rotation around the gaze axis, in radians.
'''
def norm(v):
return sqrt(sum(square(v)))
def normalize(v):
return array(v, 'd') / norm(v)
def rotation(axis, angle):
w = cos(angle / 2)
xyz = axis / norm(axis) * sin(angle / 2)
return hstack([w, xyz])
def compose(rotation_0, rotation_1):
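            # Hamilton product of two quaternions given as (w, x, y, z) arrays.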
w0, x0, y0, z0 = rotation_0
w1, x1, y1, z1 = rotation_1
w2 = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1
x2 = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1
y2 = w0 * y1 + y0 * w1 + z0 * x1 - x0 * z1
z2 = w0 * z1 + z0 * w1 + x0 * y1 - y0 * x1
return array([w2, x2, y2, z2])
eye = normalize(target - self.position)
look_axis = cross((0, 0, -1), eye) if any(eye[:2]) else (1, 0, 0)
look = rotation(look_axis, arccos(dot((0, 0, -1), eye)))
pivot = rotation(array((0, 0, -1)), pi/2 - arctan2(*eye[1::-1]) + roll)
self.rotation = compose(look, pivot)
class DepthSensor(Camera):
'''
A camera that reports the depth at each pixel.
:param dict \**properties: Initial values of instance variables.
'''
def __new__(cls, **properties):
return Camera.__new__(cls, render_pass='z', **properties)
def render(self):
'''
Return a snapshot of the camera's containing scene.
:rtype: numpy.ndarray
'''
return Camera.render(self)[:, :, 0]
class SurfaceNormalSensor(Camera):
'''
A camera that reports the surface normal at each pixel.
:param dict \**properties: Initial values of instance variables.
'''
def __new__(cls, **properties):
return Camera.__new__(cls, render_pass='normal', **properties)
def render(self):
'''
Return a snapshot of the camera's containing scene.
:rtype: numpy.ndarray
'''
return Camera.render(self)[:, :, 0:3]
class VelocitySensor(Camera):
'''
A camera that reports the velocity at each pixel.
:param dict \**properties: Initial values of instance variables.
'''
def __new__(cls, **properties):
return Camera.__new__(cls, render_pass='vector', **properties)
def render(self):
'''
Return a snapshot of the camera's containing scene.
:rtype: numpy.ndarray
'''
return Camera.render(self)[:, :, 0:3]
|
|
'''
How well do the GBT and VLA data match in the overlap regions?
'''
from spectral_cube import SpectralCube
import matplotlib.pyplot as plt
import os
from radio_beam import Beam
import astropy.units as u
import numpy as np
from scipy import stats
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.base.model import GenericLikelihoodModel
from astropy.visualization import hist
from cube_analysis.feather_cubes import feather_compare_cube
from paths import fourteenB_HI_data_path, data_path, allfigs_path
from constants import hi_freq
from plotting_styles import onecolumn_figure, default_figure, twocolumn_figure
vla_cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
gbt_path = os.path.join(data_path, "GBT")
gbt_registered_cube = SpectralCube.read(os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088_spectralregrid_registered.fits"))
beam_fwhm = lambda diam: ((1.18 * hi_freq.to(u.cm, u.spectral())) / diam.to(u.cm)) * u.rad
# The shortest baseline in the 14B-088 data is ~44 m.
las = (hi_freq.to(u.cm, u.spectral()) / (44 * u.m)).to(u.arcsec, u.dimensionless_angles())
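# Illustrative check: with this relation, the 87.5 m effective diameter adopted below
# corresponds to a FWHM of ~9.8 arcmin at the HI frequency (the "9.8'" beam referred to later), e.g.
# beam_fwhm(87.5 * u.m).to(u.arcmin)  # ~9.8 arcmin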
# Return the distributions of ratios for all channels.
radii, ratios, high_pts, low_pts = \
feather_compare_cube(vla_cube, gbt_registered_cube, las,
num_cores=6,
lowresfwhm=beam_fwhm(90 * u.m))
radii_85, ratios_85, high_pts_85, low_pts_85 = \
feather_compare_cube(vla_cube, gbt_registered_cube, las,
num_cores=6,
lowresfwhm=beam_fwhm(85 * u.m))
radii_875, ratios_875, high_pts_875, low_pts_875 = \
feather_compare_cube(vla_cube, gbt_registered_cube, las,
num_cores=6,
lowresfwhm=beam_fwhm(87.5 * u.m))
radii_95, ratios_95, high_pts_95, low_pts_95 = \
feather_compare_cube(vla_cube, gbt_registered_cube, las,
num_cores=6,
lowresfwhm=beam_fwhm(95 * u.m))
# Test #1 -- what are the slopes between k and ratio per channel?
def sentheil_perchan(xvals, yvals, alpha=0.85):
slope = np.empty((len(xvals)))
upper_uncert = np.empty((len(xvals)))
lower_uncert = np.empty((len(xvals)))
for i, (xval, yval) in enumerate(zip(xvals, yvals)):
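        # scipy.stats.theilslopes returns (slope, intercept, lower_slope, upper_slope) for the
        # requested confidence level, so the asymmetric errors below are (upper - slope) and (slope - lower).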
out = stats.theilslopes(yval, x=xval, alpha=alpha)
slope[i] = out[0]
upper_uncert[i] = out[3] - out[0]
lower_uncert[i] = out[0] - out[2]
return slope, lower_uncert, upper_uncert
slope, lower_uncert, upper_uncert = \
sentheil_perchan([rad.to(u.arcmin)**2 for rad in radii], ratios)
slope_85, lower_uncert_85, upper_uncert_85 = \
sentheil_perchan([rad.to(u.arcmin)**2 for rad in radii_85], ratios_85)
slope_875, lower_uncert_875, upper_uncert_875 = \
sentheil_perchan([rad.to(u.arcmin)**2 for rad in radii_875], ratios_875)
slope_95, lower_uncert_95, upper_uncert_95 = \
sentheil_perchan([rad.to(u.arcmin)**2 for rad in radii_95], ratios_95)
chans = np.arange(len(radii))
# For visualizing, do a LOWESS smoothing
slope_lowess = lowess(slope, chans, frac=0.1, is_sorted=True,
return_sorted=False)
slope_lowess_85 = lowess(slope_85, chans, frac=0.1, is_sorted=True,
return_sorted=False)
slope_lowess_875 = lowess(slope_875, chans, frac=0.1, is_sorted=True,
return_sorted=False)
slope_lowess_95 = lowess(slope_95, chans, frac=0.1, is_sorted=True,
return_sorted=False)
twocolumn_figure()
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
ax[0, 0].errorbar(chans, slope_85,
yerr=[lower_uncert_85, upper_uncert_85], label="85 m",
alpha=0.5)
ax[0, 0].plot(chans, slope_lowess_85)
ax[0, 0].axhline(0, linestyle='--')
ax[0, 0].text(0, 0.015, "{}".format(np.round(beam_fwhm(85 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 0].set_ylabel(r"Slope (arcmin$^{-2}$)")
ax[0, 0].grid(True)
ax[0, 1].errorbar(chans, slope_875,
yerr=[lower_uncert_875, upper_uncert_875], label="87.5 m",
alpha=0.5)
ax[0, 1].plot(chans, slope_lowess_875)
ax[0, 1].text(0, 0.015, "{}".format(np.round(beam_fwhm(87.5 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 1].axhline(0, linestyle='--')
ax[0, 1].grid(True)
ax[1, 0].errorbar(chans, slope, yerr=[lower_uncert, upper_uncert],
label="90 m", alpha=0.5)
ax[1, 0].plot(chans, slope_lowess)
ax[1, 0].axhline(0, linestyle='--')
ax[1, 0].text(0, 0.015, "{}".format(np.round(beam_fwhm(90 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 0].set_ylabel(r"Slope (arcmin$^{-2}$)")
ax[1, 0].set_xlabel("Channel")
ax[1, 0].grid(True)
ax[1, 1].errorbar(chans, slope_95,
yerr=[lower_uncert_95, upper_uncert_95], label="95 m",
alpha=0.5)
ax[1, 1].plot(chans, slope_lowess_95)
ax[1, 1].axhline(0, linestyle='--')
ax[1, 1].text(0, 0.015, "{}".format(np.round(beam_fwhm(95 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 1].set_xlabel("Channel")
ax[1, 1].grid(True)
fig.tight_layout()
fig.savefig(allfigs_path("angres_vs_ratios_vla_gbt.png"))
fig.savefig(allfigs_path("angres_vs_ratios_vla_gbt.pdf"))
plt.close()
# Make a separate plot for the 87.5 m (9.8') beam
onecolumn_figure()
plt.errorbar(chans, slope_875,
yerr=[lower_uncert_875, upper_uncert_875], label="87.5 m",
alpha=0.5)
plt.plot(chans, slope_lowess_875)
plt.axhline(0, linestyle='--')
plt.grid(True)
plt.ylabel(r"Slope (arcmin$^{-2}$)")
plt.xlabel("Channel")
plt.tight_layout()
plt.savefig(allfigs_path("angres_vs_ratios_vla_gbt_9.8arcmin.png"))
plt.savefig(allfigs_path("angres_vs_ratios_vla_gbt_9.8arcmin.pdf"))
plt.close()
# Test #2 -- Sen-Theil fit to the low and high res points to get scale factor
# Do per channel
sf_slope, sf_slope_llim, sf_slope_hlim = sentheil_perchan(low_pts, high_pts)
sf_slope_85, sf_slope_llim_85, sf_slope_hlim_85 = \
sentheil_perchan(low_pts_85, high_pts_85)
sf_slope_875, sf_slope_llim_875, sf_slope_hlim_875 = \
sentheil_perchan(low_pts_875, high_pts_875)
sf_slope_95, sf_slope_llim_95, sf_slope_hlim_95 = \
sentheil_perchan(low_pts_95, high_pts_95)
twocolumn_figure()
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
ax[0, 0].errorbar(chans, sf_slope_85,
yerr=[sf_slope_llim_85, sf_slope_hlim_85], label="85 m",
alpha=0.5)
# ax[0, 0].plot(chans, slope_lowess_85)
ax[0, 0].axhline(1, linestyle='--')
ax[0, 0].text(0, 1.2, "{}".format(np.round(beam_fwhm(85 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 0].set_ylabel(r"Scale Factor")
ax[0, 0].grid(True)
ax[0, 1].errorbar(chans, sf_slope_875,
yerr=[sf_slope_llim_875, sf_slope_hlim_875], label="87.5 m",
alpha=0.5)
# ax[0, 1].plot(chans, slope_lowess_875)
ax[0, 1].text(0, 1.2, "{}".format(np.round(beam_fwhm(87.5 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 1].axhline(1, linestyle='--')
ax[0, 1].grid(True)
ax[1, 0].errorbar(chans, sf_slope, yerr=[sf_slope_llim, sf_slope_hlim],
label="90 m", alpha=0.5)
# ax[1, 0].plot(chans, slope_lowess)
ax[1, 0].axhline(1, linestyle='--')
ax[1, 0].text(0, 1.2, "{}".format(np.round(beam_fwhm(90 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 0].set_ylabel(r"Scale Factor")
ax[1, 0].set_xlabel("Channel")
ax[1, 0].grid(True)
ax[1, 1].errorbar(chans, sf_slope_95,
yerr=[sf_slope_llim_95, sf_slope_hlim_95], label="95 m",
alpha=0.5)
# ax[1, 1].plot(chans, slope_lowess_95)
ax[1, 1].axhline(1, linestyle='--')
ax[1, 1].text(0, 1.2, "{}".format(np.round(beam_fwhm(95 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 1].set_xlabel("Channel")
ax[1, 1].grid(True)
fig.tight_layout()
fig.savefig(allfigs_path("scalefactors_fitted_vla_gbt.png"))
fig.savefig(allfigs_path("scalefactors_fitted_vla_gbt.pdf"))
plt.close()
# Test #3 -- Cauchy fit to the ratios to get the scale factor
class Likelihood(GenericLikelihoodModel):
    # Number of distribution parameters: any shape parameters plus one for the scale.
    # The Cauchy distribution has no shape parameters, so this evaluates to 1; note that
    # loglike() below still receives both the loc and the scale from the fit.
nparams = 1 if stats.cauchy.shapes is None else \
len(stats.cauchy.shapes.split(",")) + 1
def loglike(self, params):
if np.isnan(params).any():
return - np.inf
loglikes = \
stats.cauchy.logpdf(self.endog, *params[:-2],
scale=params[-1],
loc=params[-2])
if not np.isfinite(loglikes).all():
return - np.inf
else:
return loglikes.sum()
def cauchy_fitter(ratios, verbose=False):
init_fit = stats.cauchy.fit(np.log(ratios))
mle_model = Likelihood(np.log(ratios))
fitted_model = mle_model.fit(init_fit, method='nm', disp=0)
fitted_model.df_model = len(ratios)
fitted_model.df_resid = len(ratios) - 2
if verbose:
_ = hist(np.log(ratios), bins='scott', normed=True, alpha=0.5)
xvals = np.arange(-3, 3, 0.01)
plt.plot(xvals, stats.cauchy.pdf(xvals, *fitted_model.params))
return fitted_model.params[0], fitted_model.bse[0], fitted_model
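# Note: cauchy_fitter works on ln(ratio), so the returned location parameter is the log of the
# multiplicative scale factor; exponentiate it to recover the factor itself (as done for the
# adopted value printed at the end of this script).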
# Fit a Cauchy distribution to the ratios
cauchy_fit = cauchy_fitter(np.hstack(ratios))
cauchy_fit_85 = cauchy_fitter(np.hstack(ratios_85))
cauchy_fit_875 = cauchy_fitter(np.hstack(ratios_875))
cauchy_fit_95 = cauchy_fitter(np.hstack(ratios_95))
print(cauchy_fit_85, cauchy_fit_875, cauchy_fit, cauchy_fit_95)
# Do per channel
def cauchy_channel_fits(ratios, chunk=1):
num_chans = vla_cube.shape[0]
channels = np.arange(num_chans)
chunked_channels = \
np.array_split(channels,
                       [chunk * i for i in range(num_chans // chunk)])
if chunked_channels[-1].size == 0:
chunked_channels = chunked_channels[:-1]
if chunked_channels[0].size == 0:
chunked_channels = chunked_channels[1:]
sf = np.empty(len(chunked_channels))
sf_uncert = np.empty(len(chunked_channels))
for i, chunk in enumerate(chunked_channels):
slicer = slice(chunk[0], chunk[-1])
vals = np.hstack(ratios[slicer])
out = cauchy_fitter(vals, verbose=False)[:-1]
sf[i] = out[0]
sf_uncert[i] = out[1]
return sf, sf_uncert
chunk = 10
sf_cauchy, sf_cauchy_uncert = cauchy_channel_fits(ratios, chunk=chunk)
sf_cauchy_85, sf_cauchy_uncert_85 = cauchy_channel_fits(ratios_85, chunk=chunk)
sf_cauchy_875, sf_cauchy_uncert_875 = \
cauchy_channel_fits(ratios_875, chunk=chunk)
sf_cauchy_95, sf_cauchy_uncert_95 = cauchy_channel_fits(ratios_95, chunk=chunk)
half_chunk = chunk // 2
chunk_chans = np.arange(1, len(sf_cauchy) + 1) * chunk - half_chunk
twocolumn_figure()
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
ax[0, 0].errorbar(chunk_chans, sf_cauchy_85,
yerr=sf_cauchy_uncert_85, label="85 m",
alpha=0.5)
ax[0, 0].axhline(0, linestyle='--')
ax[0, 0].text(0, 0.4, "{}".format(np.round(beam_fwhm(85 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 0].set_ylabel(r"ln Scale Factor")
ax[0, 0].grid(True)
ax[0, 0].set_ylim([-0.3, 0.5])
ax[0, 1].errorbar(chunk_chans, sf_cauchy_875,
yerr=sf_cauchy_uncert_875, label="87.5 m",
alpha=0.5)
ax[0, 1].text(0, 0.4, "{}".format(np.round(beam_fwhm(87.5 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 1].axhline(0, linestyle='--')
ax[0, 1].grid(True)
ax[1, 0].errorbar(chunk_chans, sf_cauchy, yerr=sf_cauchy_uncert,
label="90 m", alpha=0.5)
ax[1, 0].axhline(0, linestyle='--')
ax[1, 0].text(0, 0.4, "{}".format(np.round(beam_fwhm(90 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 0].set_ylabel(r"ln Scale Factor")
ax[1, 0].set_xlabel("Channel")
ax[1, 0].grid(True)
ax[1, 1].errorbar(chunk_chans, sf_cauchy_95,
yerr=sf_cauchy_uncert_95, label="95 m",
alpha=0.5)
ax[1, 1].axhline(0, linestyle='--')
ax[1, 1].text(0, 0.4, "{}".format(np.round(beam_fwhm(95 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 1].set_xlabel("Channel")
ax[1, 1].grid(True)
fig.tight_layout()
fig.savefig(allfigs_path("scalefactors_cauchy_vla_gbt.png"))
fig.savefig(allfigs_path("scalefactors_cauchy_vla_gbt.pdf"))
plt.close()
# Plot a per-channel comparison of the scale factor (f_cal) from the two methods.
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
ax[0, 0].errorbar(chunk_chans, sf_cauchy_85, label="Cauchy",
alpha=0.5)
ax[0, 0].errorbar(chans, np.log(sf_slope_85), label="Theil-Sen Fit",
alpha=0.5)
ax[0, 0].axhline(0, linestyle='--')
ax[0, 0].text(0, 0.3, "{}".format(np.round(beam_fwhm(85 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 0].set_ylabel(r"ln Scale Factor")
ax[0, 0].grid(True)
ax[0, 0].set_ylim([-1.0, 0.5])
ax[0, 0].legend(frameon=True, loc='upper right')
ax[0, 1].errorbar(chunk_chans, sf_cauchy_875, label="87.5 m",
alpha=0.5)
ax[0, 1].errorbar(chans, np.log(sf_slope_875), label="Sen-Theil Fit",
alpha=0.5)
ax[0, 1].text(0, 0.3, "{}".format(np.round(beam_fwhm(87.5 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[0, 1].axhline(0, linestyle='--')
ax[0, 1].grid(True)
ax[1, 0].errorbar(chunk_chans, sf_cauchy,
label="90 m", alpha=0.5)
ax[1, 0].errorbar(chans, np.log(sf_slope), label="Sen-Theil Fit",
alpha=0.5)
ax[1, 0].axhline(0, linestyle='--')
ax[1, 0].text(0, 0.3, "{}".format(np.round(beam_fwhm(90 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 0].set_ylabel(r"ln Scale Factor")
ax[1, 0].set_xlabel("Channel")
ax[1, 0].grid(True)
ax[1, 1].errorbar(chunk_chans, sf_cauchy_95, label="95 m",
alpha=0.5)
ax[1, 1].errorbar(chans, np.log(sf_slope_95), label="Sen-Theil Fit",
alpha=0.5)
ax[1, 1].axhline(0, linestyle='--')
ax[1, 1].text(0, 0.3, "{}".format(np.round(beam_fwhm(95 * u.m).to(u.arcmin), 1)),
bbox={"boxstyle": "square", "facecolor": "w"})
ax[1, 1].set_xlabel("Channel")
ax[1, 1].grid(True)
fig.tight_layout()
fig.savefig(allfigs_path("scalefactors_comparison_vla_gbt.png"))
fig.savefig(allfigs_path("scalefactors_comparison_vla_gbt.pdf"))
plt.close()
onecolumn_figure()
plt.errorbar(chunk_chans, sf_cauchy_875, label="Cauchy",
alpha=0.5)
plt.errorbar(chans, np.log(sf_slope_875), label="Sen-Theil Fit",
alpha=0.5)
plt.axhline(0, linestyle='--')
plt.grid(True)
plt.legend(frameon=True, loc='upper left')
plt.xlabel("Channel")
plt.ylabel(r"ln Scale Factor")
plt.ylim([-1.0, 0.5])
plt.tight_layout()
plt.savefig(allfigs_path("scalefactors_comparison_vla_gbt_9.8arcmin.png"))
plt.savefig(allfigs_path("scalefactors_comparison_vla_gbt_9.8arcmin.pdf"))
plt.close()
# Finally, show the Cauchy fit for the 87.5 m beam, since this is where the
# f_cal=1 will come from.
cauchy_fit_875_limrange = cauchy_fitter(np.hstack(ratios_875[200:600]))
# Using this limited range of channels gives 0.99+/-0.24 (since we are near 0).
# This is consistent with the fit over the whole channel range.
out = hist(np.log(np.hstack(ratios_875)), bins='scott', normed=True)
plt.plot(np.arange(-3, 3, 0.01),
stats.cauchy.pdf(np.arange(-3, 3, 0.01), *cauchy_fit_875[-1].params))
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path("ratio_hist_vla_gbt_9.8arcmin.png"))
plt.savefig(allfigs_path("ratio_hist_vla_gbt_9.8arcmin.pdf"))
plt.close()
# The scale factor we adopt is...
print("Scale factor: {0}+/-{1}".format(np.exp(cauchy_fit_875[0]),
np.exp(cauchy_fit_875[0]) * cauchy_fit_875[1]))
# Open up the GBT cube and update the beam parameters
import astropy.io.fits as fits
gbt_hdu = fits.open(os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088_spectralregrid_registered.fits"),
mode='update')
gbt_hdu[0].header.update(Beam(beam_fwhm(87.5 * u.m).to(u.deg)).to_header_keywords())
gbt_hdu.flush()
gbt_hdu.close()
# And the low-res version too
gbt_hdu = fits.open(os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088_spectralregrid_registered.fits"),
mode='update')
gbt_hdu[0].header.update(Beam(beam_fwhm(87.5 * u.m).to(u.deg)).to_header_keywords())
gbt_hdu.flush()
gbt_hdu.close()
default_figure()
|
|
# App.py
# Application stuff.
# The application is responsible for managing the main frame window.
#
# We also grab the FileOpen command, to invoke our Python editor
" The PythonWin application code. Manages most aspects of MDI, etc "
import win32con
import win32api
import win32ui
import sys
import string
import os
from pywin.mfc import window, dialog, afxres
from pywin.mfc.thread import WinApp
import traceback
import regutil
from . import scriptutils
## NOTE: App and AppBuilder should NOT be used - instead, you should construct your
## APP class manually whenever you like (just ensure you leave these 2 params None!)
## Whoever wants the generic "Application" should get it via win32ui.GetApp()
# These are "legacy"
AppBuilder = None
App = None # default - if used, must end up a CApp derived class.
# Helpers that should one day be removed!
def AddIdleHandler(handler):
print("app.AddIdleHandler is deprecated - please use win32ui.GetApp().AddIdleHandler() instead.")
return win32ui.GetApp().AddIdleHandler(handler)
def DeleteIdleHandler(handler):
print("app.DeleteIdleHandler is deprecated - please use win32ui.GetApp().DeleteIdleHandler() instead.")
return win32ui.GetApp().DeleteIdleHandler(handler)
# Helper for writing a Window position by name, and later loading it.
def SaveWindowSize(section,rect,state=""):
""" Writes a rectangle to an INI file
Args: section = section name in the applications INI file
rect = a rectangle in a (cy, cx, y, x) tuple
(same format as CREATESTRUCT position tuples)."""
left, top, right, bottom = rect
if state: state = state + " "
win32ui.WriteProfileVal(section,state+"left",left)
win32ui.WriteProfileVal(section,state+"top",top)
win32ui.WriteProfileVal(section,state+"right",right)
win32ui.WriteProfileVal(section,state+"bottom",bottom)
def LoadWindowSize(section, state=""):
""" Loads a section from an INI file, and returns a rect in a tuple (see SaveWindowSize)"""
if state: state = state + " "
left = win32ui.GetProfileVal(section,state+"left",0)
top = win32ui.GetProfileVal(section,state+"top",0)
right = win32ui.GetProfileVal(section,state+"right",0)
bottom = win32ui.GetProfileVal(section,state+"bottom",0)
return (left, top, right, bottom)
def RectToCreateStructRect(rect):
return (rect[3]-rect[1], rect[2]-rect[0], rect[1], rect[0] )
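# Worked example (illustrative): a (left, top, right, bottom) rect of (10, 20, 410, 320)
# maps to the CREATESTRUCT-style (cy, cx, y, x) tuple (300, 400, 20, 10).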
# Define FrameWindow and Application objects
#
# The Main Frame of the application.
class MainFrame(window.MDIFrameWnd):
sectionPos = "Main Window"
	statusBarIndicators = ( afxres.ID_SEPARATOR, # status line indicator
afxres.ID_INDICATOR_CAPS,
afxres.ID_INDICATOR_NUM,
afxres.ID_INDICATOR_SCRL,
win32ui.ID_INDICATOR_LINENUM,
win32ui.ID_INDICATOR_COLNUM )
def OnCreate(self, cs):
self._CreateStatusBar()
return 0
def _CreateStatusBar(self):
self.statusBar = win32ui.CreateStatusBar(self)
self.statusBar.SetIndicators(self.statusBarIndicators)
self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_LINENUM)
self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_COLNUM)
def OnUpdatePosIndicator(self, cmdui):
editControl = scriptutils.GetActiveEditControl()
value = " " * 5
if editControl is not None:
try:
startChar, endChar = editControl.GetSel()
lineNo = editControl.LineFromChar(startChar)
colNo = endChar - editControl.LineIndex(lineNo)
if cmdui.m_nID==win32ui.ID_INDICATOR_LINENUM:
value = "%0*d" % (5, lineNo + 1)
else:
value = "%0*d" % (3, colNo + 1)
except win32ui.error:
pass
cmdui.SetText(value)
cmdui.Enable()
def PreCreateWindow(self, cc):
cc = self._obj_.PreCreateWindow(cc)
pos = LoadWindowSize(self.sectionPos)
self.startRect = pos
if pos[2] - pos[0]:
rect = RectToCreateStructRect(pos)
cc = cc[0], cc[1], cc[2], cc[3], rect, cc[5], cc[6], cc[7], cc[8]
return cc
def OnDestroy(self, msg):
# use GetWindowPlacement(), as it works even when min'd or max'd
rectNow = self.GetWindowPlacement()[4]
if rectNow != self.startRect:
SaveWindowSize(self.sectionPos, rectNow)
return 0
class CApp(WinApp):
" A class for the application "
def __init__(self):
self.oldCallbackCaller = None
WinApp.__init__(self, win32ui.GetApp() )
self.idleHandlers = []
def InitInstance(self):
" Called to crank up the app "
HookInput()
numMRU = win32ui.GetProfileVal("Settings","Recent File List Size", 10)
win32ui.LoadStdProfileSettings(numMRU)
# self._obj_.InitMDIInstance()
if win32api.GetVersionEx()[0]<4:
win32ui.SetDialogBkColor()
win32ui.Enable3dControls()
# install a "callback caller" - a manager for the callbacks
# self.oldCallbackCaller = win32ui.InstallCallbackCaller(self.CallbackManager)
self.LoadMainFrame()
self.SetApplicationPaths()
def ExitInstance(self):
" Called as the app dies - too late to prevent it here! "
win32ui.OutputDebug("Application shutdown\n")
# Restore the callback manager, if any.
try:
win32ui.InstallCallbackCaller(self.oldCallbackCaller)
except AttributeError:
pass
if self.oldCallbackCaller:
del self.oldCallbackCaller
self.frame=None # clean Python references to the now destroyed window object.
self.idleHandlers = []
# Attempt cleanup if not already done!
if self._obj_: self._obj_.AttachObject(None)
self._obj_ = None
global App
global AppBuilder
App = None
AppBuilder = None
return 0
def HaveIdleHandler(self, handler):
return handler in self.idleHandlers
def AddIdleHandler(self, handler):
self.idleHandlers.append(handler)
def DeleteIdleHandler(self, handler):
self.idleHandlers.remove(handler)
def OnIdle(self, count):
try:
ret = 0
handlers = self.idleHandlers[:] # copy list, as may be modified during loop
for handler in handlers:
try:
thisRet = handler(handler, count)
except:
print("Idle handler %s failed" % (repr(handler)))
traceback.print_exc()
print("Idle handler removed from list")
try:
self.DeleteIdleHandler(handler)
except ValueError: # Item not in list.
pass
thisRet = 0
ret = ret or thisRet
return ret
except KeyboardInterrupt:
pass
def CreateMainFrame(self):
return MainFrame()
def LoadMainFrame(self):
" Create the main applications frame "
self.frame = self.CreateMainFrame()
self.SetMainFrame(self.frame)
self.frame.LoadFrame(win32ui.IDR_MAINFRAME, win32con.WS_OVERLAPPEDWINDOW)
self.frame.DragAcceptFiles() # we can accept these.
self.frame.ShowWindow(win32ui.GetInitialStateRequest())
self.frame.UpdateWindow()
self.HookCommands()
def OnHelp(self,id, code):
try:
if id==win32ui.ID_HELP_GUI_REF:
helpFile = regutil.GetRegisteredHelpFile("Pythonwin Reference")
helpCmd = win32con.HELP_CONTENTS
else:
helpFile = regutil.GetRegisteredHelpFile("Main Python Documentation")
helpCmd = win32con.HELP_FINDER
if helpFile is None:
win32ui.MessageBox("The help file is not registered!")
else:
from . import help
help.OpenHelpFile(helpFile, helpCmd)
except:
t, v, tb = sys.exc_info()
win32ui.MessageBox("Internal error in help file processing\r\n%s: %s" % (t,v))
tb = None # Prevent a cycle
def DoLoadModules(self, modules):
# XXX - this should go, but the debugger uses it :-(
		# don't do much checking!
for module in modules:
__import__(module)
def HookCommands(self):
self.frame.HookMessage(self.OnDropFiles,win32con.WM_DROPFILES)
self.HookCommand(self.HandleOnFileOpen,win32ui.ID_FILE_OPEN)
self.HookCommand(self.HandleOnFileNew,win32ui.ID_FILE_NEW)
self.HookCommand(self.OnFileMRU,win32ui.ID_FILE_MRU_FILE1)
self.HookCommand(self.OnHelpAbout,win32ui.ID_APP_ABOUT)
self.HookCommand(self.OnHelp, win32ui.ID_HELP_PYTHON)
self.HookCommand(self.OnHelp, win32ui.ID_HELP_GUI_REF)
# Hook for the right-click menu.
self.frame.GetWindow(win32con.GW_CHILD).HookMessage(self.OnRClick,win32con.WM_RBUTTONDOWN)
def SetApplicationPaths(self):
# Load the users/application paths
new_path = []
apppath=win32ui.GetProfileVal('Python','Application Path','').split(';')
for path in apppath:
if len(path)>0:
new_path.append(win32ui.FullPath(path))
for extra_num in range(1,11):
apppath=win32ui.GetProfileVal('Python','Application Path %d'%extra_num,'').split(';')
if len(apppath) == 0:
break
for path in apppath:
if len(path)>0:
new_path.append(win32ui.FullPath(path))
sys.path = new_path + sys.path
def OnRClick(self,params):
" Handle right click message "
# put up the entire FILE menu!
menu = win32ui.LoadMenu(win32ui.IDR_TEXTTYPE).GetSubMenu(0)
menu.TrackPopupMenu(params[5]) # track at mouse position.
return 0
def OnDropFiles(self,msg):
" Handle a file being dropped from file manager "
hDropInfo = msg[2]
self.frame.SetActiveWindow() # active us
nFiles = win32api.DragQueryFile(hDropInfo)
try:
for iFile in range(0,nFiles):
fileName = win32api.DragQueryFile(hDropInfo, iFile)
win32ui.GetApp().OpenDocumentFile( fileName )
finally:
win32api.DragFinish(hDropInfo);
return 0
# No longer used by Pythonwin, as the C++ code has this same basic functionality
# but handles errors slightly better.
	# It all still works, though, so if you need similar functionality, you can use it.
	# Therefore I haven't deleted this code completely!
# def CallbackManager( self, ob, args = () ):
# """Manage win32 callbacks. Trap exceptions, report on them, then return 'All OK'
# to the frame-work. """
# import traceback
# try:
# ret = apply(ob, args)
# return ret
# except:
# # take copies of the exception values, else other (handled) exceptions may get
# # copied over by the other fns called.
# win32ui.SetStatusText('An exception occured in a windows command handler.')
# t, v, tb = sys.exc_info()
# traceback.print_exception(t, v, tb.tb_next)
# try:
# sys.stdout.flush()
# except (NameError, AttributeError):
# pass
# Command handlers.
def OnFileMRU( self, id, code ):
" Called when a File 1-n message is recieved "
fileName = win32ui.GetRecentFileList()[id - win32ui.ID_FILE_MRU_FILE1]
win32ui.GetApp().OpenDocumentFile(fileName)
def HandleOnFileOpen( self, id, code ):
" Called when FileOpen message is received "
win32ui.GetApp().OnFileOpen()
def HandleOnFileNew( self, id, code ):
" Called when FileNew message is received "
win32ui.GetApp().OnFileNew()
def OnHelpAbout( self, id, code ):
" Called when HelpAbout message is received. Displays the About dialog. "
win32ui.InitRichEdit()
dlg=AboutBox()
dlg.DoModal()
def _GetRegistryValue(key, val, default = None):
# val is registry value - None for default val.
try:
hkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, key)
return win32api.RegQueryValueEx(hkey, val)[0]
except win32api.error:
try:
hkey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, key)
return win32api.RegQueryValueEx(hkey, val)[0]
except win32api.error:
return default
scintilla = "Scintilla is Copyright 1998-2008 Neil Hodgson (http://www.scintilla.org)"
idle = "This program uses IDLE extensions by Guido van Rossum, Tim Peters and others."
contributors = "Thanks to the following people for making significant contributions: Roger Upole, Sidnei da Silva, Sam Rushing, Curt Hagenlocher, Dave Brennan, Roger Burnham, Gordon McMillan, Neil Hodgson, Laramie Leavitt. (let me know if I have forgotten you!)"
# The About Box
class AboutBox(dialog.Dialog):
def __init__(self, idd=win32ui.IDD_ABOUTBOX):
dialog.Dialog.__init__(self, idd)
def OnInitDialog(self):
text = "Pythonwin - Python IDE and GUI Framework for Windows.\n\n%s\n\nPython is %s\n\n%s\n\n%s\n\n%s" % (win32ui.copyright, sys.copyright, scintilla, idle, contributors)
self.SetDlgItemText(win32ui.IDC_EDIT1, text)
# Get the build number - written by installers.
# For distutils build, read pywin32.version.txt
import distutils.sysconfig
site_packages = distutils.sysconfig.get_python_lib(plat_specific=1)
try:
build_no = open(os.path.join(site_packages, "pywin32.version.txt")).read().strip()
ver = "pywin32 build %s" % build_no
except EnvironmentError:
ver = None
if ver is None:
# See if we are Part of Active Python
ver = _GetRegistryValue("SOFTWARE\\ActiveState\\ActivePython", "CurrentVersion")
if ver is not None:
ver = "ActivePython build %s" % (ver,)
if ver is None:
ver = ""
self.SetDlgItemText(win32ui.IDC_ABOUT_VERSION, ver)
self.HookCommand(self.OnButHomePage, win32ui.IDC_BUTTON1)
def OnButHomePage(self, id, code):
if code == win32con.BN_CLICKED:
win32api.ShellExecute(0, "open", "http://starship.python.net/crew/mhammond/win32", None, "", 1)
def Win32RawInput(prompt=None):
"Provide raw_input() for gui apps"
# flush stderr/out first.
try:
sys.stdout.flush()
sys.stderr.flush()
except:
pass
if prompt is None: prompt = ""
ret=dialog.GetSimpleInput(prompt)
	if ret is None:
raise KeyboardInterrupt("operation cancelled")
return ret
def Win32Input(prompt=None):
"Provide input() for gui apps"
return eval(input(prompt))
def HookInput():
try:
raw_input
# must be py2x...
sys.modules['__builtin__'].raw_input=Win32RawInput
sys.modules['__builtin__'].input=Win32Input
except NameError:
# must be py3k
import code
sys.modules['builtins'].input=Win32RawInput
def HaveGoodGUI():
"""Returns true if we currently have a good gui available.
"""
return "pywin.framework.startup" in sys.modules
def CreateDefaultGUI( appClass = None):
"""Creates a default GUI environment
"""
if appClass is None:
from . import intpyapp # Bring in the default app - could be param'd later.
appClass = intpyapp.InteractivePythonApp
# Create and init the app.
appClass().InitInstance()
def CheckCreateDefaultGUI():
"""Checks and creates if necessary a default GUI environment.
"""
rc = HaveGoodGUI()
if not rc:
CreateDefaultGUI()
return rc
|
|
import codecs
import requests
from six import iteritems
from six import BytesIO
from six.moves.urllib_parse import urljoin
from lxml.html import _nons, HTMLParser
from .py2compat import parse_headers
from .iterable import one
from .http import DEFAULT_TIMEOUT, readable_from_response, merge_setting
from .etree import get_base_url
from .response import Response
def create_html_parser(headers):
charset = headers.get_content_charset()
try:
if charset and codecs.lookup(charset).name == 'iso8859-1':
charset = 'windows-1252'
except LookupError:
pass
    # if charset is not specified in the Content-Type, this will be
    # None; encoding=None produces the default (ISO 8859-1) behavior.
return HTMLParser(encoding=charset)
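# For example, a response declaring "Content-Type: text/html; charset=iso-8859-1" is parsed
# as windows-1252 here, mirroring how browsers treat that legacy charset label.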
class ParserReadable(object):
""" Readable that feeds a parser as it is reads. """
def __init__(self, readable):
self.readable = readable
self.lines = []
self.code = None
self.headers = None
self.parser = None
self.root = None
@classmethod
def from_response(cls, response, url, decode_content, context):
return cls(readable_from_response(response, url,
decode_content=decode_content,
context=context))
@property
def name(self):
return getattr(self.readable, 'name')
def read(self, size):
buf = self.readable.read(size)
if self.parser:
self.parser.feed(buf)
if len(buf) < size:
if self.root is None:
self.root = self.parser.close()
url = self.headers.get('X-wex-request-url')
# this sets the .base_url
self.root.getroottree().docinfo.URL = url
return buf
def readline(self, *args):
line = self.readable.readline(*args)
if not self.lines:
_, _, self.code, _ = Response.parse_status_line(self.readable,
line)
self.lines.append(line)
if not line.strip():
self.headers = parse_headers(BytesIO(b''.join(self.lines[1:])))
if 200 <= self.code < 300:
self.parser = create_html_parser(self.headers)
return line
def close(self):
self.readable.close()
# just like:
# https://github.com/lxml/lxml/blob/master/src/lxml/html/__init__.py#L1004
# but doesn't ignore <input type="submit" ...> elements
def form_values(form):
"""
Return a list of tuples of the field values for the form.
This is suitable to be passed to ``urllib.urlencode()``.
"""
results = []
    for el in form.inputs:
name = el.name
if not name:
continue
tag = _nons(el.tag)
if tag == 'textarea':
results.append((name, el.value))
elif tag == 'select':
value = el.value
if el.multiple:
for v in value:
results.append((name, v))
elif value is not None:
results.append((name, el.value))
else:
assert tag == 'input', (
"Unexpected tag: %r" % el)
if el.checkable and not el.checked:
continue
if el.type in ('image', 'reset'):
continue
value = el.value
if value is not None:
results.append((name, el.value))
return results
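# Illustrative usage (hypothetical variable names): urlencode(form_values(form)) yields the
# key/value pairs a browser would submit for `form`, including <input type="submit"> values,
# which lxml's own form_values() would skip.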
def submit_form(url, method, session=None, **kw):
if session is None:
session = requests.Session()
session.stream = True
decode_content = kw.get('decode_content', True)
proxies = kw.get('proxies', None)
headers = merge_setting(method.args.get('headers'), kw.get('headers'))
context = kw.get('context', {})
auth = merge_setting(method.args.get('auth'), kw.get('auth'))
response = session.request(
'get',
url,
allow_redirects=False,
cookies=method.args.get('cookies', None),
data=None,
headers=headers,
params=method.args.get('params', None),
proxies=proxies,
timeout=DEFAULT_TIMEOUT,
auth=auth,
)
readable = ParserReadable.from_response(response, url,
decode_content=decode_content,
context=context)
yield readable
redirects = session.resolve_redirects(response,
response.request,
proxies=proxies,
stream=True,
timeout=DEFAULT_TIMEOUT)
for response in redirects:
readable = ParserReadable.from_response(response, url,
decode_content=decode_content,
context=context)
yield readable
if readable.root is None:
return
form_css_selector, values = one(iteritems(method.args))
form = one(readable.root.cssselect(form_css_selector))
if isinstance(values, dict):
values = values.items()
for name, value in values:
if name in form.inputs:
input = form.inputs[name]
else:
input = one(form.cssselect(name))
if hasattr(input, 'add'):
input.add(value)
else:
input.value = value
base_url = get_base_url(form)
form_action_url = urljoin(base_url, form.get('action', ''))
form_method = form.method.upper()
if form_method in ('POST', 'PUT'):
# this implies 'application/x-www-form-urlencoded'
data = form_values(form)
params = None
else:
data = None
params = form_values(form)
response = session.request(
form_method,
form_action_url,
params=params,
allow_redirects=False,
cookies=method.args.get('cookies', None),
data=data,
headers=headers,
proxies=proxies,
timeout=DEFAULT_TIMEOUT,
)
yield readable_from_response(response, url,
decode_content=decode_content,
context=context)
redirects = session.resolve_redirects(response,
response.request,
proxies=proxies,
stream=True,
timeout=DEFAULT_TIMEOUT)
for redirect in redirects:
yield readable_from_response(redirect, url,
decode_content=decode_content,
context=context)
|
|
"""The registration module contains classes for image registration.
Image registration aims to align two images using a particular transformation.
miapy currently supports multi-modal rigid registration, i.e. aligning two images of different modalities
using a rigid transformation (rotation, translation, reflection, or their combination).
See Also:
`ITK Registration <https://itk.org/Doxygen/html/RegistrationPage.html>`_
`ITK Software Guide Registration <https://itk.org/ITKSoftwareGuide/html/Book2/ITKSoftwareGuide-Book2ch3.html>`_
"""
import abc
import enum
import os
import typing as t
import matplotlib
matplotlib.use('Agg') # use matplotlib without having a window appear
import matplotlib.pyplot as plt
import numpy as np
import SimpleITK as sitk
import miapy.filtering.filter as miapy_fltr
class RegistrationType(enum.Enum):
"""Represents the registration transformation type."""
AFFINE = 1
SIMILARITY = 2
RIGID = 3
BSPLINE = 4
class RegistrationCallback(metaclass=abc.ABCMeta):
"""Represents the abstract handler for the registration callbacks."""
def __init__(self) -> None:
"""Initializes a new instance of the abstract RegistrationCallback class."""
self.registration_method = None
self.fixed_image = None
self.moving_image = None
self.transform = None
def set_params(self, registration_method: sitk.ImageRegistrationMethod,
fixed_image: sitk.Image,
moving_image: sitk.Image,
transform: sitk.Transform):
"""Sets the parameters that might be used during the callbacks
Args:
registration_method (sitk.ImageRegistrationMethod): The registration method.
fixed_image (sitk.Image): The fixed image.
moving_image (sitk.Image): The moving image.
transform (sitk.Transform): The transformation.
"""
self.registration_method = registration_method
self.fixed_image = fixed_image
self.moving_image = moving_image
self.transform = transform
# link the callback functions to the events
self.registration_method.AddCommand(sitk.sitkStartEvent, self.registration_started)
self.registration_method.AddCommand(sitk.sitkEndEvent, self.registration_ended)
self.registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
self.registration_resolution_changed)
self.registration_method.AddCommand(sitk.sitkIterationEvent, self.registration_iteration_ended)
def registration_ended(self):
"""Callback for the EndEvent."""
pass
def registration_started(self):
"""Callback for the StartEvent."""
pass
def registration_resolution_changed(self):
"""Callback for the MultiResolutionIterationEvent."""
pass
def registration_iteration_ended(self):
"""Callback for the IterationEvent."""
pass
class MultiModalRegistrationParams(miapy_fltr.IFilterParams):
"""Represents parameters for the multi-modal rigid registration."""
def __init__(self, fixed_image: sitk.Image, fixed_image_mask: sitk.Image=None,
callbacks: t.List[RegistrationCallback]=None):
"""Initializes a new instance of the MultiModalRegistrationParams class.
Args:
fixed_image (sitk.Image): The fixed image for the registration.
fixed_image_mask (sitk.Image): A mask for the fixed image to limit the registration.
            callbacks (t.List[RegistrationCallback]): A list of callbacks notified about the registration
                progress, if any. Note that callbacks such as plotting increase the computational time.
"""
self.fixed_image = fixed_image
self.fixed_image_mask = fixed_image_mask
self.callbacks = callbacks
class MultiModalRegistration(miapy_fltr.IFilter):
"""Represents a multi-modal image registration filter.
The filter estimates a 3-dimensional rigid or affine transformation between images of different modalities using
- Mutual information similarity metric
- Linear interpolation
- Gradient descent optimization
Examples:
The following example shows the usage of the MultiModalRegistration class.
>>> fixed_image = sitk.ReadImage('/path/to/image/fixed.mha')
>>> moving_image = sitk.ReadImage('/path/to/image/moving.mha')
>>> registration = MultiModalRegistration() # specify parameters to your needs
>>> parameters = MultiModalRegistrationParams(fixed_image)
>>> registered_image = registration.execute(moving_image, parameters)
"""
def __init__(self,
registration_type: RegistrationType=RegistrationType.RIGID,
number_of_histogram_bins: int=200,
learning_rate: float=1.0,
step_size: float=0.001,
number_of_iterations: int=200,
relaxation_factor: float=0.5,
shrink_factors: [int]=(2, 1, 1),
smoothing_sigmas: [float]=(2, 1, 0),
sampling_percentage: float=0.2,
resampling_interpolator=sitk.sitkBSpline):
"""Initializes a new instance of the MultiModalRegistration class.
Args:
registration_type (RegistrationType): The type of the registration ('rigid' or 'affine').
number_of_histogram_bins (int): The number of histogram bins.
learning_rate (float): The optimizer's learning rate.
step_size (float): The optimizer's step size. Each step in the optimizer is at least this large.
number_of_iterations (int): The maximum number of optimization iterations.
relaxation_factor (float): The relaxation factor to penalize abrupt changes during optimization.
shrink_factors ([int]): The shrink factors at each shrinking level (from high to low).
            smoothing_sigmas ([float]): The Gaussian sigmas for smoothing at each shrinking level (in physical units).
            sampling_percentage (float): Fraction of voxels of the fixed image that will be used for registration (0, 1].
Typical values range from 0.01 (1 %) for low detail images to 0.2 (20 %) for high detail images.
The higher the fraction, the higher the computational time.
resampling_interpolator: Interpolation to be applied while resampling the image by the determined
transformation.
"""
super().__init__()
if len(shrink_factors) != len(smoothing_sigmas):
raise ValueError("shrink_factors and smoothing_sigmas need to be same length")
self.registration_type = registration_type
self.number_of_histogram_bins = number_of_histogram_bins
self.learning_rate = learning_rate
self.step_size = step_size
self.number_of_iterations = number_of_iterations
self.relaxation_factor = relaxation_factor
self.shrink_factors = shrink_factors
self.smoothing_sigmas = smoothing_sigmas
self.sampling_percentage = sampling_percentage
self.resampling_interpolator = resampling_interpolator
registration = sitk.ImageRegistrationMethod()
# similarity metric
# will compare how well the two images match each other
# registration.SetMetricAsJointHistogramMutualInformation(self.number_of_histogram_bins, 1.5)
registration.SetMetricAsMattesMutualInformation(self.number_of_histogram_bins)
registration.SetMetricSamplingStrategy(registration.RANDOM)
registration.SetMetricSamplingPercentage(self.sampling_percentage)
# An image gradient calculator based on ImageFunction is used instead of image gradient filters
# set to True uses GradientRecursiveGaussianImageFilter
# set to False uses CentralDifferenceImageFunction
# see also https://itk.org/Doxygen/html/classitk_1_1ImageToImageMetricv4.html
registration.SetMetricUseFixedImageGradientFilter(False)
registration.SetMetricUseMovingImageGradientFilter(False)
# interpolator
        # will evaluate the intensities of the moving image at non-grid positions
registration.SetInterpolator(sitk.sitkLinear)
# optimizer
# is required to explore the parameter space of the transform in search of optimal values of the metric
if self.registration_type == RegistrationType.BSPLINE:
registration.SetOptimizerAsLBFGSB()
else:
registration.SetOptimizerAsRegularStepGradientDescent(learningRate=self.learning_rate,
minStep=self.step_size,
numberOfIterations=self.number_of_iterations,
relaxationFactor=self.relaxation_factor,
gradientMagnitudeTolerance=1e-4,
estimateLearningRate=registration.EachIteration,
maximumStepSizeInPhysicalUnits=0.0)
registration.SetOptimizerScalesFromPhysicalShift()
# setup for the multi-resolution framework
registration.SetShrinkFactorsPerLevel(self.shrink_factors)
registration.SetSmoothingSigmasPerLevel(self.smoothing_sigmas)
registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
self.registration = registration
self.transform = None
def execute(self, image: sitk.Image, params: MultiModalRegistrationParams=None) -> sitk.Image:
"""Executes a multi-modal rigid registration.
Args:
image (sitk.Image): The moving image.
params (MultiModalRegistrationParams): The parameters, which contain the fixed image.
Returns:
sitk.Image: The registered image.
"""
if params is None:
raise ValueError("params is not defined")
dimension = image.GetDimension()
if dimension not in (2, 3):
raise ValueError('Image dimension {} is not among the accepted (2, 3)'.format(dimension))
# set a transform that is applied to the moving image to initialize the registration
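        # (CenteredTransformInitializerFilter.GEOMETRY below aligns the geometric centers of the two
        #  volumes; MOMENTS would instead align their intensity centers of mass.)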
if self.registration_type == RegistrationType.BSPLINE:
transform_domain_mesh_size = [10] * image.GetDimension()
initial_transform = sitk.BSplineTransformInitializer(params.fixed_image, transform_domain_mesh_size)
else:
if self.registration_type == RegistrationType.RIGID:
transform_type = sitk.VersorRigid3DTransform() if dimension == 3 else sitk.Euler2DTransform()
elif self.registration_type == RegistrationType.AFFINE:
transform_type = sitk.AffineTransform(dimension)
elif self.registration_type == RegistrationType.SIMILARITY:
transform_type = sitk.Similarity3DTransform() if dimension == 3 else sitk.Similarity2DTransform()
else:
raise ValueError('not supported registration_type')
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(params.fixed_image,
image.GetPixelIDValue()),
image,
transform_type,
sitk.CenteredTransformInitializerFilter.GEOMETRY)
self.registration.SetInitialTransform(initial_transform, inPlace=True)
if params.fixed_image_mask:
self.registration.SetMetricFixedMask(params.fixed_image_mask)
if params.callbacks is not None:
for callback in params.callbacks:
callback.set_params(self.registration, params.fixed_image, image, initial_transform)
self.transform = self.registration.Execute(sitk.Cast(params.fixed_image, sitk.sitkFloat32),
sitk.Cast(image, sitk.sitkFloat32))
if self.verbose:
print('MultiModalRegistration:\n Final metric value: {0}'.format(self.registration.GetMetricValue()))
print(' Optimizer\'s stopping condition, {0}'.format(
self.registration.GetOptimizerStopConditionDescription()))
elif self.number_of_iterations == self.registration.GetOptimizerIteration():
print('MultiModalRegistration: Optimizer terminated at number of iterations and did not converge!')
return sitk.Resample(image, params.fixed_image, self.transform, self.resampling_interpolator, 0.0,
image.GetPixelIDValue())
def __str__(self):
"""Gets a nicely printable string representation.
Returns:
str: The string representation.
"""
return 'MultiModalRegistration:\n' \
' registration_type: {self.registration_type}\n' \
' number_of_histogram_bins: {self.number_of_histogram_bins}\n' \
' learning_rate: {self.learning_rate}\n' \
' step_size: {self.step_size}\n' \
' number_of_iterations: {self.number_of_iterations}\n' \
' relaxation_factor: {self.relaxation_factor}\n' \
' shrink_factors: {self.shrink_factors}\n' \
' smoothing_sigmas: {self.smoothing_sigmas}\n' \
' sampling_percentage: {self.sampling_percentage}\n' \
' resampling_interpolator: {self.resampling_interpolator}\n' \
.format(self=self)
class PlotCallback(RegistrationCallback):
"""Represents a plotter for SimpleITK registrations."""
def __init__(self, plot_dir: str, file_name_prefix: str='', slice_no: int=-1) -> None:
"""
Args:
plot_dir (str): Path to the directory where to save the plots.
file_name_prefix (str): The file name prefix for the plots.
slice_no (int): The slice number to plot (affects only 3-D images). -1 means to use the middle slice.
"""
super().__init__()
self.plot_dir = plot_dir
self.file_name_prefix = file_name_prefix
self.slice_no = slice_no
self.metric_values = []
self.resolution_iterations = []
def registration_ended(self):
"""Callback for the EndEvent."""
plt.close()
def registration_started(self):
"""Callback for the StartEvent."""
self.metric_values = []
self.resolution_iterations = []
def registration_resolution_changed(self):
"""Callback for the MultiResolutionIterationEvent."""
self.resolution_iterations.append(len(self.metric_values))
def registration_iteration_ended(self):
"""Callback for the IterationEvent.
Saves an image including the visualization of the registered images and the metric value plot.
"""
self.metric_values.append(self.registration_method.GetMetricValue())
# Plot the similarity metric values; resolution changes are marked with a blue star
plt.plot(self.metric_values, 'r')
plt.plot(self.resolution_iterations, [self.metric_values[index] for index in self.resolution_iterations], 'b*')
plt.xlabel('Iteration', fontsize=12)
plt.ylabel('Metric Value', fontsize=12)
# todo(fabianbalsiger): format precision of legends
# plt.axes().yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:2f}'))
# plt.axes().xaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:d}'))
# _, ax = plt.subplots()
# ax.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:f2}'))
# todo(fabianbalsiger): add margin to the left side of the plot
        # Convert the plot to a SimpleITK image (works with the agg matplotlib backend, doesn't work
        # with the default backend - the relevant method is canvas.tostring_rgb())
        plt.gcf().canvas.draw()
        plot_data = np.frombuffer(plt.gcf().canvas.tostring_rgb(), dtype=np.uint8)
plot_data = plot_data.reshape(plt.gcf().canvas.get_width_height()[::-1] + (3,))
plot_image = sitk.GetImageFromArray(plot_data, isVector=True)
# Extract the central axial slice from the two volumes, compose it using the transformation and alpha blend it
alpha = 0.7
moving_transformed = sitk.Resample(self.moving_image, self.fixed_image, self.transform,
sitk.sitkLinear, 0.0,
self.moving_image.GetPixelIDValue())
# Extract the plotting slice in xy and alpha blend them
if self.fixed_image.GetDimension() == 3:
slice_index = self.slice_no if self.slice_no != -1 else round((self.fixed_image.GetSize())[2] / 2)
image_registration_overlay = (1.0 - alpha) * sitk.Normalize(self.fixed_image[:, :, slice_index]) + \
alpha * sitk.Normalize(moving_transformed[:, :, slice_index])
else:
image_registration_overlay = (1.0 - alpha) * sitk.Normalize(self.fixed_image) + \
alpha * sitk.Normalize(moving_transformed[:, :])
combined_slices_image = sitk.ScalarToRGBColormap(image_registration_overlay)
self._write_combined_image(combined_slices_image, plot_image,
os.path.join(self.plot_dir,
self.file_name_prefix + format(len(self.metric_values), '03d') + '.png')
)
@staticmethod
def _write_combined_image(image1, image2, file_name):
"""Writes an image including the visualization of the registered images and the metric value plot."""
combined_image = sitk.Image(
(image1.GetWidth() + image2.GetWidth(), max(image1.GetHeight(), image2.GetHeight())),
image1.GetPixelID(), image1.GetNumberOfComponentsPerPixel())
image1_destination = [0, 0]
image2_destination = [image1.GetWidth(), 0]
if image1.GetHeight() > image2.GetHeight():
image2_destination[1] = round((combined_image.GetHeight() - image2.GetHeight()) / 2)
else:
image1_destination[1] = round((combined_image.GetHeight() - image1.GetHeight()) / 2)
combined_image = sitk.Paste(combined_image, image1, image1.GetSize(), (0, 0), image1_destination)
combined_image = sitk.Paste(combined_image, image2, image2.GetSize(), (0, 0), image2_destination)
sitk.WriteImage(combined_image, file_name)
class PlotOnResolutionChangeCallback(RegistrationCallback):
"""Represents a plotter for SimpleITK registrations.
Saves the moving image on each resolution change and the registration end.
"""
def __init__(self, plot_dir: str, file_name_prefix: str='') -> None:
"""
Args:
plot_dir (str): Path to the directory where to save the plots.
file_name_prefix (str): The file name prefix for the plots.
"""
super().__init__()
self.plot_dir = plot_dir
self.file_name_prefix = file_name_prefix
self.resolution = 0
def registration_ended(self):
"""Callback for the EndEvent."""
self._write_image('end')
def registration_started(self):
"""Callback for the StartEvent."""
self.resolution = 0
def registration_resolution_changed(self):
"""Callback for the MultiResolutionIterationEvent."""
self._write_image('res' + str(self.resolution))
self.resolution = self.resolution + 1
def registration_iteration_ended(self):
"""Callback for the IterationEvent."""
def _write_image(self, file_name_suffix: str):
"""Writes an image."""
file_name = os.path.join(self.plot_dir, self.file_name_prefix + '_' + file_name_suffix + '.mha')
moving_transformed = sitk.Resample(self.moving_image, self.fixed_image, self.transform,
sitk.sitkLinear, 0.0, self.moving_image.GetPixelIDValue())
sitk.WriteImage(moving_transformed, file_name)
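def _example_registration_with_callbacks():
    """Illustrative usage sketch for the plotting callbacks above.

    Assumes MultiModalRegistrationParams forwards a ``callbacks`` argument to
    ``params.callbacks``, which is the attribute read in
    MultiModalRegistration.execute(). Paths and directories are placeholders.
    """
    fixed_image = sitk.ReadImage('fixed.mha', sitk.sitkFloat32)    # placeholder path
    moving_image = sitk.ReadImage('moving.mha', sitk.sitkFloat32)  # placeholder path
    callbacks = [PlotCallback('./plots', file_name_prefix='iteration_'),
                 PlotOnResolutionChangeCallback('./plots', file_name_prefix='resolution')]
    params = MultiModalRegistrationParams(fixed_image, callbacks=callbacks)
    MultiModalRegistration().execute(moving_image, params)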
|
|
import schedule
import logging
import re
import datetime
import io
import time
import traceback
import importlib
import pkgutil
import inspect
import reporter.dq_log
from datetime import date, datetime
from enum import Enum
from weasyprint import HTML
from jinja2 import Environment, FileSystemLoader
from reporter.emailing import send_markdown_email
from reporter.connections import DatabaseConnection
from reporter.emailing import email_error, get_recipients
from reporter.dq_log import log_report_run
class Schedule(Enum):
def daily(func):
schedule.every().day.at("12:00").do(func)
def weekly(func):
schedule.every().sunday.at("15:00").do(func)
def monthly(func):
schedule.every(4).saturday.at("15:00").do(func)
def never(func):
pass
class Report:
def __init__(self, introduction=None, recipients=None,
name=None, send_email=True, schedule=None):
self._name = name or type(self).__name__
# Unpick CamelCase
self._name = re.sub('([a-z])([A-Z])', r'\1 \2', self._name)
        self._recipients = recipients or ('DEFAULT_RECIPIENT',)
self._introduction = introduction or ''
self._send_email = send_email
self._schedule = schedule or Schedule.weekly
def schedule(self):
self._schedule(self.run)
logging.info("{} scheduled".format(self._name))
def run(self):
try:
logging.info("{} started".format(self._name))
report, rows, attachments = self.get_report()
logging.info("{} ran with {} rows".format(self._name, rows))
if (rows == 0):
return
if self._send_email:
send_markdown_email(
self._name,
self._recipients,
report,
attachments)
except KeyboardInterrupt as e:
raise e
except Exception:
logging.error(traceback.format_exc())
email_error(self._name, traceback.format_exc())
def get_introduction(self):
result = "**{} ({:%d-%b-%Y})**\r\n\r\n".format(
self._name,
date.today())
result += "_{}_:\r\n\r\n".format(self._introduction)
return result
def get_report(self):
return None, 0, None
def get_details(self):
return '"{}", "{}", "{}", "{}"'.format(
self._name,
self._schedule.__name__,
"; ".join(self._recipients),
"; ".join(get_recipients(self._recipients)),
)
class SqlReport(Report):
def __init__(self, sql, conn=None, parameters=None, **kwargs):
super().__init__(**kwargs)
self._sql = sql
self._conn = conn or DatabaseConnection.reporting
self._parameters = parameters or ()
def get_report(self):
attachments = None
start_datetime = datetime.utcnow()
try:
with self._conn() as conn:
conn.execute(self._sql, self._parameters)
report, rows = self.get_report_lines(conn)
log_report_run(
name=self._name,
start_datetime=start_datetime,
end_datetime=datetime.utcnow(),
recipients=self._recipients,
report=report,
error_count=rows,
)
if rows == 0:
return None, 0, attachments
except Exception as e:
print(self._sql)
raise e
markdown = self.get_introduction()
markdown += report
markdown += "\r\n\r\n{} Record(s) Found".format(rows)
return markdown, rows, attachments
def get_report_lines(self, cursor):
markdown = ''
rows = 0
for row in cursor:
line = self.get_report_line(row)
if line:
rows += 1
markdown += line
return markdown, rows
def get_report_line(self, row):
return '- {}\r\n'.format(row)
class PdfReport(SqlReport):
def __init__(self, template, **kwargs):
super().__init__(**kwargs)
self._template = template
def get_report(self):
env = Environment(loader=FileSystemLoader('./templates'))
template = env.get_template(self._template)
with self._conn() as conn:
conn.execute(self._sql, self._parameters)
template_vars = {
"rows": conn.fetchall(),
"now": datetime.utcnow()
}
html = template.render(template_vars)
buf = io.BytesIO()
HTML(string=html, base_url='.').write_pdf(buf)
buf.seek(0)
mkdn = self.get_introduction()
attachments = [{
'filename': '{}.pdf'.format(self._name),
'inline': False,
'stream': buf
}]
return mkdn, 1, attachments
def get_concrete_reports(cls=None):
if (cls is None):
cls = Report
result = [sub() for sub in cls.__subclasses__()
if len(sub.__subclasses__()) == 0 and
# If the constructor requires parameters
# other than self (i.e., it has more than 1
# argument), it's an abstract class
len(inspect.getfullargspec(sub.__init__)[0]) == 1]
for sub in [sub for sub in cls.__subclasses__()
if len(sub.__subclasses__()) != 0]:
result += get_concrete_reports(sub)
return result
def schedule_reports():
reports = get_concrete_reports()
for r in reports:
r.schedule()
logging.info("---- {} reports scheduled ----".format(len(reports)))
while True:
try:
schedule.run_pending()
time.sleep(1)
except KeyboardInterrupt:
logging.info('Schedule stopped')
return
def run_reports(report_name, exclude):
reports = get_concrete_reports()
for r in reports:
if type(r).__name__.lower() in exclude:
continue
if type(r).__name__[:len(report_name)].lower() == report_name.lower():
try:
r.run()
except KeyboardInterrupt:
logging.info('Schedule stopped')
return
def run_all(exclude):
reports = get_concrete_reports()
for r in reports:
if type(r).__name__.lower() in exclude:
continue
r.run()
def list_all():
for r in get_concrete_reports():
print(r.get_details())
def get_sub_modules(path, prefix):
result = []
for m in pkgutil.iter_modules(path):
new_module_name = prefix + m[1]
result.append(new_module_name)
result.extend(get_sub_modules(
[path[0] + '/' + m[1]],
new_module_name + '.'
))
return result
def import_sub_reports(path, name):
for m in get_sub_modules(path, name + '.'):
importlib.import_module(m)
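def _example_report():
    """Illustrative sketch of declaring and running a concrete report.

    A concrete SqlReport needs a no-argument constructor so that
    get_concrete_reports() can instantiate it once the class has been imported
    (normally via import_sub_reports()). The SQL text and recipient key below
    are placeholders.
    """
    class ExampleMissingDataReport(SqlReport):
        def __init__(self):
            super().__init__(
                sql='SELECT id FROM participants WHERE date_of_birth IS NULL',  # placeholder SQL
                introduction='Participants with a missing date of birth',
                recipients=('EXAMPLE_RECIPIENTS',),  # placeholder recipient key
                schedule=Schedule.daily,
            )

        def get_report_line(self, row):
            return '- Participant {}\r\n'.format(row[0])

    # Running the report directly; schedule_reports() would instead register it
    # with the scheduler and block while running pending jobs.
    ExampleMissingDataReport().run()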
|
|
from silverraw import silvershop, silvercom, silverbook, silvercore
import os
from datetime import datetime
from enum import Enum
import pyxb
# Imports for suds
import urllib2, httplib, socket
from suds.client import Client
from suds.sax.text import Raw
from suds.transport.http import HttpTransport, Reply, TransportError
# Making binding easier to use
b=pyxb.BIND
class TRAVEL_TYPE:
STATION = "STATION"
class FARE_FILTER(Enum):
NONE = "NONE"
CHEAPEST = "CHEAPEST"
CHEAPEST_ONEWAY_AND_ROUNDTRIP = "CHEAPEST_ONEWAY_AND_ROUNDTRIP"
OPEN = "OPEN"
CHEAPEST_OPEN = "CHEAPEST_OPEN"
class CONTACT_TYPE(Enum):
HOME = "HOME"
BUSINESS = "BUSINESS"
UNKNOWN = "UNKNOWN"
class CONTACT_MEDIUM(Enum):
PHONE = "PHONE"
EMAIL = "EMAIL"
class PASSENGER_TYPE(Enum):
A = "A"
C = "C"
class ADDRESS_TYPE(Enum):
BUSINESS = "BUSINESS"
class PAYMENT_TYPE(Enum):
CREDIT_CARD = "CC"
ON_ACCOUNT = "OA"
DEBIT_CARD = "DB"
class CONFIRMATION_TYPE(Enum):
CREDIT_CARD = "CC"
DEBIT_CARD = "DB"
ID_CARD = "ID"
LOYALTY_CARD = "LC"
class TICKET_DELIVERY_OPTION(Enum):
EMAIL = "EML"
E_TICKET = "ETK"
PRINT_AT_HOME = "PAH"
SMS = "SMS"
CONDUCTOR = "TBC"
REGULAR_MAIL = "TBM"
OVERNIGHT_MAIL = "TBO"
EXPRESS_MAIL = "TBX"
METRO_LINK = "TML"
TICKETING_OFFICE = "TOF"
VENDING_MACHINE = "TVM"
class TicketOption:
"""
Represents a ticket option to add, modify or remove an existing booking
"""
def __init__(self, code, currency, fee):
"""
Args:
code (TICKET_DELIVERY_OPTION): Type of ticket option.
currency (str): Currency of fees involved.
            fee (int): Amount involved as a fee in order to have this ticket option.
"""
self.code = code
self.currency = currency
self.fee = fee
class BookingUpdate:
"""
    This class represents a booking update, and allows users to add or remove fees, ticket delivery options, passenger IDs, leg solutions, etc.
"""
def __init__(self,
record_locator = None,
ticket_option = None):
"""
Args:
record_locator (str): The identifier of the booking record.
ticket_option (TicketOption): A TicketOption object specifying the option to add to a specific booking
"""
self.record_locator = record_locator
self.ticket_option = ticket_option
class BookingConfirmation:
"""
This class represents a booking confirmation.
"""
def __init__(self,
record_locator = None,
confirmation_type = None,
card_number = None,
expiration_year=2016,
expiration_month=12,
card_holder_first_name = None,
card_holder_last_name = None):
"""
Args:
record_locator (str): The identifier of the booking record.
confirmation_type (CONFIRMATION_TYPE): The form of confirmation for the booking.
card_number (str): The card number.
expiration_year (int): The year of expiration of card.
expiration_month (int): The month of expiration of card.
card_holder_first_name (str): The first name of card holder.
card_holder_last_name (str): The last name of card holder.
"""
self.record_locator = record_locator
self.confirmation_type = confirmation_type
self.card_number = card_number
self.expiration_year = expiration_year
self.expiration_month = expiration_month
self.card_holder_first_name = card_holder_first_name
self.card_holder_last_name = card_holder_last_name
class BillingAddress:
"""
    This class represents a billing address. It is mostly used for payments.
"""
def __init__(self,
address1 = None,
city = None,
zip_code = None,
country = None,
type = None):
"""
Args:
address1 (str): Your first line of billing address.
city (str): Your billing city.
zip_code (str): Your billing zip code.
country (str): The country of your billing address.
            type (ADDRESS_TYPE): The type of address.
"""
self.address1 = address1
self.city = city
self.zip_code = zip_code
self.country = country
self.type = type
class PaymentMethod:
"""
This class represents a payment method for use in SilverCore API.
"""
def __init__(self,
record_locator = None,
payment_form = None,
payment_form_type = None,
card_number = None,
card_type = None,
card_holder_first_name = None,
card_holder_last_name = None,
card_validation_number = None,
amount = None,
currency = None,
expiration_year = None,
expiration_month = None,
customer_ip_address = None,
billing_address = None,
response_spec = None):
self.record_locator = record_locator
self.payment_form = payment_form
self.payment_form_type = payment_form_type
self.card_number = card_number
self.card_type = card_type
self.card_holder_first_name = card_holder_first_name
self.card_holder_last_name = card_holder_last_name
self.card_validation_number = card_validation_number
self.amount = amount
self.currency = currency
self.expiration_year = expiration_year
self.expiration_month = expiration_month
self.customer_ip_address = customer_ip_address
self.billing_address = billing_address
self.response_spec = response_spec
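def _example_payment_method():
    """Illustrative sketch of assembling a PaymentMethod.

    All field values below are placeholders chosen for demonstration; the
    record locator would come from a previously created booking, and the
    form-of-payment and card-type codes depend on the SilverCore back end.
    """
    billing = BillingAddress(address1="1 Example Street",
                             city="London",
                             zip_code="N1 9GU",
                             country="GB",
                             type=ADDRESS_TYPE.BUSINESS)
    return PaymentMethod(record_locator="ABC123",
                         payment_form="CreditCard",
                         payment_form_type=PAYMENT_TYPE.CREDIT_CARD,
                         card_number="4111111111111111",
                         card_type="VI",
                         card_holder_first_name="Ada",
                         card_holder_last_name="Lovelace",
                         card_validation_number="123",
                         amount=42.50,
                         currency="GBP",
                         expiration_year=2016,
                         expiration_month=12,
                         billing_address=billing)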
class ContactInfo:
"""
Represents contact information for a passenger
"""
def __init__(self, type, medium, info):
self.type = type
self.medium = medium
self.info = info
class Passenger:
"""Represents a passenger that will be on the relevant trip.
The passenger generates a unique ID whenever it is created.
"""
passenger_count = 0
@staticmethod
def _get_new_passenger_id():
"""This function returns a unique id to create a passenger.
Returns:
str. The unique id for the passenger::
"""
Passenger.passenger_count = Passenger.passenger_count+1
return "PAX_" + str(Passenger.passenger_count)
def __init__(self,
age=None,
id=None,
first_name=None,
last_name=None,
contact_info=[]):
"""Initializes passenger with attributes given
Args:
age (int): The age of the passenger as integer.
id (str): The id of passenger - if not given, it's automatically generated.
first_name (str): First name of passenger.
last_name (str): Last name of passenger.
contact_info (ContactInfo[]): The contact information of the passenger.
"""
if not id:
self.id = Passenger._get_new_passenger_id()
else:
self.id = id
self.age = age
self.first_name = first_name
self.last_name = last_name
self.contact_info = contact_info
def add_contact(self, contact_info):
assert(isinstance(contact_info, ContactInfo))
self.contact_info.append(contact_info)
class TravelPoint:
"""
Stores a pair of origin and destination location as well as the time of departure and arrival
"""
def __init__(self, origin=None, destination=None, departure=None, arrival=None, type=TRAVEL_TYPE.STATION):
"""Initializes the Travelpoint with the relevant origin and destination, and optional departure and arrival times.
Args:
origin (str): Name of station of origin.
destination (str): Name of station of arrival.
departure (DateTime): Approximate time and date of departure.
arrival (DateTime): Approximate time and date of arrival.
"""
if not departure and not arrival:
raise Exception("You must supply either departure time, arrival time.")
self.origin = origin
self.destination = destination
self.departure = departure
self.arrival = arrival
self.type = type
class FareSearch:
"""
Represents an object which contains all required fields to perform a successful fare search
"""
def __init__(self, travel_points=[],
fare_filter=FARE_FILTER.CHEAPEST,
passengers=[],
specs=[]):
""" Initializes a fare search to find the relevant travel points for the passengers given.
Args:
travel_points (TravelPoint[]): List of travel points.
fare_filter (FARE_FILTER): Fare filter type.
passengers (Passenger[]): List of passengers travelling.
specs (TRAVEL_SPECS[]): [Not supported yet] List of travel specifications.
"""
self.travel_points = travel_points
self.fare_filter = fare_filter
self.passengers = passengers
# TODO: Specs are not supported yet
self.specs = specs
class Leg:
"""
Represents a travel leg with its respective travel segments
"""
def __init__(self, id, travel_segments):
"""FUNC_DESC.
Args:
id (str): The id of the leg
trave_segments (TravelSegment[]): An array of travel segments.
"""
self.id = id
self.travel_segments = travel_segments
class TravelSegment:
"""
This class represents a travel segment for a specific leg
"""
def __init__(self,
type="TRAIN",
id = None,
sequence = None,
origin = None,
origin_type = "STATION",
destination = None,
destination_type = "STATION",
departure = None,
arrival = None,
designator = None,
marketing_carrier = None,
operating_carrier = None,
equipment_type = None,
equipment_type_str = None):
"""
Args:
            type (str): The type of transport medium.
id (str): The identifier of the travel segment.
sequence (int): The ordering sequence number of the travel segment
origin (str): The origin of the travel segment.
origin_type (str): The type of origin of the travel segment.
destination (str): The destination of the travel segment.
destination_type (str): The destination type of the travel segment.
departure (DateTime): The datetime of departure of the travel segment.
arrival (DateTime): The datetime of arrival of the travel segment.
designator (str): The designator for the travel segment.
            marketing_carrier (str): The marketing carrier for the travel segment.
operating_carrier (str): The operational carrier for the travel segment.
equipment_type (str): The type of equipment for the travel segment defined by the silvercore equipment types.
equipment_type_str (str): The printable string version of the equipment type.
"""
self.id = id
self.type = type
self.sequence = sequence
self.origin = origin
self.origin_type = origin_type
self.destination = destination
self.destination_type = destination_type
self.departure = departure
self.arrival = arrival
self.designator = designator
self.marketing_carrier = marketing_carrier
self.operating_carrier = operating_carrier
self.equipment_type = equipment_type
self.equipment_type_str = equipment_type_str
class FareCode:
"""
Represents a fare code for the respective Ticketable Fare
"""
def __init__(self,
code = None,
service_class = None,
travel_segment_id = None,
cabin_class = None,
fare_display_name = None):
"""
Args:
code (str): The code that identifies the fare code
service_class (str): The type of service class
travel_segment_id (str): The travel segment it references
cabin_class (str): The class of the cabin for the travel segment
fare_display_name (str): The display name for the fare code
"""
self.code = code
self.service_class = service_class
self.travel_segment_id = travel_segment_id
self.cabin_class = cabin_class
self.fare_display_name = fare_display_name
class FarePrice:
"""
Class representing the breakdown costs of a fare total
"""
def __init__(self, price, type, currency):
"""
Args:
price (int): The price of the current fare breakdown item
type (str): The type of the fare price breakdown item
currency (str): The currency of the fare price breakdown item
"""
self.price = price
self.type = type
self.currency = currency
class PassengerReference:
"""
    An aggregation of a passenger with the respective fare codes for a ticketable fare
"""
def __init__(self, passenger, class_type, fare_codes):
"""
Args:
passenger (Passenger[]): The passenger for this reference.
class_type (PASSENGER_TYPE): The class type of passenger travelling.
fare_codes (FareCode[]): An array of fare codes for this passenger.
"""
self.passenger = passenger
self.class_type = class_type
self.fare_codes = fare_codes
class TicketableFare:
"""
Represents an individual section of a fare total, containing a set of fare codes and passengers
"""
def __init__(self,
price = None,
prices = [],
currency = None,
passenger_references = []):
"""
Args:
price (int): The total amount of the ticketable fare.
prices (FarePrice[]): Array of prices for the breakdown of the total
currency (str): The currency of the fare.
            passenger_references (PassengerReference[]): An array of passenger references for this ticketable fare.
"""
self.price = price
self.prices = prices
self.currency = currency
self.passenger_references = passenger_references
class FareTotal:
"""
This class represents a fare for a specific leg, with specific segments given
"""
def __init__(self,
id = None,
currency = None,
price = None,
expiration = None,
ticketable_fares = [],
legs = []):
"""
Args:
id (str): The identifier for this fare
currency (str): the currency for the fare amount
            price (int): The total cost of the fare.
expiration (DateTime): The date of expiration for the fare
ticketable_fares (TicketableFare[]): Array of ticketable fares comprising this fare
legs (Leg[]): Array of legs that this fare refers to, minimum 1.
"""
self.id = id
self.currency = currency
self.price = price
self.expiration = expiration
self.ticketable_fares = ticketable_fares
self.legs = legs
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
"""
This class handles authentication in the secure ssl tunnel created with the given PEM certificate and RSA key
"""
def __init__(self, key, cert):
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
#Rather than pass in a reference to a connection class, we pass in
# a reference to a function which, for all intents and purposes,
# will behave as a constructor
return self.do_open(self.getConnection, req)
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host,
key_file=self.key,
cert_file=self.cert)
class HTTPSClientCertTransport(HttpTransport):
"""
This class creates an SSL tunnel to establish the secure connection between the silverclient and silvercore API
"""
def __init__(self, key, cert, *args, **kwargs):
HttpTransport.__init__(self, *args, **kwargs)
self.key = key
self.cert = cert
def u2open(self, u2request):
"""
Open a connection.
@param u2request: A urllib2 request.
        @type u2request: urllib2.Request.
@return: The opened file-like urllib2 object.
@rtype: fp
"""
tm = self.options.timeout
url = urllib2.build_opener(HTTPSClientAuthHandler(self.key, self.cert))
if self.u2ver() < 2.6:
socket.setdefaulttimeout(tm)
return url.open(u2request)
else:
return url.open(u2request, timeout=tm)
class SilverSoap:
"""
The SilverSoap class manages all interactions with the SilverCore API.
This class deals with the heavy conversion of objects to their respective SilverRaw format.
.. note::
        For more thorough documentation, check the SilverRail wiki.
"""
booking_wsdl="https://hacktrain.railgds.net/booking-ws/services/Booking/v2?wsdl"
booking_location="https://hacktrain.railgds.net/booking-ws/services/Booking/v2"
shopping_wsdl="https://hacktrain.railgds.net/shopping-ws/services/Shopping/v2?wsdl"
shopping_location="https://hacktrain.railgds.net/shopping-ws/services/Shopping/v2"
def __init__(self, distributor, point_of_sale, channel, cert, key):
"""Gathers the information required to create the context on each request.
Args:
distributor (str): The name of the distributor for the SilverRail ticketing system provider.
point_of_sale (str): The point of sale, which can be location, etc.
channel (str): The channel code for the queries.
cert (str): The absolute path location for the ssl silverrail certificate that will be used to verify each request.
key (str): The absolute path location of the key file.
"""
self._distributor = distributor
self._point_of_sale = point_of_sale
self._channel = channel
self.shop_client = Client(SilverSoap.shopping_wsdl,
transport=HTTPSClientCertTransport(key, cert),
location=SilverSoap.shopping_location,
retxml=True)
self.book_client = Client(SilverSoap.booking_wsdl,
transport=HTTPSClientCertTransport(key, cert),
location=SilverSoap.booking_location,
retxml=True)
def _silver_send(self, soap_client, api_func, xml_func, pyxb_xml):
"""Sends a raw xml message to the SilverCore backend with the function given with the func_enum provided.
Args:
soap_client (SILVERCORE_API_CLIENT): The SOAP client to use for the request.
api_func (SILVERCORE_API_FUNC): SilverCore functions available through the SOAP API.
            xml_func (SILVERCORE_XML_FUNC): The name of the SilverCore XML element to create for the request body.
            pyxb_xml (pyxb.BIND): The pyxb payload to append to that element.
Returns:
SilverRaw.Element. A SilverRaw element containing the response of the SilverCore SOAP request::
"""
# Adding SOAP Envelope wrapper
senv = silvercore.Envelope()
senv.Header = b()
senv.Body = b()
getattr(senv.Body, xml_func).append(pyxb_xml)
xml = str(senv.toxml())
# Until nasty pyxb bug is fixed, we need to add namespace manually.
# Basically, when we append a pyxb object to a _PluralObject,
# the namespace for that object is not updated, so we have to add
# it manually.
# If anyone can submit a pull request to pyxb to fix it, or
        # find a neater workaround, let's do it.
xml = xml.replace(xml_func + ">", "ns3:" + xml_func + ">")
print xml
# Call the relevant SilverCore function with the raw XML given
client_found = getattr(self, soap_client)
result = getattr(client_found.service, api_func)(__inject={"msg": xml})
# Create respective SOAP SilverRaw object
silver_obj = silvercore.CreateFromDocument(result)
        # Only one response object is supported at a time
assert(len(silver_obj.Body.content()) == 1)
# Return SilverCore response
return silver_obj.Body.content()[0]
def _get_xml_context(self):
"""Returns a pyxb BIND representation of a silverraw context object. In order for the context to be valid, it has to be assigned to an object's context property.
Returns:
b. A b object representing a silverraw context object::
"""
        return b(
            distributorCode=self._distributor,
            pointOfSaleCode=self._point_of_sale,
            channelCode=self._channel)
def _search_fare(self, fare_query):
"""This function creates the respective SOAP object from the FareQuery object and returns the results found from the API.
Args:
fare_query (FareSearch): The fare search object to query the silvercore backend with
Returns:
FareResult. The results from the FareQuery::
"""
# Creating a point to point shopping request
p2p = silvershop.pointToPointShoppingRequest()
# Obtaining the context
p2p.context = self._get_xml_context()
p2p.pointToPointShoppingQuery = b()
        # Set the fare filter to the SilverCore-compatible string
p2p.pointToPointShoppingQuery.fareFilter = fare_query.fare_filter.value
# Adding Travel Point Pairs
p2p.pointToPointShoppingQuery.travelPointPairs = b()
i = 0
for tp in fare_query.travel_points:
p2p.pointToPointShoppingQuery.travelPointPairs.append(b())
p2p.pointToPointShoppingQuery.travelPointPairs.travelPointPair[i].originTravelPoint = b(tp.origin, type=tp.type)
p2p.pointToPointShoppingQuery.travelPointPairs.travelPointPair[i].destinationTravelPoint = b(tp.destination, type=tp.type)
if tp.departure:
p2p.pointToPointShoppingQuery.travelPointPairs.travelPointPair[i].departureDateTimeWindow = \
b(date=datetime.strftime(tp.departure, "%Y-%m-%d"), time=datetime.strftime(tp.departure, "%H:%M:%S"))
i = i + 1
        # Adding passenger specifications (initialized once, then one spec per passenger)
        p2p.pointToPointShoppingQuery.passengerSpecs = b()
        for p in fare_query.passengers:
            p2p.pointToPointShoppingQuery.passengerSpecs.append(b(passengerSpecID=p.id, age=p.age))
# Send point to point search request
response = self._silver_send(
"shop_client",
"PointToPointShop",
"pointToPointShoppingRequest",
p2p)
return response
def _create_booking_from_response(self, legs, fares, passengers, parameters=[], response_specs=[]):
"""Creates a silverraw request to send to SilverCore API with form the legs and fares given from the previous faresearch query, and passengers given.
Args:
legs (silverraw.Leg[]): An array of SilverRaw Leg objects chosen to book.
fares (silverraw.Fare[]): An array of SilverRaw Fare objects chosen to book.
passengers (Passenger[]): An array of passengers that will be present on each booking.
parameters (CREATE_BOOKING_PARAMS[]): Array of parameters to pass the create booking request (NOT SUPPORTED YET).
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.createBookingResponse. Returns a createBookingResponse object::
"""
cb = silverbook.createBookingRecordRequest()
# Obtaining the context
cb.context = self._get_xml_context()
# TODO: Add support for parameters
cb.parameters = b()
cb.parameters.priceAcceptance = b()
cb.parameters.priceAcceptance.acceptAny = True
# Adding point to point prices
cb.prices = b()
cb.prices.pointToPointPrice.extend(fares)
# Adding all legs for trip
cb.legSolutions = b()
cb.legSolutions.legSolution.extend(legs)
# Adding passengers
cb.passengers = b()
for idx1, passenger in enumerate(passengers):
cb.passengers.passenger.append(b())
p = cb.passengers.passenger[idx1]
p.passengerID = passenger.id
p.nameFirst = passenger.first_name
p.nameLast = passenger.last_name
            p.ageAtTimeOfTravel = passenger.age
p.contactInformation = b()
# Adding all contact information available for passenger
for contact in passenger.contact_info:
p.contactInformation.contact.append(b(
contactType=contact.type.value,
contactMedium=contact.medium.value,
contactInfo=contact.info))
# Send create booking request
response = self._silver_send(
"book_client",
"CreateBookingRecord",
"createBookingRecordRequest",
cb)
return response
def _create_booking(self, fares, passengers, parameters, response_specs):
"""Creates the relevant silverraw objects and sends a create booking request to the silvercore api.
Args:
fares (FareTotal[]): An array of fares chosen to book.
passengers (Passenger[]): An array of passengers that will be present on each booking.
parameters (CREATE_BOOKING_PARAMS[]): Array of parameters to pass the create booking request (NOT SUPPORTED YET).
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API.
Returns:
silverraw.createBookingResponse. Returns a createBookingResponse object::
"""
cb = silverbook.createBookingRecordRequest()
# Obtaining the context
cb.context = self._get_xml_context()
# TODO: Add support for parameters
cb.parameters = b()
cb.parameters.priceAcceptance = b()
cb.parameters.priceAcceptance.acceptAny = True
# TODO: Add support for response_spec
# if len(response_specs):
# cb.responseSpec = b()
# for r in response_specs:
# getattr(cb.responseSpec, r) = True
# Adding passengers
cb.passengers = b()
for idx1, passenger in enumerate(passengers):
cb.passengers.passenger.append(b())
p = cb.passengers.passenger[idx1]
p.passengerID = passenger.id
p.nameFirst = passenger.first_name
p.nameLast = passenger.last_name
            p.ageAtTimeOfTravel = passenger.age
p.contactInformation = b()
# Adding all contact information available for passenger
for contact in passenger.contact_info:
p.contactInformation.contact.append(b(
contactType=contact.type.value,
contactMedium=contact.medium.value,
contactInfo=contact.info))
# Adding point to point prices
cb.prices = b()
for idx2, fare in enumerate(fares):
cb.prices.pointToPointPrice.append(b())
pr = cb.prices.pointToPointPrice[idx2]
pr.priceID = fare.id
pr.totalPrice = b(fare.price, currency=fare.currency)
pr.holdExpiration = fare.expiration
pr.legReferences = b()
for leg in fare.legs:
pr.legReferences.legSolutionIDRef.append(b(leg.id))
# Add all ticketable fares
pr.ticketableFares = b()
for idx5, ticketable in enumerate(fare.ticketable_fares):
pr.ticketableFares.ticketableFare.append(b())
tf = pr.ticketableFares.ticketableFare[idx5]
tf.totalPrice = b(ticketable.price, currency=ticketable.currency)
# Adding all price breakdown
tf.prices = b()
for price in ticketable.prices:
tf.prices.price.append(b(price.price, type=price.type, currency=price.currency))
# Adding passenger references
tf.passengerReferences = b()
for idx6, p_ref in enumerate(ticketable.passenger_references):
tf.passengerReferences.passengerReference.append(b())
r = tf.passengerReferences.passengerReference[idx6]
r.passengerIDRef = p_ref.passenger.id
r.passengerTypeCode = p_ref.class_type.value
# Adding fare codes
r.fareCodes = b()
for id7, farecode in enumerate(p_ref.fare_codes):
r.fareCodes.fareCode.append(b())
fc = r.fareCodes.fareCode[id7]
fc.code = farecode.code
fc.serviceClass = farecode.service_class
fc.travelSegmentIDRef = farecode.travel_segment_id
fc.cabinClass = farecode.cabin_class
fc.fareDisplayName = farecode.fare_display_name
# Adding all legs for trip
cb.legSolutions = b()
for idx3, leg in enumerate(fare.legs):
cb.legSolutions.legSolution.append(b())
l = cb.legSolutions.legSolution[idx3]
l.legSolutionID = leg.id
# Adding all travel segments for each leg
l.travelSegments = b()
for idx4, segment in enumerate(leg.travel_segments):
l.travelSegments.travelSegment.append(b())
ts = l.travelSegments.travelSegment[idx4]
ts.sequence = segment.sequence
ts.travelSegmentID = segment.id
ts.type = segment.type
ts.originTravelPoint = b(segment.origin, type=segment.origin_type)
ts.destinationTravelPoint = b(segment.destination, type=segment.destination_type)
ts.departureDateTime = segment.departure
ts.arrivalDateTime = segment.arrival
ts.designator = segment.designator
ts.marketingCarrier = segment.marketing_carrier
ts.operatingCarrier = segment.operating_carrier
ts.equipmentType = b(segment.equipment_type_str, code=segment.equipment_type)
# Send create booking request
response = self._silver_send(
"book_client",
"CreateBookingRecord",
"createBookingRecordRequest",
cb)
return response
def _add_payment(self, payment, response_specs):
"""Creates the SilverRaw SOAP objects to communicate to the AddPayment API and add the payment given by the silver.PaymentMethod object
Args:
payment (PaymentMethod): The payment method to use.
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.addPaymentResponse. Returns a addPaymentResponse object::
"""
ap = silverbook.addPaymentRequest()
# Get context
ap.context = self._get_xml_context()
# Record locator to identify booking
ap.recordLocator = payment.record_locator
# Adding method of payment
ap.payment = b()
ap.payment.formOfPayment = b(payment.payment_form, type=payment.payment_form_type.value)
ap.payment.creditCard = b(type = payment.card_type)
ap.payment.creditCard.number = payment.card_number
        expiration_ym = "{0}-{1:02d}".format(payment.expiration_year, payment.expiration_month)
ap.payment.creditCard.expirationYearMonth = b(expiration_ym)
fl_name = payment.card_holder_first_name + " " + payment.card_holder_last_name
ap.payment.creditCard.cardholderName = fl_name
billing_address = payment.billing_address
ba = b(address1=billing_address.address1,
city=billing_address.city,
zipCode=billing_address.zip_code,
country=billing_address.country,
addressType=billing_address.type.value)
ap.payment.creditCard.billingAddress = ba
ap.payment.creditCard.validationNumber = payment.card_validation_number
ap.payment.amount = b(payment.amount, currency=payment.currency)
response = self._silver_send(
"book_client",
"AddPaymentRequest",
"addPaymentRequest",
ap)
return response
def _update_booking(self, booking_update, response_specs):
"""
        Creates a silverraw update booking object based on the update options given and calls the SilverCore API with the objects created.
Args:
booking_update (BookingUpdate): A booking update with the relevant booking update options.
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.updateBookingRecordRequest. Returns an updateBookingRecordRequest object::
"""
bu = silverbook.updateBookingRecordRequest()
# Adding context
bu.context = self._get_xml_context()
# Adding booking record id
bu.recordLocator = booking_update.record_locator
# Adding ticket option information
to = booking_update.ticket_option
if to:
bu.fulfillmentInformation = b()
bu.fulfillmentInformation.ticketOption = b()
bu.fulfillmentInformation.ticketOption.code = to.code.value
            bu.fulfillmentInformation.ticketOption.fee = b(to.fee, currency=to.currency)
response = self._silver_send(
"book_client",
"UpdateBookingRecordRequest",
"updateBookingRecordRequest",
bu)
return response
def _confirm_booking(self, confirmation, response_specs=[]):
"""Creates the SilverRaw objects necessary to send a booking confirmation request to the server
Args:
confirmation (BookingConfirmation): The confirmation details to finalise and confirm booking.
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.confirmBookingResponse. Returns a confirmBookingResponse object::
"""
cb = silverbook.confirmBookingRecordRequest()
        # Retrieving context
cb.context = self._get_xml_context()
# Record locator
cb.recordLocator = confirmation.record_locator
cb.confirmationInformation = b()
cb.confirmationInformation.selectedConfirmationOption = b()
        expiration_ym = "{0}-{1:02d}".format(confirmation.expiration_year, confirmation.expiration_month)
conf_opt = b(cardholderNameLast=confirmation.card_holder_last_name,
cardholderNameFirst=confirmation.card_holder_first_name,
cardNumber=confirmation.card_number,
expirationYearMonth=expiration_ym)
cb.confirmationInformation.selectedConfirmationOption.creditCardOption = conf_opt
response = self._silver_send(
"book_client",
"ConfirmBookingRecordRequest",
"confirmBookingRecordRequest",
cb)
return response
class SilverCore(SilverSoap):
"""
    The SilverCore class manages all interactions with the client and hides all the complexity of the SilverCore SOAP interface.
    It exposes a simple interface for point-to-point search, bookings, and payments.
.. note::
        For more thorough documentation, check the SilverRail wiki.
"""
def __init__(self, distributor, point_of_sale, channel, cert=None, key=None):
"""Gathers the information required to create the context on each request.
Args:
distributor (str): The name of the distributor for the SilverRail ticketing system provider.
point_of_sale (str): The point of sale, which can be location, etc.
channel (str): The channel code for the queries.
cert (str): The absolute path location for the ssl silverrail certificate that will be used to verify each request.
key (str): The absolute path location of the key file.
.. note::
            Remember never to hardcode the password in the Python files. Always set it in the shell environment and read it with pwd = os.environ["SILVERCORE_PASSWORD"].
"""
cert = cert or os.environ.get("SILVERCORE_CERT")
key = key or os.environ.get("SILVERCORE_KEY")
if not cert or not key:
raise Exception("Certificate and key absolute paths must be provided")
        # Initializes its parent SilverSoap class, which will deal with all the heavy SOAP lifting.
SilverSoap.__init__(self, distributor, point_of_sale, channel, cert, key)
def set_credentials(self, cert_locat, pwd):
"""Retreives the location of the certificate to use as ssl verification with each of the requests..
Args:
cert_locat (str): The absolute location for the ssl silverrail certificate that will be used to verify each request.
pwd (str): The password used alongside with the certificate.
.. note::
            Remember never to hardcode the password in the Python files. Always set it in the shell environment and read it with pwd = os.environ["SILVERCORE_PASSWORD"].
"""
self._set_credentials(cert_locat, pwd)
def search_fare(self, fare_query):
"""This function handles handles the fare_query object and returns the legs found and their respective fare results found.
Args:
fare_query (FareSearch): The fare search object to query the silvercore backend with
Returns:
FareResult. The results from the FareQuery::
"""
return self._search_fare(fare_query)
def create_booking(self, fares, passengers, parameters=[], response_specs = []):
"""Creates a booking with the fares and passengers given, and returns a response object
Args:
fares (FareTotal[]): An array of fares chosen to book.
passengers (Passenger[]): An array of passengers that will be present on each booking.
parameters (CREATE_BOOKING_PARAMS[]): Array of parameters to pass the create booking request (NOT SUPPORTED YET).
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.createBookingResponse. Returns a createBookingResponse object::
"""
return self._create_booking(fares, passengers, parameters, response_specs)
def create_booking_from_response(self, legs, fares, passengers, parameters=[], response_specs=[]):
"""Creates a booking with form the legs and fares given from the previous faresearch query, and passengers given.
Args:
legs (silverraw.Leg[]): An array of SilverRaw Leg objects chosen to book.
fares (silverraw.Fare[]): An array of SilverRaw Fare objects chosen to book.
passengers (Passenger[]): An array of passengers that will be present on each booking.
parameters (CREATE_BOOKING_PARAMS[]): Array of parameters to pass the create booking request (NOT SUPPORTED YET).
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.createBookingResponse. Returns a createBookingResponse object::
"""
return self._create_booking_from_response(legs, fares, passengers, parameters, response_specs)
def add_payment(self, payment, response_specs=[]):
"""Adds a form of payment to an existing booking referenced throuhg a record locator object
Args:
payment (PaymentMethod): The payment method to use.
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.addPaymentResponse. Returns a addPaymentResponse object::
"""
return self._add_payment(payment, response_specs)
def update_booking(self, booking_update, response_specs=[]):
"""
Args:
booking_update (BookingUpdate): A booking update with the relevant booking update options.
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.updateBookingRecordRequest. Returns an updateBookingRecordRequest object::
"""
return self._update_booking(booking_update, response_specs)
def confirm_booking(self, confirmation, response_specs=[]):
"""Confirms the booking assuming that payments have been added and everything has been finalised
Args:
confirmation (BookingConfirmation): The confirmation details to finalise and confirm booking.
response_specs (CREATE_BOOKING_SPECS[]): Array of specs for the response from the SilverCore API (NOT SUPPORTED YET).
Returns:
silverraw.confirmBookingResponse. Returns a confirmBookingResponse object::
"""
return self._confirm_booking(confirmation, response_specs)
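def _example_fare_search():
    """Illustrative sketch of a minimal point-to-point fare search.

    The distributor, point of sale, channel and station codes below are
    placeholders; the certificate and key paths are read from the environment
    as recommended in the SilverCore docstring.
    """
    core = SilverCore("EXAMPLE_DISTRIBUTOR",
                      "GB",
                      "CH1",
                      cert=os.environ.get("SILVERCORE_CERT"),
                      key=os.environ.get("SILVERCORE_KEY"))
    passenger = Passenger(age=30, first_name="Ada", last_name="Lovelace")
    outbound = TravelPoint(origin="EXAMPLE_ORIGIN_CODE",
                           destination="EXAMPLE_DESTINATION_CODE",
                           departure=datetime(2016, 12, 1, 9, 0))
    query = FareSearch(travel_points=[outbound],
                       fare_filter=FARE_FILTER.CHEAPEST,
                       passengers=[passenger])
    return core.search_fare(query)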
|
|
import glob,os
# Build a map from REFPROP name to CAS code
RP2CAS = {}
for file in glob.glob('C:\\Program Files (x86)\\REFPROP\\fluids\\*.fld'):
lines = open(file,'r').readlines()
root,RPFluid = os.path.split(file)
for line in lines:
if line.find('CAS number') > -1:
CAS_number = line.split('!')[0].strip()
if not CAS_number:
raise ValueError(file+line)
RP2CAS[RPFluid.split('.')[0]] = CAS_number
break
# Handle pseudo-pure fluids
for file in glob.glob('C:\\Program Files (x86)\\REFPROP\\fluids\\*.ppf'):
root,RPFluid = os.path.split(file)
RP2CAS[RPFluid.split('.')[0]] = RPFluid
fluid_lookup = """1BUTENE butene 1-Butene 419.29 * * * - - - 2.59 0.5 1 0.983 1.079 - - - - - - - - -
ACETONE propanone Acetone 508.1 - 0.5 - - - - 1.46 0.1 0.2 0.16 9.40E-02 - - - - - - 1.39E+04 - -
AIR air Air 132.5306 - - - - - - N/A - - - - - - - - - - - - -
AMMONIA ammonia Ammonia 405.4 R-717 X X X - - - N/A - - - 1.0E-01 - - - - - - 1.0E+06 3.50E-01 1.60E+00
ARGON argon Argon 150.687 R-740 0 0 0 - - - N/A - - - - - - - - - - - - -
BENZENE benzene Benzene 562.02 * * * - - - 3.65 0.4 0.2 0.318 2.2E-01 1.9E+03 8.4E-05 2.8E-03 6.4E-05 1.3E-03 1.6E-05 - - -
BUTANE butane n-Butane 425.125 R-600 N/A 3 N/A - - - 2.15 0.5 0.4 0.485 3.52E-01 - - - - - - - - -
C12 dodecane n-Dodecane 658.1 X X X - - - 2.19 0.3 0.4 0.452 3.57E-01 - - - - - - - - -
C1CC6 methylcyclohexane CoolProp error: Your fluid name [] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid - 0.639 - - - - N/A 0.5 0.6 0.392 1.87 - - - - - - - - -
C2BUTENE cis-2-butene cis-2-Butene 435.75 * * * - - - 2.57 0.4 1 1.15 - - - - - - - - -
C3CC6 propylcyclohexane CoolProp error: Your fluid name [] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid * * * - - - - - - - 2.57 - - - - - - - - -
C4F10 perfluorobutane CoolProp error: Your fluid name [] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid 6330 8860 12500 - - - N/A - - - - - - - - - - - - -
C5F12 perfluoropentane CoolProp error: Your fluid name [] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid 6510 9160 13300 - - - N/A - - - - - - - - - - - - -
CF3I trifluoroiodomethane CoolProp error: Your fluid name [] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid 1* 0.4* 0.1* - - - N/A - - - - - - - - - - - - -
CO carbon monoxide CarbonMonoxide 132.86 - 1.6* - - - - 0.331 0.04 0.03 0.032 2.70E-02 - - - - - - - - -
CO2 carbon dioxide CarbonDioxide 304.1282 1 1 1 - - - 0.108 - - - - - - - - - - - - -
COS carbonyl sulphide CarbonylSulfide 378.77 97 27 - - - - N/A - - - - - - - - - - - - -
CYCLOHEX cyclohexane CycloHexane 553.64 X X X - - - N/A N/A N/A N/A N/A - - - - - - - - -
CYCLOPEN cyclopentane Cyclopentane 511.72 * * * - - - N/A N/A N/A N/A N/A - - - - - - - - -
CYCLOPRO cyclopropane CycloPropane 398.3 * * * - - - N/A N/A N/A N/A N/A - - - - - - - - -
D2 deuterium Deuterium CoolProp error: Your fluid name [Deuterium] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid 0 0 0 - - - N/A - - - - - - - - - - - - -
D2O deuterium oxide DeuteriumOxide CoolProp error: Your fluid name [DeuteriumOxide] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid - - - - - - N/A - - - - - - - - - - - - -
D4 octamethylcyclotetrasiloxane D4 586.5 N/A N/A N/A - - - N/A - - - - - - - - - - - - -
D5 decamethylcyclotetrasiloxane D5 619.15 N/A N/A N/A - - - N/A - - - - - - - - - - - - -
D6 dodecamethylcyclotetrasiloxane D6 645.78 N/A N/A N/A - - - N/A - - - - - - - - - - - - -
DECANE decane n-Decane 617.7 * * * - - - 2.45 0.4 0.5 0.509 3.84E-01 - - - - - - - - -
DMC dimethyl carbonate DimethylCarbonate 557 N/A N/A N/A - - - N/A - - - 2.50E-02 - - - - - - - - -
DME dimethylether DimethylEther 400.378 1 1 <<1 - - - 1.66 0.3 0.3 1.89E-01 - - - - - - - - -
ETHANE ethane Ethane 305.322 R-170 N/A 2.9 N/A - - - 1.46 0.1 0.1 0.121 1.23E-01 - - - - - - - - -
ETHANOL ethanol Ethanol 514.71 - - - - - - 1.95 0.2 0.3 0.317 3.99E-01 - - - - - - 1.56E+06 - -
ETHYLENE ethene Ethylene 282.35 R-1150 N/A 6.8 N/A - - - 3.45 1 1 1 1.0E+00 6.4E-01 1.4E-11 7.9E-11 9.0E-12 7.1E-11 1.3E-12 - - -
FLUORINE fluorine Fluorine CoolProp error: Your fluid name [Fluorine] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid - - - - - - 4.86 - - - - - - - - - - - - -
H2S hydrogen sulfide HydrogenSulfide 373.1 - - - - - - 6.89 - - - - 2.2E-01 - - - - - 2.3E+09 - -
HELIUM helium Helium 5.1953 R-704 - - - - - - N/A - - - - - - - - - - - -
HEPTANE heptane n-Heptane 540.13 * * * - - - 2.58 0.5 0.5 0.592 - - - - - - - - -
HEXANE hexane n-Hexane 507.82 * 3.1 * - - - 2.57 0.5 0.4 0.495 4.94E-01 - - - - - - - - -
HYDROGEN Hydrogen 33.145 R-702 - - - - - - N/A - - - 4.94E-01 - - - - - - - - -
IBUTENE 2-methyl-1-propene/methylpropene/isobutene/isobutylene Isobutene 418.09 * * * - - - N/A 0.6 0.6 6.27E-01 - - - - - - 6.67E+04
IHEXANE 2-methylpentane (methylpentane) Isohexane 497.7 * * * - - - N/A - - - - - - - - - - - -
IPENTANE 2-methylbutane Isopentane 460.35 * * * - - - 1.8 0.3 0.3 4.05E-01 - - - - - - - - -
ISOBUTAN 2-methylpropane IsoButane 407.817 * * * - - - 1.74 0.4 0.3 3.07E-01 - - - - - - - - -
KRYPTON kr Krypton 209.48 R-784 - - - - - - N/A - - - - - - - - - - - - -
MD2M decamethyltetrasiloxane MD2M 599.4 * * * - - - N/A - - - - - - - - - - - -
MD3M dodecamethylcyclotetrasiloxane MD3M 628.36 * * * - - - N/A - - - - - - - - - - - -
MD4M tetradecamethylhexasiloxane MD4M 653.2 * * * - - - N/A - - - - - - - - - - - -
MDM octamethyltrisiloxane MDM 564.09 * * * - - - N/A - - - - - - - - - - - -
METHANE methane Methane 190.564 72 25 7.6 - - - 2.72 0.007 0.007 6.00E-03 - - - - - - - - -
METHANOL methanol Methanol CoolProp error: Your fluid name [Methanol] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid N/A 2.8 N/A - - - 1.44 0.2 0.1 0.178 1.40E-01 - - - - - - 1.37E+04 - -
MLINOLEA methyl linoleate (methyl (z,z)-9,12-octadecadienoate) MethylLinoleate 799 N/A N/A N/A - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
MLINOLEN methyl linolenate (methyl (z,z,z)-9,12,15-octadecatrienoate) MethylLinolenate 772 N/A N/A N/A - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
MM hexamethyldisiloxane MM 518.75 N/A N/A N/A - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
MOLEATE methyl oleate (methyl cis-9-octadecenoate) MethylOleate 782 N/A N/A N/A - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
MPALMITA methyl hexadecanoate MethylPalmitate 755 N/A N/A N/A - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
MSTEARAT methyl octadecanoate MethylStearate 775 N/A N/A N/A - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
N2O nitrous oxide NitrousOxide 309.52 290 320 180 - - - N/A N/A N/A N/A N/A - - - - - - - - -
NEON neon Neon 44.4918 R-720 - - - - - - N/A N/A N/A N/A N/A - - - - - - - - -
NEOPENTN neopentane (2,2-dimethylpropane) Neopentane 433.74 * * * - - - 2.25 - - - 1.73E-01 - - - - - - - - -
NF3 nitrogen trifluoride NitrogenTrifluoride CoolProp error: Your fluid name [NitrogenTrifluoride] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid 12300 17200 20700 - - - N/A - - - - - - - - - - - -
NITROGEN nitrogen Nitrogen 126.192 R-728 - - - - - - N/A - - - - - - - - - - - -
NONANE nonane n-Nonane 594.55 * * * - - - 2.29 0.4 0.5 0.463 4.14E-01 - - - - - - - - -
OCTANE octane n-Octane 569.32 * * * - - - 2.41 0.5 0.5 0.544 4.53E-01 - - - - - - - - -
ORTHOHYD orthohydrogen OrthoHydrogen 33.22 - - - - - - N/A - - - - - - - - - - - - -
OXYGEN oxygen Oxygen 154.581 - - - - - - N/A - - - - - - - - - - - - -
PARAHYD parahydrogen ParaHydrogen 32.938 - - - - - - N/A - - - - - - - - - - - - -
PENTANE pentane n-Pentane 469.7 R-601 * * * - - - N/A 0.3 0.4 0.387 3.95E-01 - - - - - - - - -
PROPANE propane n-Propane 369.89 R-290 * 3 * - - - 2.24 0.5 0.4 0.518 1.76E-01 - - - - - - - - -
PROPYLEN propylene Propylene 364.211 * 3.1 * - - - 2.64 0.6 1 1.06 1.12E+00 - - - - - - - - -
PROPYNE propyne/methylacetylene Propyne CoolProp error: Your fluid name [Propyne] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid * * * - - - N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A
R11 trichlorofluoromethane R11 471.06 CFC-11 6730 4750 1620 1 1 1 541 - - - - - - - - - - - - -
R113 1,1,2-trichloro-1,2,2-trifluoroethane R113 CoolProp error: Your fluid name [R113] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid CFC-113 6540 6130 2700 0.8 0.8 1 659 - - - - - - - - - - - - -
R114 1,2-dichloro-1,1,2,2-tetrafluoroethane R114 CoolProp error: Your fluid name [R114] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid CFC-114 8040 10000 8730 1 1 1 1110 - - - - - - - - - - - - -
R115 chloropentafluoroethane R115 CoolProp error: Your fluid name [R115] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid CFC-115 5310 7370 9990 0.6 0.6 0.44 1080 - - - - - - - - - - - - -
R116 hexafluoroethane/perfluoroethane R116 293.03 FC-116 N/A 9200-12200 (2003) N/A - - - 1380 - - - - - - - - - - - - -
R12 dichlorodifluoromethane R12 CoolProp error: Your fluid name [R12] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid CFC-12 11000 10900 5200 1 1 1 1040 - - - - - - - - - - - - -
R123 2,2-dichloro-1,1,1-trifluoroethane R123 456.82 HCFC-123 273 77 24 0.02 0.02 0.02 12.3 - - - - - - - - - - - - -
R1234YF 2,3,3,3-tetrafluoroprop-1-ene R1234yf 367.85 R-1234yf/HFO-1234yf N/A 4 N/A - - - N/A - - - - - - - - - - - - -
R1234ZE trans-1,3,3,3-tetrafluoropropene R1234ze(E) 382.52 R-1234ze N/A 6 N/A - - - N/A - - - - - - - - - - - - -
R124 1-chloro-1,2,2,2-tetrafluoroethane R124 395.425 HCFC-124 2070 609 185 0.022 0.022 0.022 55.3 - - - - - - - - - - - - -
R125 pentafluoroethane R125 339.173 HFC-125 6350 3500 1100 - - - 354 - - - - - - - - - - - - -
R13 chlorotrifluoromethane R13 CoolProp error: Your fluid name [R13] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid CFC-13 10800 14400 16400 1 1 - 1390 - - - - - - - - - - - - -
R134A 1,1,1,2-tetrafluoroethane R134a 374.21 HFC-134a 3830 1430 435 - - - 144 - - - - - - - - - - - - -
R14 tetrafluoromethane/perfluoromethane R14 CoolProp error: Your fluid name [R14] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid FC-14 5210 7390 11200 - - - 697 - - - - - - - - - - - - -
R141B 1,1-dichloro-1-fluoroethane R141b CoolProp error: Your fluid name [R141B] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid HCFC-141b 2250 725 220 0.11 0.11 0.12 80.6 - - - - - - - - - - - - -
R142B 1-chloro-1,1-difluoroethane R142b CoolProp error: Your fluid name [R142B] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid HCFC-142b 5490 2310 705 0.065 0.065 0.07 228 - - - - - - - - - - - - -
R143A 1,1,1-trifluoroethane R143a 345.857 HFC-143a 5890 4470 1590 - - - 487 - - - - - - - - - - - - -
R152A 1,1-difluoroethane R152A 386.411 HFC-152a 437 124 38 - - - 15.5 - - - - - - - - - - - - -
R161 fluoroethane/ethylfluoride R161 375.25 N/A 10 N/A - - - N/A - - - - - - - - - - - - -
R21 dichlorofluoromethane R21 CoolProp error: Your fluid name [R21] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid HCFC-21 530 151 46 0.04 0.04 N/A N/A - - - - - - - - - - - - -
R218 octafluoropropane/perfluoropropane R218 345.02 F-218 6310 8830 12500 - - - N/A - - - - - - - - - - - - -
R22 chlorodifluoromethane R22 369.295 HCFC-22 5160 1810 549 0.055 0.055 0.05 194 - - - - - - - - - - - - -
R227EA 1,1,1,2,3,3,3-heptafluoropropane R227EA 374.9 HFC-227ea 5310 3220 1040 - - - 365 - - - - - - - - - - - - -
R23 trifluoromethane R23 299.293 HFC-23 12000 14800 12200 - - - 1340 - - - - - - - - - - - - -
R236EA 1,1,1,2,3,3-hexafluoropropane R236EA 412.44 HFC-236ea N/A 1200 N/A - - - N/A - - - - - - - - - - - - -
R236FA 1,1,1,3,3,3-hexafluoropropane R236FA 398.07 HFC-236fa 8100 9810 7660 - - - N/A - - - - - - - - - - - - -
R245CA 1,1,2,2,3-pentafluoropropane R245CA CoolProp error: Your fluid name [R245CA] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid HFC-245ca N/A N/A N/A - - - 67.5 - - - - - - - - - - - - -
R245FA 1,1,1,3,3-pentafluoropropane R245fa 427.16 HFC-245fa 3380 1030 314 - - - N/A - - - - - - - - - - - - -
R32 difluoromethane R32 351.255 HFC-32 2330 675 205 - - - 64.2 - - - - - - - - - - - - -
R365MFC 1,1,1,3,3-pentafluorobutane R365MFC 460 HFC-365mfc 2520 794 241 - - - N/A - - - - - - - - - - - - -
R404A 44% r-125, 52% r143a, r134a R404A 345.27 Blend N/A 3900 N/A 0.04 0 0 N/A - - - - - - - - - - - - -
R407C 23% r-32, 25% r-125, 52% r134a R407C 359.345 Blend N/A 1800 N/A 0 N/A N/A N/A - - - - - - - - - - - - -
R41 fluoromethane R41 317.28 HFC-41 323 92 28 - - - N/A - - - - - - - - - - - - -
R410A 50% r-32, 50% r-125 R410A 344.494 Blend N/A 2088 N/A - - - N/A - - - - - - - - - - - - -
R507A 50% r-125, 50% r-143a R507A 343.765 Blend N/A 3985 N/A - - - N/A - - - - - - - - - - - - -
RC318 octafluorocyclobutane/perfluorocyclobutane RC318 CoolProp error: Your fluid name [RC318] is not a CoolProp fluid, a REFPROP fluid, a brine or a liquid FC-C318 7310 10300 14700 - - - 1010 - - - - - - - - - - - - -
SF6 sulfur hexafluoride/sulphur hexafluoride SulfurHexafluoride 318.7232 16300 22800 32600 - - - 2760 - - - - - - - - - - - - -
SO2 sulfur dioxide/sulphur dioxide SulfurDioxide 430.64 R-764 ** ** ** - - - N/A - - - - - - - - - - - - -
T2BUTENE trans-2-butene trans-2-Butene 428.61 C4H8 * * * - - - 2.57 - - - 1.13E+00 - - - - - - - - -
TOLUENE methylbenzane Toluene 591.75 N/A 3.3 N/A - - - 1.95 0.5 0.6 0.565 6.4E-01 3.3E-01 7.0E-05 7.0E-04 5.0E-05 5.8E-04 1.6E-05 2.6E+05 - -
WATER Water 647.096 *** *** *** - - - N/A - - - - - - - - - - - - -
XENON Xenon 289.733 - - - - - - N/A - - - - - - - - - - - - -"""
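# Pull out the columns of the tab-separated lookup table that are actually used below:
# column 0 = REFPROP name, column 2 = display name, columns 5-7 = GWP over a
# 20/100/500 year horizon, column 10 = ozone depletion potential (ODP).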
name_dict,ODP_dict,GWP20_dict,GWP100_dict,GWP500_dict = {},{},{},{},{}
for row in fluid_lookup.split('\n'):
a = row.split('\t')
# Refprop fluid name
RPName = a[0].strip()
# CAS number for this fluid
CAS = RP2CAS[RPName]
name_dict[CAS] = a[2].strip()
ODP_dict[CAS] = a[10].strip()
GWP20_dict[CAS] = a[5].strip()
GWP100_dict[CAS] = a[6].strip()
GWP500_dict[CAS] = a[7].strip()
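# ASHRAE Standard 34 safety classifications (toxicity class A/B followed by
# flammability class 1/2/2L/3), tab-separated as "<REFPROP name>\t<class>".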
ASHRAE34data = """R11 A1
R12 A1
R13 A1
R21 B1
R22 A1
R23 A1
R30 B2
R32 A2
R40 B2
METHANE A3
R113 A1
R114 A1
R115 A1
R116 A1
R123 B1
R124 A1
R125	A1
R134A A1
R142B A2
R143A A2
R152A A2
ETHANE A3
DME A3
R218 A1
R227EA A1
R236FA A1
R245FA B1
PROPANE A3
RC318 A1
BUTANE A3
ISOBUTANE A3
IPENTANE A3
HYDROGEN A3
HELIUM A1
AMMONIA B2
WATER A1
NEON A1
NITROGEN A1
ARGON A1
CO2 A1
SO2 B1
ETHYLENE A3
PROPYLEN A3
R404A A1
R507A A1
R410A A1
R407C A1
R1234YF A2L"""
ASHRAE34_dict = {}
for row in ASHRAE34data.split('\n'):
    a = row.split('\t')
    if a[0] in RP2CAS:
        ASHRAE34_dict[RP2CAS[a[0]]] = a[1]
    else:
        print('Missing CAS number for ' + a[0])
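# REFPROP fluid/mixture file names, apparently pasted from a Fortran source file
# (the leading ':' marks continuation lines); the '.FLD'/'.PPF' extensions are
# stripped off further below before the names are mapped to CAS numbers.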
fluids = """:'1BUTENE.FLD','ACETONE.FLD','AIR.PPF','AMMONIA.FLD','ARGON.FLD',
:'BENZENE.FLD','BUTANE.FLD','C1CC6.FLD','C2BUTENE.FLD','C3CC6.FLD',
:'C4F10.FLD','C5F12.FLD','C12.FLD','CF3I.FLD','CO.FLD','CO2.FLD',
:'COS.FLD','CYCLOHEX.FLD','CYCLOPEN.FLD','CYCLOPRO.FLD','D2.FLD',
:'D2O.FLD','D4.FLD','D5.FLD','D6.FLD','DECANE.FLD','DMC.FLD',
:'DME.FLD','ETHANE.FLD','ETHANOL.FLD','ETHYLENE.FLD','FLUORINE.FLD'
:,'H2S.FLD','HELIUM.FLD','HEPTANE.FLD','HEXANE.FLD','HYDROGEN.FLD',
:'IBUTENE.FLD','IHEXANE.FLD','IPENTANE.FLD','ISOBUTAN.FLD',
:'KRYPTON.FLD','MD2M.FLD','MD3M.FLD','MDM.FLD','METHANE.FLD',
:'METHANOL.FLD','MLINOLEA.FLD','MLINOLEN.FLD','MM.FLD',
:'MOLEATE.FLD','MPALMITA.FLD','MSTEARAT.FLD','N2O.FLD','NEON.FLD',
:'NEOPENTN.FLD','NF3.FLD','NITROGEN.FLD','NONANE.FLD',
:'OCTANE.FLD','ORTHOHYD.FLD','OXYGEN.FLD','PARAHYD.FLD',
:'PENTANE.FLD','PROPANE.FLD','PROPYLEN.FLD','PROPYNE.FLD',
:'R32.FLD','R41.FLD','R115.FLD','R116.FLD','R124.FLD','R125.FLD',
:'R141B.FLD','R142B.FLD','R143A.FLD','R161.FLD','R218.FLD',
:'R227EA.FLD','R236EA.FLD','R236FA.FLD','R245CA.FLD','R245FA.FLD',
:'R365MFC.FLD','R507A.PPF','R1234YF.FLD','R1234ZE.FLD','SF6.FLD',
:'SO2.FLD','T2BUTENE.FLD','TOLUENE.FLD','WATER.FLD','XENON.FLD',
:'R11.FLD','R12.FLD','R13.FLD','R14.FLD','R21.FLD','R22.FLD',
:'R23.FLD','R113.FLD','R114.FLD','R123.FLD','R134A.FLD','R152A.FLD',
:'R404A.PPF','R407C.PPF','R410A.PPF','RC318.FLD'"""
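# Hazard ratings aligned positionally with the 'fluids' list above (0-4 scale,
# 'NA' where no rating is given): HH = health hazard, FH = fire/flammability
# hazard, PH = physical hazard.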
HH = """:'0','2','0','3','0',
:'2','1','2','0','NA',
:'1','NA','2','1','1','1',
:'3','1','2','2','NA',
:'NA','NA','NA','NA','2','2',
:'1','1','2','2','4'
:,'4','0','1','2','0',
:'1','2','1','1',
:'0','1','1','1','0',
:'2','NA','NA','2',
:'2','1','0','1','0',
:'1','1','0','2',
:'2','NA','0','NA',
:'2','1','1','1',
:'1','2','1','1','1','1',
:'1','1','1','NA','2',
:'1','NA','1','NA','2',
:'NA','1','1','1','1',
:'3','0','2','0','0',
:'1','1','NA','NA','NA','1',
:'1','1','1','2','1','1',
:'1','1','1','1'"""
FH = """:'4','3','0','1','0',
:'3','4','3','4','NA',
:'0','NA','2','0','4','0',
:'4','3','3','2','NA',
:'NA','NA','NA','NA','2','3',
:'4','4','3','4','3'
:,'4','0','3','3','4',
:'4','3','4','4',
:'0','2','2','2','4',
:'3','NA','NA','3',
:'1','0','1','0','0',
:'4','0','0','3',
:'3','NA','0','NA',
:'4','4','4','4',
:'4','3','0','0','1','1',
:'1','1','1','NA','1',
:'0','NA','0','NA','0',
:'NA','1','2','2','0',
:'0','4','3','0','0',
:'1','1','NA','NA','NA','1',
:'1','0','0','1','1','4',
:'1','1','1','0'"""
PH = """:'0','0','0','0','0',
:'0','0','0','1','NA',
:'0','NA','0','0','3','0',
:'1','0','1','0','NA',
:'NA','NA','NA','NA','0','0',
:'2','0','0','2','0'
:,'0','0','0','0','0',
:'2','1','0','0',
:'0','0','0','0','0',
:'0','NA','NA','1',
:'0','0','0','0','0',
:'0','3','0','0',
:'0','NA','0','NA',
:'0','0','1','1',
:'1','2','1','1','0','0',
:'0','0','0','NA','1',
:'0','NA','1','NA','1',
:'NA','0','0','0','0',
:'0','1','0','0','3',
:'0','0','NA','NA','NA','0',
:'0','0','0','0','0','1',
:'0','0','0','2'"""
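# Strip the continuation markers, quotes, newlines and file extensions, then zip
# the ratings with the fluid names (the two lists must stay in the same order)
# and key the resulting dicts by CAS number.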
pp_fluids = fluids.replace(':','').replace('\n','').replace('.FLD','').replace('.PPF','').replace("'",'').split(",")
pp_HH = HH.replace(':','').replace('\n','').replace("'",'').split(",")
pp_FH = FH.replace(':','').replace('\n','').replace("'",'').split(",")
pp_PH = PH.replace(':','').replace('\n','').replace("'",'').split(",")
HH_dict = {RP2CAS[k]:v for k,v in zip(pp_fluids,pp_HH)}
FH_dict = {RP2CAS[k]:v for k,v in zip(pp_fluids,pp_FH)}
PH_dict = {RP2CAS[k]:v for k,v in zip(pp_fluids,pp_PH)}
def get_env_data(fluid):
    """Collect the environmental and safety data for a fluid, keyed by CAS number."""
    a = dict(
        HH=HH_dict[fluid],
        FH=FH_dict[fluid],
        PH=PH_dict[fluid],
        ODP=ODP_dict[fluid],
        GWP20=GWP20_dict[fluid],
        GWP100=GWP100_dict[fluid],
        GWP500=GWP500_dict[fluid],
    )
    # Convert the raw table strings to numbers; anything non-numeric
    # ('N/A', '-', '*', '**', '***', ...) becomes -1
    for k, v in a.items():
        try:
            a[k] = int(v)
        except ValueError:
            try:
                a[k] = float(v)
            except ValueError:
                a[k] = -1
    # Report the GWP and ODP values as floats
    for term in ['GWP100', 'GWP20', 'GWP500', 'ODP']:
        try:
            a[term] = float(a[term])
        except TypeError:
            a[term] = -1
    # ASHRAE 34 safety class, where one is known for this fluid
    if fluid in ASHRAE34_dict:
        a['ASHRAE34'] = ASHRAE34_dict[fluid]
    else:
        a['ASHRAE34'] = 'UNKNOWN'
    a['Name'] = name_dict[fluid]
    return a
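# Illustrative call, assuming RP2CAS maps 'R134A' to its CAS number 811-97-2
# (values shown are taken from the lookup tables above; HH/FH/PH omitted):
#   get_env_data('811-97-2')
#   # -> {'Name': 'R134a', 'ASHRAE34': 'A1', 'GWP20': 3830.0, 'GWP100': 1430.0,
#   #     'GWP500': 435.0, 'ODP': -1.0, ...}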
import json
code = {}
for fluid in pp_fluids:
    # Key the output by CAS number rather than by REFPROP name
    fluid = RP2CAS[fluid]
    # Skip fluids for which the lookup table has no display name
    if name_dict[fluid]:
        code[fluid] = get_env_data(fluid)
with open('DTU_environmental.json', 'w') as f:
    f.write(json.dumps(code, sort_keys=True, indent=2, separators=(',', ': ')))
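# The resulting DTU_environmental.json maps CAS number -> {'Name', 'ASHRAE34',
# 'HH', 'FH', 'PH', 'ODP', 'GWP20', 'GWP100', 'GWP500'}, with -1 marking values
# that could not be parsed as numbers.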