import unittest
import numpy
import pytest
from cupy import testing
from cupy_tests.core_tests.fusion_tests import fusion_utils
class FusionUnaryUfuncTestBase(unittest.TestCase):
def generate_inputs(self, xp, dtype):
x = testing.shaped_random((3, 4), xp, dtype, scale=10, seed=0)
return (x,), {}
class FusionBinaryUfuncTestBase(unittest.TestCase):
def generate_inputs(self, xp, dtype1, dtype2):
x = testing.shaped_random((3, 4), xp, dtype1, scale=10, seed=0)
y = testing.shaped_random((3, 4), xp, dtype2, scale=10, seed=1)
return (x, y), {}
@testing.gpu
@testing.parameterize(*testing.product({
'func': [
'bitwise_and', 'bitwise_or', 'bitwise_xor', 'left_shift', 'right_shift'
]
}))
class TestFusionBitwiseBinary(FusionBinaryUfuncTestBase):
@testing.for_int_dtypes_combination(names=('dtype1', 'dtype2'))
@fusion_utils.check_fusion()
def test_bitwise(self, xp, dtype1, dtype2):
def impl(x, y):
if ((x.dtype == 'uint64' and y.dtype.kind == 'i')
or (y.dtype == 'uint64' and x.dtype.kind == 'i')):
# Skip this combination: NumPy has no common integer dtype for
# uint64 mixed with signed ints, so these ufuncs raise TypeError.
return
return getattr(xp, self.func)(x, y)
return impl
class TestFusionBitwiseUnary(FusionUnaryUfuncTestBase):
@testing.for_int_dtypes()
@fusion_utils.check_fusion()
def test_invert(self, xp, dtype):
return lambda x: xp.invert(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': [
'greater', 'greater_equal', 'less', 'less_equal', 'equal', 'not_equal',
'logical_and', 'logical_or', 'logical_xor',
'maximum', 'minimum', 'fmax', 'fmin',
]
}))
class TestFusionComparisonBinary(FusionBinaryUfuncTestBase):
@testing.for_all_dtypes_combination(
no_complex=True, names=('dtype1', 'dtype2'))
@fusion_utils.check_fusion()
def test_comparison(self, xp, dtype1, dtype2):
return lambda x, y: getattr(xp, self.func)(x, y)
@testing.gpu
class TestFusionComparisonUnary(FusionUnaryUfuncTestBase):
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_comparison(self, xp, dtype):
return lambda x: xp.logical_not(x)
@testing.gpu
class TestFusionArrayContents(FusionUnaryUfuncTestBase):
def generate_inputs(self, xp, has_nan, dtype):
if numpy.dtype(dtype).kind not in ('f', 'c'):
return super(TestFusionArrayContents, self).generate_inputs(
xp, dtype)
nan = numpy.nan
inf = dtype(float('inf'))
if has_nan:
x = xp.array([-3, nan, -1, nan, 0, nan, inf], dtype=dtype)
else:
x = xp.array([-3, inf, -1, -inf, 0, 1, 2], dtype=dtype)
return (x,), {}
@testing.for_all_dtypes()
@fusion_utils.check_fusion(generate_inputs_args=(False,))
def test_isfinite(self, xp, dtype):
return lambda x: xp.isfinite(x)
@testing.for_all_dtypes()
@fusion_utils.check_fusion(generate_inputs_args=(False,))
def test_isinf(self, xp, dtype):
return lambda x: xp.isinf(x)
@testing.for_all_dtypes()
@fusion_utils.check_fusion(generate_inputs_args=(True,))
def test_isnan(self, xp, dtype):
return lambda x: xp.isnan(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': [
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
],
}))
class TestFusionTrigonometricUnary(unittest.TestCase):
def generate_inputs(self, xp, dtype):
if numpy.dtype(dtype).kind not in ('f', 'c'):
x = xp.array([0, 1])
else:
x = testing.shaped_random((3, 4), xp, dtype, scale=1, seed=0)
return (x,), {}
@testing.for_all_dtypes()
@fusion_utils.check_fusion()
def test_trigonometric(self, xp, dtype):
def impl(x):
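# Some inputs fall outside a function's domain (e.g. arccosh needs
# x >= 1, arctanh diverges at |x| = 1), so the NumPy reference run
# would raise divide/invalid floating-point warnings; errstate
# silences them for both backends.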
with numpy.errstate(divide='ignore', invalid='ignore'):
return getattr(xp, self.func)(x)
return impl
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['arctan2', 'hypot']
}))
class TestFusionTrigonometricBinary(FusionBinaryUfuncTestBase):
@testing.for_all_dtypes_combination(
no_complex=True, names=('dtype1', 'dtype2'))
@fusion_utils.check_fusion()
def test_trigonometric(self, xp, dtype1, dtype2):
return lambda x, y: getattr(xp, self.func)(x, y)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['deg2rad', 'rad2deg', 'degrees', 'radians']
}))
class TestFusionDegRad(FusionUnaryUfuncTestBase):
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_trigonometric(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['around', 'round', 'round_', 'rint', 'floor', 'ceil', 'trunc',
'fix']
}))
class TestFusionRounding(FusionUnaryUfuncTestBase):
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_rounding(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['exp', 'expm1', 'exp2', 'log', 'log10', 'log2', 'log1p']
}))
class TestFusionExpLogUnary(unittest.TestCase):
def generate_inputs(self, xp, dtype):
x = testing.shaped_random((3, 4), xp, dtype, scale=10, seed=0) + 1
return (x,), {}
@testing.for_all_dtypes()
@fusion_utils.check_fusion()
def test_explog(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['logaddexp', 'logaddexp2']
}))
class TestFusionExpLogBinary(FusionBinaryUfuncTestBase):
@testing.for_all_dtypes_combination(
no_complex=True, names=('dtype1', 'dtype2'))
@fusion_utils.check_fusion()
def test_explog(self, xp, dtype1, dtype2):
return lambda x, y: getattr(xp, self.func)(x, y)
@testing.gpu
class TestFusionLdexp(FusionBinaryUfuncTestBase):
@testing.for_float_dtypes(name='dtype1')
@testing.for_dtypes(['i', 'l'], name='dtype2')
@fusion_utils.check_fusion()
def test_explog(self, xp, dtype1, dtype2):
return lambda x, y: xp.ldexp(x, y)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['signbit', 'frexp']
}))
class TestFusionFloatingUnary(FusionUnaryUfuncTestBase):
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_floating_point_routine(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['copysign', 'nextafter']
}))
class TestFusionFloatingBinary(FusionBinaryUfuncTestBase):
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True)
@fusion_utils.check_fusion()
def test_floating_point_routine(self, xp, dtype1, dtype2):
return lambda x, y: getattr(xp, self.func)(x, y)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['reciprocal', 'negative', 'angle', 'conj', 'real', 'imag']
}))
class TestArithmeticUnary(FusionUnaryUfuncTestBase):
def generate_inputs(self, xp, dtype):
x = testing.shaped_random((3, 4), xp, dtype, scale=10, seed=0)
x[x == 0] = 1
return (x,), {}
@testing.for_all_dtypes(no_bool=True)
@fusion_utils.check_fusion()
def test_arithmetic(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
@testing.gpu
class TestModf(FusionUnaryUfuncTestBase):
def generate_inputs(self, xp, dtype):
x = testing.shaped_random((3, 4), xp, dtype, scale=10, seed=0)
return (x,), {}
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_arithmetic(self, xp, dtype):
return lambda x: xp.modf(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['add', 'subtract', 'multiply', 'power']
}))
class TestArithmeticBinary(FusionBinaryUfuncTestBase):
def generate_inputs(self, xp, dtype1, dtype2):
x = testing.shaped_random((3, 4), xp, dtype1, scale=5, seed=0)
y = testing.shaped_random((3, 4), xp, dtype2, scale=5, seed=0)
return (x, y), {}
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True, no_bool=True)
@fusion_utils.check_fusion()
def test_arithmetic(self, xp, dtype1, dtype2):
# TODO(unno): boolean subtract causes DeprecationWarning in numpy>=1.13
return lambda x, y: getattr(xp, self.func)(x, y)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['divide', 'true_divide', 'floor_divide', 'fmod', 'remainder']
}))
class TestDivide(unittest.TestCase):
def generate_inputs(self, xp, dtype1, dtype2):
x = testing.shaped_random((3, 4), xp, dtype1, scale=10, seed=0)
y = testing.shaped_random((3, 4), xp, dtype2, scale=10, seed=1)
y[y == 0] = 1
return (x, y), {}
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True)
@fusion_utils.check_fusion()
def test_divide(self, xp, dtype1, dtype2):
return lambda x, y: getattr(xp, self.func)(x, y)
@testing.gpu
class TestDivmod(unittest.TestCase):
def generate_inputs(self, xp, dtype1, dtype2):
x = testing.shaped_random((3, 4), xp, dtype1, scale=10, seed=0)
y = testing.shaped_random((3, 4), xp, dtype2, scale=10, seed=1)
y[y == 0] = 1
return (x, y), {}
@testing.with_requires('numpy>=1.13')
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True)
@fusion_utils.check_fusion()
def test_divmod(self, xp, dtype1, dtype2):
return lambda x, y: xp.divmod(x, y)
@testing.gpu
class TestFusionMisc(FusionUnaryUfuncTestBase):
@testing.with_requires('numpy>=1.11.2')
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_sqrt(self, xp, dtype):
return lambda x: xp.sqrt(x)
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_cbrt(self, xp, dtype):
return lambda x: xp.cbrt(x)
@testing.for_all_dtypes()
@fusion_utils.check_fusion()
def test_square(self, xp, dtype):
return lambda x: xp.square(x)
@testing.for_all_dtypes(no_complex=True, no_bool=True)
@fusion_utils.check_fusion()
def test_absolute(self, xp, dtype):
return lambda x: xp.absolute(x)
@testing.for_all_dtypes(no_complex=True, no_bool=True)
@fusion_utils.check_fusion()
def test_abs(self, xp, dtype):
return lambda x: xp.abs(x)
@testing.for_all_dtypes(no_complex=True, no_bool=True)
@fusion_utils.check_fusion()
def test_sign(self, xp, dtype):
return lambda x: xp.sign(x)
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_clip(self, xp, dtype):
return lambda x: xp.clip(x, dtype(2), dtype(4))
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['i0', 'sinc']
}))
class TestFusionSpecialMath(FusionUnaryUfuncTestBase):
# TODO(imanishi): Fix for integer tests
@testing.for_float_dtypes()
@fusion_utils.check_fusion()
def test_special_math(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
class TestFusionManipulation(unittest.TestCase):
def generate_inputs(self, xp, dtype1, dtype2):
cond = testing.shaped_random((3, 4), xp, 'bool_', seed=0)
x = testing.shaped_random((3, 4), xp, dtype1, scale=10, seed=1)
y = testing.shaped_random((3, 4), xp, dtype2, scale=10, seed=2)
return (cond, x, y), {}
@testing.for_all_dtypes_combination(names=('dtype1', 'dtype2'))
@fusion_utils.check_fusion()
def test_where(self, xp, dtype1, dtype2):
return lambda cond, x, y: xp.where(cond, x, y)
# TODO(imanishi): Support complex dtypes
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True)
@fusion_utils.check_fusion(accept_error=(TypeError,))
def test_copyto(self, xp, dtype1, dtype2):
return lambda cond, x, y: xp.copyto(x, y)
# TODO(imanishi): Support complex dtypes
@pytest.mark.xfail(reason='Issue #5848')
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True)
@fusion_utils.check_fusion(accept_error=(TypeError,))
def test_copyto_compat_broadcast(self, xp, dtype1, dtype2):
return lambda cond, x, y: xp.copyto(x, y[None])
# TODO(imanishi): Support complex dtypes
@testing.for_all_dtypes_combination(
names=('dtype1', 'dtype2'), no_complex=True)
@fusion_utils.check_fusion(accept_error=(TypeError,))
def test_copyto_where(self, xp, dtype1, dtype2):
return lambda cond, x, y: xp.copyto(x, y, where=cond)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['sum', 'prod', 'amax', 'amin', 'max', 'min']
}))
class TestFusionNumericalReduction(FusionUnaryUfuncTestBase):
@testing.for_all_dtypes()
@fusion_utils.check_fusion()
def test_reduction(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['all', 'any']
}))
class TestFusionLogicalReduction(FusionUnaryUfuncTestBase):
@testing.for_all_dtypes(no_complex=True)
@fusion_utils.check_fusion()
def test_reduction(self, xp, dtype):
return lambda x: getattr(xp, self.func)(x)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_gamma."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomGammaTest(test.TestCase):
"""This is a medium test due to the moments computation taking some time."""
def setUp(self):
np.random.seed(137)
random_seed.set_random_seed(137)
def _Sampler(self, num, alpha, beta, dtype, use_gpu=True, seed=None):
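# Returns a closure that evaluates the same random_gamma op ten times
# and stacks the draws into a [10, num] array.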
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_gamma(
[num], alpha, beta=beta, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in range(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testNpDtypes(self):
self.evaluate(random_ops.random_gamma(
[5], alpha=np.ones([2, 1, 3]), beta=np.ones([3]), dtype=np.float32))
def testEmptySamplingNoError(self):
self.evaluate(random_ops.random_gamma(
[5], alpha=np.ones([2, 0, 3]), beta=np.ones([3]), dtype=dtypes.float32))
@test_util.run_deprecated_v1
def testMomentsFloat32(self):
self._testMoments(dtypes.float32)
@test_util.run_deprecated_v1
def testMomentsFloat64(self):
self._testMoments(dtypes.float64)
def _testMoments(self, dt):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s" % e)
return
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
z_limit = 6.0
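# Roughly, util.test_moment_matching compares the sample mean of x**k
# against the analytic gamma moment E[X**k] = scale**k *
# Gamma(alpha + k) / Gamma(alpha), normalized by the sample standard
# error; the exact formula lives in that helper (assumption, not
# re-derived here).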
for stride in 0, 1, 4, 17:
alphas = [0.2, 1.0, 3.0]
if dt == dtypes.float64:
alphas = [0.01] + alphas
for alpha in alphas:
for scale in 9, 17:
# Gamma moments only defined for values less than the scale param.
max_moment = min(6, scale // 2)
sampler = self._Sampler(20000, alpha, 1 / scale, dt, seed=12345)
z_scores = util.test_moment_matching(
sampler(),
max_moment,
stats.gamma(alpha, scale=scale),
stride=stride,
)
self.assertAllLess(z_scores, z_limit)
def _testZeroDensity(self, alpha):
"""Zero isn't in the support of the gamma distribution.
But quantized floating point math has its limits.
TODO(bjp): Implement log-gamma sampler for small-shape distributions.
Args:
alpha: float shape value to test
"""
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test zero density proportions: %s" % e)
return
allowable_zeros = {
dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
}
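# Each entry is the probability that an ideal gamma(alpha) draw falls
# below the dtype's smallest positive normal value (np.finfo(...).tiny)
# and would therefore show up as (nearly) zero after quantization.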
failures = []
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(10000, alpha, 1.0, dt, seed=12345)
x = sampler()
allowable = allowable_zeros[dt] * x.size
allowable = allowable * 2 if allowable < 10 else allowable * 1.05
if np.sum(x <= 0) > allowable:
failures += [dt]
self.assertEqual([], failures)
def testNonZeroSmallShape(self):
self._testZeroDensity(0.01)
def testNonZeroSmallishShape(self):
self._testZeroDensity(0.35)
# Asserts that different trials (1000 samples per trial) are unlikely
# to produce the same sequence of values. This catches buggy
# implementations that reuse the same random number seed.
def testDistinct(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 2.0, 1.0, dt)
x = sampler()
y = sampler()
# Number of positions where the two draws coincide exactly.
count = (x == y).sum()
count_limit = 20 if dt == dtypes.float16 else 10
self.assertLess(count, count_limit)
# Checks that the CPU and GPU implementations return the same results,
# given the same random seed.
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, seed=345)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
with self.cached_session():
rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
diff = rnd2 - rnd1
self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
@test_util.run_deprecated_v1
def testShape(self):
# Fully known shape.
rnd = random_ops.random_gamma([150], 2.0)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
self.assertEqual([150, 2], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], array_ops.ones([1, 2, 3]))
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
[123], array_ops.placeholder(
dtypes.float32, shape=(2,)))
self.assertEqual([123, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(1,)), array_ops.ones([7, 3]))
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(3,)), array_ops.ones([9, 6]))
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
rnd = random_ops.random_gamma([50], array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
@test_util.run_deprecated_v1
def testPositive(self):
n = int(10e3)
for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
with self.cached_session():
x = random_ops.random_gamma(shape=[n], alpha=0.001, dtype=dt, seed=0)
self.assertEqual(0, math_ops.reduce_sum(math_ops.cast(
math_ops.less_equal(x, 0.), dtype=dtypes.int64)).eval())
if __name__ == "__main__":
test.main()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the apiclient module."""
import unittest
import mock
import pkg_resources
from apache_beam.metrics.cells import DistributionData
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.runners.dataflow.internal import dependency
from apache_beam.runners.dataflow.internal.clients import dataflow
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
apiclient = None
# pylint: enable=wrong-import-order, wrong-import-position
FAKE_PIPELINE_URL = "gs://invalid-bucket/anywhere"
@unittest.skipIf(apiclient is None, 'GCP dependencies are not installed')
class UtilTest(unittest.TestCase):
@unittest.skip("Enable once BEAM-1080 is fixed.")
def test_create_application_client(self):
pipeline_options = PipelineOptions()
apiclient.DataflowApplicationClient(pipeline_options)
def test_set_network(self):
pipeline_options = PipelineOptions(
['--network', 'anetworkname',
'--temp_location', 'gs://any-location/temp'])
env = apiclient.Environment([], #packages
pipeline_options,
'2.0.0', #any environment version
FAKE_PIPELINE_URL)
self.assertEqual(env.proto.workerPools[0].network,
'anetworkname')
def test_set_subnetwork(self):
pipeline_options = PipelineOptions(
['--subnetwork', '/regions/MY/subnetworks/SUBNETWORK',
'--temp_location', 'gs://any-location/temp'])
env = apiclient.Environment([], #packages
pipeline_options,
'2.0.0', #any environment version
FAKE_PIPELINE_URL)
self.assertEqual(env.proto.workerPools[0].subnetwork,
'/regions/MY/subnetworks/SUBNETWORK')
def test_invalid_default_job_name(self):
# Regexp for job names in dataflow.
regexp = '^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$'
job_name = apiclient.Job._build_default_job_name('invalid.-_user_n*/ame')
self.assertRegexpMatches(job_name, regexp)
job_name = apiclient.Job._build_default_job_name(
'invalid-extremely-long.username_that_shouldbeshortened_or_is_invalid')
self.assertRegexpMatches(job_name, regexp)
def test_default_job_name(self):
job_name = apiclient.Job.default_job_name(None)
regexp = 'beamapp-.*-[0-9]{10}-[0-9]{6}'
self.assertRegexpMatches(job_name, regexp)
def test_split_int(self):
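# Dataflow counters carry 64-bit integers as a pair of 32-bit halves;
# to_split_int is expected to place the low word in lowBits and the
# high word in highBits, which is what the assertions below check.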
number = 12345
split_number = apiclient.to_split_int(number)
self.assertEqual((split_number.lowBits, split_number.highBits),
(number, 0))
shift_number = number << 32
split_number = apiclient.to_split_int(shift_number)
self.assertEqual((split_number.lowBits, split_number.highBits),
(0, number))
def test_translate_distribution(self):
metric_update = dataflow.CounterUpdate()
distribution_update = DistributionData(16, 2, 1, 15)
apiclient.translate_distribution(distribution_update, metric_update)
self.assertEqual(metric_update.distribution.min.lowBits,
distribution_update.min)
self.assertEqual(metric_update.distribution.max.lowBits,
distribution_update.max)
self.assertEqual(metric_update.distribution.sum.lowBits,
distribution_update.sum)
self.assertEqual(metric_update.distribution.count.lowBits,
distribution_update.count)
def test_translate_means(self):
metric_update = dataflow.CounterUpdate()
accumulator = mock.Mock()
accumulator.sum = 16
accumulator.count = 2
apiclient.MetricUpdateTranslators.translate_scalar_mean_int(accumulator,
metric_update)
self.assertEqual(metric_update.integerMean.sum.lowBits, accumulator.sum)
self.assertEqual(metric_update.integerMean.count.lowBits, accumulator.count)
accumulator.sum = 16.0
accumulator.count = 2
apiclient.MetricUpdateTranslators.translate_scalar_mean_float(accumulator,
metric_update)
self.assertEqual(metric_update.floatingPointMean.sum, accumulator.sum)
self.assertEqual(
metric_update.floatingPointMean.count.lowBits, accumulator.count)
def test_default_ip_configuration(self):
pipeline_options = PipelineOptions(
['--temp_location', 'gs://any-location/temp'])
env = apiclient.Environment([],
pipeline_options,
'2.0.0',
FAKE_PIPELINE_URL)
self.assertEqual(env.proto.workerPools[0].ipConfiguration, None)
def test_public_ip_configuration(self):
pipeline_options = PipelineOptions(
['--temp_location', 'gs://any-location/temp',
'--use_public_ips'])
env = apiclient.Environment([],
pipeline_options,
'2.0.0',
FAKE_PIPELINE_URL)
self.assertEqual(
env.proto.workerPools[0].ipConfiguration,
dataflow.WorkerPool.IpConfigurationValueValuesEnum.WORKER_IP_PUBLIC)
def test_private_ip_configuration(self):
pipeline_options = PipelineOptions(
['--temp_location', 'gs://any-location/temp',
'--no_use_public_ips'])
env = apiclient.Environment([],
pipeline_options,
'2.0.0',
FAKE_PIPELINE_URL)
self.assertEqual(
env.proto.workerPools[0].ipConfiguration,
dataflow.WorkerPool.IpConfigurationValueValuesEnum.WORKER_IP_PRIVATE)
def test_harness_override_present_in_dataflow_distributions(self):
pipeline_options = PipelineOptions(
['--temp_location', 'gs://any-location/temp', '--streaming'])
override = ''.join(
['runner_harness_container_image=',
dependency.DATAFLOW_CONTAINER_IMAGE_REPOSITORY,
'/harness:2.2.0'])
distribution = pkg_resources.Distribution(version='2.2.0')
with mock.patch(
'apache_beam.runners.dataflow.internal.dependency.pkg_resources'
'.get_distribution',
mock.MagicMock(return_value=distribution)):
env = apiclient.Environment([], #packages
pipeline_options,
'2.0.0',
FAKE_PIPELINE_URL) #any environment version
self.assertIn(override, env.proto.experiments)
@mock.patch('apache_beam.runners.dataflow.internal.dependency.'
'beam_version.__version__', '2.2.0')
def test_harness_override_present_in_beam_releases(self):
pipeline_options = PipelineOptions(
['--temp_location', 'gs://any-location/temp', '--streaming'])
override = ''.join(
['runner_harness_container_image=',
dependency.DATAFLOW_CONTAINER_IMAGE_REPOSITORY,
'/harness:2.2.0'])
with mock.patch(
'apache_beam.runners.dataflow.internal.dependency.pkg_resources'
'.get_distribution',
mock.Mock(side_effect=pkg_resources.DistributionNotFound())):
env = apiclient.Environment([], #packages
pipeline_options,
'2.0.0',
FAKE_PIPELINE_URL) #any environment version
self.assertIn(override, env.proto.experiments)
@mock.patch('apache_beam.runners.dataflow.internal.dependency.'
'beam_version.__version__', '2.2.0-dev')
def test_harness_override_absent_in_unreleased_sdk(self):
pipeline_options = PipelineOptions(
['--temp_location', 'gs://any-location/temp', '--streaming'])
with mock.patch(
'apache_beam.runners.dataflow.internal.dependency.pkg_resources'
'.get_distribution',
mock.Mock(side_effect=pkg_resources.DistributionNotFound())):
env = apiclient.Environment([], #packages
pipeline_options,
'2.0.0',
FAKE_PIPELINE_URL) #any environment version
if env.proto.experiments:
for experiment in env.proto.experiments:
self.assertNotIn('runner_harness_container_image=', experiment)
def test_labels(self):
pipeline_options = PipelineOptions(
['--project', 'test_project', '--job_name', 'test_job_name',
'--temp_location', 'gs://test-location/temp'])
job = apiclient.Job(pipeline_options, FAKE_PIPELINE_URL)
self.assertIsNone(job.proto.labels)
pipeline_options = PipelineOptions(
['--project', 'test_project', '--job_name', 'test_job_name',
'--temp_location', 'gs://test-location/temp',
'--label', 'key1=value1',
'--label', 'key2',
'--label', 'key3=value3',
'--labels', 'key4=value4',
'--labels', 'key5'])
job = apiclient.Job(pipeline_options, FAKE_PIPELINE_URL)
self.assertEqual(5, len(job.proto.labels.additionalProperties))
self.assertEqual('key1', job.proto.labels.additionalProperties[0].key)
self.assertEqual('value1', job.proto.labels.additionalProperties[0].value)
self.assertEqual('key2', job.proto.labels.additionalProperties[1].key)
self.assertEqual('', job.proto.labels.additionalProperties[1].value)
self.assertEqual('key3', job.proto.labels.additionalProperties[2].key)
self.assertEqual('value3', job.proto.labels.additionalProperties[2].value)
self.assertEqual('key4', job.proto.labels.additionalProperties[3].key)
self.assertEqual('value4', job.proto.labels.additionalProperties[3].value)
self.assertEqual('key5', job.proto.labels.additionalProperties[4].key)
self.assertEqual('', job.proto.labels.additionalProperties[4].value)
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Luis G. Torres, Mark Moll
try:
from ompl import util as ou
from ompl import base as ob
from ompl import geometric as og
except ImportError:
# if the ompl module is not in the PYTHONPATH assume it is installed in a
# subdirectory of the parent directory called "py-bindings."
from os.path import abspath, dirname, join
import sys
sys.path.insert(0, join(dirname(dirname(abspath(__file__))),'py-bindings'))
from ompl import util as ou
from ompl import base as ob
from ompl import geometric as og
from math import sqrt
from sys import argv
import argparse
## @cond IGNORE
# Our "collision checker". For this demo, our robot's state space
# lies in [0,1]x[0,1], with a circular obstacle of radius 0.25
# centered at (0.5,0.5). Any states lying in this circular region are
# considered "in collision".
class ValidityChecker(ob.StateValidityChecker):
def __init__(self, si):
super(ValidityChecker, self).__init__(si)
# Returns whether the given state's position overlaps the
# circular obstacle
def isValid(self, state):
return self.clearance(state) > 0.0
# Returns the distance from the given state's position to the
# boundary of the circular obstacle.
def clearance(self, state):
# Extract the robot's (x,y) position from its state
x = state[0]
y = state[1]
# Distance formula between two points, offset by the circle's
# radius
return sqrt((x-0.5)*(x-0.5) + (y-0.5)*(y-0.5)) - 0.25
## Returns a structure representing the optimization objective to use
# for optimal motion planning. This method returns an objective
# which attempts to minimize the length in configuration space of
# computed paths.
def getPathLengthObjective(si):
return ob.PathLengthOptimizationObjective(si)
## Returns an optimization objective which attempts to minimize path
# length that is satisfied when a path of length shorter than 1.51
# is found.
def getThresholdPathLengthObj(si):
obj = ob.PathLengthOptimizationObjective(si)
obj.setCostThreshold(ob.Cost(1.51))
return obj
## Defines an optimization objective which attempts to steer the
# robot away from obstacles. To formulate this objective as a
# minimization of path cost, we can define the cost of a path as a
# summation of the costs of each of the states along the path, where
# each state cost is a function of that state's clearance from
# obstacles.
#
# The class StateCostIntegralObjective represents objectives as
# summations of state costs, just like we require. All we need to do
# then is inherit from that base class and define our specific state
# cost function by overriding the stateCost() method.
#
class ClearanceObjective(ob.StateCostIntegralObjective):
def __init__(self, si):
super(ClearanceObjective, self).__init__(si, True)
self.si_ = si
# Our requirement is to maximize path clearance from obstacles,
# but we want to represent the objective as a path cost
# minimization. Therefore, we set each state's cost to be the
# reciprocal of its clearance, so that as state clearance
# increases, the state cost decreases.
def stateCost(self, s):
return ob.Cost(1 / self.si_.getStateValidityChecker().clearance(s))
## Return an optimization objective which attempts to steer the robot
# away from obstacles.
def getClearanceObjective(si):
return ClearanceObjective(si)
## Create an optimization objective which attempts to optimize both
# path length and clearance. We do this by defining our individual
# objectives, then adding them to a MultiOptimizationObjective
# object. This results in an optimization objective where path cost
# is equivalent to adding up each of the individual objectives' path
# costs.
#
# When adding objectives, we can also optionally specify each
# objective's weighting factor to signify how important it is in
# optimal planning. If no weight is specified, the weight defaults to
# 1.0.
def getBalancedObjective1(si):
lengthObj = ob.PathLengthOptimizationObjective(si)
clearObj = ClearanceObjective(si)
opt = ob.MultiOptimizationObjective(si)
opt.addObjective(lengthObj, 5.0)
opt.addObjective(clearObj, 1.0)
return opt
## Create an optimization objective equivalent to the one returned by
# getBalancedObjective1(), but use an alternate syntax.
# THIS DOESN'T WORK YET. THE OPERATORS SOMEHOW AREN'T EXPORTED BY Py++.
# def getBalancedObjective2(si):
# lengthObj = ob.PathLengthOptimizationObjective(si)
# clearObj = ClearanceObjective(si)
#
# return 5.0*lengthObj + clearObj
## Create an optimization objective for minimizing path length, and
# specify a cost-to-go heuristic suitable for this optimal planning
# problem.
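# ob.goalRegionCostToGo estimates the cost from a state to the goal
# region (its distance to the goal for a path-length objective), which
# is an admissible cost-to-go heuristic the planner can use to focus
# the search on states that can still improve the best solution.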
def getPathLengthObjWithCostToGo(si):
obj = ob.PathLengthOptimizationObjective(si)
obj.setCostToGoHeuristic(ob.CostToGoHeuristic(ob.goalRegionCostToGo))
return obj
# Keep these in alphabetical order and all lower case
def allocatePlanner(si, plannerType):
if plannerType.lower() == "bfmtstar":
return og.BFMT(si)
elif plannerType.lower() == "bitstar":
return og.BITstar(si)
elif plannerType.lower() == "fmtstar":
return og.FMT(si)
elif plannerType.lower() == "informedrrtstar":
return og.InformedRRTstar(si)
elif plannerType.lower() == "prmstar":
return og.PRMstar(si)
elif plannerType.lower() == "rrtstar":
return og.RRTstar(si)
elif plannerType.lower() == "sorrtstar":
return og.SORRTstar(si)
else:
OMPL_ERROR("Planner-type is not implemented in allocation function.");
# Keep these in alphabetical order and all lower case
def allocateObjective(si, objectiveType):
if objectiveType.lower() == "pathclearance":
return getClearanceObjective(si)
elif objectiveType.lower() == "pathlength":
return getPathLengthObjective(si)
elif objectiveType.lower() == "thresholdpathlength":
return getThresholdPathLengthObj(si)
elif objectiveType.lower() == "weightedlengthandclearancecombo":
return getBalancedObjective1(si)
else:
OMPL_ERROR("Optimization-objective is not implemented in allocation function.");
def plan(runTime, plannerType, objectiveType, fname):
# Construct the robot state space in which we're planning. We're
# planning in [0,1]x[0,1], a subset of R^2.
space = ob.RealVectorStateSpace(2)
# Set the bounds of space to be in [0,1].
space.setBounds(0.0, 1.0)
# Construct a space information instance for this state space
si = ob.SpaceInformation(space)
# Set the object used to check which states in the space are valid
validityChecker = ValidityChecker(si)
si.setStateValidityChecker(validityChecker)
si.setup()
# Set our robot's starting state to be the bottom-left corner of
# the environment, or (0,0).
start = ob.State(space)
start[0] = 0.0
start[1] = 0.0
# Set our robot's goal state to be the top-right corner of the
# environment, or (1,1).
goal = ob.State(space)
goal[0] = 1.0
goal[1] = 1.0
# Create a problem instance
pdef = ob.ProblemDefinition(si)
# Set the start and goal states
pdef.setStartAndGoalStates(start, goal)
# Create the optimization objective specified by our command-line argument.
# This helper function is simply a switch statement.
pdef.setOptimizationObjective(allocateObjective(si, objectiveType))
# Construct the optimal planner specified by our command line argument.
# This helper function is simply a switch statement.
optimizingPlanner = allocatePlanner(si, plannerType)
# Set the problem instance for our planner to solve
optimizingPlanner.setProblemDefinition(pdef)
optimizingPlanner.setup()
# attempt to solve the planning problem in the given runtime
solved = optimizingPlanner.solve(runTime)
if solved:
# Output the length of the path found
print("{0} found solution of path length {1:.4f} with an optimization objective value of {2:.4f}".format(optimizingPlanner.getName(), pdef.getSolutionPath().length(), pdef.getSolutionPath().cost(pdef.getOptimizationObjective()).value()))
# If a filename was specified, output the path as a matrix to
# that file for visualization
if fname:
with open(fname,'w') as outFile:
outFile.write(pdef.getSolutionPath().printAsMatrix())
else:
print("No solution found.")
if __name__ == "__main__":
# Create an argument parser
parser = argparse.ArgumentParser(description='Optimal motion planning demo program.')
# Add a filename argument
parser.add_argument('-t', '--runtime', type=float, default=1.0, help='(Optional) Specify the runtime in seconds. Defaults to 1 and must be greater than 0.')
parser.add_argument('-p', '--planner', default='RRTstar', choices=['BFMTstar', 'BITstar', 'FMTstar', 'InformedRRTstar', 'PRMstar', 'RRTstar', 'SORRTstar'], help='(Optional) Specify the optimal planner to use, defaults to RRTstar if not given.') # Alphabetical order
parser.add_argument('-o', '--objective', default='PathLength', choices=['PathClearance', 'PathLength', 'ThresholdPathLength', 'WeightedLengthAndClearanceCombo'], help='(Optional) Specify the optimization objective, defaults to PathLength if not given.') # Alphabetical order
parser.add_argument('-f', '--file', default=None, help='(Optional) Specify an output path for the found solution path.')
parser.add_argument('-i', '--info', type=int, default=0, choices=[0, 1, 2], help='(Optional) Set the OMPL log level. 0 for WARN, 1 for INFO, 2 for DEBUG. Defaults to WARN.')
# Parse the arguments
args = parser.parse_args()
# Check that time is positive
if args.runtime <= 0:
raise argparse.ArgumentTypeError("argument -t/--runtime: invalid choice: %r (choose a positive number greater than 0)"%(args.runtime,))
# Set the log level
if args.info == 0:
ou.setLogLevel(ou.LOG_WARN)
elif args.info == 1:
ou.setLogLevel(ou.LOG_INFO)
elif args.info == 2:
ou.setLogLevel(ou.LOG_DEBUG)
else:
OMPL_ERROR("Invalid log-level integer.");
# Solve the planning problem
plan(args.runtime, args.planner, args.objective, args.file)
## @endcond
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from django.test import TestCase
from django.utils.timezone import now
from django.core.urlresolvers import reverse
from pttrack.models import Patient, ProviderType
from pttrack.test_views import build_provider, log_in_provider
from . import models
from .tests import wu_dict
class ViewsExistTest(TestCase):
'''
Verify that views involving the workup are functioning.
'''
fixtures = ['workup', 'pttrack']
def setUp(self):
models.ClinicDate.objects.create(
clinic_type=models.ClinicType.objects.first(),
clinic_date=now().date())
self.provider = build_provider()
log_in_provider(self.client, self.provider)
self.wu = models.Workup.objects.create(
clinic_day=models.ClinicDate.objects.first(),
chief_complaint="SOB",
diagnosis="MI",
HPI="A", PMH_PSH="B", meds="C", allergies="D", fam_hx="E",
soc_hx="F", ros="", pe="", A_and_P="",
author=models.Provider.objects.first(),
author_type=ProviderType.objects.first(),
patient=Patient.objects.first())
def test_clindate_create_redirect(self):
'''Verify that if no clindate exists, we're properly redirected to a
clindate create page.'''
# First delete clindate that's created in setUp.
models.ClinicDate.objects.all().delete()
pt = Patient.objects.first()
pt_url = 'new-workup'
response = self.client.get(reverse(pt_url, args=(pt.id,)))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('new-clindate', args=(pt.id,)))
def test_new_workup_view(self):
pt = Patient.objects.first()
response = self.client.get(reverse('new-workup', args=(pt.id,)))
self.assertEqual(response.status_code, 200)
def test_workup_urls(self):
wu_urls = ['workup',
'workup-update']
# test the creation of many workups, just in case.
for i in range(10):
models.Workup.objects.bulk_create(
[models.Workup(**wu_dict()) for i in range(77)])
wu = models.Workup.objects.last()
wu.diagnosis_categories.add(models.DiagnosisType.objects.first())
for wu_url in wu_urls:
response = self.client.get(reverse(wu_url, args=(wu.id,)))
self.assertEqual(response.status_code, 200)
def test_workup_initial(self):
pt = Patient.objects.first()
date_string = self.wu.written_datetime.strftime("%B %d, %Y")
heading_text = "Migrated from previous workup on %s. Please delete this heading and modify the following:\n\n" % date_string
# TODO test use of settings.OSLER_WORKUP_COPY_FORWARD_FIELDS
response = self.client.get(reverse('new-workup', args=(pt.id,)))
self.assertEqual(response.context['form'].initial['PMH_PSH'],
heading_text + "B")
self.assertEqual(response.context['form'].initial['meds'],
heading_text + "C")
self.assertEqual(response.context['form'].initial['allergies'],
heading_text + "D")
self.assertEqual(response.context['form'].initial['fam_hx'],
heading_text + "E")
self.assertEqual(response.context['form'].initial['soc_hx'],
heading_text + "F")
def test_workup_update(self):
'''
Updating should be possible always for attendings, only without
attestation for non-attendings.
'''
# if the wu is unsigned, all can access update.
for role in ["Preclinical", "Clinical", "Coordinator", "Attending"]:
log_in_provider(self.client, build_provider([role]))
response = self.client.get(
reverse('workup-update', args=(self.wu.id,)))
self.assertEqual(response.status_code, 200)
self.wu.sign(build_provider(["Attending"]).associated_user)
self.wu.save()
# nonattesting cannot access
for role in ["Preclinical", "Clinical", "Coordinator"]:
log_in_provider(self.client, build_provider([role]))
response = self.client.get(
reverse('workup-update', args=(self.wu.id,)))
self.assertRedirects(response,
reverse('workup', args=(self.wu.id,)))
# attesting can
log_in_provider(self.client, build_provider(["Attending"]))
response = self.client.get(
reverse('workup-update', args=(self.wu.id,)))
self.assertEqual(response.status_code, 200)
def test_workup_signing(self):
'''
Verify that signing is possible for attendings, and not for others.
'''
wu_url = "workup-sign"
self.wu.diagnosis_categories.add(models.DiagnosisType.objects.first())
self.wu.save()
# Fresh workups should be unsigned
self.assertFalse(self.wu.signed())
# Providers with can_attend == False should not be able to sign
for nonattesting_role in ["Preclinical", "Clinical", "Coordinator"]:
log_in_provider(self.client, build_provider([nonattesting_role]))
response = self.client.get(
reverse(wu_url, args=(self.wu.id,)))
self.assertRedirects(response,
reverse('workup', args=(self.wu.id,)))
self.assertFalse(models.Workup.objects.get(pk=self.wu.id).signed())
# Providers able to attend should be able to sign.
log_in_provider(self.client, build_provider(["Attending"]))
response = self.client.get(reverse(wu_url, args=(self.wu.id,)))
self.assertRedirects(response, reverse('workup', args=(self.wu.id,)),)
# the self.wu has been updated, so we have to hit the db again.
self.assertTrue(models.Workup.objects.get(pk=self.wu.id).signed())
def test_workup_pdf(self):
'''
Verify that pdf download with the correct naming protocol is working
'''
wu_url = "workup-pdf"
self.wu.diagnosis_categories.add(models.DiagnosisType.objects.first())
self.wu.save()
for nonstaff_role in ProviderType.objects.filter(staff_view=False):
log_in_provider(self.client, build_provider([nonstaff_role]))
response = self.client.get(reverse(wu_url, args=(self.wu.id,)))
self.assertRedirects(response,
reverse('workup', args=(self.wu.id,)))
for staff_role in ProviderType.objects.filter(staff_view=True):
log_in_provider(self.client, build_provider([staff_role.pk]))
response = self.client.get(reverse(wu_url, args=(self.wu.id,)))
self.assertEqual(response.status_code, 200)
def test_workup_submit(self):
"""verify we can submit a valid workup as a signer and nonsigner"""
for provider_type in ["Attending", "Clinical"]:
provider = build_provider([provider_type])
log_in_provider(self.client, provider)
pt_id = Patient.objects.first().pk
wu_count = models.Workup.objects.all().count()
wu_data = wu_dict(units=True)
wu_data['diagnosis_categories'] = [
models.DiagnosisType.objects.first().pk]
wu_data['clinic_day'] = wu_data['clinic_day'].pk
r = self.client.post(
reverse('new-workup', args=(pt_id,)),
data=wu_data)
self.assertRedirects(r, reverse("patient-detail", args=(pt_id,)))
self.assertEqual(wu_count + 1, models.Workup.objects.all().count())
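# A freshly submitted workup should come back signed exactly when the
# submitting provider's role is allowed to sign charts.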
self.assertEqual(
models.Workup.objects.last().signed(),
provider.clinical_roles.first().signs_charts)
def test_invalid_workup_submit_preserves_units(self):
# first, craft a workup that has units, but fail to set the
# diagnosis categories, so that it will fail to be accepted.
wu_data = wu_dict(units=True)
pt_id = Patient.objects.first().pk
r = self.client.post(
reverse('new-workup', args=(pt_id,)),
data=wu_data)
# verify we're bounced back to workup-create
self.assertEqual(r.status_code, 200)
self.assertTemplateUsed(r, 'workup/workup-create.html')
self.assertFormError(r, 'form', 'diagnosis_categories',
'This field is required.')
for unit in ['height_units', 'weight_units', 'temperature_units']:
self.assertContains(r, '<input name="%s"' % (unit))
self.assertEqual(
r.context['form'][unit].value(),
wu_data[unit])
class TestProgressNoteViews(TestCase):
'''
Verify that views involving the workup are functioning.
'''
fixtures = ['workup', 'pttrack']
def setUp(self):
self.formdata = {
'title': 'Depression',
'text': 'so sad does testing work???',
'patient': Patient.objects.first(),
'author': models.Provider.objects.first(),
'author_type': ProviderType.objects.first()
}
models.ClinicDate.objects.create(
clinic_type=models.ClinicType.objects.first(),
clinic_date=now().date())
provider = build_provider()
log_in_provider(self.client, provider)
def test_progressnote_urls(self):
url = reverse('new-progress-note', args=(1,))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, self.formdata)
self.assertRedirects(response, reverse('patient-detail',
args=(1,)))
response = self.client.get(reverse('progress-note-update', args=(1,)))
self.assertEqual(response.status_code, 200)
self.formdata['text'] = 'actually not so bad'
response = self.client.post(url, self.formdata)
self.assertRedirects(
response, reverse('patient-detail', args=(1,)))
def test_progressnote_signing(self):
"""Verify that singing is possible for attendings and not for others.
"""
sign_url = "progress-note-sign"
pn = models.ProgressNote.objects.create(
title='Depression',
text='so sad does testing work???',
patient=Patient.objects.first(),
author=models.Provider.objects.first(),
author_type=ProviderType.objects.first()
)
# Fresh notes should be unsigned
self.assertFalse(pn.signed())
# Providers with can_attend == False should not be able to sign
for nonattesting_role in ["Preclinical", "Clinical", "Coordinator"]:
log_in_provider(self.client, build_provider([nonattesting_role]))
response = self.client.get(
reverse(sign_url, args=(pn.id,)))
self.assertRedirects(response,
reverse('progress-note-detail',
args=(pn.id,)))
self.assertFalse(models.ProgressNote.objects
.get(pk=pn.id)
.signed())
# Providers able to attend should be able to sign.
log_in_provider(self.client, build_provider(["Attending"]))
response = self.client.get(reverse(sign_url, args=(pn.id,)))
self.assertRedirects(response, reverse('progress-note-detail',
args=(pn.id,)),)
# the pn has been updated, so we have to hit the db again.
self.assertTrue(models.ProgressNote.objects.get(pk=pn.id).signed())
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
import warnings
import CoolProp
from CoolProp.Plots.Common import IsoLine, BasePlot, interpolate_values_1d
from CoolProp.Plots.SimpleCycles import StateContainer
class PropertyPlot(BasePlot):
def __init__(self, fluid_name, graph_type, **kwargs):
"""
Create graph for the specified fluid properties
Parameters
----------
fluid_name : string or AbstractState
The name of the fluid to be plotted or a state instance
graph_type : string
The graph type to be plotted, like \"PH\" or \"TS\"
axis : :func:`matplotlib.pyplot.gca()`, Optional
The current axis system to be plotted to.
Default: create a new axis system
fig : :func:`matplotlib.pyplot.figure()`, Optional
The current figure to be plotted to.
Default: create a new figure
unit_system : string, ['EUR','KSI','SI']
Select the units used for the plotting. 'EUR' is bar, kJ, C; 'KSI' is kPa, kJ, K; 'SI' is Pa, J, K
tp_limits : string, ['NONE','DEF','ACHP','ORC']
Select the limits in T and p.
reciprocal_density : bool
NOT IMPLEMENTED: If True, 1/rho will be plotted instead of rho
Examples
--------
>>> from CoolProp.Plots import PropertyPlot
>>> plot = PropertyPlot('HEOS::Water', 'TS')
>>> plot.calc_isolines()
>>> plot.show()
>>> import CoolProp
>>> from CoolProp.Plots import PropertyPlot
>>> plot = PropertyPlot('HEOS::R134a', 'PH', unit_system='EUR', tp_limits='ACHP')
>>> plot.calc_isolines(CoolProp.iQ, num=11)
>>> plot.calc_isolines(CoolProp.iT, num=25)
>>> plot.calc_isolines(CoolProp.iSmass, num=15)
>>> plot.show()
>>> import CoolProp
>>> from CoolProp.Plots import PropertyPlot
>>> plot = PropertyPlot('HEOS::R245fa', 'TS', unit_system='EUR', tp_limits='ORC')
>>> plot.calc_isolines(CoolProp.iQ, num=11)
>>> plot.calc_isolines(CoolProp.iP, iso_range=[1,50], num=10, rounding=True)
>>> plot.draw()
>>> plot.isolines.clear()
>>> plot.props[CoolProp.iP]['color'] = 'green'
>>> plot.props[CoolProp.iP]['lw'] = '0.5'
>>> plot.calc_isolines(CoolProp.iP, iso_range=[1,50], num=10, rounding=False)
>>> plot.show()
.. note::
See the online documentation for a list of the available fluids and
graph types
"""
super(PropertyPlot, self).__init__(fluid_name, graph_type, **kwargs)
self._isolines = {}
#self._plines = {}
#self._ppoints = {}
self.get_axis_limits()
self._plot_default_annotations()
@property
def isolines(self): return self._isolines
# @property
#def plines(self): return self._plines
# @property
#def ppoints(self): return self._ppoints
def show(self):
self.draw()
super(PropertyPlot, self).show()
def savefig(self, *args, **kwargs):
self.draw()
super(PropertyPlot, self).savefig(*args, **kwargs)
def _plotRound(self, values):
"""
Round an array-like object while preserving the number of entries.
This is needed for the isolines: we want the labels to look pretty
(rounding), but we do not know the spacing of the lines, and a fixed
number of digits after rounding might collapse distinct values and
shrink the array.
"""
inVal = np.unique(np.sort(np.array(values)))
output = inVal[1:] * 0.0
digits = -1
limit = 10
lim = inVal * 0.0 + 10
# Round with progressively more digits until the rounded array keeps
# all of the original entries; more than 10 significant digits does
# not really make sense anyway.
while len(inVal) > len(output) and digits < limit:
digits += 1
val = (np.around(np.log10(np.abs(inVal))) * -1) + digits + 1
val = np.where(val < lim, val, lim)
val = np.where(val > -lim, val, -lim)
output = np.zeros(inVal.shape)
for i in range(len(inVal)):
output[i] = np.around(inVal[i], decimals=int(val[i]))
output = np.unique(output)
return output
def calc_isolines(self, iso_type=None, iso_range=None, num=15, rounding=False, points=250):
"""Calculate lines with constant values of type 'iso_type' in terms of x and y as
defined by the plot object. 'iso_range' either is a collection of values or
simply the minimum and maximum value between which 'num' lines get calculated.
The 'rounding' parameter can be used to generate prettier labels if needed.
"""
if iso_type is None or iso_type == 'all':
for i_type in IsoLine.XY_SWITCH:
if IsoLine.XY_SWITCH[i_type].get(self.y_index * 10 + self.x_index, None) is not None:
self.calc_isolines(i_type, None, num, rounding, points)
return
if iso_range is None:
if iso_type is CoolProp.iQ:
iso_range = [0.0, 1.0]
else:
limits = self.get_axis_limits(iso_type, CoolProp.iT)
iso_range = [limits[0], limits[1]]
if len(iso_range) <= 1 and num != 1:
raise ValueError('You have to provide two values for the iso_range, {0} is not valid.'.format(iso_range))
if len(iso_range) == 2 and (num is None or num < 2):
raise ValueError('Please specify the number of isolines you want, e.g. num=10.')
iso_range = np.sort(np.unique(iso_range))
# Generate iso ranges
if len(iso_range) == 2:
iso_range = self.generate_ranges(iso_type, iso_range[0], iso_range[1], num)
if rounding:
iso_range = self._plotRound(iso_range)
# Limits are already in SI units
limits = self._get_axis_limits()
ixrange = self.generate_ranges(self._x_index, limits[0], limits[1], points)
iyrange = self.generate_ranges(self._y_index, limits[2], limits[3], points)
dim = self._system[iso_type]
lines = self.isolines.get(iso_type, [])
for i in range(num):
lines.append(IsoLine(iso_type, self._x_index, self._y_index, value=dim.to_SI(iso_range[i]), state=self._state))
lines[-1].calc_range(ixrange, iyrange)
lines[-1].sanitize_data()
self.isolines[iso_type] = lines
return
def draw_isolines(self):
dimx = self._system[self._x_index]
dimy = self._system[self._y_index]
sat_props = self.props[CoolProp.iQ].copy()
if 'lw' in sat_props: sat_props['lw'] *= 2.0
else: sat_props['lw'] = 1.0
if 'alpha' in sat_props: sat_props['alpha'] = min([sat_props['alpha'] * 2.0, 1.0])
else: sat_props['alpha'] = 1.0
for i in self.isolines:
props = self.props[i]
dew = None; bub = None
xcrit = None; ycrit = None
if i == CoolProp.iQ:
for line in self.isolines[i]:
if line.value == 0.0: bub = line
elif line.value == 1.0: dew = line
if dew is not None and bub is not None:
xmin, xmax, ymin, ymax = self.get_axis_limits()
xmin = dimx.to_SI(xmin)
xmax = dimx.to_SI(xmax)
ymin = dimy.to_SI(ymin)
ymax = dimy.to_SI(ymax)
dx = xmax - xmin
dy = ymax - ymin
dew_filter = np.logical_and(np.isfinite(dew.x), np.isfinite(dew.y))
#dew_filter = np.logical_and(dew_filter,dew.x>dew.x[-1])
stp = min([dew_filter.size, 10])
dew_filter[0:-stp] = False
bub_filter = np.logical_and(np.isfinite(bub.x), np.isfinite(bub.y))
if self._x_index == CoolProp.iP or self._x_index == CoolProp.iDmass:
filter_x = lambda x: np.log10(x)
else:
filter_x = lambda x: x
if self._y_index == CoolProp.iP or self._y_index == CoolProp.iDmass:
filter_y = lambda y: np.log10(y)
else:
filter_y = lambda y: y
if ( # (filter_x(dew.x[dew_filter][-1])-filter_x(bub.x[bub_filter][-1])) > 0.010*filter_x(dx) and
(filter_x(dew.x[dew_filter][-1]) - filter_x(bub.x[bub_filter][-1])) < 0.050 * filter_x(dx) or
(filter_y(dew.y[dew_filter][-1]) - filter_y(bub.y[bub_filter][-1])) < 0.010 * filter_y(dy)):
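# The dew and bubble branches stop short of the critical point; bridge
# the remaining gap with a cubic interpolation through the last valid
# points of both branches so the saturation dome closes.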
x = np.linspace(bub.x[bub_filter][-1], dew.x[dew_filter][-1], 11)
y = interpolate_values_1d(
np.append(bub.x[bub_filter], dew.x[dew_filter][::-1]),
np.append(bub.y[bub_filter], dew.y[dew_filter][::-1]),
x_points=x,
kind='cubic')
self.axis.plot(dimx.from_SI(x), dimy.from_SI(y), **sat_props)
warnings.warn("Detected an incomplete phase envelope, fixing it numerically.")
xcrit = x[5]; ycrit = y[5]
#Tcrit = self.state.trivial_keyed_output(CoolProp.iT_critical)
#Dcrit = self.state.trivial_keyed_output(CoolProp.irhomass_critical)
# try:
# self.state.update(CoolProp.DmassT_INPUTS, Dcrit, Tcrit)
# xcrit = self.state.keyed_output(self._x_index)
# ycrit = self.state.keyed_output(self._y_index)
# except:
# xcrit = x[5]; ycrit = y[5]
# pass
#self.axis.plot(dimx.from_SI(np.array([bub.x[bub_filter][-1], dew.x[dew_filter][-1]])),dimy.from_SI(np.array([bub.y[bub_filter][-1], dew.y[dew_filter][-1]])),'o')
for line in self.isolines[i]:
if line.i_index == CoolProp.iQ:
if line.value == 0.0 or line.value == 1.0:
self.axis.plot(dimx.from_SI(line.x), dimy.from_SI(line.y), **sat_props)
else:
if xcrit is not None and ycrit is not None:
self.axis.plot(dimx.from_SI(np.append(line.x, xcrit)), dimy.from_SI(np.append(line.y, ycrit)), **props)
# try:
# x = np.append(line.x,[xcrit])
# y = np.append(line.y,[ycrit])
# fltr = np.logical_and(np.isfinite(x),np.isfinite(y))
# f = interp1d(x[fltr][-3:],y[fltr][-3:],kind='linear') # could also be quadratic
# x = np.linspace(x[fltr][-2], x[fltr][-1], 5)
# y = f(x)
# #f = interp1d(y[fltr][-5:],x[fltr][-5:],kind='cubic')
# #y = np.linspace(y[fltr][-2], y[fltr][-1], 5)
# #x = f(y)
# self.axis.plot(dimx.from_SI(np.append(line.x,x)),dimy.from_SI(np.append(line.y,y)),**props)
# except:
# self.axis.plot(dimx.from_SI(np.append(line.x,xcrit)),dimy.from_SI(np.append(line.y,ycrit)),**props)
# pass
else:
self.axis.plot(dimx.from_SI(line.x), dimy.from_SI(line.y), **props)
def draw(self):
self.get_axis_limits()
self.draw_isolines()
# def label_isolines(self, dx=0.075, dy=0.100):
# [xmin, xmax, ymin, ymax] = self.get_axis_limits()
# for i in self.isolines:
# for line in self.isolines[i]:
# if self.get_x_y_dydx(xv, yv, x)
def draw_process(self, statecontainer, points=None, line_opts=None):
""" Draw process or cycle from x and y values in axis units
Parameters
----------
statecontainer : CoolProp.Plots.SimpleCycles.StateContainer()
A state container object that contains all the information required to draw the process.
Note that points that appear several times get added to a special list of highlighted points.
line_opts : dict
Line options (please see :func:`matplotlib.pyplot.plot`), optional
Use this parameter to pass a label for the legend.
Examples
--------
>>> import CoolProp
>>> from CoolProp.Plots import PropertyPlot
>>> pp = PropertyPlot('HEOS::Water', 'TS', unit_system='EUR')
>>> pp.calc_isolines(CoolProp.iP )
>>> pp.calc_isolines(CoolProp.iHmass )
>>> pp.calc_isolines(CoolProp.iQ, num=11)
>>> cycle = SimpleRankineCycle('HEOS::Water', 'TS', unit_system='EUR')
>>> T0 = 300
>>> pp.state.update(CoolProp.QT_INPUTS,0.0,T0+15)
>>> p0 = pp.state.keyed_output(CoolProp.iP)
>>> T2 = 700
>>> pp.state.update(CoolProp.QT_INPUTS,1.0,T2-150)
>>> p2 = pp.state.keyed_output(CoolProp.iP)
>>> cycle.simple_solve(T0, p0, T2, p2, 0.7, 0.8, SI=True)
>>> cycle.steps = 50
>>> sc = cycle.get_state_changes()
>>> pp.draw_process(sc)
>>> # The same calculation can be carried out in another unit system:
>>> cycle.simple_solve(T0-273.15-10, p0/1e5, T2-273.15+50, p2/1e5-5, 0.7, 0.8, SI=False)
>>> sc2 = cycle.get_state_changes()
>>> pp.draw_process(sc2, line_opts={'color':'blue', 'lw':1.5})
>>> pp.show()
"""
warnings.warn("You called the function \"draw_process\", which is not tested.", UserWarning)
# Default values
line_opts = line_opts or {'color': 'r', 'lw': 1.5}
dimx = self.system[self.x_index]
dimy = self.system[self.y_index]
marker = line_opts.pop('marker', 'o')
style = line_opts.pop('linestyle', 'solid')
style = line_opts.pop('ls', style)
if points is None: points = StateContainer()
xdata = []
ydata = []
old = statecontainer[len(statecontainer) - 1]
for i in statecontainer:
point = statecontainer[i]
if point == old:
points.append(point)
old = point
continue
xdata.append(point[self.x_index])
ydata.append(point[self.y_index])
old = point
xdata = dimx.from_SI(np.asarray(xdata))
ydata = dimy.from_SI(np.asarray(ydata))
self.axis.plot(xdata, ydata, marker='None', linestyle=style, **line_opts)
xdata = np.empty(len(points))
ydata = np.empty(len(points))
for i in points:
point = points[i]
xdata[i] = point[self.x_index]
ydata[i] = point[self.y_index]
xdata = dimx.from_SI(np.asarray(xdata))
ydata = dimy.from_SI(np.asarray(ydata))
line_opts['label'] = ''
self.axis.plot(xdata, ydata, marker=marker, linestyle='None', **line_opts)
def InlineLabel(xv, yv, x=None, y=None, axis=None, fig=None):
warnings.warn("You called the deprecated function \"InlineLabel\", use \"BasePlot.inline_label\".", DeprecationWarning)
plot = PropertyPlot("water", "TS", figure=fig, axis=axis)
return plot.inline_label(xv, yv, x, y)
class PropsPlot(PropertyPlot):
def __init__(self, fluid_name, graph_type, units='KSI', reciprocal_density=False, **kwargs):
super(PropsPlot, self).__init__(fluid_name, graph_type, unit_system=units, reciprocal_density=reciprocal_density, **kwargs)
warnings.warn("You called the deprecated class \"PropsPlot\", use \"PropertyPlot\".", DeprecationWarning)
if __name__ == "__main__":
plot = PropertyPlot('HEOS::n-Pentane', 'PD', unit_system='EUR') # , reciprocal_density=True)
plot.calc_isolines(CoolProp.iT)
plot.calc_isolines(CoolProp.iQ, num=11)
# plot.calc_isolines(CoolProp.iSmass)
# plot.calc_isolines(CoolProp.iHmass)
plot.show()
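# A hedged usage sketch (commented out): calc_isolines also accepts an
# explicit iso_range, as handled in the body of calc_isolines above. The
# values below are illustrative placeholders, not recommended settings.
# plot2 = PropertyPlot('HEOS::Water', 'TS', unit_system='EUR')
# plot2.calc_isolines(CoolProp.iP, iso_range=[1e5, 1e7], num=5)
# plot2.calc_isolines(CoolProp.iQ, num=11)
# plot2.show()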
|
|
#(C) Copyright Syd Logan 2020
#(C) Copyright Thousand Smiles Foundation 2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
unit tests for audiogram application. Assumes django server is up
and running on the specified host and port
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.patient.patient import CreatePatient, DeletePatient
from tschartslib.clinic.clinic import CreateClinic, DeleteClinic
from tschartslib.station.station import CreateStation, DeleteStation
from tschartslib.image.image import CreateImage, DeleteImage
class CreateAudiogram(ServiceAPI):
def __init__(self, host, port, token, clinic, image, patient, comment=""):
super(CreateAudiogram, self).__init__()
self.setHttpMethod("POST")
self.setHost(host)
self.setPort(port)
self.setToken(token)
payload = {"patient": patient, "clinic": clinic, "image": image, "comment": comment}
self.setPayload(payload)
self.setURL("tscharts/v1/audiogram/")
class GetAudiogram(ServiceAPI):
def makeURL(self):
hasQArgs = False
if self._id is not None:
base = "tscharts/v1/audiogram/{}/".format(self._id)
else:
base = "tscharts/v1/audiogram/"
if self._clinic is not None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "clinic={}".format(self._clinic)
hasQArgs = True
if self._patient is not None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "patient={}".format(self._patient)
hasQArgs = True
self.setURL(base)
def __init__(self, host, port, token):
super(GetAudiogram, self).__init__()
self.setHttpMethod("GET")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._patient = None
self._clinic = None
self._image = None
self._id = None
def setId(self, id):
self._id = id
self.makeURL()
def setClinic(self, clinic):
self._clinic = clinic
self.makeURL()
def setPatient(self, patient):
self._patient = patient
self.makeURL()
def setImage(self, image):
self._image = image
self.makeURL()
class UpdateAudiogram(ServiceAPI):
def __init__(self, host, port, token, id):
super(UpdateAudiogram, self).__init__()
self.setHttpMethod("PUT")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._payload = {}
self.setPayload(self._payload)
self.setURL("tscharts/v1/audiogram/{}/".format(id))
def setComment(self, comment):
self._payload["comment"] = comment
self.setPayload(self._payload)
class DeleteAudiogram(ServiceAPI):
def __init__(self, host, port, token, id):
super(DeleteAudiogram, self).__init__()
self.setHttpMethod("DELETE")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setURL("tscharts/v1/audiogram/{}/".format(id))
class TestTSAudiogram(unittest.TestCase):
def setUp(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("token" in ret[1])
global token
token = ret[1]["token"]
def testCreateAudiogram(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
x = CreateStation(host, port, token, "ENT")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
stationid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "[email protected]"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "[email protected]"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateImage(host, port, token)
x.setPatient(patientid)
x.setClinic(clinicid)
x.setStation(stationid)
x.setType("Audiogram")
x.setData("ABCDEFG") # doesn't matter if it is actual image data
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
imageid = int(ret[1]["id"])
x = CreateAudiogram(host, port, token, patient=patientid, clinic=clinicid, image=imageid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
x = DeleteAudiogram(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
# non-existent clinic param
x = CreateAudiogram(host, port, token, clinic=9999, image=imageid, patient=patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
# non-existent image param
x = CreateAudiogram(host, port, token, clinic=clinicid, image=9999, patient=patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
# non-existent patient param
x = CreateAudiogram(host, port, token, clinic=clinicid, image=imageid, patient=9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteStation(host, port, token, stationid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def testDeleteAudiogram(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
x = CreateStation(host, port, token, "ENT")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
stationid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "[email protected]"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "[email protected]"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateImage(host, port, token)
x.setPatient(patientid)
x.setClinic(clinicid)
x.setStation(stationid)
x.setType("Audiogram")
x.setData("ABCDEFG") # doesn't matter if it is actual image data
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
imageid = int(ret[1]["id"])
x = CreateAudiogram(host, port, token, patient=patientid, clinic=clinicid, image=imageid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
x = DeleteAudiogram(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
x = DeleteAudiogram(host, port, token, 9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteAudiogram(host, port, token, None)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteAudiogram(host, port, token, "")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteAudiogram(host, port, token, "Hello")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteStation(host, port, token, stationid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def testUpdateAudiogram(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
x = CreateStation(host, port, token, "ENT")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
stationid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "[email protected]"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "[email protected]"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateImage(host, port, token)
x.setPatient(patientid)
x.setClinic(clinicid)
x.setStation(stationid)
x.setType("Audiogram")
x.setData("ABCDEFG") # doesn't matter if it is actual image data
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
imageid = int(ret[1]["id"])
x = CreateAudiogram(host, port, token, patient=patientid, clinic=clinicid, image=imageid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
self.assertTrue("image" in ret[1])
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
self.assertTrue("comment" in ret[1])
self.assertTrue(ret[1]["comment"] == "")
x = UpdateAudiogram(host, port, token, id)
x.setComment("A test comment")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
self.assertTrue("image" in ret[1])
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
self.assertTrue("comment" in ret[1])
self.assertTrue(ret[1]["comment"] == "A test comment")
x = UpdateAudiogram(host, port, token, id)
x.setComment("No comment")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
self.assertTrue("image" in ret[1])
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
self.assertTrue("comment" in ret[1])
self.assertTrue(ret[1]["comment"] == "No comment")
x = UpdateAudiogram(host, port, token, id)
x.setComment("Yet another comment")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
self.assertTrue("image" in ret[1])
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
self.assertTrue("comment" in ret[1])
self.assertTrue(ret[1]["comment"] == "Yet another comment")
x = UpdateAudiogram(host, port, token, id)
x.setComment("")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
self.assertTrue("image" in ret[1])
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
self.assertTrue("comment" in ret[1])
self.assertTrue(ret[1]["comment"] == "")
x = UpdateAudiogram(host, port, token, id)
x.setComment(999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 400)
x = GetAudiogram(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("patient" in ret[1])
patientId = int(ret[1]["patient"])
self.assertTrue(patientId == patientid)
self.assertTrue("image" in ret[1])
imageId = int(ret[1]["image"])
self.assertTrue(imageId == imageid)
self.assertTrue("comment" in ret[1])
self.assertTrue(ret[1]["comment"] == "")
x = DeleteAudiogram(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteStation(host, port, token, stationid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def testGetAllAudiograms(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
x = CreateStation(host, port, token, "ENT")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
stationid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "[email protected]"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "[email protected]"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
ids = []
delids = []
imageids = []
for x in range(0, 10):
x = CreateImage(host, port, token)
x.setPatient(patientid)
x.setClinic(clinicid)
x.setStation(stationid)
x.setType("Audiogram")
x.setData("ABCDEFG") # doesn't matter if it is actual image data
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
imageid = int(ret[1]["id"])
imageids.append(imageid)
x = CreateAudiogram(host, port, token, patient=patientid, clinic=clinicid, image=imageid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ids.append(ret[1]["id"])
delids.append(ret[1]["id"])
x = GetAudiogram(host, port, token)
x.setClinic(9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setPatient(9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setClinic(9999)
x.setPatient(9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setClinic(clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
rtcs = ret[1]
self.assertTrue(len(rtcs) == len(ids))
x = GetAudiogram(host, port, token)
x.setPatient(patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
rtcs = ret[1]
self.assertTrue(len(rtcs) == len(ids))
x = GetAudiogram(host, port, token)
x.setImage(imageid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
rtcs = ret[1]
self.assertTrue(len(rtcs) == len(ids))
for x in rtcs:
if x["id"] in ids:
ids.remove(x["id"])
if len(ids):
self.fail("failed to find all created audiogram items {}".format(ids))
for x in delids:
y = DeleteAudiogram(host, port, token, x)
ret = y.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetAudiogram(host, port, token)
x.setClinic(clinicid)
x.setPatient(patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setClinic(clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setPatient(patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setClinic(9999)
x.setPatient(9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setClinic(9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
x = GetAudiogram(host, port, token)
x.setPatient(9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 0)
for x in imageids:
y = DeleteImage(host, port, token, x)
ret = y.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteStation(host, port, token, stationid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def usage():
print("audiogram [-h host] [-p port] [-u username] [-w password]")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
global host
host = "127.0.0.1"
global port
port = 8000
global username
username = None
global password
password = None
for o, a in opts:
if o == "-h":
host = a
elif o == "-p":
port = int(a)
elif o == "-u":
username = a
elif o == "-w":
password = a
else:
assert False, "unhandled option"
unittest.main(argv=[sys.argv[0]])
if __name__ == "__main__":
main()
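# Example invocation (hedged; host, port and credentials below are
# placeholders for your own test server):
#   python audiogram.py -h 127.0.0.1 -p 8000 -u myuser -w mypassword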
|
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.pci import device
from nova.pci import stats
LOG = logging.getLogger(__name__)
class PciDevTracker(object):
"""Manage pci devices in a compute node.
This class fetches pci passthrough information from hypervisor
and tracks the usage of these devices.
It's called by compute node resource tracker to allocate and free
devices to/from instances, and to update the available pci passthrough
devices information from hypervisor periodically. The devices
information is updated to DB when devices information is changed.
"""
def __init__(self, node_id=None):
"""Create a pci device tracker.
If a node_id is passed in, it will fetch pci device information
from the database; otherwise it will create an empty devices list
and the resource tracker will update the node_id information later.
"""
super(PciDevTracker, self).__init__()
self.stale = {}
self.node_id = node_id
self.stats = stats.PciDeviceStats()
if node_id:
self.pci_devs = list(
objects.PciDeviceList.get_by_compute_node(context, node_id))
else:
self.pci_devs = []
self._initial_instance_usage()
def _initial_instance_usage(self):
self.allocations = collections.defaultdict(list)
self.claims = collections.defaultdict(list)
for dev in self.pci_devs:
uuid = dev['instance_uuid']
if dev['status'] == 'claimed':
self.claims[uuid].append(dev)
elif dev['status'] == 'allocated':
self.allocations[uuid].append(dev)
elif dev['status'] == 'available':
self.stats.add_device(dev)
@property
def all_devs(self):
return self.pci_devs
def save(self, context):
for dev in self.pci_devs:
if dev.obj_what_changed():
dev.save(context)
self.pci_devs = [dev for dev in self.pci_devs
if dev['status'] != 'deleted']
@property
def pci_stats(self):
return self.stats
def set_hvdevs(self, devices):
"""Sync the pci device tracker with hypervisor information.
To support pci device hot plug, we sync with the hypervisor
periodically, fetching all device information from the hypervisor,
updating the tracker and syncing the DB information.
Devices should not be hot-plugged when assigned to a guest,
but possibly the hypervisor has no such guarantee. The best
we can do is to give a warning if a device is changed
or removed while assigned.
"""
exist_addrs = set([dev['address'] for dev in self.pci_devs])
new_addrs = set([dev['address'] for dev in devices])
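# Reconcile the tracked devices with the hypervisor report: devices that
# disappeared are removed (forced to 'removed' if still assigned),
# devices present in both are updated (deferred via self.stale while
# assigned), and brand new addresses are added to the pool and the stats.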
for existed in self.pci_devs:
if existed['address'] in exist_addrs - new_addrs:
try:
device.remove(existed)
except exception.PciDeviceInvalidStatus as e:
LOG.warning(_LW("Trying to remove device with %(status)s "
"ownership %(instance_uuid)s because of "
"%(pci_exception)s"),
{'status': existed.status,
'instance_uuid': existed.instance_uuid,
'pci_exception': e.format_message()})
# Note(yjiang5): remove the device by force so that
# db entry is cleaned in next sync.
existed.status = 'removed'
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
self.stats.remove_device(existed)
else:
new_value = next((dev for dev in devices if
dev['address'] == existed['address']))
new_value['compute_node_id'] = self.node_id
if existed['status'] in ('claimed', 'allocated'):
# Pci properties may change while assigned because of
# hotplug or config changes. Although normally this should
# not happen.
# As the devices have been assigned to an instance, we defer
# the change till the instance is destroyed. We will
# not sync the new properties with database before that.
# TODO(yjiang5): Not sure if this is a right policy, but
# at least it avoids some confusion and, if needed,
# we can add more action like killing the instance
# by force in future.
self.stale[new_value['address']] = new_value
else:
device.update_device(existed, new_value)
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
dev_obj = objects.PciDevice.create(dev)
self.pci_devs.append(dev_obj)
self.stats.add_device(dev_obj)
def _claim_instance(self, context, instance, prefix=''):
pci_requests = objects.InstancePCIRequests.get_by_instance(
context, instance)
if not pci_requests.requests:
return None
devs = self.stats.consume_requests(pci_requests.requests)
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
device.claim(dev, instance)
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
device.allocate(dev, instance)
def _free_device(self, dev, instance=None):
device.free(dev, instance)
stale = self.stale.pop(dev['address'], None)
if stale:
device.update_device(dev, stale)
self.stats.add_device(dev)
def _free_instance(self, instance):
# Note(yjiang5): When an instance is resized, the devices in the
# destination node are claimed to the instance in prep_resize stage.
# However, the instance contains only allocated devices
# information, not the claimed one. So we can't use
# instance['pci_devices'] to check the devices to be freed.
for dev in self.pci_devs:
if (dev['status'] in ('claimed', 'allocated') and
dev['instance_uuid'] == instance['uuid']):
self._free_device(dev)
def update_pci_for_instance(self, context, instance):
"""Update instance's pci usage information.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
uuid = instance['uuid']
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state == vm_states.DELETED:
if self.allocations.pop(uuid, None):
self._free_instance(instance)
elif self.claims.pop(uuid, None):
self._free_instance(instance)
elif task_state == task_states.RESIZE_MIGRATED:
devs = self.allocations.pop(uuid, None)
if devs:
self._free_instance(instance)
elif task_state == task_states.RESIZE_FINISH:
devs = self.claims.pop(uuid, None)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
elif (uuid not in self.allocations and
uuid not in self.claims):
devs = self._claim_instance(context, instance)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
def update_pci_for_migration(self, context, instance, sign=1):
"""Update instance's pci usage information when it is migrated.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.
:param sign: claim devices for instance when sign is 1, remove
the claims when sign is -1
"""
uuid = instance['uuid']
if sign == 1 and uuid not in self.claims:
devs = self._claim_instance(context, instance, 'new_')
if devs:
self.claims[uuid] = devs
if sign == -1 and uuid in self.claims:
self._free_instance(instance)
def clean_usage(self, instances, migrations, orphans):
"""Remove all usages for instances not passed in the parameter.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
existed = [inst['uuid'] for inst in instances]
existed += [mig['instance_uuid'] for mig in migrations]
existed += [inst['uuid'] for inst in orphans]
for uuid in self.claims.keys():
if uuid not in existed:
devs = self.claims.pop(uuid, [])
for dev in devs:
self._free_device(dev)
for uuid in self.allocations.keys():
if uuid not in existed:
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
def set_compute_node_id(self, node_id):
"""Set the compute node id that this object is tracking for.
In current resource tracker implementation, the
compute_node entry is created in the last step of
update_available_resources, thus we have to lazily set the
compute_node_id at that time.
"""
if self.node_id and self.node_id != node_id:
raise exception.PciTrackerInvalidNodeId(node_id=self.node_id,
new_node_id=node_id)
self.node_id = node_id
for dev in self.pci_devs:
dev.compute_node_id = node_id
def get_instance_pci_devs(inst, request_id=None):
"""Get the devices allocated to one or all requests for an instance.
- For generic PCI request, the request id is None.
- For sr-iov networking, the request id is a valid uuid
- There are a couple of cases where all the PCI devices allocated to an
instance need to be returned. Refer to libvirt driver that handles
soft_reboot and hard_boot of 'xen' instances.
"""
pci_devices = inst.pci_devices
return [device for device in pci_devices if
device.request_id == request_id or request_id == 'all']
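# Hedged usage sketch (commented out; assumes `inst` is an Instance object
# with its pci_devices field populated):
#   generic_devs = get_instance_pci_devs(inst)                 # request_id is None
#   every_dev = get_instance_pci_devs(inst, request_id='all')  # all allocated devices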
|
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module contains code for extracting features from data
"""
from __future__ import absolute_import, division, print_function
from six.moves import zip
import numpy as np
from collections import deque
from .fitting import fit_quad_to_peak
import logging
logger = logging.getLogger(__name__)
class PeakRejection(Exception):
"""Custom exception class to indicate that the refine function rejected
the candidate peak.
This uses the exception handling framework in a method akin to
`StopIteration` to indicate that there will be no return value.
"""
pass
def peak_refinement(x, y, cands, window, refine_function, refine_args=None):
"""Refine candidate locations
Parameters
----------
x : array
The independent variable, does not need to be evenly spaced.
y : array
The dependent variable. Must correspond 1:1 with the values in `x`
cands : array
Array of the indices in `x` (and `y`) for the candidate peaks.
refine_function : function
A function which takes a section of data with a peak in it and returns
the location and height of the peak to sub-sample accuracy. Additional
parameters can be passed through via the refine_args kwarg.
The function signature must be::
center, height = refine_func(x, y, **kwargs)
This function may raise `PeakRejection` to indicate no suitable
peak was found
window : int
How many samples on either side of each candidate location
are passed to the refine function. The
window will be truncated near the boundaries. The length of the
data passed to the refine function will be (2 * window + 1).
refine_args : dict, optional
Keyword arguments passed to the refine_function
Returns
-------
peak_locations : array
The locations of the peaks
peak_heights : array
The heights of the peaks
Examples
--------
>>> x = np.arange(512)
>>> tt = np.zeros(512)
>>> tt += np.exp(-((x - 150.55)/10)**2)
>>> tt += np.exp(-((x - 450.75)/10)**2)
>>> cands = scipy.signal.argrelmax(tt)[0]
>>> print(peak_refinement(x, tt, cands, 10, refine_quadratic))
(array([ 150.62286432, 450.7909412 ]), array([ 0.96435832, 0.96491501]))
>>> print(peak_refinement(x, tt, cands, 10, refine_log_quadratic))
(array([ 150.55, 450.75]), array([ 1., 1.]))
"""
# clean up input
x = np.asarray(x)
y = np.asarray(y)
cands = np.asarray(cands, dtype=int)
window = int(window)
if refine_args is None:
refine_args = dict()
# local working variables
out_tmp = deque()
max_ind = len(x)
for ind in cands:
slc = slice(np.max([0, ind-window]),
np.min([max_ind, ind + window + 1]))
try:
ret = refine_function(x[slc], y[slc], **refine_args)
except PeakRejection:
# We are catching the PeakRejections raised here as
# an indication that no suitable peak was found
continue
else:
out_tmp.append(ret)
return tuple([np.array(_) for _ in zip(*out_tmp)])
def refine_quadratic(x, y, Rval_thresh=None):
"""
Attempts to refine the peaks by fitting to
a quadratic function.
Parameters
----------
x : array
Independent variable
y : array
Dependent variable
Rval_thresh : float, optional
Threshold for the R^2 value of the fit. If the computed R^2 is worse
than this threshold, PeakRejection will be raised
Returns
-------
center : float
Refined estimate for center
height : float
Refined estimate for height
Raises
------
PeakRejection
Raised to indicate that no suitable peak was found in the
interval
"""
beta, R2 = fit_quad_to_peak(x, y)
if Rval_thresh is not None and R2 < Rval_thresh:
raise PeakRejection()
return beta[1], beta[2]
def refine_log_quadratic(x, y, Rval_thresh=None):
"""
Attempts to refine the peaks by fitting a quadratic to the log of
the y-data. This is a linear approximation of fitting a Gaussian.
Parameters
----------
x : array
Independent variable
y : array
Dependent variable
Rval_thresh : float, optional
Threshold for the R^2 value of the fit. If the computed R^2 is worse
than this threshold, PeakRejection will be raised
Returns
-------
center : float
Refined estimate for center
height : float
Refined estimate for height
Raises
------
PeakRejection
Raised to indicate that no suitable peak was found in the
interval
"""
beta, R2 = fit_quad_to_peak(x, np.log(y))
if Rval_thresh is not None and R2 < Rval_thresh:
raise PeakRejection()
return beta[1], np.exp(beta[2])
def filter_n_largest(y, cands, N):
"""Filters the N largest candidate peaks
Return a maximum of N largest candidates. If N > len(cands) then
all of the cands will be returned sorted, else the indices
of the N largest peaks will be returned in descending order.
Parameters
----------
y : array
Dependent variable (the signal values used to rank the candidates)
cands : array
An array containing the indices of candidate peaks
N : int
The maximum number of peaks to return, sorted by size.
Must be positive
Returns
-------
cands : array
An array of the indices of up to the N largest candidates
"""
cands = np.asarray(cands)
N = int(N)
if N <= 0:
raise ValueError("The maximum number of peaks to return must "
"be positive not {}".format(N))
sorted_args = np.argsort(y[cands])
# cut out if asking for more peaks than exist
if len(cands) < N:
return cands[sorted_args][::-1]
return cands[sorted_args[-N:]][::-1]
def filter_peak_height(y, cands, thresh, window=5):
"""
Filter to remove candidates that are too small. This
is implemented by looking at the relative height (max - min)
of the peak in a window around the candidate peak.
Parameters
----------
y : array
Dependent variable (the signal in which the peaks sit)
cands : array
An array containing the indices of candidate peaks
thresh : int
The minimum peak-to-peak size of the candidate peak to be accepted
window : int, optional
The size of the window around the peak to consider
Returns
-------
cands : array
An array of the indices which pass the filter
"""
y = np.asarray(y)
out_tmp = deque()
max_ind = len(y)
for ind in cands:
slc = slice(np.max([0, ind-window]),
np.min([max_ind, ind + window + 1]))
pk_hght = np.ptp(y[slc])
if pk_hght > thresh:
out_tmp.append(ind)
return np.array(out_tmp)
# add our refinement functions as an attribute on peak_refinement
# to make auto-wrapping for vistrails easier.
peak_refinement.refine_function = [refine_log_quadratic, refine_quadratic]
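# A minimal, hedged end-to-end sketch (commented out) combining the helpers
# above; it assumes scipy is available and mirrors the synthetic data from
# the peak_refinement docstring:
#   import scipy.signal
#   x = np.arange(512)
#   y = np.exp(-((x - 150.55) / 10) ** 2) + np.exp(-((x - 450.75) / 10) ** 2)
#   cands = scipy.signal.argrelmax(y)[0]
#   cands = filter_peak_height(y, cands, thresh=0.1, window=5)
#   cands = filter_n_largest(y, cands, N=5)
#   centers, heights = peak_refinement(x, y, cands, 10, refine_log_quadratic)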
|
|
#! /opt/local/bin/pythonw2.7
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["SubsegmentPen","SubsegmentsToCurvesPen", "segmentGlyph", "fitGlyph"]
from fontTools.pens.basePen import BasePen
import numpy as np
from numpy import array as v
from numpy.linalg import norm
from robofab.pens.adapterPens import GuessSmoothPointPen
from robofab.pens.pointPen import BasePointToSegmentPen
class SubsegmentsToCurvesPointPen(BasePointToSegmentPen):
def __init__(self, glyph, subsegmentGlyph, subsegments):
BasePointToSegmentPen.__init__(self)
self.glyph = glyph
self.subPen = SubsegmentsToCurvesPen(None, glyph.getPen(), subsegmentGlyph, subsegments)
def setMatchTangents(self, b):
self.subPen.matchTangents = b
def _flushContour(self, segments):
#
# adapted from robofab.pens.adapterPens.rfUFOPointPen
#
assert len(segments) >= 1
# if we only have one point and it has a name, we must have an anchor
first = segments[0]
segmentType, points = first
pt, smooth, name, kwargs = points[0]
if len(segments) == 1 and name != None:
self.glyph.appendAnchor(name, pt)
return
else:
segmentType, points = segments[-1]
movePt, smooth, name, kwargs = points[-1]
if smooth:
# last point is smooth, set pen to start smooth
self.subPen.setLastSmooth(True)
if segmentType == 'line':
del segments[-1]
self.subPen.moveTo(movePt)
# do the rest of the segments
for segmentType, points in segments:
isSmooth = True in [smooth for pt, smooth, name, kwargs in points]
pp = [pt for pt, smooth, name, kwargs in points]
if segmentType == "line":
assert len(pp) == 1
if isSmooth:
self.subPen.smoothLineTo(pp[0])
else:
self.subPen.lineTo(pp[0])
elif segmentType == "curve":
assert len(pp) == 3
if isSmooth:
self.subPen.smoothCurveTo(*pp)
else:
self.subPen.curveTo(*pp)
elif segmentType == "qcurve":
assert 0, "qcurve not supported"
else:
assert 0, "illegal segmentType: %s" % segmentType
self.subPen.closePath()
def addComponent(self, glyphName, transform):
self.subPen.addComponent(glyphName, transform)
class SubsegmentsToCurvesPen(BasePen):
def __init__(self, glyphSet, otherPen, subsegmentGlyph, subsegments):
BasePen.__init__(self, None)
self.otherPen = otherPen
self.ssglyph = subsegmentGlyph
self.subsegments = subsegments
self.contourIndex = -1
self.segmentIndex = -1
self.lastPoint = (0,0)
self.lastSmooth = False
self.nextSmooth = False
def setLastSmooth(self, b):
self.lastSmooth = b
def _moveTo(self, (x, y)):
self.contourIndex += 1
self.segmentIndex = 0
self.startPoint = (x,y)
p = self.ssglyph.contours[self.contourIndex][0].points[0]
self.otherPen.moveTo((p.x, p.y))
self.lastPoint = (x,y)
def _lineTo(self, (x, y)):
self.segmentIndex += 1
index = self.subsegments[self.contourIndex][self.segmentIndex][0]
p = self.ssglyph.contours[self.contourIndex][index].points[0]
self.otherPen.lineTo((p.x, p.y))
self.lastPoint = (x,y)
self.lastSmooth = False
def smoothLineTo(self, (x, y)):
self.lineTo((x,y))
self.lastSmooth = True
def smoothCurveTo(self, (x1, y1), (x2, y2), (x3, y3)):
self.nextSmooth = True
self.curveTo((x1, y1), (x2, y2), (x3, y3))
self.nextSmooth = False
self.lastSmooth = True
def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
self.segmentIndex += 1
c = self.ssglyph.contours[self.contourIndex]
n = len(c)
startIndex = (self.subsegments[self.contourIndex][self.segmentIndex-1][0])
segmentCount = (self.subsegments[self.contourIndex][self.segmentIndex][1])
endIndex = (startIndex + segmentCount + 1) % (n)
indices = [(startIndex + i) % (n) for i in range(segmentCount + 1)]
points = np.array([(c[i].points[0].x, c[i].points[0].y) for i in indices])
prevPoint = (c[(startIndex - 1)].points[0].x, c[(startIndex - 1)].points[0].y)
nextPoint = (c[(endIndex) % n].points[0].x, c[(endIndex) % n].points[0].y)
prevTangent = prevPoint - points[0]
nextTangent = nextPoint - points[-1]
tangent1 = points[1] - points[0]
tangent3 = points[-2] - points[-1]
prevTangent /= np.linalg.norm(prevTangent)
nextTangent /= np.linalg.norm(nextTangent)
tangent1 /= np.linalg.norm(tangent1)
tangent3 /= np.linalg.norm(tangent3)
tangent1, junk = self.smoothTangents(tangent1, prevTangent, self.lastSmooth)
tangent3, junk = self.smoothTangents(tangent3, nextTangent, self.nextSmooth)
if self.matchTangents:
cp = fitBezier(points, tangent1, tangent3)
cp[1] = norm(cp[1] - cp[0]) * tangent1 / norm(tangent1) + cp[0]
cp[2] = norm(cp[2] - cp[3]) * tangent3 / norm(tangent3) + cp[3]
else:
cp = fitBezier(points)
# if self.ssglyph.name == 'r':
# print "-----------"
# print self.lastSmooth, self.nextSmooth
# print "%i %i : %i %i \n %i %i : %i %i \n %i %i : %i %i"%(x1,y1, cp[1,0], cp[1,1], x2,y2, cp[2,0], cp[2,1], x3,y3, cp[3,0], cp[3,1])
self.otherPen.curveTo((cp[1,0], cp[1,1]), (cp[2,0], cp[2,1]), (cp[3,0], cp[3,1]))
self.lastPoint = (x3, y3)
self.lastSmooth = False
def smoothTangents(self,t1,t2,forceSmooth = False):
if forceSmooth or (abs(t1.dot(t2)) > .95 and norm(t1-t2) > 1):
# print t1,t2,
t1 = (t1 - t2) / 2
t2 = -t1
# print t1,t2
return t1 / norm(t1), t2 / norm(t2)
def _closePath(self):
self.otherPen.closePath()
def _endPath(self):
self.otherPen.endPath()
def addComponent(self, glyphName, transformation):
self.otherPen.addComponent(glyphName, transformation)
class SubsegmentPointPen(BasePointToSegmentPen):
def __init__(self, glyph, resolution):
BasePointToSegmentPen.__init__(self)
self.glyph = glyph
self.resolution = resolution
self.subPen = SubsegmentPen(None, glyph.getPen())
def getSubsegments(self):
return self.subPen.subsegments[:]
def _flushContour(self, segments):
#
# adapted from robofab.pens.adapterPens.rfUFOPointPen
#
assert len(segments) >= 1
# if we only have one point and it has a name, we must have an anchor
first = segments[0]
segmentType, points = first
pt, smooth, name, kwargs = points[0]
if len(segments) == 1 and name != None:
self.glyph.appendAnchor(name, pt)
return
else:
segmentType, points = segments[-1]
movePt, smooth, name, kwargs = points[-1]
if segmentType == 'line':
del segments[-1]
self.subPen.moveTo(movePt)
# do the rest of the segments
for segmentType, points in segments:
points = [pt for pt, smooth, name, kwargs in points]
if segmentType == "line":
assert len(points) == 1
self.subPen.lineTo(points[0])
elif segmentType == "curve":
assert len(points) == 3
self.subPen.curveTo(*points)
elif segmentType == "qcurve":
assert 0, "qcurve not supported"
else:
assert 0, "illegal segmentType: %s" % segmentType
self.subPen.closePath()
def addComponent(self, glyphName, transform):
self.subPen.addComponent(glyphName, transform)
class SubsegmentPen(BasePen):
def __init__(self, glyphSet, otherPen, resolution=25):
BasePen.__init__(self,glyphSet)
self.resolution = resolution
self.otherPen = otherPen
self.subsegments = []
self.startContour = (0,0)
self.contourIndex = -1
def _moveTo(self, (x, y)):
self.contourIndex += 1
self.segmentIndex = 0
self.subsegments.append([])
self.subsegmentCount = 0
self.subsegments[self.contourIndex].append([self.subsegmentCount, 0])
self.startContour = (x,y)
self.lastPoint = (x,y)
self.otherPen.moveTo((x,y))
def _lineTo(self, (x, y)):
count = self.stepsForSegment((x,y),self.lastPoint)
if count < 1:
count = 1
self.subsegmentCount += count
self.subsegments[self.contourIndex].append([self.subsegmentCount, count])
for i in range(1,count+1):
x1 = self.lastPoint[0] + (x - self.lastPoint[0]) * i/float(count)
y1 = self.lastPoint[1] + (y - self.lastPoint[1]) * i/float(count)
self.otherPen.lineTo((x1,y1))
self.lastPoint = (x,y)
def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
count = self.stepsForSegment((x3,y3),self.lastPoint)
if count < 2:
count = 2
self.subsegmentCount += count
self.subsegments[self.contourIndex].append([self.subsegmentCount,count])
x = self.renderCurve((self.lastPoint[0],x1,x2,x3),count)
y = self.renderCurve((self.lastPoint[1],y1,y2,y3),count)
assert len(x) == count
if (x3 == self.startContour[0] and y3 == self.startContour[1]):
count -= 1
for i in range(count):
self.otherPen.lineTo((x[i],y[i]))
self.lastPoint = (x3,y3)
def _closePath(self):
if not (self.lastPoint[0] == self.startContour[0] and self.lastPoint[1] == self.startContour[1]):
self._lineTo(self.startContour)
# round values used by otherPen (a RoboFab SegmentToPointPen) to decide
# whether to delete duplicate points at start and end of contour
#TODO(jamesgk) figure out why we have to do this hack, then remove it
c = self.otherPen.contour
for i in [0, -1]:
c[i] = [[round(n, 5) for n in c[i][0]]] + list(c[i][1:])
self.otherPen.closePath()
def _endPath(self):
self.otherPen.endPath()
def addComponent(self, glyphName, transformation):
self.otherPen.addComponent(glyphName, transformation)
def stepsForSegment(self, p1, p2):
dist = np.linalg.norm(v(p1) - v(p2))
out = int(dist / self.resolution)
return out
def renderCurve(self,p,count):
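# Evaluate the cubic Bezier defined by control values p[0..3] at `count`
# uniform parameter steps using forward differences: f and its scaled
# derivative terms are updated incrementally instead of re-evaluating
# the cubic polynomial at every step.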
curvePoints = []
t = 1.0 / float(count)
temp = t * t
f = p[0]
fd = 3 * (p[1] - p[0]) * t
fdd_per_2 = 3 * (p[0] - 2 * p[1] + p[2]) * temp
fddd_per_2 = 3 * (3 * (p[1] - p[2]) + p[3] - p[0]) * temp * t
fddd = fddd_per_2 + fddd_per_2
fdd = fdd_per_2 + fdd_per_2
fddd_per_6 = fddd_per_2 * (1.0 / 3)
for i in range(count):
f = f + fd + fdd_per_2 + fddd_per_6
fd = fd + fdd + fddd_per_2
fdd = fdd + fddd
fdd_per_2 = fdd_per_2 + fddd_per_2
curvePoints.append(f)
return curvePoints
def fitBezierSimple(pts):
T = [np.linalg.norm(pts[i]-pts[i-1]) for i in range(1,len(pts))]
tsum = np.sum(T)
T = [0] + T
T = [np.sum(T[0:i+1])/tsum for i in range(len(pts))]
T = [[t**3, t**2, t, 1] for t in T]
T = np.array(T)
M = np.array([[-1, 3, -3, 1],
[ 3, -6, 3, 0],
[-3, 3, 0, 0],
[ 1, 0, 0, 0]])
T = T.dot(M)
T = np.concatenate((T, np.array([[100,0,0,0], [0,0,0,100]])))
# pts = np.vstack((pts, pts[0] * 100, pts[-1] * 100))
C = np.linalg.lstsq(T, pts)
return C[0]
def subdivideLineSegment(pts):
out = [pts[0]]
for i in range(1, len(pts)):
out.append(pts[i-1] + (pts[i] - pts[i-1]) * .5)
out.append(pts[i])
return np.array(out)
def fitBezier(pts,tangent0=None,tangent3=None):
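# Least-squares fit of a cubic Bezier through the sampled points `pts`,
# keeping the first and last points fixed; when unit end tangents are
# given they are enforced as heavily weighted linear constraints on the
# two inner control points.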
if len(pts) < 4:
pts = subdivideLineSegment(pts)
T = [np.linalg.norm(pts[i]-pts[i-1]) for i in range(1,len(pts))]
tsum = np.sum(T)
T = [0] + T
T = [np.sum(T[0:i+1])/tsum for i in range(len(pts))]
T = [[t**3, t**2, t, 1] for t in T]
T = np.array(T)
M = np.array([[-1, 3, -3, 1],
[ 3, -6, 3, 0],
[-3, 3, 0, 0],
[ 1, 0, 0, 0]])
T = T.dot(M)
n = len(pts)
pout = pts.copy()
pout[:,0] -= (T[:,0] * pts[0,0]) + (T[:,3] * pts[-1,0])
pout[:,1] -= (T[:,0] * pts[0,1]) + (T[:,3] * pts[-1,1])
TT = np.zeros((n*2,4))
for i in range(n):
for j in range(2):
TT[i*2,j*2] = T[i,j+1]
TT[i*2+1,j*2+1] = T[i,j+1]
pout = pout.reshape((n*2,1),order="C")
if tangent0 is not None and tangent3 is not None:
tangentConstraintsT = np.array([
[tangent0[1], -tangent0[0], 0, 0],
[0, 0, tangent3[1], -tangent3[0]]
])
tangentConstraintsP = np.array([
[pts[0][1] * -tangent0[0] + pts[0][0] * tangent0[1]],
[pts[-1][1] * -tangent3[0] + pts[-1][0] * tangent3[1]]
])
TT = np.concatenate((TT, tangentConstraintsT * 1000))
pout = np.concatenate((pout, tangentConstraintsP * 1000))
C = np.linalg.lstsq(TT,pout)[0].reshape((2,2))
return np.array([pts[0], C[0], C[1], pts[-1]])
def segmentGlyph(glyph,resolution=50):
g1 = glyph.copy()
g1.clear()
dp = SubsegmentPointPen(g1, resolution)
glyph.drawPoints(dp)
return g1, dp.getSubsegments()
def fitGlyph(glyph, subsegmentGlyph, subsegmentIndices, matchTangents=True):
outGlyph = glyph.copy()
outGlyph.clear()
fitPen = SubsegmentsToCurvesPointPen(outGlyph, subsegmentGlyph, subsegmentIndices)
fitPen.setMatchTangents(matchTangents)
# smoothPen = GuessSmoothPointPen(fitPen)
glyph.drawPoints(fitPen)
outGlyph.width = subsegmentGlyph.width
return outGlyph
if __name__ == '__main__':
p = SubsegmentPen(None, None)
pts = np.array([
[0,0],
[.5,.5],
[.5,.5],
[1,1]
])
print np.array(p.renderCurve(pts,10)) * 10
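# Hedged usage sketch (commented out; assumes a RoboFab glyph object `g`,
# e.g. from CurrentGlyph() inside a font editor):
# segmented, subsegments = segmentGlyph(g, resolution=25)
# fitted = fitGlyph(g, segmented, subsegments, matchTangents=True)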
|
|
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
# The Dataset comes from:
# https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits
# At face value, this looks like an easy lab;
# But it has many parts to it, so prepare yourself before starting...
def load(path_test, path_train):
# Load up the data.
# You probably could have written this..
with open(path_test, 'r') as f: testing = pd.read_csv(f)
with open(path_train, 'r') as f: training = pd.read_csv(f)
# The number of samples between training and testing can vary
# But the number of features better remain the same!
n_features = testing.shape[1]
X_test = testing.ix[:,:n_features-1]
X_train = training.ix[:,:n_features-1]
y_test = testing.ix[:,n_features-1:].values.ravel()
y_train = training.ix[:,n_features-1:].values.ravel()
#
# Special:
return X_train, X_test, y_train, y_test
def peekData(X_train):
# The 'targets' or labels are stored in y. The 'samples' or data is stored in X
print ("Peeking your data...")
fig = plt.figure()
cnt = 0
for col in range(5):
for row in range(10):
plt.subplot(5, 10, cnt + 1)
plt.imshow(X_train.ix[cnt,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
plt.axis('off')
cnt += 1
fig.set_tight_layout(True)
plt.show()
def drawPredictions(X_train, X_test, y_train, y_test):
fig = plt.figure()
# Make some guesses
y_guess = model.predict(X_test)
#
# INFO: This is the second lab we're demonstrating how to
# do multi-plots using matplot lab. In the next assignment(s),
# it'll be your responsibility to use this and assignment #1
# as tutorials to add in the plotting code yourself!
num_rows = 10
num_cols = 5
index = 0
for col in range(num_cols):
for row in range(num_rows):
plt.subplot(num_cols, num_rows, index + 1)
# 8x8 is the size of the image, 64 pixels
plt.imshow(X_test.ix[index,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
# Green = Guessed right
# Red = Fail!
fontcolor = 'g' if y_test[index] == y_guess[index] else 'r'
plt.title('Label: %i' % y_guess[index], fontsize=6, color=fontcolor)
plt.axis('off')
index += 1
fig.set_tight_layout(True)
plt.show()
#
# TODO: Pass in the file paths to the .tes and the .tra files
X_train, X_test, y_train, y_test = load('Datasets/optdigits.tes', 'Datasets/optdigits.tra')
import matplotlib.pyplot as plt
from sklearn import svm
#
# Get to know your data. It seems it's already well organized in
# [n_samples, n_features] form; each sample is an 8x8 digit image, so
# there are 64 features per sample.
# Also your labels are already shaped as [n_samples].
#peekData(X_train)
#
# TODO: Create an SVC classifier. Leave C=1, but set gamma to 0.001
# and set the kernel to linear. Then train the model on the training
# data / labels:
print ("Training SVC Classifier...")
#
# .. your code here ..
from sklearn.svm import SVC
model = SVC(C=1, gamma=0.001, kernel='linear')
model.fit(X_train, y_train)
# TODO: Calculate the score of your SVC against the testing data
print ("Scoring SVC Classifier...")
#
# .. your code here ..
score = model.score(X_test, y_test)
print ("Score:\n", score)
# Visual Confirmation of accuracy
drawPredictions(X_train, X_test, y_train, y_test)
#
# TODO: Print out the TRUE value of the 1000th digit in the test set
# By TRUE value, we mean, the actual provided label for that sample
#
# .. your code here ..
true_1000th_test_value = y_test[999]
print ("1000th test label: ", true_1000th_test_value)
#label is 1
#
# TODO: Predict the value of the 1000th digit in the test set.
# Was your model's prediction correct?
# INFO: If you get a warning on your predict line, look at the
# notes from the previous module's labs.
#
# .. your code here ..
guess_1000th_test_value = model.predict(X_test.iloc[[999]])[0]
print("1000th test prediction: ", guess_1000th_test_value)
#
# TODO: Use IMSHOW to display the 1000th test image, so you can
# visually check if it was a hard image, or an easy image
#
# .. your code here ..
#nope
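# A minimal sketch (one possible answer, not the original author's): reuse
# matplotlib's imshow, the same way peekData() does above, to look at the
# 1000th test image alongside its true label.
plt.figure(figsize=(2, 2))
plt.imshow(X_test.iloc[999, :].values.reshape(8, 8), cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('1000th test digit (true label: %i)' % true_1000th_test_value, fontsize=8)
plt.axis('off')
plt.show()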
#
# TODO: Were you able to beat the USPS advertised accuracy score
# of 98%? If so, STOP and answer the lab questions. But if you
# weren't able to get that high of an accuracy score, go back
# and change your SVC's kernel to 'poly' and re-run your lab
# again.
print('trial 2')
model = SVC(C=1, gamma=0.001, kernel='poly')
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print ("poly Score:\n", score)
#
# TODO: Were you able to beat the USPS advertised accuracy score
# of 98%? If so, STOP and answer the lab questions. But if you
# weren't able to get that high of an accuracy score, go back
# and change your SVC's kernel to 'rbf' and re-run your lab
# again.
print('trial 3')
model = SVC(C=1, gamma=0.001, kernel='rbf')
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print ("rbf Score:\n", score)
#################################################
#
# TODO: Once you're able to beat the +98% accuracy score of the
# USPS, go back into the load() method. Look for the line that
# reads "# Special:"
#
# Immediately under that line, alter X_train and y_train ONLY.
# Keep just the ___FIRST___ 4% of the samples. In other words,
# for every 100 samples found, throw away 96 of them. Make sure
# all the samples (and labels) you keep come from the start of
# X_train and y_train.
# If the first 4% is a decimal number, then use int + ceil to
# round up to the nearest whole integer.
# That operation might require some Pandas indexing skills, or
# perhaps some numpy indexing skills if you'd like to go that
# route. Feel free to ask on the class forum if you want; but
# try to exercise your own muscles first, for at least 30
# minutes, by reviewing the Pandas documentation and stack
# overflow. Through that, in the process, you'll pick up a lot.
# Part of being a machine learning practitioner is knowing what
# questions to ask and where to ask them, so this is a great
# time to start!
# Re-run your application after throwing away 96% of your training
# data (a sketch of this cut follows below). What accuracy score do you get now?
#
# TODO: Lastly, change your kernel back to linear and run your
# assignment one last time. What's the accuracy score this time?
# Surprised?
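# Continuing the sketch above (illustrative only): the same reduced training
# set, with the kernel switched back to linear.
model = SVC(C=1, gamma=0.001, kernel='linear')
model.fit(X_train_small, y_train_small)
print("linear Score with first 4% of training data:\n", model.score(X_test, y_test))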
print('Olalalalaaaaaaaaaaaaaaaaa!')
|
|
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
from __future__ import absolute_import, print_function
import os
import sys
import platform
import warnings
import numpy
from numpy import (
array, arange, empty, zeros, int32, int64, uint16, complex_, float64, rec,
copy, ones_like, where, alltrue, linspace,
sum, prod, sqrt, fmod,
sin, cos, tan, arcsin, arccos, arctan, arctan2,
sinh, cosh, tanh, arcsinh, arccosh, arctanh,
log, log1p, log10, exp, expm1, conj)
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from numpy import shape, allclose, array_equal, ravel, isnan, isinf
import numexpr
from numexpr import E, NumExpr, evaluate, disassemble, use_vml
import unittest
TestCase = unittest.TestCase
double = numpy.double
# Recommended minimum versions
minimum_numpy_version = "1.6"
class test_numexpr(TestCase):
"""Testing with 1 thread"""
nthreads = 1
def setUp(self):
numexpr.set_num_threads(self.nthreads)
def test_simple(self):
ex = 2.0 * E.a + 3.0 * E.b * E.c
sig = [('a', double), ('b', double), ('c', double)]
func = NumExpr(ex, signature=sig)
x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9]))
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
func = NumExpr(E.a)
x = arange(100.0)
y = func(x)
assert_array_equal(x, y)
def test_simple_expr(self):
func = NumExpr(E.a)
x = arange(1e6)
y = func(x)
assert_array_equal(x, y)
def test_rational_expr(self):
func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b))
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = func(a, b)
assert_array_almost_equal(x, y)
def test_reductions(self):
# Check that they compile OK.
assert_equal(disassemble(
NumExpr("sum(x**2+2, axis=None)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'sum_ddn', b'r0', b't3', None)])
assert_equal(disassemble(
NumExpr("sum(x**2+2, axis=1)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'sum_ddn', b'r0', b't3', 1)])
assert_equal(disassemble(
NumExpr("prod(x**2+2, axis=2)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'prod_ddn', b'r0', b't3', 2)])
# Check that full reductions work.
x = zeros(1e5) + .01 # checks issue #41
assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None))
assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0))
assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0))
x = arange(10.0)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
x = arange(100.0)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
x = linspace(0.1, 1.0, 2000)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
# Check that reductions along an axis work
y = arange(9.0).reshape(3, 3)
assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1))
assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0))
assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None))
assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1))
assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0))
assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None))
# Check integers
x = arange(10.)
x = x.astype(int)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
# Check longs
x = x.astype(long)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
# Check complex
x = x + .1j
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
def test_in_place(self):
x = arange(10000.).reshape(1000, 10)
evaluate("x + 3", out=x)
assert_equal(x, arange(10000.).reshape(1000, 10) + 3)
y = arange(10)
evaluate("(x - 3) * y + (x - 3)", out=x)
assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1))
def test_axis(self):
y = arange(9.0).reshape(3, 3)
try:
evaluate("sum(y, axis=2)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
evaluate("sum(y, axis=-3)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
# Negative axis are not supported
evaluate("sum(y, axis=-1)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
def test_r0_reuse(self):
assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])),
[(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'),
(b'add_ddd', b'r0', b'r0', b'c2[2.0]')])
def test_str_contains_basic0(self):
res = evaluate('contains(b"abc", b"ab")')
assert_equal(res, True)
def test_str_contains_basic1(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(haystack, b"ab")')
assert_equal(res, [True, False, False, False, False])
def test_str_contains_basic2(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(b"abcd", haystack)')
assert_equal(res, [True, False, False, False, False])
def test_str_contains_basic3(self):
haystacks = array(
[b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc'])
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab'])
res = evaluate('contains(haystacks, needles)')
assert_equal(res, [True, True, False, False, False, True])
def test_str_contains_basic4(self):
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc',
b'abc '])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, False, False, False, False, True, True])
def test_str_contains_basic5(self):
needles = array(
[b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h'])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, True, True, False, True])
# Compare operation of Python 'in' operator with 'contains' using a
# product of two lists of strings.
def test_str_contains_listproduct(self):
from itertools import product
small = [
'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting',
' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a',
'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it',
' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ',
'it mut', 'terin', 'g to ', 'its', 'elf ', "'The",
' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws',
'! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g",
'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets',
' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d',
'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice',
' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ',
'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai',
'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ',
'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing',
' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh',
'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ',
'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in',
' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith',
' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle',
' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.']
big = [
'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro',
'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin',
'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ',
'it had', ' los', 't ', 'so', 'mething', '; and', ' she h',
'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ",
'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de',
'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'",
'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ',
'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha',
've dro', 'pped t', 'hem', ', I ', 'won', "der?' A",
'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i',
't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p',
'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ',
'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou',
't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere',
' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ',
'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ',
'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi',
'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,',
' had va', 'ni', 'shed co', 'mpletel', 'y.']
p = list(product(small, big))
python_in = [x[0] in x[1] for x in p]
a = [x[0].encode() for x in p]
b = [x[1].encode() for x in p]
res = [bool(x) for x in evaluate('contains(b, a)')]
assert_equal(res, python_in)
def test_str_contains_withemptystr1(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(b"abcd", withemptystr)')
assert_equal(res, [True, False, True])
def test_str_contains_withemptystr2(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(withemptystr, b"")')
assert_equal(res, [True, True, True])
class test_numexpr2(test_numexpr):
"""Testing with 2 threads"""
nthreads = 2
class test_evaluate(TestCase):
def test_simple(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c")
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
x = arange(100.0)
y = evaluate("x")
assert_array_equal(x, y)
def test_simple_expr(self):
x = arange(1e6)
y = evaluate("x")
assert_array_equal(x, y)
# Test for issue #37
if sys.version_info[0] < 3:
# In python 3 '/' performs true division, not integer division.
# Integer division '//' is still not supported by numexpr
def test_zero_div(self):
x = arange(100, dtype='i4')
y = evaluate("1/x")
x2 = zeros(100, dtype='i4')
x2[1] = 1
assert_array_equal(x2, y)
# Test for issue #22
def test_true_div(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x/2"), x / 2)
assert_array_equal(evaluate("x/2", truediv=False), x / 2)
assert_array_equal(evaluate("x/2", truediv='auto'), x / 2)
assert_array_equal(evaluate("x/2", truediv=True), x / 2.0)
def test_left_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x<<2"), x << 2)
def test_right_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x>>2"), x >> 2)
# PyTables uses __nonzero__ among ExpressionNode objects internally
# so this should be commented out for the moment. See #24.
def test_boolean_operator(self):
x = arange(10, dtype='i4')
try:
evaluate("(x > 1) and (x < 9)")
except TypeError:
pass
else:
raise ValueError("should raise exception!")
def test_rational_expr(self):
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = evaluate("(a + 2*b) / (1 + a + 4*b*b)")
assert_array_almost_equal(x, y)
def test_complex_expr(self):
def complex(a, b):
c = zeros(a.shape, dtype=complex_)
c.real = a
c.imag = b
return c
a = arange(1e4)
b = arange(1e4) ** 1e-5
z = a + 1j * b
x = z.imag
x = sin(complex(a, b)).real + z.imag
y = evaluate("sin(complex(a, b)).real + z.imag")
assert_array_almost_equal(x, y)
def test_complex_strides(self):
a = arange(100).reshape(10, 10)[::2]
b = arange(50).reshape(5, 10)
assert_array_equal(evaluate("a+b"), a + b)
c = empty([10], dtype=[('c1', int32), ('c2', uint16)])
c['c1'] = arange(10)
c['c2'].fill(0xaaaa)
c1 = c['c1']
a0 = a[0]
assert_array_equal(evaluate("c1"), c1)
assert_array_equal(evaluate("a0+c1"), a0 + c1)
def test_broadcasting(self):
a = arange(100).reshape(10, 10)[::2]
c = arange(10)
d = arange(5).reshape(5, 1)
assert_array_equal(evaluate("a+c"), a + c)
assert_array_equal(evaluate("a+d"), a + d)
expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)])
assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c)
def test_all_scalar(self):
a = 3.
b = 4.
assert_allclose(evaluate("a+b"), a + b)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_equal(expr(a, b), 2 * a + 3 * b)
def test_run(self):
a = arange(100).reshape(10, 10)[::2]
b = arange(10)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_array_equal(expr(a, b), expr.run(a, b))
def test_illegal_value(self):
a = arange(3)
try:
evaluate("a < [0, 0, 0]")
except TypeError:
pass
else:
self.fail()
if 'sparc' not in platform.machine():
# Execution order set here so as to not use too many threads
# during the rest of the execution. See #33 for details.
def test_changing_nthreads_00_inc(self):
a = linspace(-1, 1, 1e6)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(1, 7):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
def test_changing_nthreads_01_dec(self):
a = linspace(-1, 1, 1e6)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(6, 1, -1):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
tests = [
('MISC', ['b*c+d*e',
'2*a+3*b',
'-a',
'sinh(a)',
'2*a + (cos(3)+5)*sinh(cos(b))',
'2*a + arctan2(a, b)',
'arcsin(0.5)',
'where(a != 0.0, 2, a)',
'where(a > 10, b < a, b > a)',
'where((a-10).real != 0.0, a, 2)',
'0.25 * (a < 5) + 0.33 * (a >= 5)',
'cos(1+1)',
'1+1',
'1',
'cos(a2)',
])]
optests = []
for op in list('+-*/%') + ['**']:
optests.append("(a+1) %s (b+3)" % op)
optests.append("3 %s (b+3)" % op)
optests.append("(a+1) %s 4" % op)
optests.append("2 %s (b+3)" % op)
optests.append("(a+1) %s 2" % op)
optests.append("(a+1) %s -1" % op)
optests.append("(a+1) %s 0.5" % op)
# Check divisions and modulus by zero (see ticket #107)
optests.append("(a+1) %s 0" % op)
tests.append(('OPERATIONS', optests))
cmptests = []
for op in ['<', '<=', '==', '>=', '>', '!=']:
cmptests.append("a/2+5 %s b" % op)
cmptests.append("a/2+5 %s 7" % op)
cmptests.append("7 %s b" % op)
cmptests.append("7.0 %s 5" % op)
tests.append(('COMPARISONS', cmptests))
func1tests = []
for func in ['copy', 'ones_like', 'sqrt',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj']:
func1tests.append("a + %s(b+c)" % func)
tests.append(('1_ARG_FUNCS', func1tests))
func2tests = []
for func in ['arctan2', 'fmod']:
func2tests.append("a + %s(b+c, d+1)" % func)
func2tests.append("a + %s(b+c, 1)" % func)
func2tests.append("a + %s(1, d+1)" % func)
tests.append(('2_ARG_FUNCS', func2tests))
powtests = []
# n = -1, 0.5, 2, 4 already handled in section "OPERATIONS"
for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3):
powtests.append("(a+1)**%s" % n)
tests.append(('POW_TESTS', powtests))
def equal(a, b, exact):
if array_equal(a, b):
return True
if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']:
nnans = isnan(a).sum()
if nnans > 0:
# For results containing NaNs, just check that the number
# of NaNs is the same in both arrays. This check could be
# made more exhaustive, but checking element by element in
# python space is very expensive in general.
return nnans == isnan(b).sum()
ninfs = isinf(a).sum()
if ninfs > 0:
# Ditto for Inf's
return ninfs == isinf(b).sum()
if exact:
return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0)
else:
if hasattr(a, 'dtype') and a.dtype == 'f4':
atol = 1e-5 # Relax precision for special opcodes, like fmod
else:
atol = 1e-8
return (shape(a) == shape(b) and
allclose(ravel(a), ravel(b), atol=atol))
class Skip(Exception): pass
def test_expressions():
test_no = [0]
def make_test_method(a, a2, b, c, d, e, x, expr,
test_scalar, dtype, optimization, exact, section):
this_locals = locals()
def method():
# We don't want to listen at RuntimeWarnings like
# "overflows" or "divide by zero" in plain eval().
warnings.simplefilter("ignore")
npval = eval(expr, globals(), this_locals)
warnings.simplefilter("always")
npval = eval(expr, globals(), this_locals)
try:
neval = evaluate(expr, local_dict=this_locals,
optimization=optimization)
assert equal(npval, neval, exact), """%r
(test_scalar=%r, dtype=%r, optimization=%r, exact=%r,
npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__,
optimization, exact,
npval, type(npval), shape(npval),
neval, type(neval), shape(neval))
except AssertionError:
raise
except NotImplementedError:
print('%r not implemented for %s (scalar=%d, opt=%s)'
% (expr, dtype.__name__, test_scalar, optimization))
except:
print('numexpr error for expression %r' % (expr,))
raise
method.description = ('test_expressions(%s, test_scalar=%r, '
'dtype=%r, optimization=%r, exact=%r)') \
% (expr, test_scalar, dtype.__name__, optimization, exact)
test_no[0] += 1
method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar,
dtype.__name__,
optimization.encode('ascii'),
section.encode('ascii'),
test_no[0])
return method
x = None
for test_scalar in (0, 1, 2):
for dtype in (int, long, numpy.float32, double, complex):
array_size = 100
a = arange(2 * array_size, dtype=dtype)[::2]
a2 = zeros([array_size, array_size], dtype=dtype)
b = arange(array_size, dtype=dtype) / array_size
c = arange(array_size, dtype=dtype)
d = arange(array_size, dtype=dtype)
e = arange(array_size, dtype=dtype)
if dtype == complex:
a = a.real
for x in [a2, b, c, d, e]:
x += 1j
x *= 1 + 1j
if test_scalar == 1:
a = a[array_size // 2]
if test_scalar == 2:
b = b[array_size // 2]
for optimization, exact in [
('none', False), ('moderate', False), ('aggressive', False)]:
for section_name, section_tests in tests:
for expr in section_tests:
if (dtype == complex and
('<' in expr or '>' in expr or '%' in expr
or "arctan2" in expr or "fmod" in expr)):
# skip complex comparisons or functions not
# defined in complex domain.
continue
if (dtype in (int, long) and test_scalar and
expr == '(a+1) ** -1'):
continue
m = make_test_method(a, a2, b, c, d, e, x,
expr, test_scalar, dtype,
optimization, exact,
section_name)
yield m
class test_int64(TestCase):
def test_neg(self):
a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64)
res = evaluate('-a')
assert_array_equal(res, [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63])
self.assertEqual(res.dtype.name, 'int64')
class test_int32_int64(TestCase):
if sys.version_info[0] < 3:
# no long literals in python 3
def test_small_long(self):
# Small longs should not be downgraded to ints.
res = evaluate('42L')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_small_int(self):
# Small ints (32-bit ones) should not be promoted to longs.
res = evaluate('2')
assert_array_equal(res, 2)
self.assertEqual(res.dtype.name, 'int32')
def test_big_int(self):
# Big ints should be promoted to longs.
res = evaluate('2**40')
assert_array_equal(res, 2 ** 40)
self.assertEqual(res.dtype.name, 'int64')
def test_long_constant_promotion(self):
int32array = arange(100, dtype='int32')
itwo = numpy.int32(2)
ltwo = numpy.int64(2)
res = int32array * 2
res32 = evaluate('int32array * itwo')
res64 = evaluate('int32array * ltwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
int32array = arange(100, dtype='int32')
int64array = arange(100, dtype='int64')
respy = int32array * int64array
resnx = evaluate('int32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_uint32_int64(TestCase):
def test_small_uint32(self):
# Small uint32 should not be downgraded to ints.
a = numpy.uint32(42)
res = evaluate('a')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_uint32_constant_promotion(self):
int32array = arange(100, dtype='int32')
stwo = numpy.int32(2)
utwo = numpy.uint32(2)
res = int32array * utwo
res32 = evaluate('int32array * stwo')
res64 = evaluate('int32array * utwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
uint32array = arange(100, dtype='uint32')
int64array = arange(100, dtype='int64')
respy = uint32array * int64array
resnx = evaluate('uint32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_strings(TestCase):
BLOCK_SIZE1 = 128
BLOCK_SIZE2 = 8
str_list1 = [b'foo', b'bar', b'', b' ']
str_list2 = [b'foo', b'', b'x', b' ']
str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1)
str_array1 = array(str_list1 * str_nloops)
str_array2 = array(str_list2 * str_nloops)
str_constant = b'doodoo'
def test_null_chars(self):
str_list = [
b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0',
b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0']
for s in str_list:
r = evaluate('s')
self.assertEqual(s, r.tostring()) # check *all* stored data
def test_compare_copy(self):
sarr = self.str_array1
expr = 'sarr'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_array(self):
sarr1 = self.str_array1
sarr2 = self.str_array2
expr = 'sarr1 >= sarr2'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_variable(self):
sarr = self.str_array1
svar = self.str_constant
expr = 'sarr >= svar'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_constant(self):
sarr = self.str_array1
expr = 'sarr >= %r' % self.str_constant
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_add_string_array(self):
sarr1 = self.str_array1
sarr2 = self.str_array2
expr = 'sarr1 + sarr2'
self.assert_missing_op('add_sss', expr, locals())
def test_add_numeric_array(self):
sarr = self.str_array1
narr = arange(len(sarr), dtype='int32')
expr = 'sarr >= narr'
self.assert_missing_op('ge_bsi', expr, locals())
def assert_missing_op(self, op, expr, local_dict):
msg = "expected NotImplementedError regarding '%s'" % op
try:
evaluate(expr, local_dict)
except NotImplementedError, nie:
if "'%s'" % op not in nie.args[0]:
self.fail(msg)
else:
self.fail(msg)
def test_compare_prefix(self):
# Check comparing two strings where one is a prefix of the
# other.
for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'),
(b'foo\0a', b'foo\0bar')]:
self.assertTrue(evaluate('s1 < s2'))
self.assertTrue(evaluate('s1 <= s2'))
self.assertTrue(evaluate('~(s1 == s2)'))
self.assertTrue(evaluate('~(s1 >= s2)'))
self.assertTrue(evaluate('~(s1 > s2)'))
# Check for NumPy array-style semantics in string equality.
s1, s2 = b'foo', b'foo\0\0'
self.assertTrue(evaluate('s1 == s2'))
# Case for testing selections in fields which are aligned but whose
# data length is not an exact multiple of the length of the record.
# The following test exposes the problem only on 32-bit machines,
# because on 64-bit machines 'c2' is unaligned. However, this should
# check most platforms where, while not unaligned, 'len(datatype) >
# boundary_alignment' is fulfilled.
class test_irregular_stride(TestCase):
def test_select(self):
f0 = arange(10, dtype=int32)
f1 = arange(10, dtype=float64)
irregular = rec.fromarrays([f0, f1])
f0 = irregular['f0']
f1 = irregular['f1']
i0 = evaluate('f0 < 5')
i1 = evaluate('f1 < 5')
assert_array_equal(f0[i0], arange(5, dtype=int32))
assert_array_equal(f1[i1], arange(5, dtype=float64))
# Cases for testing arrays with dimensions that can be zero.
class test_zerodim(TestCase):
def test_zerodim1d(self):
a0 = array([], dtype=int32)
a1 = array([], dtype=float64)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
def test_zerodim3d(self):
a0 = array([], dtype=int32).reshape(0, 2, 4)
a1 = array([], dtype=float64).reshape(0, 2, 4)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
# Case test for threads
class test_threading(TestCase):
def test_thread(self):
import threading
class ThreadTest(threading.Thread):
def run(self):
a = arange(3)
assert_array_equal(evaluate('a**3'), array([0, 1, 8]))
test = ThreadTest()
test.start()
# The worker function for the subprocess (needs to be here because Windows
# has problems pickling nested functions with the multiprocess module :-/)
def _worker(qout=None):
ra = numpy.arange(1e3)
rows = evaluate('ra > 0')
#print "Succeeded in evaluation!\n"
if qout is not None:
qout.put("Done")
# Case test for subprocesses (via multiprocessing module)
class test_subprocess(TestCase):
def test_multiprocess(self):
try:
import multiprocessing as mp
except ImportError:
return
# Check for two threads at least
numexpr.set_num_threads(2)
#print "**** Running from main process:"
_worker()
#print "**** Running from subprocess:"
qout = mp.Queue()
ps = mp.Process(target=_worker, args=(qout,))
ps.daemon = True
ps.start()
result = qout.get()
#print result
def print_versions():
"""Print the versions of software that numexpr relies on."""
if numpy.__version__ < minimum_numpy_version:
print("*Warning*: NumPy version is lower than recommended: %s < %s" % \
(numpy.__version__, minimum_numpy_version))
print('-=' * 38)
print("Numexpr version: %s" % numexpr.__version__)
print("NumPy version: %s" % numpy.__version__)
print('Python version: %s' % sys.version)
if os.name == 'posix':
(sysname, nodename, release, version, machine) = os.uname()
print('Platform: %s-%s' % (sys.platform, machine))
print("AMD/Intel CPU? %s" % numexpr.is_cpu_amd_intel)
print("VML available? %s" % use_vml)
if use_vml:
print("VML/MKL version: %s" % numexpr.get_vml_version())
print("Number of threads used by default: %d "
"(out of %d detected cores)" % (numexpr.nthreads, numexpr.ncores))
print('-=' * 38)
def test():
"""
Run all the tests in the test suite.
"""
print_versions()
return unittest.TextTestRunner().run(suite())
test.__test__ = False
def suite():
import unittest
import platform as pl
theSuite = unittest.TestSuite()
niter = 1
class TestExpressions(TestCase):
pass
def add_method(func):
def method(self):
return func()
setattr(TestExpressions, func.__name__,
method.__get__(None, TestExpressions))
for func in test_expressions():
add_method(func)
for n in range(niter):
theSuite.addTest(unittest.makeSuite(test_numexpr))
if 'sparc' not in platform.machine():
theSuite.addTest(unittest.makeSuite(test_numexpr2))
theSuite.addTest(unittest.makeSuite(test_evaluate))
theSuite.addTest(unittest.makeSuite(TestExpressions))
theSuite.addTest(unittest.makeSuite(test_int32_int64))
theSuite.addTest(unittest.makeSuite(test_uint32_int64))
theSuite.addTest(unittest.makeSuite(test_strings))
theSuite.addTest(
unittest.makeSuite(test_irregular_stride))
theSuite.addTest(unittest.makeSuite(test_zerodim))
# multiprocessing module is not supported on Hurd/kFreeBSD
if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')):
theSuite.addTest(unittest.makeSuite(test_subprocess))
# I need to put this test after test_subprocess because
# if not, the test suite locks up immediately before test_subprocess.
# This only happens on Windows, so I suspect a subtle bad
# interaction between threads and subprocess :-/
theSuite.addTest(unittest.makeSuite(test_threading))
return theSuite
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
# suite = suite()
# unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# -*- coding: utf-8 -*-
# _____________________________________________________________________________
#
# Copyright (c) 2012 Berlin Institute of Technology
# All rights reserved.
#
# Developed by: Neural Information Processing Group (NI)
# School for Electrical Engineering and Computer Science
# Berlin Institute of Technology
# MAR 5-6, Marchstr. 23, 10587 Berlin, Germany
# http://www.ni.tu-berlin.de/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# * Neither the names of Neural Information Processing Group (NI), Berlin
# Institute of Technology, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
#_____________________________________________________________________________
#
# Acknowledgements:
# Philipp Meier <[email protected]>
#_____________________________________________________________________________
#
##---IMPORTS
try:
import unittest2 as ut
except ImportError:
import unittest as ut
from numpy.testing import assert_equal, assert_almost_equal
import scipy as sp
from botmpy.common.mcfilter.mcfilter_cy import (
_mcfilter_cy32, _mcfilter_cy64, _mcfilter_hist_cy32, _mcfilter_hist_cy64)
from botmpy.common.mcfilter.mcfilter_py import (
_mcfilter_py, _mcfilter_hist_py)
##---TESTS
class TestMcFilter(ut.TestCase):
def testHistoryCy32(self):
"""test history item"""
tf = 3
nc = 2
data = sp.randn(100, nc).astype(sp.float32)
filt = sp.ones((tf, nc), dtype=sp.float32)
hist = sp.zeros((tf - 1, nc), dtype=sp.float32)
fout, hist = _mcfilter_hist_cy32(data, filt, hist)
assert_equal(hist, data[-(tf - 1):])
def testHistoryCy64(self):
"""test history item"""
tf = 3
nc = 2
data = sp.randn(100, nc).astype(sp.float64)
filt = sp.ones((tf, nc), dtype=sp.float64)
hist = sp.zeros((tf - 1, nc), dtype=sp.float64)
fout, hist = _mcfilter_hist_cy64(data, filt, hist)
assert_equal(hist, data[-(tf - 1):])
def testPyVsCyOnesCy32(self):
"""test python and cython, float"""
tf = 3
nc = 2
data = sp.ones((20, nc), dtype=sp.float32)
filt = sp.ones((tf, nc), dtype=sp.float32)
hist = sp.ones((tf - 1, nc), dtype=sp.float32)
fopy, hopy = _mcfilter_hist_py(data, filt, hist)
focy, hocy = _mcfilter_hist_cy32(data, filt, hist)
assert_almost_equal(fopy, focy)
def testPyVsCyOnesCy64(self):
"""test python and cython, double"""
tf = 3
nc = 2
data = sp.ones((20, nc), dtype=sp.float64)
filt = sp.ones((tf, nc), dtype=sp.float64)
hist = sp.ones((tf - 1, nc), dtype=sp.float64)
fopy, hopy = _mcfilter_hist_py(data, filt, hist)
focy, hocy = _mcfilter_hist_cy64(data, filt, hist)
assert_almost_equal(fopy, focy)
def testPyVsCyRandnCy32(self):
"""test python and cython"""
tf = 3
nc = 2
data = sp.randn(20, nc).astype(sp.float32)
filt = sp.ones((tf, nc), dtype=sp.float32)
hist_py = sp.ones((tf - 1, nc), dtype=sp.float32)
hist_cy = sp.ones((tf - 1, nc), dtype=sp.float32)
fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
focy, hocy = _mcfilter_hist_cy32(data, filt, hist_cy)
assert_almost_equal(fopy, focy, decimal=5)
def testPyVsCyRandnCy64(self):
"""test python and cython"""
tf = 3
nc = 2
data = sp.randn(20, nc).astype(sp.float64)
filt = sp.ones((tf, nc), dtype=sp.float64)
hist_py = sp.ones((tf - 1, nc), dtype=sp.float64)
hist_cy = sp.ones((tf - 1, nc), dtype=sp.float64)
fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
focy, hocy = _mcfilter_hist_cy64(data, filt, hist_cy)
assert_almost_equal(fopy, focy)
def testStepsCy32(self):
tf = 3
nc = 2
data = sp.vstack([sp.concatenate(
[sp.arange(1, 4)] * 5)] * 2).T.astype(sp.float32)
filt = sp.ones((tf, nc), dtype=sp.float32) / float(tf)
hist_py = sp.ones((tf - 1, nc), dtype=sp.float32)
hist_cy = sp.ones((tf - 1, nc), dtype=sp.float32)
fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
focy, hocy = _mcfilter_hist_cy32(data, filt, hist_cy)
assert_almost_equal(fopy, focy)
def testStepsCy64(self):
tf = 3
nc = 2
data = sp.vstack([sp.concatenate(
[sp.arange(1, 4)] * 5)] * 2).T.astype(sp.float64)
filt = sp.ones((tf, nc), dtype=sp.float64) / float(tf)
hist_py = sp.ones((tf - 1, nc), dtype=sp.float64)
hist_cy = sp.ones((tf - 1, nc), dtype=sp.float64)
fopy, hopy = _mcfilter_hist_py(data, filt, hist_py)
focy, hocy = _mcfilter_hist_cy64(data, filt, hist_cy)
assert_almost_equal(fopy, focy)
def testDataConcatenationCy32(self):
data = sp.zeros((100, 1), dtype=sp.float32)
data[sp.arange(0, 100, 10)] = 1.0
filt = sp.zeros((5, 1), dtype=sp.float32)
filt[2] = 1.0
hist = sp.zeros((4, 1), dtype=sp.float32)
fout = _mcfilter_hist_cy32(data, filt, hist)[0]
cut = int(sp.floor(5.0 / 2))
assert_equal(data[:-cut], sp.array([fout[cut:]]).T)
def testDataConcatenationCy64(self):
data = sp.zeros((100, 1), dtype=sp.float64)
data[sp.arange(0, 100, 10)] = 1.0
filt = sp.zeros((5, 1), dtype=sp.float64)
filt[2] = 1.0
hist = sp.zeros((4, 1), dtype=sp.float64)
fout = _mcfilter_hist_cy64(data, filt, hist)[0]
cut = int(sp.floor(5.0 / 2))
assert_equal(data[:-cut], sp.array([fout[cut:]]).T)
def testMcfilterRecoveryPy(self):
data = sp.zeros((100, 1), dtype=sp.float64)
data[sp.arange(0, 100, 10)] = 1.0
filt = sp.zeros((5, 1), dtype=sp.float64)
filt[2] = 1.0
fout = _mcfilter_py(data, filt)
self.assertTupleEqual(data.shape, (fout.shape[0], 1))
assert_equal(data, sp.array([fout]).T)
"""
def mcfilter_hist_py_test(inp=None, plot=False):
if inp is None:
# test setup
TF = 10
NC = 2
xi = sp.vstack([sp.sin(sp.linspace(0, 2 * sp.pi,
TF))] * NC).T * 5
LEN = 2000
noise = sp.randn(LEN, NC)
# build signal
signal = sp.zeros_like(noise)
NPOS = 3
POS = [int(i * LEN / (NPOS + 1)) for i in xrange(1, NPOS + 1)]
for i in xrange(NPOS):
signal[POS[i]:POS[i] + TF] += xi
x = signal + noise
else:
x, xi = inp
TF, NC = xi.shape
ns = x.shape[0]
step = 200
chunks = [x[i * step:(i + 1) * step] for i in xrange(ns / step)]
fouts = []
h = None
for chunk in chunks:
r, h = _mcfilter_hist_py(chunk, xi, h)
fouts.append(r)
if plot:
from spikeplot import mcdata
other = sp.atleast_2d(sp.concatenate(fouts)).T
other = sp.vstack([other, sp.zeros((int(TF / 2 - 1), 1))])[
int(TF / 2 - 1):, :]
mcdata(x, other=other)
def mcfilter_hist_c_test(inp=None, plot=False):
if _mcfilter_hist_cy is None:
print 'No clib loaded! returning'
return
if inp is None:
# test setup
TF = 10
NC = 2
xi = sp.vstack([sp.sin(sp.linspace(0, 2 * sp.pi,
TF))] * NC).T * 5
LEN = 2000
noise = sp.randn(LEN, NC)
# build signal
signal = sp.zeros_like(noise)
NPOS = 3
POS = [int(i * LEN / (NPOS + 1)) for i in xrange(1, NPOS + 1)]
for i in xrange(NPOS):
signal[POS[i]:POS[i] + TF] += xi
x = signal + noise
else:
x, xi = inp
ns = x.shape[0]
step = 200
chunks = [x[i * step:(i + 1) * step] for i in xrange(ns / step)]
fouts = []
h = sp.zeros((xi.shape[0], xi.shape[1]), dtype=sp.float32)
# r = sp.array([0] * ns, dtype=sp.float32)
for chunk in chunks:
r, h = _mcfilter_hist_cy(chunk, sp.ascontiguousarray(xi), h)
fouts.append(r)
if plot:
from spikeplot import mcdata
mcdata(x, other=sp.atleast_2d(sp.concatenate(fouts)).T)
def gen_data(ns=200000, nc=4, tf=65):
# test setup
xi = sp.vstack([sp.sin(sp.linspace(0, 2 * sp.pi, tf))] * nc).T * 7
signal = sp.randn(ns, nc).astype(sp.float32)
# build signal
pos = [50 + i for i in xrange(1, ns, 4 * tf - 50)]
if pos[-1] + tf > ns:
pos.pop(-1)
for i in xrange(len(pos)):
signal[pos[i]:pos[i] + tf, :] += xi
return signal, tf, nc, xi.astype(sp.float32)
if __name__ == '__main__':
# generate some data
sig, tf, nc, xi = gen_data(64000)
# python conventional test
mcfilter_hist_py_test((sig, xi), plot=True)
mcfilter_hist_c_test((sig, xi), plot=True)
# import cProfile
# cProfile.run('mcfilter_hist_py_test((sig, xi), plot=False)')
# cProfile.run('mcfilter_hist_c_test((sig, xi), plot=False)')
"""
if __name__ == '__main__':
ut.main()
|
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.reqctx
~~~~~~~~~~~~~~~~~~~~~~
Tests the request context.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
try:
from greenlet import greenlet
except ImportError:
greenlet = None
from flask.testsuite import FlaskTestCase
class RequestContextTestCase(FlaskTestCase):
def test_teardown_on_pop(self):
buffer = []
app = flask.Flask(__name__)
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
self.assert_equal(buffer, [])
ctx.pop()
self.assert_equal(buffer, [None])
def test_teardown_with_previous_exception(self):
buffer = []
app = flask.Flask(__name__)
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
try:
raise Exception('dummy')
except Exception:
pass
with app.test_request_context():
self.assert_equal(buffer, [])
self.assert_equal(buffer, [None])
def test_proper_test_request_context(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return None
@app.route('/', subdomain='foo')
def sub():
return None
with app.test_request_context('/'):
self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
with app.test_request_context('/'):
self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
try:
with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
pass
except Exception as e:
self.assert_true(isinstance(e, ValueError))
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:5000') does not match the " + \
"server name from the WSGI environment ('localhost')")
try:
app.config.update(SERVER_NAME='localhost')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
try:
app.config.update(SERVER_NAME='localhost:80')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
def test_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
@app.route('/meh')
def meh():
return flask.request.url
with app.test_request_context('/?name=World'):
self.assert_equal(index(), 'Hello World!')
with app.test_request_context('/meh'):
self.assert_equal(meh(), 'http://localhost/meh')
self.assert_true(flask._request_ctx_stack.top is None)
def test_context_test(self):
app = flask.Flask(__name__)
self.assert_false(flask.request)
self.assert_false(flask.has_request_context())
ctx = app.test_request_context()
ctx.push()
try:
self.assert_true(flask.request)
self.assert_true(flask.has_request_context())
finally:
ctx.pop()
def test_manual_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
ctx = app.test_request_context('/?name=World')
ctx.push()
self.assert_equal(index(), 'Hello World!')
ctx.pop()
try:
index()
except RuntimeError:
pass
else:
self.assert_true(0, 'expected runtime error')
def test_greenlet_context_copying(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
def g():
self.assert_false(flask.request)
self.assert_false(flask.current_app)
with reqctx:
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
self.assert_false(flask.request)
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
def test_greenlet_context_copying_api(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
@flask.copy_current_request_context
def g():
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
# Disable test if we don't have greenlets available
if greenlet is None:
test_greenlet_context_copying = None
test_greenlet_context_copying_api = None
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RequestContextTestCase))
return suite
|
|
import copy
import datetime
import pytest
from anchore_engine.common.models.policy_engine import (
Artifact,
FixedArtifact,
Match,
NVDReference,
Vulnerability,
VulnerabilityMatch,
)
from anchore_engine.services.policy_engine.engine.vulns.dedup import (
FeedGroupRank,
ImageVulnerabilitiesDeduplicator,
RankedVulnerabilityMatch,
VulnerabilityIdentity,
transfer_vulnerability_timestamps,
)
class TestFeedGroupRank:
@pytest.mark.parametrize(
"test_group, expected_rank",
[
pytest.param("nvdv2:cves", 1, id="nvdv2"),
pytest.param("nvd:cves", 2, id="nvd"),
pytest.param("github:java", 10, id="github"),
pytest.param("alpine:3.9", 100, id="os-distro"),
pytest.param("foobar", 100, id="random"),
],
)
def test_get(self, test_group, expected_rank):
assert FeedGroupRank().get(test_group) == expected_rank
class TestVulnerabilityIdentity:
@pytest.mark.parametrize(
"test_input",
[
pytest.param(
[NVDReference(vulnerability_id="CVE-abc")],
id="single-nvd",
),
pytest.param(
[
NVDReference(vulnerability_id="CVE-abc"),
NVDReference(vulnerability_id="CVE-def"),
NVDReference(vulnerability_id="CVE-ghi"),
],
id="multiple-nvd",
),
],
)
def test_from_with_nvd(self, test_input):
match = VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
)
match.nvd = test_input
identity_objects = VulnerabilityIdentity.from_match(match)
assert identity_objects
assert isinstance(identity_objects, list) and len(identity_objects) == len(
test_input
)
for identity_object, input_nvd in zip(identity_objects, test_input):
assert identity_object.vuln_id == input_nvd.vulnerability_id
assert identity_object.pkg_name == match.artifact.name
assert identity_object.pkg_type == match.artifact.pkg_type
assert identity_object.pkg_version == match.artifact.version
assert identity_object.pkg_path == match.artifact.location
def test_from_without_nvd(self):
match = VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
nvd=[],
)
identity_objects = VulnerabilityIdentity.from_match(match)
assert identity_objects
assert isinstance(identity_objects, list) and len(identity_objects) == 1
identity_object = identity_objects[0]
assert identity_object.vuln_id == match.vulnerability.vulnerability_id
assert identity_object.pkg_name == match.artifact.name
assert identity_object.pkg_type == match.artifact.pkg_type
assert identity_object.pkg_version == match.artifact.version
assert identity_object.pkg_path == match.artifact.location
@pytest.mark.parametrize(
"lhs, rhs, expected",
[
pytest.param(
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="hedgehog",
feed_group="hedgy:thorny",
vulnerability_id="foo",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
True,
id="equal-different-namespaces",
),
pytest.param(
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[
NVDReference(vulnerability_id="CVE-abc"),
NVDReference(vulnerability_id="CVE-def"),
NVDReference(vulnerability_id="CVE-ghi"),
],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="hedgehog",
feed_group="hedgy:thorny",
vulnerability_id="foo",
),
nvd=[
NVDReference(vulnerability_id="CVE-abc"),
NVDReference(vulnerability_id="CVE-def"),
NVDReference(vulnerability_id="CVE-ghi"),
],
),
True,
id="equal-multiple-cvss",
),
pytest.param(
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="hedgehog",
feed_group="hedgy:thorny",
vulnerability_id="foo",
),
nvd=[NVDReference(vulnerability_id="CVE-def")],
),
False,
id="not-equal",
),
],
)
def test_equality_constant_artifact(self, lhs, rhs, expected):
artifact = Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
)
lhs.artifact = artifact
rhs.artifact = artifact
assert (
VulnerabilityIdentity.from_match(lhs)
== VulnerabilityIdentity.from_match(rhs)
) == expected
@pytest.mark.parametrize("count", [1, 2, 3, 4, 5])
def test_hash(self, count):
record = VulnerabilityIdentity(
vuln_id="meh",
pkg_name="blah",
pkg_version="1.2.3maven",
pkg_type="java",
pkg_path="blah",
)
test_input = [record for x in range(count)]
result = set(test_input)
assert result and len(result) == 1
class TestRankedVulnerabilityMatch:
def test_from(self):
match = VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
)
rank_strategy = FeedGroupRank()
ranked_match = RankedVulnerabilityMatch.from_match(match, rank_strategy)
assert ranked_match
assert ranked_match.vuln_id == match.vulnerability.vulnerability_id
assert ranked_match.vuln_namespace == match.vulnerability.feed_group
assert ranked_match.pkg_name == match.artifact.name
assert ranked_match.pkg_type == match.artifact.pkg_type
assert ranked_match.pkg_version == match.artifact.version
assert ranked_match.pkg_path == match.artifact.location
assert ranked_match.rank == rank_strategy.__default__
@pytest.mark.parametrize(
"lhs, rhs, expected",
[
pytest.param(
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="hedgehog",
feed_group="hedgy:thorny",
vulnerability_id="foo",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
False,
id="not-equal-different-ids",
),
pytest.param(
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[
NVDReference(vulnerability_id="CVE-abc"),
NVDReference(vulnerability_id="CVE-def"),
NVDReference(vulnerability_id="CVE-ghi"),
],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
True,
id="equal-different-cvss",
),
pytest.param(
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:chameleon",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="trusty",
feed_group="trusty:python",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
False,
id="not-equal-different-namespaces",
),
],
)
def test_equality_constant_artifact(self, lhs, rhs, expected):
artifact = Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
)
lhs.artifact = artifact
rhs.artifact = artifact
assert (
RankedVulnerabilityMatch.from_match(lhs, FeedGroupRank())
== RankedVulnerabilityMatch.from_match(rhs, FeedGroupRank())
) == expected
@pytest.mark.parametrize("count", [1, 2, 3, 4, 5])
def test_hash_empty_match(self, count):
record = RankedVulnerabilityMatch(
vuln_id="meh",
vuln_namespace="trusty:chameleon",
pkg_name="blah",
pkg_version="1.2.3maven",
pkg_type="java",
pkg_path="blah",
rank=100,
match_obj=VulnerabilityMatch(),
)
test_input = [record for x in range(count)]
result = set(test_input)
assert result and len(result) == 1
@pytest.mark.parametrize(
"test_input",
[
pytest.param(
[
VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="twisty",
feed_group="twisty:python",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
VulnerabilityMatch(
artifact=Artifact(
name="foo",
location="/usr/local/java/foo",
pkg_type="unknown",
version="1.2.3",
),
vulnerability=Vulnerability(
feed="tricky",
feed_group="tricky:chameleon",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-def")],
),
],
id="different-matches",
),
pytest.param(
[
VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="twisty",
feed_group="twisty:python",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-abc")],
),
]
* 3,
id="same-matches",
),
],
)
def test_hash(self, test_input):
vuln_rank_objects = [
RankedVulnerabilityMatch(
vuln_id="meh",
vuln_namespace="trusty:chameleon",
pkg_name="blah",
pkg_version="1.2.3maven",
pkg_type="java",
pkg_path="/usr/local/blah",
rank=100,
match_obj=item,
)
for item in test_input
]
result = set(vuln_rank_objects)
assert result and len(result) == 1
result = list(result)[0]
assert result.vuln_id == "meh"
assert result.vuln_namespace == "trusty:chameleon"
assert result.pkg_name == "blah"
assert result.pkg_type == "java"
assert result.pkg_path == "/usr/local/blah"
assert result.rank == 100
class TestImageVulnerabilitiesDeduplicator:
@pytest.mark.parametrize(
"test_input, expected_index",
[
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvdv2:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="ubuntu:20.04",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
],
1,
id="different-namespaces",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvdv2:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="github:java",
vulnerability_id="GHSA-foobar",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
],
1,
id="different-identifiers",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="github:java",
vulnerability_id="GHSA-foobar",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="ubuntu:20.04",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
],
1,
id="non-nvd-namespaces",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvdv2:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="ubuntu:20.04",
vulnerability_id="CVE-2019-12904",
),
nvd=[],
),
],
1,
id="no-nvd-refs",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvdv2:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12345")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvdv2:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="github:java",
vulnerability_id="GHSA-foobar",
),
nvd=[
NVDReference(vulnerability_id="CVE-2019-12904"),
NVDReference(vulnerability_id="CVE-2019-12345"),
],
),
],
2,
id="multiple-nvd-refs",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvdv2:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvd:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
],
1,
id="nvdv2-vs-nvd",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="github:java",
vulnerability_id="GHSA-foobar",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="nvd:cves",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
],
0,
id="ghsa-vs-nvd",
),
pytest.param(
[
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="github:java",
vulnerability_id="GHSA-foobar",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
VulnerabilityMatch(
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="custom-feed:custom",
vulnerability_id="CVE-2019-12904",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
),
],
1,
id="ghsa-vs-custom-feed",
),
],
)
def test_execute(self, test_input, expected_index):
artifact = Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
)
for item in test_input:
item.artifact = artifact
results = ImageVulnerabilitiesDeduplicator(FeedGroupRank()).execute(test_input)
assert len(results) == 1
actual = results[0].vulnerability
expected = test_input[expected_index]
assert actual.vulnerability_id == expected.vulnerability.vulnerability_id
assert actual.feed_group == expected.vulnerability.feed_group
@pytest.mark.parametrize("count", [1, 2, 3, 4, 5])
def test_execute_absolute_duplicates(self, count):
a = VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
nvd=[NVDReference(vulnerability_id="CVE-2019-12904")],
)
input_matches = [a for x in range(count)]
results = ImageVulnerabilitiesDeduplicator(FeedGroupRank()).execute(
input_matches
)
assert len(results) == 1
@pytest.mark.parametrize(
"test_input",
[pytest.param([], id="empty-list"), pytest.param(None, id="none")],
)
def test_execute_invalid_input(self, test_input):
assert (
ImageVulnerabilitiesDeduplicator(FeedGroupRank()).execute(test_input)
== list()
)
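# --- Illustrative sketch (not the anchore-engine implementation) -------------
# The tests above pin down the observable behaviour of
# ImageVulnerabilitiesDeduplicator: matches that refer to the same CVE (via
# their NVD references, or their own id when no references exist) collapse to
# the single match whose feed group carries the highest rank. The helpers
# below are a minimal, hypothetical re-statement of that behaviour; the rank
# values are placeholders, not the real FeedGroupRank weights.
def _toy_feed_group_rank(feed_group):
    prefix = feed_group.split(":")[0]
    return {"nvdv2": 1, "nvd": 2, "github": 10}.get(prefix, 100)


def _toy_deduplicate(matches):
    best = {}
    for match in matches:
        cve_ids = [ref.vulnerability_id for ref in (match.nvd or [])] or [
            match.vulnerability.vulnerability_id
        ]
        for cve_id in cve_ids:
            current = best.get(cve_id)
            if current is None or _toy_feed_group_rank(
                match.vulnerability.feed_group
            ) > _toy_feed_group_rank(current.vulnerability.feed_group):
                best[cve_id] = match
    # collapse entries that won under several CVE ids
    return list({id(m): m for m in best.values()}.values())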
class TestTimestampMerger:
@pytest.mark.parametrize(
"test_source, test_destination, expected",
[
pytest.param([], [], [], id="empty"),
pytest.param(None, None, [], id="none"),
pytest.param([], None, [], id="destination-none"),
pytest.param(None, [], [], id="source-none"),
],
)
def test_transfer_vulnerability_timestamps_invalid_input(
self, test_source, test_destination, expected
):
assert (
transfer_vulnerability_timestamps(
source=test_source, destination=test_destination
)
== expected
)
@pytest.mark.parametrize(
"test_source, test_destination",
[
pytest.param(
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(days=1),
id="source-behind-destination",
),
pytest.param(
datetime.datetime.utcnow() + datetime.timedelta(days=1),
datetime.datetime.utcnow(),
id="source-ahead-destination",
),
],
)
def test_transfer_vulnerability_timestamps_single(
self, test_source, test_destination
):
random = VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
fix=FixedArtifact(),
)
source = copy.deepcopy(random)
source.match = Match(detected_at=test_source)
destination = copy.deepcopy(random)
destination.match = Match(detected_at=test_destination)
results = transfer_vulnerability_timestamps(
source=[source], destination=[destination]
)
assert results and len(results) == 1
assert results[0].match.detected_at == test_source
def test_transfer_vulnerability_timestamps_multiple(self):
dest_ts = datetime.datetime.utcnow()
src_ts = datetime.datetime.utcnow() - datetime.timedelta(days=1)
destination = [
VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
match=Match(detected_at=dest_ts),
fix=FixedArtifact(),
),
VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="foo",
),
match=Match(detected_at=dest_ts),
fix=FixedArtifact(),
),
]
source = [
VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
match=Match(detected_at=src_ts),
fix=FixedArtifact(),
)
]
results = transfer_vulnerability_timestamps(
source=source, destination=destination
)
assert results and len(results) == 2
for result in results:
if (
result.vulnerability.vulnerability_id
== source[0].vulnerability.vulnerability_id
):
assert result.match.detected_at == src_ts
else:
assert result.match.detected_at == dest_ts
@pytest.mark.parametrize(
"test_source, test_destination, expected",
[
pytest.param(
FixedArtifact(
versions=[], observed_at=datetime.datetime.utcfromtimestamp(0)
),
FixedArtifact(
versions=[], observed_at=datetime.datetime.utcfromtimestamp(10)
),
datetime.datetime.utcfromtimestamp(10),
id="empty-versions",
),
pytest.param(
FixedArtifact(
versions=None, observed_at=datetime.datetime.utcfromtimestamp(0)
),
FixedArtifact(
versions=None, observed_at=datetime.datetime.utcfromtimestamp(10)
),
datetime.datetime.utcfromtimestamp(10),
id="none-versions",
),
pytest.param(
FixedArtifact(
versions=[], observed_at=datetime.datetime.utcfromtimestamp(0)
),
FixedArtifact(
versions=["foo"], observed_at=datetime.datetime.utcfromtimestamp(10)
),
datetime.datetime.utcfromtimestamp(10),
id="different-versions",
),
pytest.param(
FixedArtifact(
versions=["bar", "foo", "meh"],
observed_at=datetime.datetime.utcfromtimestamp(0),
),
FixedArtifact(
versions=["meh", "bar", "foo"],
observed_at=datetime.datetime.utcfromtimestamp(10),
),
datetime.datetime.utcfromtimestamp(0),
id="same-versions-ordered-differently",
),
],
)
def test_transfer_vulnerability_timestamps_fix_observed_at(
self, test_source, test_destination, expected
):
random = VulnerabilityMatch(
artifact=Artifact(
name="blah",
location="/usr/local/java/blah",
pkg_type="java",
version="1.2.3maven",
),
vulnerability=Vulnerability(
feed="vulnerabilities",
feed_group="whatever:hello",
vulnerability_id="meh",
),
match=Match(detected_at=datetime.datetime.utcnow()),
)
source = copy.deepcopy(random)
source.fix = test_source
destination = copy.deepcopy(random)
destination.fix = test_destination
results = transfer_vulnerability_timestamps(
source=[source], destination=[destination]
)
assert results and len(results) == 1
assert results[0].fix.observed_at == expected
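# --- Illustrative sketch (not the anchore-engine implementation) -------------
# A minimal re-statement of the behaviour exercised by TestTimestampMerger,
# assuming the VulnerabilityMatch/Match/FixedArtifact shapes used above:
# destination matches inherit match.detected_at from the source match with the
# same vulnerability + artifact identity, and inherit fix.observed_at only when
# both sides report the same non-empty set of fixed versions.
def _toy_transfer_timestamps(source, destination):
    if not source or not destination:
        return destination or []

    def identity(m):
        return (
            m.vulnerability.vulnerability_id,
            m.vulnerability.feed_group,
            m.artifact.name,
            m.artifact.version,
        )

    source_by_identity = {identity(m): m for m in source}
    for dest in destination:
        src = source_by_identity.get(identity(dest))
        if src is None:
            continue
        dest.match.detected_at = src.match.detected_at
        if (
            dest.fix is not None
            and src.fix is not None
            and src.fix.versions
            and set(src.fix.versions) == set(dest.fix.versions or [])
        ):
            dest.fix.observed_at = src.fix.observed_at
    return destination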
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SymbolicOperator is the base class for FermionOperator and QubitOperator"""
import abc
import copy
import itertools
import re
import warnings
import numpy
from six import add_metaclass, string_types
from openfermion.config import EQ_TOLERANCE
@add_metaclass(abc.ABCMeta)
class SymbolicOperator:
"""Base class for FermionOperator and QubitOperator.
A SymbolicOperator stores an object which represents a weighted
sum of terms; each term is a product of individual factors
of the form (`index`, `action`), where `index` is a nonnegative integer
and the possible values for `action` are determined by the subclass.
For instance, for the subclass FermionOperator, `action` can be 1 or 0,
indicating raising or lowering, and for QubitOperator, `action` is from
the set {'X', 'Y', 'Z'}.
The coefficients of the terms are stored in a dictionary whose
keys are the terms.
SymbolicOperators of the same type can be added or multiplied together.
Note:
Adding SymbolicOperators is faster using += (as this
is done by in-place addition). Specifying the coefficient
during initialization is faster than multiplying a SymbolicOperator
with a scalar.
Attributes:
actions (tuple): A tuple of objects representing the possible actions.
e.g. for FermionOperator, this is (1, 0).
action_strings (tuple): A tuple of string representations of actions.
These should be in one-to-one correspondence with actions and
listed in the same order.
e.g. for FermionOperator, this is ('^', '').
action_before_index (bool): A boolean indicating whether in string
representations, the action should come before the index.
different_indices_commute (bool): A boolean indicating whether
factors acting on different indices commute.
terms (dict):
**key** (tuple of tuples): A dictionary storing the coefficients
of the terms in the operator. The keys are the terms.
A term is a product of individual factors; each factor is
represented by a tuple of the form (`index`, `action`), and
these tuples are collected into a larger tuple which represents
the term as the product of its factors.
"""
@abc.abstractproperty
def actions(self):
"""The allowed actions.
Returns a tuple of objects representing the possible actions.
"""
pass
@abc.abstractproperty
def action_strings(self):
"""The string representations of the allowed actions.
Returns a tuple containing string representations of the possible
actions, in the same order as the `actions` property.
"""
pass
@abc.abstractproperty
def action_before_index(self):
"""Whether action comes before index in string representations.
Example: For QubitOperator, the actions are ('X', 'Y', 'Z') and
the string representations look something like 'X0 Z2 Y3'. So the
action comes before the index, and this function should return True.
For FermionOperator, the string representations look like
'0^ 1 2^ 3'. The action comes after the index, so this function
should return False.
"""
pass
@abc.abstractproperty
def different_indices_commute(self):
"""Whether factors acting on different indices commute."""
pass
__hash__ = None
def __init__(self, term=None, coefficient=1.):
if not isinstance(coefficient, (int, float, complex)):
raise ValueError('Coefficient must be a numeric type.')
# Initialize the terms dictionary
self.terms = {}
# Detect if the input is the string representation of a sum of terms;
# if so, initialization needs to be handled differently
if isinstance(term, string_types) and '[' in term:
self._long_string_init(term, coefficient)
return
# Zero operator: leave the terms dictionary empty
if term is None:
return
# Parse the term
# Sequence input
if isinstance(term, tuple) or isinstance(term, list):
term = self._parse_sequence(term)
# String input
elif isinstance(term, string_types):
term = self._parse_string(term)
# Invalid input type
else:
raise ValueError('term specified incorrectly.')
# Simplify the term
coefficient, term = self._simplify(term, coefficient=coefficient)
# Add the term to the dictionary
self.terms[term] = coefficient
def _long_string_init(self, long_string, coefficient):
"""
Initialization from a long string representation.
e.g. For FermionOperator:
'1.5 [2^ 3] + 1.4 [3^ 0]'
"""
pattern = r'(.*?)\[(.*?)\]' # regex for a term
for match in re.findall(pattern, long_string, flags=re.DOTALL):
# Determine the coefficient for this term
coef_string = re.sub(r"\s+", "", match[0])
if coef_string and coef_string[0] == '+':
coef_string = coef_string[1:].strip()
if coef_string == '':
coef = 1.0
elif coef_string == '-':
coef = -1.0
else:
try:
if 'j' in coef_string:
if coef_string[0] == '-':
coef = -complex(coef_string[1:])
else:
coef = complex(coef_string)
else:
coef = float(coef_string)
except ValueError:
raise ValueError(
'Invalid coefficient {}.'.format(coef_string))
coef *= coefficient
# Parse the term, simplify it and add to the dict
term = self._parse_string(match[1])
coef, term = self._simplify(term, coefficient=coef)
if term not in self.terms:
self.terms[term] = coef
else:
self.terms[term] += coef
def _validate_factor(self, factor):
"""Check that a factor of a term is valid."""
if len(factor) != 2:
raise ValueError('Invalid factor {}.'.format(factor))
index, action = factor
if action not in self.actions:
raise ValueError('Invalid action in factor {}. '
'Valid actions are: {}'.format(
factor, self.actions))
if not isinstance(index, int) or index < 0:
raise ValueError('Invalid index in factor {}. '
'The index should be a non-negative '
'integer.'.format(factor))
def _simplify(self, term, coefficient=1.0):
"""Simplifies a term."""
if self.different_indices_commute:
term = sorted(term, key=lambda factor: factor[0])
return coefficient, tuple(term)
def _parse_sequence(self, term):
"""Parse a term given as a sequence type (i.e., list, tuple, etc.).
e.g. For QubitOperator:
[('X', 2), ('Y', 0), ('Z', 3)] -> (('Y', 0), ('X', 2), ('Z', 3))
"""
if not term:
# Empty sequence
return ()
elif isinstance(term[0], int):
# Single factor
self._validate_factor(term)
return (tuple(term),)
else:
# Check that all factors in the term are valid
for factor in term:
self._validate_factor(factor)
# Return a tuple
return tuple(term)
def _parse_string(self, term):
"""Parse a term given as a string.
e.g. For FermionOperator:
"2^ 3" -> ((2, 1), (3, 0))
"""
factors = term.split()
# Convert the string representations of the factors to tuples
processed_term = []
for factor in factors:
# Get the index and action string
if self.action_before_index:
# The index is at the end of the string; find where it starts.
if not factor[-1].isdigit():
raise ValueError('Invalid factor {}.'.format(factor))
index_start = len(factor) - 1
while index_start > 0 and factor[index_start - 1].isdigit():
index_start -= 1
index = int(factor[index_start:])
action_string = factor[:index_start]
else:
# The index is at the beginning of the string; find where
# it ends
if not factor[0].isdigit():
raise ValueError('Invalid factor {}.'.format(factor))
index_end = 1
while (index_end <= len(factor) - 1 and
factor[index_end].isdigit()):
index_end += 1
index = int(factor[:index_end])
action_string = factor[index_end:]
# Check that the index is valid
if index < 0:
raise ValueError('Invalid index in factor {}. '
'The index should be a non-negative '
'integer.'.format(factor))
# Convert the action string to an action
if action_string in self.action_strings:
action = self.actions[self.action_strings.index(action_string)]
else:
raise ValueError('Invalid action in factor {}. '
'Valid actions are: {}'.format(
factor, self.action_strings))
# Add the factor to the list as a tuple
processed_term.append((index, action))
# Return a tuple
return tuple(processed_term)
@property
def constant(self):
"""The value of the constant term."""
return self.terms.get((), 0.0)
@classmethod
def zero(cls):
"""
Returns:
additive_identity (SymbolicOperator):
A symbolic operator o with the property that o+x = x+o = x for
all operators x of the same class.
"""
return cls(term=None)
@classmethod
def identity(cls):
"""
Returns:
multiplicative_identity (SymbolicOperator):
A symbolic operator u with the property that u*x = x*u = x for
all operators x of the same class.
"""
return cls(term=())
def __str__(self):
"""Return an easy-to-read string representation."""
if not self.terms:
return '0'
string_rep = ''
for term, coeff in sorted(self.terms.items()):
if numpy.isclose(coeff, 0.0):
continue
tmp_string = '{} ['.format(coeff)
for factor in term:
index, action = factor
action_string = self.action_strings[self.actions.index(action)]
if self.action_before_index:
tmp_string += '{}{} '.format(action_string, index)
else:
tmp_string += '{}{} '.format(index, action_string)
string_rep += '{}] +\n'.format(tmp_string.strip())
return string_rep[:-3]
def __repr__(self):
return str(self)
def __imul__(self, multiplier):
"""In-place multiply (*=) with scalar or operator of the same type.
Default implementation is to multiply coefficients and
concatenate terms.
Args:
multiplier(complex float, or SymbolicOperator): multiplier
Returns:
product (SymbolicOperator): Mutated self.
"""
# Handle scalars.
if isinstance(multiplier, (int, float, complex)):
for term in self.terms:
self.terms[term] *= multiplier
return self
# Handle operator of the same type
elif isinstance(multiplier, self.__class__):
result_terms = dict()
for left_term in self.terms:
for right_term in multiplier.terms:
left_coefficient = self.terms[left_term]
right_coefficient = multiplier.terms[right_term]
new_coefficient = left_coefficient * right_coefficient
new_term = left_term + right_term
new_coefficient, new_term = self._simplify(
new_term, coefficient=new_coefficient)
# Update result dict.
if new_term in result_terms:
result_terms[new_term] += new_coefficient
else:
result_terms[new_term] = new_coefficient
self.terms = result_terms
return self
# Invalid multiplier type
else:
raise TypeError('Cannot multiply {} with {}'.format(
self.__class__.__name__, multiplier.__class__.__name__))
def __mul__(self, multiplier):
"""Return self * multiplier for a scalar, or a SymbolicOperator.
Args:
multiplier: A scalar, or a SymbolicOperator.
Returns:
product (SymbolicOperator)
Raises:
TypeError: Invalid type cannot be multiplied with SymbolicOperator.
"""
if isinstance(multiplier, (int, float, complex, type(self))):
product = copy.deepcopy(self)
product *= multiplier
return product
else:
raise TypeError(
    'Object of invalid type cannot multiply with {}.'.format(
        type(self)))
def __iadd__(self, addend):
"""In-place method for += addition of SymbolicOperator.
Args:
addend (SymbolicOperator): The operator to add.
Returns:
sum (SymbolicOperator): Mutated self.
Raises:
TypeError: Cannot add invalid type.
"""
if isinstance(addend, type(self)):
for term in addend.terms:
self.terms[term] = (self.terms.get(term, 0.0) +
addend.terms[term])
if abs(self.terms[term]) < EQ_TOLERANCE:
del self.terms[term]
else:
raise TypeError('Cannot add invalid type to {}.'.format(
type(self)))
return self
def __add__(self, addend):
"""
Args:
addend (SymbolicOperator): The operator to add.
Returns:
sum (SymbolicOperator)
"""
summand = copy.deepcopy(self)
summand += addend
return summand
def __isub__(self, subtrahend):
"""In-place method for -= subtraction of SymbolicOperator.
Args:
subtrahend (A SymbolicOperator): The operator to subtract.
Returns:
difference (SymbolicOperator): Mutated self.
Raises:
TypeError: Cannot subtract invalid type.
"""
if isinstance(subtrahend, type(self)):
for term in subtrahend.terms:
self.terms[term] = (self.terms.get(term, 0.0) -
subtrahend.terms[term])
if abs(self.terms[term]) < EQ_TOLERANCE:
del self.terms[term]
else:
raise TypeError('Cannot subtract invalid type from {}.'.format(
type(self)))
return self
def __sub__(self, subtrahend):
"""
Args:
subtrahend (SymbolicOperator): The operator to subtract.
Returns:
difference (SymbolicOperator)
"""
minuend = copy.deepcopy(self)
minuend -= subtrahend
return minuend
def __rmul__(self, multiplier):
"""
Return multiplier * self for a scalar.
We only define __rmul__ for scalars because left multiplication
already exists for SymbolicOperator and is queried as the default
behavior.
Args:
multiplier: A scalar to multiply by.
Returns:
product: A new instance of SymbolicOperator.
Raises:
TypeError: Object of invalid type cannot multiply SymbolicOperator.
"""
if not isinstance(multiplier, (int, float, complex)):
raise TypeError(
    'Object of invalid type cannot multiply with {}.'.format(
        type(self)))
return self * multiplier
def __truediv__(self, divisor):
"""
Return self / divisor for a scalar.
Note:
This is always floating point division.
Args:
divisor: A scalar to divide by.
Returns:
A new instance of SymbolicOperator.
Raises:
TypeError: Cannot divide local operator by non-scalar type.
"""
if not isinstance(divisor, (int, float, complex)):
raise TypeError('Cannot divide {} by non-scalar type.'.format(
    type(self)))
return self * (1.0 / divisor)
def __div__(self, divisor):
""" For compatibility with Python 2. """
return self.__truediv__(divisor)
def __itruediv__(self, divisor):
if not isinstance(divisor, (int, float, complex)):
raise TypeError('Cannot divide {} by non-scalar type.'.format(
    type(self)))
self *= (1.0 / divisor)
return self
def __idiv__(self, divisor):
""" For compatibility with Python 2. """
return self.__itruediv__(divisor)
def __neg__(self):
"""
Returns:
negation (SymbolicOperator)
"""
return -1 * self
def __pow__(self, exponent):
"""Exponentiate the SymbolicOperator.
Args:
exponent (int): The exponent with which to raise the operator.
Returns:
exponentiated (SymbolicOperator)
Raises:
ValueError: Can only raise SymbolicOperator to non-negative
integer powers.
"""
# Handle invalid exponents.
if not isinstance(exponent, int) or exponent < 0:
raise ValueError(
'exponent must be a non-negative int, but was {} {}'.format(
type(exponent), repr(exponent)))
# Initialized identity.
exponentiated = self.__class__(())
# Handle non-zero exponents.
for _ in range(exponent):
exponentiated *= self
return exponentiated
def __eq__(self, other):
"""
Returns True if other (SymbolicOperator) is close to self.
Comparison is done for each term individually. Return True
if the difference between each term in self and other is
less than EQ_TOLERANCE
Args:
other(SymbolicOperator): SymbolicOperator to compare against.
"""
if not isinstance(self, type(other)):
return NotImplemented
# terms which are in both:
for term in set(self.terms).intersection(set(other.terms)):
a = self.terms[term]
b = other.terms[term]
# math.isclose does this in Python >=3.5
if not abs(a - b) <= max(EQ_TOLERANCE,
EQ_TOLERANCE * max(abs(a), abs(b))):
return False
# terms only in one (compare to 0.0 so only abs_tol)
for term in set(self.terms).symmetric_difference(set(other.terms)):
if term in self.terms:
if not abs(self.terms[term]) <= EQ_TOLERANCE:
return False
elif not abs(other.terms[term]) <= EQ_TOLERANCE:
return False
return True
def __ne__(self, other):
return not (self == other)
def __iter__(self):
self._iter = iter(self.terms.items())
return self
def __next__(self):
term, coefficient = next(self._iter)
return self.__class__(term=term, coefficient=coefficient)
def next(self):
return self.__next__()
def compress(self, abs_tol=EQ_TOLERANCE):
"""
Eliminates all terms with coefficients close to zero and removes
small imaginary and real parts.
Args:
abs_tol(float): Absolute tolerance, must be at least 0.0
"""
new_terms = {}
for term in self.terms:
coeff = self.terms[term]
# Remove small imaginary and real parts
if abs(coeff.imag) <= abs_tol:
coeff = coeff.real
if abs(coeff.real) <= abs_tol:
coeff = 1.j * coeff.imag
# Add the term if the coefficient is large enough
if abs(coeff) > abs_tol:
new_terms[term] = coeff
self.terms = new_terms
def induced_norm(self, order=1):
r"""
Compute the induced p-norm of the operator.
If we represent an operator as
:math: `\sum_{j} w_j H_j`
where :math: `w_j` are scalar coefficients then this norm is
:math: `\left(\sum_{j} \| w_j \|^p \right)^{\frac{1}{p}}`
where :math: `p` is the order of the induced norm
Args:
order(int): the order of the induced norm.
"""
norm = 0.
for coefficient in self.terms.values():
norm += abs(coefficient) ** order
return norm ** (1. / order)
def many_body_order(self):
"""Compute the many-body order of a SymbolicOperator.
The many-body order of a SymbolicOperator is the maximum length of
a term with nonzero coefficient.
Returns:
int
"""
if not self.terms:
# Zero operator
return 0
else:
return max(len(term) for term, coeff in self.terms.items()
if abs(coeff) > EQ_TOLERANCE)
@classmethod
def accumulate(cls, operators, start=None):
"""Sums over SymbolicOperators."""
total = copy.deepcopy(start or cls.zero())
for operator in operators:
total += operator
return total
def get_operators(self):
"""Gets a list of operators with a single term.
Returns:
operators([self.__class__]): A generator of the operators in self.
"""
for term, coefficient in self.terms.items():
yield self.__class__(term, coefficient)
def get_operator_groups(self, num_groups):
"""Gets a list of operators with a few terms.
Args:
num_groups(int): How many operators to get in the end.
Returns:
operators([self.__class__]): A list of operators summing up to
self.
"""
if num_groups < 1:
warnings.warn('Invalid num_groups {} < 1.'.format(num_groups),
RuntimeWarning)
num_groups = 1
operators = self.get_operators()
num_groups = min(num_groups, len(self.terms))
for i in range(num_groups):
yield self.accumulate(itertools.islice(
operators, len(range(i, len(self.terms), num_groups))))
# DEPRECATED FUNCTIONS
# ====================
def isclose(self, other):
warnings.warn('The method `isclose` is deprecated and will '
'be removed in a future version. Use == '
'instead. For instance, a == b instead of '
'a.isclose(b).', DeprecationWarning)
return self == other
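# --- Illustrative usage sketch ------------------------------------------------
# SymbolicOperator itself is abstract; the toy subclass below (hypothetical,
# not part of the public API) only exists to show how the (index, action) term
# machinery, string parsing and arithmetic behave. FermionOperator and
# QubitOperator provide the real concrete implementations.
if __name__ == '__main__':
    class _DemoLadderOperator(SymbolicOperator):
        """Toy subclass with FermionOperator-like conventions."""
        actions = (1, 0)             # 1 = raising, 0 = lowering
        action_strings = ('^', '')   # rendered as e.g. '2^ 3'
        action_before_index = False
        different_indices_commute = False

    op = _DemoLadderOperator('2^ 3', 0.5)    # terms == {((2, 1), (3, 0)): 0.5}
    op += _DemoLadderOperator('1^ 0', -1.0)  # in-place addition of a new term
    print(op)                  # -1.0 [1^ 0] +
                               # 0.5 [2^ 3]
    print((2 * op).terms)      # coefficients scaled by the scalar
    print(op.induced_norm(2))  # sqrt(1.0**2 + 0.5**2)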
|
|
#
# Copyright 2019 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
ALTO/Page data loaders for segmentation training
"""
import os.path
import pathlib
import logging
from itertools import groupby
from lxml import etree
from os.path import dirname
from PIL import Image
from typing import Union, Dict, Any, Sequence
from collections import defaultdict
from kraken.lib.segmentation import calculate_polygonal_environment
from kraken.lib.exceptions import KrakenInputException
logger = logging.getLogger(__name__)
__all__ = ['parse_xml', 'parse_page', 'parse_alto', 'preparse_xml_data']
# fallback mapping between PAGE region types and tags
page_regions = {'TextRegion': 'text',
'ImageRegion': 'image',
'LineDrawingRegion': 'line drawing',
'GraphicRegion': 'graphic',
'TableRegion': 'table',
'ChartRegion': 'chart',
'MapRegion': 'map',
'SeparatorRegion': 'separator',
'MathsRegion': 'maths',
'ChemRegion': 'chem',
'MusicRegion': 'music',
'AdvertRegion': 'advert',
'NoiseRegion': 'noise',
'UnknownRegion': 'unknown',
'CustomRegion': 'custom'}
# same for ALTO
alto_regions = {'TextBlock': 'text',
'IllustrationType': 'illustration',
'GraphicalElementType': 'graphic',
'ComposedBlock': 'composed'}
def preparse_xml_data(filenames: Sequence[Union[str, pathlib.Path]],
format_type: str = 'xml',
repolygonize: bool = False) -> Sequence[Dict[str, Any]]:
"""
Loads training data from a set of xml files.
Extracts line information from Page/ALTO xml files for training of
recognition models.
Args:
filenames: List of XML files.
format_type: Either `page`, `alto` or `xml` for autodetermination.
repolygonize: (Re-)calculates polygon information using the kraken
algorithm.
Returns:
A list of dicts {'text': text, 'baseline': [[x0, y0], ...], 'boundary':
[[x0, y0], ...], 'image': PIL.Image}.
"""
training_pairs = []
if format_type == 'xml':
parse_fn = parse_xml
elif format_type == 'alto':
parse_fn = parse_alto
elif format_type == 'page':
parse_fn = parse_page
else:
raise ValueError(f'invalid format {format_type} for preparse_xml_data')
for fn in filenames:
try:
data = parse_fn(fn)
except KrakenInputException as e:
logger.warning(e)
continue
try:
with open(data['image'], 'rb') as fp:
Image.open(fp)
except FileNotFoundError as e:
logger.warning(f'Could not open file {e.filename} in {fn}')
continue
if repolygonize:
logger.info('repolygonizing {} lines in {}'.format(len(data['lines']), data['image']))
data['lines'] = _repolygonize(data['image'], data['lines'])
for line in data['lines']:
training_pairs.append({'image': data['image'], **line})
return training_pairs
def _repolygonize(im: Image.Image, lines: Sequence[Dict[str, Any]]):
"""
Helper function taking an output of the lib.xml parse_* functions and
recalculating the contained polygonization.
Args:
im (Image.Image): Input image
lines (list): List of dicts [{'boundary': [[x0, y0], ...], 'baseline': [[x0, y0], ...], 'text': 'abcvsd'}, ...]
Returns:
A data structure `lines` with a changed polygonization.
"""
im = Image.open(im).convert('L')
polygons = calculate_polygonal_environment(im, [x['baseline'] for x in lines])
return [{'boundary': polygon,
'baseline': orig['baseline'],
'text': orig['text'],
'script': orig['script']} for orig, polygon in zip(lines, polygons)]
def parse_xml(filename: Union[str, pathlib.Path]) -> Dict[str, Any]:
"""
Parses either a PageXML or ALTO file with autodetermination of the file
format.
Args:
filename: path to an XML file.
Returns:
A dict {'image': impath, 'lines': [{'boundary': [[x0, y0], ...],
'baseline': [[x0, y0], ...], 'text': 'apdjfqpf', 'tags':
['script_type_0', 'script_type_1']}, ...], 'regions':
{'region_type_0': [[[x0, y0], ...], ...], ...}}
"""
with open(filename, 'rb') as fp:
try:
doc = etree.parse(fp)
except etree.XMLSyntaxError as e:
raise KrakenInputException(f'Parsing {filename} failed: {e}')
if doc.getroot().tag.endswith('alto'):
return parse_alto(filename)
elif doc.getroot().tag.endswith('PcGts'):
return parse_page(filename)
else:
raise KrakenInputException(f'Unknown XML format in {filename}')
def parse_page(filename: Union[str, pathlib.Path]) -> Dict[str, Any]:
"""
Parses a PageXML file, returns the baselines defined in it, and loads the
referenced image.
Args:
filename: path to a PageXML file.
Returns:
A dict {'image': impath, 'lines': [{'boundary': [[x0, y0], ...],
'baseline': [[x0, y0], ...], 'text': 'apdjfqpf', 'tags':
{'script': 'script_type', 'split': 'train', 'type': 'type_1'}}, ...],
'regions': {'region_type_0': [[[x0, y0], ...], ...], ...}}
"""
def _parse_page_custom(s):
o = {}
s = s.strip()
l_chunks = [l_chunk for l_chunk in s.split('}') if l_chunk.strip()]
if l_chunks:
for chunk in l_chunks:
tag, vals = chunk.split('{')
tag_vals = {}
vals = [val.strip() for val in vals.split(';') if val.strip()]
for val in vals:
key, *val = val.split(':')
tag_vals[key] = ":".join(val)
o[tag.strip()] = tag_vals
return o
def _parse_coords(coords):
points = [x for x in coords.split(' ')]
points = [int(c) for point in points for c in point.split(',')]
pts = zip(points[::2], points[1::2])
return [k for k, g in groupby(pts)]
with open(filename, 'rb') as fp:
base_dir = dirname(filename)
try:
doc = etree.parse(fp)
except etree.XMLSyntaxError as e:
raise KrakenInputException('Parsing {} failed: {}'.format(filename, e))
image = doc.find('.//{*}Page')
if image is None or image.get('imageFilename') is None:
raise KrakenInputException('No valid image filename found in PageXML file {}'.format(filename))
try:
base_direction = {'left-to-right': 'L',
'right-to-left': 'R',
'top-to-bottom': 'L',
'bottom-to-top': 'R',
None: None}[image.get('readingDirection')]
except KeyError:
logger.warning(f'Invalid value {image.get("readingDirection")} encountered in page-level reading direction.')
base_direction = None
lines = doc.findall('.//{*}TextLine')
data = {'image': os.path.join(base_dir, image.get('imageFilename')),
'lines': [],
'type': 'baselines',
'base_dir': base_direction,
'regions': {}}
# find all image regions
regions = []
for x in page_regions.keys():
regions.extend(doc.findall('.//{{*}}{}'.format(x)))
# parse region type and coords
region_data = defaultdict(list)
for region in regions:
coords = region.find('{*}Coords')
if coords is not None and not coords.get('points').isspace() and len(coords.get('points')):
try:
coords = _parse_coords(coords.get('points'))
except Exception:
logger.warning('Region {} without coordinates'.format(region.get('id')))
continue
else:
logger.warning('Region {} without coordinates'.format(region.get('id')))
continue
rtype = region.get('type')
# parse transkribus-style custom field if possible
custom_str = region.get('custom')
if not rtype and custom_str:
cs = _parse_page_custom(custom_str)
if 'structure' in cs and 'type' in cs['structure']:
rtype = cs['structure']['type']
# fall back to default region type if nothing is given
if not rtype:
rtype = page_regions[region.tag.split('}')[-1]]
region_data[rtype].append(coords)
data['regions'] = region_data
# parse line information
tag_set = set(('default',))
for line in lines:
pol = line.find('./{*}Coords')
boundary = None
if pol is not None and not pol.get('points').isspace() and len(pol.get('points')):
try:
boundary = _parse_coords(pol.get('points'))
except Exception:
logger.info('TextLine {} without polygon'.format(line.get('id')))
else:
logger.info('TextLine {} without polygon'.format(line.get('id')))
base = line.find('./{*}Baseline')
baseline = None
if base is not None and not base.get('points').isspace() and len(base.get('points')):
try:
baseline = _parse_coords(base.get('points'))
except Exception:
logger.info('TextLine {} without baseline'.format(line.get('id')))
continue
else:
logger.info('TextLine {} without baseline'.format(line.get('id')))
continue
text = ''
manual_transcription = line.find('./{*}TextEquiv')
if manual_transcription is not None:
transcription = manual_transcription
else:
transcription = line
for el in transcription.findall('.//{*}Unicode'):
if el.text:
text += el.text
# retrieve line tags if custom string is set and contains
tags = {'type': 'default'}
split_type = None
custom_str = line.get('custom')
if custom_str:
cs = _parse_page_custom(custom_str)
if 'structure' in cs and 'type' in cs['structure']:
tags['type'] = cs['structure']['type']
tag_set.add(tags['type'])
# retrieve data split if encoded in custom string.
if 'split' in cs and 'type' in cs['split'] and cs['split']['type'] in ['train', 'validation', 'test']:
split_type = cs['split']['type']
tags['split'] = split_type
tag_set.add(split_type)
data['lines'].append({'baseline': baseline,
'boundary': boundary,
'text': text,
'split': split_type,
'tags': tags})
if len(tag_set) > 1:
data['script_detection'] = True
else:
data['script_detection'] = False
return data
def parse_alto(filename: Union[str, pathlib.Path]) -> Dict[str, Any]:
"""
Parses an ALTO file, returns the baselines defined in it, and loads the
referenced image.
Args:
filename: path to an ALTO file.
Returns:
A dict {'image': impath, 'lines': [{'boundary': [[x0, y0], ...],
'baseline': [[x0, y0], ...], 'text': 'apdjfqpf', 'tags':
{'script': 'script_type', 'split': 'train', 'type': 'type_1'}}, ...],
'regions': {'region_type_0': [[[x0, y0], ...], ...], ...}}
"""
with open(filename, 'rb') as fp:
base_dir = dirname(filename)
try:
doc = etree.parse(fp)
except etree.XMLSyntaxError as e:
raise KrakenInputException('Parsing {} failed: {}'.format(filename, e))
image = doc.find('.//{*}fileName')
if image is None or not image.text:
raise KrakenInputException('No valid filename found in ALTO file')
lines = doc.findall('.//{*}TextLine')
data = {'image': os.path.join(base_dir, image.text),
'lines': [],
'type': 'baselines',
'base_dir': None,
'regions': {}}
# find all image regions
regions = []
for x in alto_regions.keys():
regions.extend(doc.findall('./{{*}}Layout/{{*}}Page/{{*}}PrintSpace/{{*}}{}'.format(x)))
# find overall dimensions to filter out dummy TextBlocks
ps = doc.find('./{*}Layout/{*}Page/{*}PrintSpace')
x_min = int(float(ps.get('HPOS')))
y_min = int(float(ps.get('VPOS')))
width = int(float(ps.get('WIDTH')))
height = int(float(ps.get('HEIGHT')))
page_boundary = [(x_min, y_min),
(x_min, y_min + height),
(x_min + width, y_min + height),
(x_min + width, y_min)]
# parse tagrefs
cls_map = {}
tags = doc.find('.//{*}Tags')
if tags is not None:
for x in ['StructureTag', 'LayoutTag', 'OtherTag']:
for tag in tags.findall('./{{*}}{}'.format(x)):
cls_map[tag.get('ID')] = (x[:-3].lower(), tag.get('LABEL'))
# parse region type and coords
region_data = defaultdict(list)
for region in regions:
# try to find shape object
coords = region.find('./{*}Shape/{*}Polygon')
if coords is not None:
points = [int(float(x)) for x in coords.get('POINTS').split(' ')]
boundary = zip(points[::2], points[1::2])
boundary = [k for k, g in groupby(boundary)]
elif (region.get('HPOS') is not None and region.get('VPOS') is not None and
region.get('WIDTH') is not None and region.get('HEIGHT') is not None):
# use rectangular definition
x_min = int(float(region.get('HPOS')))
y_min = int(float(region.get('VPOS')))
width = int(float(region.get('WIDTH')))
height = int(float(region.get('HEIGHT')))
boundary = [(x_min, y_min),
(x_min, y_min + height),
(x_min + width, y_min + height),
(x_min + width, y_min)]
else:
continue
rtype = region.get('TYPE')
# fall back to default region type if nothing is given
tagrefs = region.get('TAGREFS')
if tagrefs is not None and rtype is None:
for tagref in tagrefs.split():
ttype, rtype = cls_map.get(tagref, (None, None))
if rtype is not None and ttype:
break
if rtype is None:
rtype = alto_regions[region.tag.split('}')[-1]]
if boundary == page_boundary and rtype == 'text':
logger.info('Skipping TextBlock with same size as page image.')
continue
region_data[rtype].append(boundary)
data['regions'] = region_data
tag_set = set(('default',))
for line in lines:
if line.get('BASELINE') is None:
logger.info('TextLine {} without baseline'.format(line.get('ID')))
continue
pol = line.find('./{*}Shape/{*}Polygon')
boundary = None
if pol is not None:
try:
points = [int(float(x)) for x in pol.get('POINTS').split(' ')]
boundary = zip(points[::2], points[1::2])
boundary = [k for k, g in groupby(boundary)]
except ValueError:
logger.info('TextLine {} without polygon'.format(line.get('ID')))
else:
logger.info('TextLine {} without polygon'.format(line.get('ID')))
baseline = None
try:
points = [int(float(x)) for x in line.get('BASELINE').split(' ')]
baseline = list(zip(points[::2], points[1::2]))
baseline = [k for k, g in groupby(baseline)]
except ValueError:
logger.info('TextLine {} without baseline'.format(line.get('ID')))
text = ''
for el in line.xpath(".//*[local-name() = 'String'] | .//*[local-name() = 'SP']"):
text += el.get('CONTENT') if el.get('CONTENT') else ' '
# find line type
tags = {'type': 'default'}
split_type = None
tagrefs = line.get('TAGREFS')
if tagrefs is not None:
for tagref in tagrefs.split():
ttype, ltype = cls_map.get(tagref, (None, None))
if ltype is not None:
tag_set.add(ltype)
if ttype == 'other':
tags['type'] = ltype
else:
tags[ttype] = ltype
if ltype in ['train', 'validation', 'test']:
split_type = ltype
data['lines'].append({'baseline': baseline,
'boundary': boundary,
'text': text,
'tags': tags,
'split': split_type})
if len(tag_set) > 1:
data['tags'] = True
else:
data['tags'] = False
return data
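# --- Illustrative usage sketch ------------------------------------------------
# A minimal driver showing how the parsers above are typically combined; the
# file name 'sample.xml' is a placeholder for a real PAGE or ALTO document
# sitting next to its referenced image.
if __name__ == '__main__':
    import pprint

    parsed = parse_xml('sample.xml')  # autodetects PAGE vs. ALTO
    print(parsed['image'], len(parsed['lines']), sorted(parsed['regions']))

    # flatten per-line training pairs, optionally re-running the kraken
    # polygonizer over the referenced image
    pairs = preparse_xml_data(['sample.xml'], format_type='xml',
                              repolygonize=False)
    pprint.pprint(pairs[:1])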
|
|
# -*- coding: utf-8 -*-
# (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Implementation of a WebDAV provider that provides a very basic, read-only
resource layer emulation of a MySQL database.
This module is specific to the WsgiDAV application. It provides the
class ``MySQLBrowserProvider``.
Usage::
(see docs/sample_wsgidav.yaml)
MySQLBrowserProvider(host, user, passwd, db)
host - host of database server
user - user_name to access database
passwd - passwd to access database
db - name of database on database server
The ``MySQLBrowserProvider`` provides a very basic, read-only
resource layer emulation of a MySQL database.
It provides the following interface:
- the root collection shared consists of collections that correspond to
table names
- in each table collection, there is a resource called "_ENTIRE_CONTENTS".
This is a non-collection resource that returns a csv representation of the
entire table
- if the table has a single primary key, each table record will also appear
as a non-collection resource in the table collection using the primary key
value as its name. This resource returns a csv representation of the record
and will also include the record attributes as live properties with
attribute name as property name and table name suffixed with colon as the
property namespace
This is a very basic interface; the list below is by no means a thorough
summary of its limitations:
- Really only supports having numbers or strings as primary keys. The code uses
a numeric or string comparison that may not hold up if the primary key is
a date or some other datatype.
- There is no handling for cases like BLOBs as primary keys or such. Well, there is
no handling for BLOBs in general.
- When returning contents, it buffers the entire contents! A bad way to return
large tables. Ideally you would have a FileMixin that reads the database even
as the application reads the file object....
- It takes too many database queries to return information.
Ideally there should be some sort of caching for metadata at least, to avoid
unnecessary queries to the database.
"""
import csv
import hashlib
import time
from io import StringIO
import MySQLdb # @UnresolvedImport
from wsgidav import util
from wsgidav.dav_error import (
HTTP_FORBIDDEN,
DAVError,
PRECONDITION_CODE_ProtectedProperty,
)
from wsgidav.dav_provider import DAVProvider, _DAVResource
__docformat__ = "reStructuredText"
_logger = util.get_module_logger(__name__)
class MySQLBrowserResource(_DAVResource):
"""Represents a single existing DAV resource instance.
See also DAVResource and MySQLBrowserProvider.
"""
def __init__(self, provider, path, is_collection, environ):
super().__init__(path, is_collection, environ)
self._cache = None
def _init(self):
"""Read resource information into self._cache, for cached access.
See DAVResource._init()
"""
# TODO: recalc self.path from <self._file_path>, to fix correct file system case
# On windows this would lead to correct URLs
self.provider._count_get_resource_inst_init += 1
tableName, primKey = self.provider._split_path(self.path)
display_type = "Unknown"
displayTypeComment = ""
contentType = "text/html"
# _logger.debug("getInfoDict(%s), nc=%s" % (path, self.connectCount))
if tableName is None:
display_type = "Database"
elif primKey is None: # "database" and table name
display_type = "Database Table"
else:
contentType = "text/csv"
if primKey == "_ENTIRE_CONTENTS":
display_type = "Database Table Contents"
displayTypeComment = "CSV Representation of Table Contents"
else:
display_type = "Database Record"
displayTypeComment = "Attributes available as properties"
# Avoid calling is_collection, since it would call isExisting -> _init_connection
is_collection = primKey is None
self._cache = {
"content_length": None,
"contentType": contentType,
"created": time.time(),
"display_name": self.name,
"etag": hashlib.md5().update(self.path).hexdigest(),
# "etag": md5.new(self.path).hexdigest(),
"modified": None,
"support_ranges": False,
"display_info": {"type": display_type, "typeComment": displayTypeComment},
}
# Some resource-only infos:
if not is_collection:
self._cache["modified"] = time.time()
_logger.debug("---> _init, nc=%s" % self.provider._count_initConnection)
def _get_info(self, info):
if self._cache is None:
self._init()
return self._cache.get(info)
# Getter methods for standard live properties
def get_content_length(self):
return self._get_info("content_length")
def get_content_type(self):
return self._get_info("contentType")
def get_creation_date(self):
return self._get_info("created")
def get_display_name(self):
return self.name
def get_display_info(self):
return self._get_info("display_info")
def get_etag(self):
return self._get_info("etag")
def get_last_modified(self):
return self._get_info("modified")
def get_member_list(self):
"""Return list of (direct) collection member names (UTF-8 byte strings).
See DAVResource.get_member_list()
"""
members = []
conn = self.provider._init_connection()
try:
tableName, primKey = self.provider._split_path(self.path)
if tableName is None:
retlist = self.provider._list_tables(conn)
for name in retlist:
members.append(
MySQLBrowserResource(
self.provider,
util.join_uri(self.path, name),
True,
self.environ,
)
)
elif primKey is None:
pri_key = self.provider._find_primary_key(conn, tableName)
if pri_key is not None:
retlist = self.provider._list_fields(conn, tableName, pri_key)
for name in retlist:
members.append(
MySQLBrowserResource(
self.provider,
util.join_uri(self.path, name),
False,
self.environ,
)
)
members.insert(
0,
MySQLBrowserResource(
self.provider,
util.join_uri(self.path, "_ENTIRE_CONTENTS"),
False,
self.environ,
),
)
finally:
conn.close()
return members
def get_content(self):
"""Open content as a stream for reading.
See DAVResource.get_content()
"""
filestream = StringIO()
tableName, primKey = self.provider._split_path(self.path)
if primKey is not None:
conn = self.provider._init_connection()
listFields = self.provider._get_field_list(conn, tableName)
csvwriter = csv.DictWriter(filestream, listFields, extrasaction="ignore")
dictFields = {}
for field_name in listFields:
dictFields[field_name] = field_name
csvwriter.writerow(dictFields)
if primKey == "_ENTIRE_CONTENTS":
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * from " + self.provider._db + "." + tableName)
result_set = cursor.fetchall()
for row in result_set:
csvwriter.writerow(row)
cursor.close()
else:
row = self.provider._get_record_by_primary_key(conn, tableName, primKey)
if row is not None:
csvwriter.writerow(row)
conn.close()
# this suffices for small dbs, but
# for a production big database, I imagine you would have a FileMixin that
# does the retrieving and population even as the file object is being read
filestream.seek(0)
return filestream
def get_property_names(self, *, is_allprop):
"""Return list of supported property names in Clark Notation.
Return supported live and dead properties. (See also DAVProvider.get_property_names().)
In addition, all table field names are returned as properties.
"""
# Let default implementation return supported live and dead properties
propNames = super().get_property_names(is_allprop=is_allprop)
# Add fieldnames as properties
tableName, primKey = self.provider._split_path(self.path)
if primKey is not None:
conn = self.provider._init_connection()
fieldlist = self.provider._get_field_list(conn, tableName)
for fieldname in fieldlist:
propNames.append("{%s:}%s" % (tableName, fieldname))
conn.close()
return propNames
def get_property_value(self, name):
"""Return the value of a property.
The base implementation handles:
- ``{DAV:}lockdiscovery`` and ``{DAV:}supportedlock`` using the
associated lock manager.
- All other *live* properties (i.e. name starts with ``{DAV:}``) are
delegated to self.getLivePropertyValue()
- Finally, other properties are considered *dead*, and are handled using
the associated property manager, if one is present.
"""
# Return table field as property
tableName, primKey = self.provider._split_path(self.path)
if primKey is not None:
ns, localName = util.split_namespace(name)
if ns == (tableName + ":"):
conn = self.provider._init_connection()
fieldlist = self.provider._get_field_list(conn, tableName)
if localName in fieldlist:
val = self.provider._get_field_by_primary_key(
conn, tableName, primKey, localName
)
conn.close()
return val
conn.close()
# else, let default implementation return supported live and dead properties
return super().get_property_value(name)
def set_property_value(self, name, value, dry_run=False):
"""Set or remove property value.
See DAVResource.set_property_value()
"""
raise DAVError(
HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
)
# ============================================================================
# MySQLBrowserProvider
# ============================================================================
class MySQLBrowserProvider(DAVProvider):
def __init__(self, host, user, passwd, db):
super().__init__()
self._host = host
self._user = user
self._passwd = passwd
self._db = db
self._count_initConnection = 0
def __repr__(self):
return "%s for db '%s' on '%s' (user: '%s')'" % (
self.__class__.__name__,
self._db,
self._host,
self._user,
)
def _split_path(self, path):
"""Return (tableName, primaryKey) tuple for a request path."""
if path.strip() in (None, "", "/"):
return (None, None)
tableName, primKey = util.save_split(path.strip("/"), "/", 1)
# _logger.debug("'%s' -> ('%s', '%s')" % (path, tableName, primKey))
return (tableName, primKey)
def _init_connection(self):
self._count_initConnection += 1
return MySQLdb.connect(
host=self._host, user=self._user, passwd=self._passwd, db=self._db
)
def _get_field_list(self, conn, table_name):
retlist = []
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("DESCRIBE " + table_name)
result_set = cursor.fetchall()
for row in result_set:
retlist.append(row["Field"])
cursor.close()
return retlist
def _is_data_type_numeric(self, datatype):
if datatype is None:
return False
# how many MySQL datatypes does it take to change a lig... I mean, store numbers
numerictypes = [
"BIGINT",
"INTT",
"MEDIUMINT",
"SMALLINT",
"TINYINT",
"BIT",
"DEC",
"DECIMAL",
"DOUBLE",
"FLOAT",
"REAL",
"DOUBLE PRECISION",
"INTEGER",
"NUMERIC",
]
datatype = datatype.upper()
for numtype in numerictypes:
if datatype.startswith(numtype):
return True
return False
def _exists_record_by_primary_key(self, conn, table_name, pri_key_value):
pri_key = None
pri_field_type = None
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("DESCRIBE " + table_name)
result_set = cursor.fetchall()
for row in result_set:
if row["Key"] == "PRI":
if pri_key is None:
pri_key = row["Field"]
pri_field_type = row["Type"]
else:
return False # more than one primary key - multipart key?
cursor.close()
isNumType = self._is_data_type_numeric(pri_field_type)
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
if isNumType:
cursor.execute(
"SELECT "
+ pri_key
+ " FROM "
+ self._db
+ "."
+ table_name
+ " WHERE "
+ pri_key
+ " = "
+ pri_key_value
)
else:
cursor.execute(
"SELECT "
+ pri_key
+ " FROM "
+ self._db
+ "."
+ table_name
+ " WHERE "
+ pri_key
+ " = '"
+ pri_key_value
+ "'"
)
row = cursor.fetchone()
if row is None:
cursor.close()
return False
cursor.close()
return True
def _get_field_by_primary_key(self, conn, table_name, pri_key_value, field_name):
pri_key = None
pri_field_type = None
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("DESCRIBE " + table_name)
result_set = cursor.fetchall()
for row in result_set:
if row["Key"] == "PRI":
if pri_key is None:
pri_key = row["Field"]
pri_field_type = row["Type"]
else:
return None # more than one primary key - multipart key?
cursor.close()
isNumType = self._is_data_type_numeric(pri_field_type)
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
if isNumType:
cursor.execute(
"SELECT "
+ field_name
+ " FROM "
+ self._db
+ "."
+ table_name
+ " WHERE "
+ pri_key
+ " = "
+ pri_key_value
)
else:
cursor.execute(
"SELECT "
+ field_name
+ " FROM "
+ self._db
+ "."
+ table_name
+ " WHERE "
+ pri_key
+ " = '"
+ pri_key_value
+ "'"
)
row = cursor.fetchone()
if row is None:
cursor.close()
return None
val = util.to_str(row[field_name])
cursor.close()
return val
def _get_record_by_primary_key(self, conn, table_name, pri_key_value):
dictRet = {}
pri_key = None
pri_field_type = None
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("DESCRIBE " + table_name)
result_set = cursor.fetchall()
for row in result_set:
if row["Key"] == "PRI":
if pri_key is None:
pri_key = row["Field"]
pri_field_type = row["Type"]
else:
return None # more than one primary key - multipart key?
cursor.close()
isNumType = self._is_data_type_numeric(pri_field_type)
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
if isNumType:
cursor.execute(
"SELECT * FROM "
+ self._db
+ "."
+ table_name
+ " WHERE "
+ pri_key
+ " = "
+ pri_key_value
)
else:
cursor.execute(
"SELECT * FROM "
+ self._db
+ "."
+ table_name
+ " WHERE "
+ pri_key
+ " = '"
+ pri_key_value
+ "'"
)
row = cursor.fetchone()
if row is None:
cursor.close()
return None
for fname in row.keys():
dictRet[fname] = util.to_str(row[fname])
cursor.close()
return dictRet
def _find_primary_key(self, conn, table_name):
pri_key = None
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("DESCRIBE " + table_name)
result_set = cursor.fetchall()
for row in result_set:
fieldname = row["Field"]
keyvalue = row["Key"]
if keyvalue == "PRI":
if pri_key is None:
pri_key = fieldname
else:
return None # more than one primary key - multipart key?
cursor.close()
return pri_key
def _list_fields(self, conn, table_name, field_name):
retlist = []
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT " + field_name + " FROM " + self._db + "." + table_name)
result_set = cursor.fetchall()
for row in result_set:
retlist.append(util.to_str(row[field_name]))
cursor.close()
return retlist
def _list_tables(self, conn):
retlist = []
cursor = conn.cursor()
cursor.execute("SHOW TABLES")
result_set = cursor.fetchall()
for row in result_set:
retlist.append("%s" % (row[0]))
cursor.close()
return retlist
def get_resource_inst(self, path, environ):
"""Return info dictionary for path.
See get_resource_inst()
"""
# TODO: calling exists() makes directory browsing VERY slow.
# At least compared to PyFileServer, which simply used string
# functions to get display_type and displayTypeComment
self._count_get_resource_inst += 1
if not self.exists(path, environ):
return None
_tableName, primKey = self._split_path(path)
is_collection = primKey is None
return MySQLBrowserResource(self, path, is_collection, environ)
def exists(self, path, environ):
tableName, primKey = self._split_path(path)
if tableName is None:
return True
try:
conn = None
conn = self._init_connection()
# Check table existence:
tbllist = self._list_tables(conn)
if tableName not in tbllist:
return False
# Check table key existence:
if primKey and primKey != "_ENTIRE_CONTENTS":
return self._exists_record_by_primary_key(conn, tableName, primKey)
return True
finally:
if conn:
conn.close()
def is_collection(self, path, environ):
_tableName, primKey = self._split_path(path)
return self.exists(path, environ) and primKey is None
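# --- Illustrative wiring sketch ------------------------------------------------
# A minimal, hypothetical way to expose the provider through WsgiDAVApp; the
# connection parameters are placeholders and anonymous access is enabled only
# to keep the example short (see docs/sample_wsgidav.yaml for the supported
# configuration options).
if __name__ == "__main__":
    from cheroot import wsgi
    from wsgidav.wsgidav_app import WsgiDAVApp

    config = {
        "host": "0.0.0.0",
        "port": 8080,
        "provider_mapping": {
            "/mysql": MySQLBrowserProvider("localhost", "dav", "secret", "test_db"),
        },
        "simple_dc": {"user_mapping": {"*": True}},  # anonymous access
        "verbose": 1,
    }
    app = WsgiDAVApp(config)
    server = wsgi.Server(("0.0.0.0", 8080), app)
    server.start()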
|
|
#!/usr/bin/python3
"""Set up a GCC repository.
Download and configure a GCC repository. There are many options
available for downloading GCC -- there is subversion, GCC git (from
git://gcc.gnu.org/git/gcc.git), and github GCC
(https://github.com/gcc-mirror/gcc.git).
"""
import getopt
import os
import sys
import script_utils as u
# Echo command before executing
flag_echo = True
# Dry run mode
flag_dryrun = False
# Show command output (ex: make)
flag_show_output = False
# Select google 4_9 branch
flag_google = False
# Select vanilla 4_9 branch
flag_49_branch = False
# Select vanilla 5 branch
flag_5_branch = False
# Add sub-repos for GO-related projects
flag_dogo = False
# Create build dirs
flag_mkbuilds = True
# clang format for gofrontend
clang_format_contents = """
BasedOnStyle: Google
BreakBeforeBraces: GNU
AlwaysBreakAfterReturnType: All
AllowShortBlocksOnASingleLine: false
UseTab: ForIndentation
"""
build_flavors = {
"build-gcc": {"extra": "",
"prefix": "cross"},
"build-gcc-dbg": {"extra":
"CFLAGS=\"-O0 -g\" CXXFLAGS=\"-O0 -g\" "
"CFLAGS_FOR_BUILD=\"-O0 -g\" "
"CXXFLAGS_FOR_BUILD=\"-O0 -g\"",
"prefix": "cross"}
}
# Download prereqs
flag_prereqs = False
# Which flavor
flag_flavor = None
# Use mirrors
flag_use_mirrors = False
# Legal flavors
flavors = {"svn": 1, "git": 1, "git-svn": 1}
def docmd(cmd):
"""Execute a command."""
if flag_echo:
sys.stderr.write("executing: " + cmd + "\n")
if flag_dryrun:
return
u.docmd(cmd)
def doscmd(cmd):
"""Execute a command."""
if flag_echo:
sys.stderr.write("executing: " + cmd + "\n")
if flag_dryrun:
return
if flag_show_output:
u.docmd(cmd)
else:
u.doscmd(cmd)
def dochdir(thedir):
"""Switch to dir."""
if flag_echo:
sys.stderr.write("cd " + thedir + "\n")
if flag_dryrun:
return
try:
os.chdir(thedir)
except OSError as err:
u.error("chdir failed: %s" % err)
def setup_binutils():
"""Set up binutils."""
if os.path.exists("binutils"):
u.verbose(0, "... 'binutils' already exists, skipping clone")
return
binutils_git = "git://sourceware.org/git/binutils-gdb.git"
if flag_use_mirrors:
binutils_git = "https://github.com/bminor/binutils-gdb"
docmd("git clone --depth 1 %s binutils" % binutils_git)
def setup_prereqs(targ):
"""Set up prerequistics."""
dochdir(targ)
if os.path.exists("gmp"):
u.verbose(0, "... 'gmp' already exists, skipping clone")
dochdir("..")
return
docmd("sh contrib/download_prerequisites")
dochdir("..")
def setup_build_dirs(targ):
"""Set up build_dirs."""
root = os.getcwd()
bb = "build-binutils"
if os.path.exists(bb):
u.verbose(0, "... binutils build dir '%s' already exists, "
"skipping setup" % bb)
else:
os.mkdir("build-binutils")
dochdir("build-binutils")
u.verbose(0, "... running configure in build dir 'build-binutils'")
doscmd("../binutils/configure --prefix=%s/binutils-cross "
"--enable-gold=default --enable-plugins" % root)
dochdir("..")
for b, d in build_flavors.items():
if os.path.exists(b):
u.verbose(0, "... build dir '%s' already exists, skipping setup" % b)
continue
prefix = d["prefix"]
extra = d["extra"]
os.mkdir(b)
dochdir(b)
u.verbose(0, "... running configure in build dir '%s'" % b)
doscmd("../%s/configure --prefix=%s/%s "
"--enable-languages=c,c++,go --enable-libgo "
"--disable-bootstrap --with-ld=%s/binutils-cross/bin/ld.gold "
"%s" % (targ, root, prefix, root, extra))
dochdir("..")
def setup_go(targ):
"""Set up go-specific stuff."""
if os.path.exists("gofrontend"):
u.verbose(0, "... 'gofrontend' already exists, skipping clone")
return
docmd("git clone https://go.googlesource.com/gofrontend")
dochdir("gofrontend")
try:
with open("./.clang-format", "w") as wf:
wf.write(clang_format_contents)
wf.write("\n")
except IOError:
u.error("open/write failed for .clang-format")
dochdir("..")
dochdir(targ)
docmd("rm -rf gcc/go/gofrontend")
docmd("ln -s ../../../gofrontend/go gcc/go/gofrontend")
docmd("rm -rf libgo")
docmd("mkdir libgo")
if flag_dryrun:
u.verbose(0, "for f in GOFRONTEND/libgo/*; "
"do ln -s $f libgo/`basename $f`; done")
else:
libgo = "../gofrontend/libgo"
for item in os.listdir(libgo):
docmd("ln -s ../../gofrontend/libgo/%s libgo/%s" % (item, item))
dochdir("..")
def perform_git():
"""Create git repo."""
targ = "gcc-trunk"
if flag_flavor == "git" or flag_flavor == "git-svn":
baseurl = "git://gcc.gnu.org/git/gcc.git"
if flag_use_mirrors:
baseurl = "https://github.com/gcc-mirror/gcc"
if os.path.exists(targ):
u.verbose(0, "... path %s already exists, skipping clone" % targ)
return targ
docmd("git clone %s %s" % (baseurl, targ))
if flag_flavor == "git-svn":
url = "http://gcc.gnu.org/svn/gcc/trunk"
doscmd("git svn init %s" % url)
doscmd("git config svn-remote.svn.fetch :refs/remotes/origin/master")
doscmd("git svn rebase -l")
else:
dochdir(targ)
docmd("git checkout master")
dochdir("..")
if flag_google:
dochdir(targ)
docmd("git branch google origin/google")
sp = ".git/info/sparse-checkout"
if not flag_dryrun:
try:
with open(sp, "w") as f:
f.write("gcc-4_9/")
except IOError:
u.error("open failed for %s" % sp)
else:
u.verbose(0, "echo 'gcc-4_9/' > %s" % sp)
docmd("git checkout google")
dochdir("..")
docmd("ln -s google/gcc-4_9 gcc-4.9")
return targ
def perform_svn():
"""Create svn repo."""
targ = "gcc-trunk"
url = "svn://gcc.gnu.org/svn/gcc/trunk"
if flag_google:
targ = "gcc-google-4.9"
url = "svn://gcc.gnu.org/svn/gcc/branches/google/gcc-4_9"
elif flag_49_branch:
targ = "gcc-4.9"
url = "svn://gcc.gnu.org/svn/gcc/branches/gcc-4_9-branch"
elif flag_5_branch:
targ = "gcc-5"
url = "svn://gcc.gnu.org/svn/gcc/branches/gcc-5-branch"
docmd("svn co %s %s" % (url, targ))
def perform():
"""Guts of script."""
if flag_flavor == "svn":
perform_svn()
else:
targ = perform_git()
if flag_dogo:
setup_go("gcc-trunk")
if flag_prereqs:
setup_prereqs("gcc-trunk")
if flag_mkbuilds:
setup_build_dirs(targ)
setup_binutils()
def usage(msgarg):
"""Print usage and exit."""
me = os.path.basename(sys.argv[0])
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print("""\
usage: %s [options]
options:
-d increase debug msg verbosity level
-e echo cmds before executing
-s show output from git clone or svn checkout
-D dryrun mode (echo commands but do not execute)
-f F repository flavor F. Can be one of: svn|git|git-svn
-M use github mirrors where possible to speed things up
-G add sub-repos for go-related projects
    -P download prerequisites for gcc build (gmp, mpfr, etc)
-g select google 4_9 branch
-b select vanilla 4_9 branch
-B select vanilla 5 branch
-X do not create build dirs
    Example 1: set up gcc svn repo off google/4_9 branch
%s -f svn -g
""" % (me, me))
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_echo, flag_dryrun, flag_google, flag_flavor
global flag_show_output, flag_49_branch, flag_5_branch
global flag_dogo, flag_use_mirrors, flag_prereqs, flag_mkbuilds
try:
optlist, args = getopt.getopt(sys.argv[1:], "dbBeDPGMBXgsf:")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
for opt, arg in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-e":
flag_echo = True
elif opt == "-g":
flag_google = True
elif opt == "-G":
flag_dogo = True
elif opt == "-P":
flag_prereqs = True
elif opt == "-M":
flag_use_mirrors = True
elif opt == "-b":
flag_49_branch = True
elif opt == "-B":
flag_5_branch = True
elif opt == "-X":
flag_mkbuilds = False
elif opt == "-s":
flag_show_output = True
elif opt == "-f":
if arg not in flavors:
usage("flavor %s not in set of legal "
"flavors: %s" % (arg, " ".join(list(flavors.keys()))))
flag_flavor = arg
elif opt == "-D":
flag_dryrun = True
flag_echo = True
if args:
usage("unknown extra args")
if not flag_flavor:
usage("select a flavor")
if flag_49_branch and flag_google:
usage("pick either -b or -g (not both)")
if flag_49_branch and flag_flavor != "svn":
usage("-b option requires -f svn")
if flag_5_branch and flag_flavor != "svn":
usage("-B option requires -f svn")
if flag_5_branch and flag_49_branch:
usage("select one of -B / -b but not both")
#
#......................................................................
#
# Main portion of script
#
parse_args()
u.setdeflanglocale()
perform()
sys.exit(0)
|
|
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal volume driver."""
from oslo.config import cfg
from nova import exception
from nova import test
from nova.virt.baremetal import volume_driver
from nova.virt import fake
from nova.virt.libvirt import volume as libvirt_volume
CONF = cfg.CONF
SHOW_OUTPUT = """Target 1: iqn.2010-10.org.openstack:volume-00000001
System information:
Driver: iscsi
State: ready
I_T nexus information:
I_T nexus: 8
Initiator: iqn.1993-08.org.debian:01:7780c6a16b4
Connection: 0
IP Address: 172.17.12.10
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00010000
SCSI SN: beaf10
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00010001
SCSI SN: beaf11
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000001
Backing store flags:
Account information:
ACL information:
ALL
Target 2: iqn.2010-10.org.openstack:volume-00000002
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00020000
SCSI SN: beaf20
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00020001
SCSI SN: beaf21
Size: 2147 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000002
Backing store flags:
Account information:
ACL information:
ALL
Target 1000001: iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET f42410000
SCSI SN: beaf10000010
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET f42410001
SCSI SN: beaf10000011
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\
iqn.2010-10.org.openstack:volume-00000001-lun-1
Backing store flags:
Account information:
ACL information:
ALL
"""
def fake_show_tgtadm():
return SHOW_OUTPUT
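# The canned tgtadm "show" output above contains three LUN-1 backing stores
# and a highest target id of 1000001; the parsing tests below rely on those
# values.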
class BareMetalVolumeTestCase(test.NoDBTestCase):
def setUp(self):
super(BareMetalVolumeTestCase, self).setUp()
self.stubs.Set(volume_driver, '_show_tgtadm', fake_show_tgtadm)
def test_list_backingstore_path(self):
l = volume_driver._list_backingstore_path()
self.assertEqual(len(l), 3)
self.assertIn('/dev/nova-volumes/volume-00000001', l)
self.assertIn('/dev/nova-volumes/volume-00000002', l)
self.assertIn('/dev/disk/by-path/ip-172.17.12.10:3260-iscsi-'
'iqn.2010-10.org.openstack:volume-00000001-lun-1', l)
def test_get_next_tid(self):
tid = volume_driver._get_next_tid()
self.assertEqual(1000002, tid)
def test_find_tid_found(self):
tid = volume_driver._find_tid(
'iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc')
self.assertEqual(1000001, tid)
def test_find_tid_not_found(self):
tid = volume_driver._find_tid(
'iqn.2010-10.org.openstack.baremetal:1000002-dev.vdc')
self.assertIsNone(tid)
def test_get_iqn(self):
self.flags(iscsi_iqn_prefix='iqn.2012-12.a.b', group='baremetal')
iqn = volume_driver._get_iqn('instname', '/dev/vdx')
self.assertEqual('iqn.2012-12.a.b:instname-dev-vdx', iqn)
class FakeConf(object):
def __init__(self, source_path):
self.source_path = source_path
class BareMetalLibVirtVolumeDriverTestCase(test.TestCase):
def setUp(self):
super(BareMetalLibVirtVolumeDriverTestCase, self).setUp()
self.flags(volume_drivers=[
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'fake2=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
], group='libvirt')
self.driver = volume_driver.LibvirtVolumeDriver(fake.FakeVirtAPI())
self.disk_info = {
'dev': 'vdc',
'bus': 'baremetal',
'type': 'baremetal',
}
self.connection_info = {'driver_volume_type': 'fake'}
self.mount_point = '/dev/vdc'
self.mount_device = 'vdc'
self.source_path = '/dev/sdx'
self.instance = {'uuid': '12345678-1234-1234-1234-123467890123456',
'name': 'instance-00000001'}
self.fixed_ips = [{'address': '10.2.3.4'},
{'address': '172.16.17.18'},
]
self.iqn = 'iqn.fake:instance-00000001-dev-vdc'
self.tid = 100
def test_init_loads_volume_drivers(self):
self.assertIsInstance(self.driver.volume_drivers['fake'],
libvirt_volume.LibvirtFakeVolumeDriver)
self.assertIsInstance(self.driver.volume_drivers['fake2'],
libvirt_volume.LibvirtFakeVolumeDriver)
self.assertEqual(len(self.driver.volume_drivers), 2)
def test_fake_connect_volume(self):
"""Check connect_volume returns without exceptions."""
self.driver._connect_volume(self.connection_info,
self.disk_info)
def test_volume_driver_method_ok(self):
fake_driver = self.driver.volume_drivers['fake']
self.mox.StubOutWithMock(fake_driver, 'connect_volume')
fake_driver.connect_volume(self.connection_info, self.disk_info)
self.mox.ReplayAll()
self.driver._connect_volume(self.connection_info,
self.disk_info)
def test_volume_driver_method_driver_type_not_found(self):
self.connection_info['driver_volume_type'] = 'qwerty'
self.assertRaises(exception.VolumeDriverNotFound,
self.driver._connect_volume,
self.connection_info,
self.disk_info)
def test_publish_iscsi(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_get_next_tid')
self.mox.StubOutWithMock(volume_driver, '_create_iscsi_export_tgtadm')
self.mox.StubOutWithMock(volume_driver, '_allow_iscsi_tgtadm')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._get_next_tid().AndReturn(self.tid)
volume_driver._create_iscsi_export_tgtadm(self.source_path,
self.tid,
self.iqn)
volume_driver._allow_iscsi_tgtadm(self.tid,
self.fixed_ips[0]['address'])
volume_driver._allow_iscsi_tgtadm(self.tid,
self.fixed_ips[1]['address'])
self.mox.ReplayAll()
self.driver._publish_iscsi(self.instance,
self.mount_point,
self.fixed_ips,
self.source_path)
def test_depublish_iscsi_ok(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(self.tid)
volume_driver._delete_iscsi_export_tgtadm(self.tid)
self.mox.ReplayAll()
self.driver._depublish_iscsi(self.instance, self.mount_point)
def test_depublish_iscsi_do_nothing_if_tid_is_not_found(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(None)
self.mox.ReplayAll()
self.driver._depublish_iscsi(self.instance, self.mount_point)
def test_attach_volume(self):
self.mox.StubOutWithMock(volume_driver, '_get_fixed_ips')
self.mox.StubOutWithMock(self.driver, '_connect_volume')
self.mox.StubOutWithMock(self.driver, '_publish_iscsi')
volume_driver._get_fixed_ips(self.instance).AndReturn(self.fixed_ips)
self.driver._connect_volume(self.connection_info, self.disk_info).\
AndReturn(FakeConf(self.source_path))
self.driver._publish_iscsi(self.instance, self.mount_point,
self.fixed_ips, self.source_path)
self.mox.ReplayAll()
self.driver.attach_volume(self.connection_info,
self.instance,
self.mount_point)
def test_detach_volume(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm')
self.mox.StubOutWithMock(self.driver, '_disconnect_volume')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(self.tid)
volume_driver._delete_iscsi_export_tgtadm(self.tid)
self.driver._disconnect_volume(self.connection_info,
self.mount_device)
self.mox.ReplayAll()
self.driver.detach_volume(self.connection_info,
self.instance,
self.mount_point)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import context
from neutron_lib import fixture
from oslo_utils import uuidutils
from neutron.db.quota import api as quota_db_api
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.extensions import test_securitygroup
from neutron.tests.unit.plugins.ml2 import base as ml2_base
from neutron.tests.unit.plugins.ml2 import test_plugin
class SgTestCaseWrapper(test_securitygroup.SecurityGroupDBTestCase):
# This wrapper class enables Ml2PluginV2TestCase to correctly call the
# setup method in SecurityGroupDBTestCase which does not accept the
# service_plugins keyword parameter.
def setUp(self, plugin, **kwargs):
super(SgTestCaseWrapper, self).setUp(plugin)
class BaseTestTrackedResources(test_plugin.Ml2PluginV2TestCase,
SgTestCaseWrapper):
def setUp(self):
self.ctx = context.get_admin_context()
super(BaseTestTrackedResources, self).setUp()
self._tenant_id = uuidutils.generate_uuid()
def _test_init(self, resource_name):
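        # Seed a quota usage record for this resource and tenant so the tests
        # below have a tracked usage row to assert against.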
quota_db_api.set_quota_usage(
self.ctx, resource_name, self._tenant_id)
class BaseTestEventHandler(object):
def setUp(self):
# Prevent noise from default security group operations
def_sec_group_patch = mock.patch(
'neutron.db.securitygroups_db.SecurityGroupDbMixin.'
'_ensure_default_security_group')
def_sec_group_patch.start()
get_sec_group_port_patch = mock.patch(
'neutron.db.securitygroups_db.SecurityGroupDbMixin.'
'_get_security_groups_on_port')
get_sec_group_port_patch.start()
handler_patch = mock.patch(
'neutron.quota.resource.TrackedResource._db_event_handler')
self.handler_mock = handler_patch.start()
super(BaseTestEventHandler, self).setUp()
def _verify_event_handler_calls(self, data, expected_call_count=1):
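        # Walk the recorded handler calls from the most recent one backwards;
        # each expected item (None entries are skipped) is matched against the
        # model passed as the last positional argument of that call.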
if not hasattr(data, '__iter__') or isinstance(data, dict):
data = [data]
self.assertEqual(expected_call_count, self.handler_mock.call_count)
call_idx = -1
for item in data:
if item:
model = self.handler_mock.call_args_list[call_idx][0][-1]
self.assertEqual(model['id'], item['id'])
self.assertEqual(model['tenant_id'], item['tenant_id'])
call_idx = call_idx - 1
class TestTrackedResourcesEventHandler(BaseTestEventHandler,
BaseTestTrackedResources):
def test_create_delete_network_triggers_event(self):
self._test_init('network')
net = self._make_network('json', 'meh', True)['network']
self._verify_event_handler_calls(net)
self._delete('networks', net['id'])
self._verify_event_handler_calls(net, expected_call_count=2)
def test_create_delete_port_triggers_event(self):
self._test_init('port')
net = self._make_network('json', 'meh', True)['network']
port = self._make_port('json', net['id'])['port']
# Expecting 2 calls - 1 for the network, 1 for the port
self._verify_event_handler_calls(port, expected_call_count=2)
self._delete('ports', port['id'])
self._verify_event_handler_calls(port, expected_call_count=3)
def test_create_delete_subnet_triggers_event(self):
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
subnet = self._make_subnet('json', net, '10.0.0.1',
'10.0.0.0/24')['subnet']
# Expecting 2 calls - 1 for the network, 1 for the subnet
self._verify_event_handler_calls([subnet, net['network']],
expected_call_count=2)
self._delete('subnets', subnet['id'])
self._verify_event_handler_calls(subnet, expected_call_count=3)
def test_create_delete_network_with_subnet_triggers_event(self):
self._test_init('network')
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
subnet = self._make_subnet('json', net, '10.0.0.1',
'10.0.0.0/24')['subnet']
# Expecting 2 calls - 1 for the network, 1 for the subnet
self._verify_event_handler_calls([subnet, net['network']],
expected_call_count=2)
self._delete('networks', net['network']['id'])
# Expecting 2 more calls - 1 for the network, 1 for the subnet
self._verify_event_handler_calls([net['network'], subnet],
expected_call_count=4)
def test_create_delete_subnetpool_triggers_event(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
name='meh',
tenant_id=self._tenant_id)['subnetpool']
self._verify_event_handler_calls(pool)
self._delete('subnetpools', pool['id'])
self._verify_event_handler_calls(pool, expected_call_count=2)
def test_create_delete_securitygroup_triggers_event(self):
self._test_init('security_group')
sec_group = self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
# When a security group is created it also creates 2 rules, therefore
# there will be three calls and we need to verify the first
self._verify_event_handler_calls([None, None, sec_group],
expected_call_count=3)
self._delete('security-groups', sec_group['id'])
# When a security group is deleted it also removes the 2 rules
# generated upon creation
self._verify_event_handler_calls(sec_group, expected_call_count=6)
def test_create_delete_securitygrouprule_triggers_event(self):
self._test_init('security_group_rule')
sec_group = self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
rule_req = self._build_security_group_rule(
sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id)
sec_group_rule = self._make_security_group_rule(
'json', rule_req)['security_group_rule']
# When a security group is created it also creates 2 rules, therefore
# there will be four calls in total to the event handler
self._verify_event_handler_calls(sec_group_rule, expected_call_count=4)
self._delete('security-group-rules', sec_group_rule['id'])
self._verify_event_handler_calls(sec_group_rule, expected_call_count=5)
class TestL3ResourcesEventHandler(BaseTestEventHandler,
ml2_base.ML2TestFramework,
test_l3.L3NatTestCaseMixin):
def setUp(self):
super(TestL3ResourcesEventHandler, self).setUp()
self.useFixture(fixture.APIDefinitionFixture())
ext_mgr = test_l3.L3TestExtensionManager()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def test_create_delete_floating_ip_triggers_event(self):
net = self._make_network('json', 'meh', True)
subnet = self._make_subnet('json', net, '14.0.0.1',
'14.0.0.0/24')['subnet']
self._set_net_external(subnet['network_id'])
floatingip = self._make_floatingip('json', subnet['network_id'])
internal_port = self._show(
'ports', floatingip['floatingip']['port_id'])['ports'][0]
        # When a floating IP is created it also creates a port, therefore
        # there will be four calls in total to the event handler
self._verify_event_handler_calls(floatingip['floatingip'],
expected_call_count=4)
self._delete('floatingips', floatingip['floatingip']['id'])
# Expecting 2 more calls - 1 for the port, 1 for the floatingip
self._verify_event_handler_calls(
[internal_port, floatingip['floatingip']], expected_call_count=6)
class TestTrackedResources(BaseTestTrackedResources):
def _verify_dirty_bit(self, resource_name, expected_value=True):
usage = quota_db_api.get_quota_usage_by_resource_and_tenant(
self.ctx, resource_name, self._tenant_id)
self.assertEqual(expected_value, usage.dirty)
def test_create_delete_network_marks_dirty(self):
self._test_init('network')
net = self._make_network('json', 'meh', True)['network']
self._verify_dirty_bit('network')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'network', self._tenant_id, dirty=False)
self._delete('networks', net['id'])
self._verify_dirty_bit('network')
def test_list_networks_clears_dirty(self):
self._test_init('network')
net = self._make_network('json', 'meh', True)['network']
self.ctx.tenant_id = net['tenant_id']
self._list('networks', neutron_context=self.ctx)
self._verify_dirty_bit('network', expected_value=False)
def test_create_delete_port_marks_dirty(self):
self._test_init('port')
net = self._make_network('json', 'meh', True)['network']
port = self._make_port('json', net['id'])['port']
self._verify_dirty_bit('port')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'port', self._tenant_id, dirty=False)
self._delete('ports', port['id'])
self._verify_dirty_bit('port')
def test_list_ports_clears_dirty(self):
self._test_init('port')
net = self._make_network('json', 'meh', True)['network']
port = self._make_port('json', net['id'])['port']
self.ctx.tenant_id = port['tenant_id']
self._list('ports', neutron_context=self.ctx)
self._verify_dirty_bit('port', expected_value=False)
def test_create_delete_subnet_marks_dirty(self):
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
subnet = self._make_subnet('json', net, '10.0.0.1',
'10.0.0.0/24')['subnet']
self._verify_dirty_bit('subnet')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'subnet', self._tenant_id, dirty=False)
self._delete('subnets', subnet['id'])
self._verify_dirty_bit('subnet')
def test_create_delete_network_with_subnet_marks_dirty(self):
self._test_init('network')
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
self._make_subnet('json', net, '10.0.0.1',
'10.0.0.0/24')['subnet']
self._verify_dirty_bit('subnet')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'subnet', self._tenant_id, dirty=False)
self._delete('networks', net['network']['id'])
self._verify_dirty_bit('network')
self._verify_dirty_bit('subnet')
def test_list_subnets_clears_dirty(self):
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
subnet = self._make_subnet('json', net, '10.0.0.1',
'10.0.0.0/24')['subnet']
self.ctx.tenant_id = subnet['tenant_id']
self._list('subnets', neutron_context=self.ctx)
self._verify_dirty_bit('subnet', expected_value=False)
def test_create_delete_subnetpool_marks_dirty(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
name='meh',
tenant_id=self._tenant_id)['subnetpool']
self._verify_dirty_bit('subnetpool')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'subnetpool', self._tenant_id, dirty=False)
self._delete('subnetpools', pool['id'])
self._verify_dirty_bit('subnetpool')
def test_list_subnetpools_clears_dirty(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
name='meh',
tenant_id=self._tenant_id)['subnetpool']
self.ctx.tenant_id = pool['tenant_id']
self._list('subnetpools', neutron_context=self.ctx)
self._verify_dirty_bit('subnetpool', expected_value=False)
def test_create_delete_securitygroup_marks_dirty(self):
self._test_init('security_group')
sec_group = self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
self._verify_dirty_bit('security_group')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'security_group', self._tenant_id, dirty=False)
self._delete('security-groups', sec_group['id'])
self._verify_dirty_bit('security_group')
def test_list_securitygroups_clears_dirty(self):
self._test_init('security_group')
self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
self.ctx.tenant_id = self._tenant_id
self._list('security-groups', neutron_context=self.ctx)
self._verify_dirty_bit('security_group', expected_value=False)
def test_create_delete_securitygrouprule_marks_dirty(self):
self._test_init('security_group_rule')
sec_group = self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
rule_req = self._build_security_group_rule(
sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id)
sec_group_rule = self._make_security_group_rule(
'json', rule_req)['security_group_rule']
self._verify_dirty_bit('security_group_rule')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
self.ctx, 'security_group_rule', self._tenant_id, dirty=False)
self._delete('security-group-rules', sec_group_rule['id'])
self._verify_dirty_bit('security_group_rule')
def test_list_securitygrouprules_clears_dirty(self):
self._test_init('security_group_rule')
self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group']
# As the security group create operation also creates 2 security group
# rules there is no need to explicitly create any rule
self.ctx.tenant_id = self._tenant_id
self._list('security-group-rules', neutron_context=self.ctx)
self._verify_dirty_bit('security_group_rule', expected_value=False)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# NOTE(dtroyer): This file is deprecated in Jun 2016, remove after 4.x release
# or Jun 2017.
import logging
import mock
from osc_lib import logs
from openstackclient.tests.unit import utils
class TestContext(utils.TestCase):
def test_log_level_from_options(self):
opts = mock.Mock()
opts.verbose_level = 0
self.assertEqual(logging.ERROR, logs.log_level_from_options(opts))
opts.verbose_level = 1
self.assertEqual(logging.WARNING, logs.log_level_from_options(opts))
opts.verbose_level = 2
self.assertEqual(logging.INFO, logs.log_level_from_options(opts))
opts.verbose_level = 3
self.assertEqual(logging.DEBUG, logs.log_level_from_options(opts))
def test_log_level_from_config(self):
cfg = {'verbose_level': 0}
self.assertEqual(logging.ERROR, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1}
self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 2}
self.assertEqual(logging.INFO, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 3}
self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'critical'}
self.assertEqual(logging.CRITICAL, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'error'}
self.assertEqual(logging.ERROR, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'warning'}
self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'info'}
self.assertEqual(logging.INFO, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'debug'}
self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'bogus'}
self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg))
cfg = {'verbose_level': 1, 'log_level': 'info', 'debug': True}
self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg))
@mock.patch('warnings.simplefilter')
def test_set_warning_filter(self, simplefilter):
logs.set_warning_filter(logging.ERROR)
simplefilter.assert_called_with("ignore")
logs.set_warning_filter(logging.WARNING)
simplefilter.assert_called_with("ignore")
logs.set_warning_filter(logging.INFO)
simplefilter.assert_called_with("once")
class TestFileFormatter(utils.TestCase):
def test_nothing(self):
formatter = logs._FileFormatter()
self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s %(message)s'), formatter.fmt)
def test_options(self):
class Opts(object):
cloud = 'cloudy'
os_project_name = 'projecty'
username = 'usernamey'
options = Opts()
formatter = logs._FileFormatter(options=options)
self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [cloudy usernamey projecty] %(message)s'),
formatter.fmt)
def test_config(self):
config = mock.Mock()
config.config = {'cloud': 'cloudy'}
config.auth = {'project_name': 'projecty', 'username': 'usernamey'}
formatter = logs._FileFormatter(config=config)
self.assertEqual(('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [cloudy usernamey projecty] %(message)s'),
formatter.fmt)
class TestLogConfigurator(utils.TestCase):
def setUp(self):
super(TestLogConfigurator, self).setUp()
self.options = mock.Mock()
self.options.verbose_level = 1
self.options.log_file = None
self.options.debug = False
self.root_logger = mock.Mock()
self.root_logger.setLevel = mock.Mock()
self.root_logger.addHandler = mock.Mock()
self.requests_log = mock.Mock()
self.requests_log.setLevel = mock.Mock()
self.cliff_log = mock.Mock()
self.cliff_log.setLevel = mock.Mock()
self.stevedore_log = mock.Mock()
self.stevedore_log.setLevel = mock.Mock()
self.iso8601_log = mock.Mock()
self.iso8601_log.setLevel = mock.Mock()
self.loggers = [
self.root_logger,
self.requests_log,
self.cliff_log,
self.stevedore_log,
self.iso8601_log]
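        # The tests assume LogConfigurator requests its loggers in this order
        # (root, requests, cliff, stevedore, iso8601); mock's side_effect hands
        # them out sequentially.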
@mock.patch('logging.StreamHandler')
@mock.patch('logging.getLogger')
@mock.patch('osc_lib.logs.set_warning_filter')
def test_init(self, warning_filter, getLogger, handle):
getLogger.side_effect = self.loggers
console_logger = mock.Mock()
console_logger.setFormatter = mock.Mock()
console_logger.setLevel = mock.Mock()
handle.return_value = console_logger
configurator = logs.LogConfigurator(self.options)
getLogger.assert_called_with('iso8601') # last call
warning_filter.assert_called_with(logging.WARNING)
self.root_logger.setLevel.assert_called_with(logging.DEBUG)
self.root_logger.addHandler.assert_called_with(console_logger)
self.requests_log.setLevel.assert_called_with(logging.ERROR)
self.cliff_log.setLevel.assert_called_with(logging.ERROR)
self.stevedore_log.setLevel.assert_called_with(logging.ERROR)
self.iso8601_log.setLevel.assert_called_with(logging.ERROR)
self.assertFalse(configurator.dump_trace)
@mock.patch('logging.getLogger')
@mock.patch('osc_lib.logs.set_warning_filter')
    def test_init_debug(self, warning_filter, getLogger):
getLogger.side_effect = self.loggers
self.options.debug = True
configurator = logs.LogConfigurator(self.options)
warning_filter.assert_called_with(logging.DEBUG)
self.requests_log.setLevel.assert_called_with(logging.DEBUG)
self.assertTrue(configurator.dump_trace)
@mock.patch('logging.FileHandler')
@mock.patch('logging.getLogger')
@mock.patch('osc_lib.logs.set_warning_filter')
@mock.patch('osc_lib.logs._FileFormatter')
def test_init_log_file(self, formatter, warning_filter, getLogger, handle):
getLogger.side_effect = self.loggers
self.options.log_file = '/tmp/log_file'
file_logger = mock.Mock()
file_logger.setFormatter = mock.Mock()
file_logger.setLevel = mock.Mock()
handle.return_value = file_logger
mock_formatter = mock.Mock()
formatter.return_value = mock_formatter
logs.LogConfigurator(self.options)
handle.assert_called_with(filename=self.options.log_file)
self.root_logger.addHandler.assert_called_with(file_logger)
file_logger.setFormatter.assert_called_with(mock_formatter)
file_logger.setLevel.assert_called_with(logging.WARNING)
@mock.patch('logging.FileHandler')
@mock.patch('logging.getLogger')
@mock.patch('osc_lib.logs.set_warning_filter')
@mock.patch('osc_lib.logs._FileFormatter')
def test_configure(self, formatter, warning_filter, getLogger, handle):
getLogger.side_effect = self.loggers
configurator = logs.LogConfigurator(self.options)
cloud_config = mock.Mock()
config_log = '/tmp/config_log'
cloud_config.config = {
'log_file': config_log,
'verbose_level': 1,
'log_level': 'info'}
file_logger = mock.Mock()
file_logger.setFormatter = mock.Mock()
file_logger.setLevel = mock.Mock()
handle.return_value = file_logger
mock_formatter = mock.Mock()
formatter.return_value = mock_formatter
configurator.configure(cloud_config)
warning_filter.assert_called_with(logging.INFO)
handle.assert_called_with(filename=config_log)
self.root_logger.addHandler.assert_called_with(file_logger)
file_logger.setFormatter.assert_called_with(mock_formatter)
file_logger.setLevel.assert_called_with(logging.INFO)
self.assertFalse(configurator.dump_trace)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import unittest
from io import StringIO
from unittest import mock
import paramiko
import pysftp
from parameterized import parameterized
from airflow.models import Connection
from airflow.providers.sftp.hooks.sftp import SFTPHook
from airflow.utils.session import provide_session
def generate_host_key(pkey: paramiko.PKey):
key_fh = StringIO()
pkey.write_private_key(key_fh)
key_fh.seek(0)
key_obj = paramiko.RSAKey(file_obj=key_fh)
return key_obj.get_base64()
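# generate_host_key() round-trips the generated key through its private-key
# PEM and returns the base64-encoded public key, the form the hook expects in
# the 'host_key' connection extra (see test_host_key below).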
TMP_PATH = '/tmp'
TMP_DIR_FOR_TESTS = 'tests_sftp_hook_dir'
SUB_DIR = "sub_dir"
TMP_FILE_FOR_TESTS = 'test_file.txt'
SFTP_CONNECTION_USER = "root"
TEST_PKEY = paramiko.RSAKey.generate(4096)
TEST_HOST_KEY = generate_host_key(pkey=TEST_PKEY)
class TestSFTPHook(unittest.TestCase):
@provide_session
def update_connection(self, login, session=None):
connection = session.query(Connection).filter(Connection.conn_id == "sftp_default").first()
old_login = connection.login
connection.login = login
session.commit()
return old_login
def setUp(self):
self.old_login = self.update_connection(SFTP_CONNECTION_USER)
self.hook = SFTPHook()
os.makedirs(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, SUB_DIR))
with open(os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS), 'a') as file:
file.write('Test file')
with open(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, SUB_DIR, TMP_FILE_FOR_TESTS), 'a') as file:
file.write('Test file')
def test_get_conn(self):
output = self.hook.get_conn()
assert isinstance(output, pysftp.Connection)
def test_close_conn(self):
self.hook.conn = self.hook.get_conn()
assert self.hook.conn is not None
self.hook.close_conn()
assert self.hook.conn is None
def test_describe_directory(self):
output = self.hook.describe_directory(TMP_PATH)
assert TMP_DIR_FOR_TESTS in output
def test_list_directory(self):
output = self.hook.list_directory(path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert output == [SUB_DIR]
def test_create_and_delete_directory(self):
new_dir_name = 'new_dir'
self.hook.create_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, new_dir_name))
output = self.hook.describe_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert new_dir_name in output
self.hook.delete_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, new_dir_name))
output = self.hook.describe_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert new_dir_name not in output
def test_create_and_delete_directories(self):
base_dir = "base_dir"
sub_dir = "sub_dir"
new_dir_path = os.path.join(base_dir, sub_dir)
self.hook.create_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, new_dir_path))
output = self.hook.describe_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert base_dir in output
output = self.hook.describe_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, base_dir))
assert sub_dir in output
self.hook.delete_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, new_dir_path))
self.hook.delete_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, base_dir))
output = self.hook.describe_directory(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert new_dir_path not in output
assert base_dir not in output
def test_store_retrieve_and_delete_file(self):
self.hook.store_file(
remote_full_path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS),
local_full_path=os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS),
)
output = self.hook.list_directory(path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert output == [SUB_DIR, TMP_FILE_FOR_TESTS]
retrieved_file_name = 'retrieved.txt'
self.hook.retrieve_file(
remote_full_path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS),
local_full_path=os.path.join(TMP_PATH, retrieved_file_name),
)
assert retrieved_file_name in os.listdir(TMP_PATH)
os.remove(os.path.join(TMP_PATH, retrieved_file_name))
self.hook.delete_file(path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS))
output = self.hook.list_directory(path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
assert output == [SUB_DIR]
def test_get_mod_time(self):
self.hook.store_file(
remote_full_path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS),
local_full_path=os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS),
)
output = self.hook.get_mod_time(path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS))
assert len(output) == 14
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_no_host_key_check_default(self, get_connection):
connection = Connection(login='login', host='host')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.no_host_key_check is False
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_no_host_key_check_enabled(self, get_connection):
connection = Connection(login='login', host='host', extra='{"no_host_key_check": true}')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.no_host_key_check is True
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_no_host_key_check_disabled(self, get_connection):
connection = Connection(login='login', host='host', extra='{"no_host_key_check": false}')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.no_host_key_check is False
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_ciphers(self, get_connection):
connection = Connection(login='login', host='host', extra='{"ciphers": ["A", "B", "C"]}')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.ciphers == ["A", "B", "C"]
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_no_host_key_check_disabled_for_all_but_true(self, get_connection):
connection = Connection(login='login', host='host', extra='{"no_host_key_check": "foo"}')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.no_host_key_check is False
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_no_host_key_check_ignore(self, get_connection):
connection = Connection(login='login', host='host', extra='{"ignore_hostkey_verification": true}')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.no_host_key_check is True
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_no_host_key_check_no_ignore(self, get_connection):
connection = Connection(login='login', host='host', extra='{"ignore_hostkey_verification": false}')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.no_host_key_check is False
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_host_key_default(self, get_connection):
connection = Connection(login='login', host='host')
get_connection.return_value = connection
hook = SFTPHook()
assert hook.host_key is None
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_host_key(self, get_connection):
connection = Connection(
login='login',
host='host',
extra=json.dumps({"host_key": TEST_HOST_KEY, "no_host_key_check": False}),
)
get_connection.return_value = connection
hook = SFTPHook()
assert hook.host_key.get_base64() == TEST_HOST_KEY
@mock.patch('airflow.providers.sftp.hooks.sftp.SFTPHook.get_connection')
def test_host_key_with_no_host_key_check(self, get_connection):
connection = Connection(login='login', host='host', extra=json.dumps({"host_key": TEST_HOST_KEY}))
get_connection.return_value = connection
hook = SFTPHook()
assert hook.host_key is None
@parameterized.expand(
[
(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS), True),
(os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS), True),
(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS + "abc"), False),
(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, "abc"), False),
]
)
def test_path_exists(self, path, exists):
result = self.hook.path_exists(path)
assert result == exists
@parameterized.expand(
[
("test/path/file.bin", None, None, True),
("test/path/file.bin", "test", None, True),
("test/path/file.bin", "test/", None, True),
("test/path/file.bin", None, "bin", True),
("test/path/file.bin", "test", "bin", True),
("test/path/file.bin", "test/", "file.bin", True),
("test/path/file.bin", None, "file.bin", True),
("test/path/file.bin", "diff", None, False),
("test/path/file.bin", "test//", None, False),
("test/path/file.bin", None, ".txt", False),
("test/path/file.bin", "diff", ".txt", False),
]
)
def test_path_match(self, path, prefix, delimiter, match):
result = self.hook._is_path_match(path=path, prefix=prefix, delimiter=delimiter)
assert result == match
def test_get_tree_map(self):
tree_map = self.hook.get_tree_map(path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
files, dirs, unknowns = tree_map
assert files == [os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, SUB_DIR, TMP_FILE_FOR_TESTS)]
assert dirs == [os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS, SUB_DIR)]
assert unknowns == []
def tearDown(self):
shutil.rmtree(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS))
os.remove(os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS))
self.update_connection(self.old_login)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import template, redirect, abort
import utils
import time
from datetime import datetime
import json
import LogicaNegocios
def GET(**params):
return template('entrada_materiales_index.html')
def consultar_asociados_GET(**params):
db = utils.ConnectDB()
    # look up partners (asociados) matching the query string
db.execute("""SELECT idasoc as id, descripcion as value
FROM asociados
WHERE idasoc LIKE %s
OR descripcion LIKE %s""",
(
'%'+params.get('query','')+'%',
'%'+params.get('query','')+'%'
)
)
return json.dumps(db.fetchall())
def consultar_materiales_GET(**params):
db = utils.ConnectDB()
    # look up materials (grupo1 <> 'Insu') matching the query string
db.execute("""SELECT idmat as id, descripcion as value
FROM materiales
WHERE ( idmat LIKE %s
OR descripcion LIKE %s )
and grupo1 <> 'Insu'""",
(
'%'+params.get('query','')+'%',
'%'+params.get('query','')+'%'
)
)
return json.dumps(db.fetchall())
def consultar_insumos_GET(**params):
db = utils.ConnectDB()
    # look up supplies (grupo1 = 'Insu') matching the query string
db.execute("""SELECT idmat as id, descripcion as value
FROM materiales
WHERE ( idmat LIKE %s
OR descripcion LIKE %s )
AND grupo1 = 'Insu'""",
(
'%'+params.get('query','')+'%',
'%'+params.get('query','')+'%'
)
)
return json.dumps(db.fetchall())
def crear_entrada_material_GET(**params):
    # Tell the form to INSERT the data when the save button is pressed
now_date, now_time = str(datetime.now(utils.TZColombia()).strftime('%Y-%m-%d %H:%M:%S')).split(' ')
rowdata = {
'method':'POST',
'fecha_recepcion':now_date,
'hora_recepcion':now_time,
}
    # initialize the global pallet-weight list to an empty list
utils.storeGlobalValue('pesos_docmat',[])
return template('entrada_material.html',**rowdata)
def editar_entrada_material_GET(**params):
table = utils.RenderTable(\
"""SELECT a.docmat,
a.fecha,
a.idasoc,
b.descripcion as idasoc_txt,
c.idmat,
c.descripcion as idmat_txt,
a.autor
FROM documentos_material as a
LEFT OUTER JOIN asociados as b
ON a.idasoc = b.idasoc
LEFT OUTER JOIN materiales as c
ON a.idmat = c.idmat
WHERE tipo_documento = 'E' or tipo_documento = 'I'
ORDER BY docmat""",\
(),\
u"""
<tr>
<th><input type="checkbox"></th>
<th>#Documento</th>
<th>Fecha</th>
<th colspan="2">Proveeedor</th>
<th colspan="2">Material</th>
<th>Autor</th>
</tr>
""",\
u"""
<tr>
<td scope="row"><input type="checkbox" data-docmat="{docmat}"></td>
<td><a href="/web/entrada_materiales/entrada_material?id={docmat}">{docmat}</a></td>
<td>{fecha}</td>
<td>{idasoc}</td>
<td>{idasoc_txt}</td>
<td>{idmat}</td>
<td>{idmat_txt}</td>
<td>{autor}</td>
</tr>""",\
'table table-bordered',\
10,\
int(params.get('table_table_table-bordered_page','1'))\
)
return template('listado_entrada_material.html',table=table)
def entrada_material_json_GET(**params):
db = utils.ConnectDB()
    # fetch the material entry document data
db.execute("""SELECT a.*,b.descripcion as idasoc_txt, c.descripcion as idmat_txt FROM documentos_material as a
LEFT OUTER JOIN asociados as b
ON b.idasoc = a.idasoc
LEFT OUTER JOIN materiales as c
ON c.idmat = a.idmat
WHERE docmat = %s""",(params.get('id',''),))
rowdata = db.fetchone()
if rowdata:
rowdata['fecha'] = rowdata['fecha'].strftime('%Y-%m-%d %H-%M-%S')
rowdata['cantidad_kg'] = str(rowdata['cantidad_kg'])
rowdata['cantidad_kg_factura'] = str(rowdata['cantidad_kg_factura'])
rowdata['precio_factura'] = str(rowdata['precio_factura'])
return json.dumps(rowdata)
def entrada_material_GET(**params):
db = utils.ConnectDB()
    # fetch the material entry document data
db.execute("""SELECT a.*,b.descripcion as idasoc_txt, c.descripcion as idmat_txt FROM documentos_material as a
LEFT OUTER JOIN asociados as b
ON b.idasoc = a.idasoc
LEFT OUTER JOIN materiales as c
ON c.idmat = a.idmat
WHERE docmat = %s""",(params.get('id',''),))
rowdata = db.fetchone()
    # fetch the pallet weight records for this document
db.execute("SELECT * FROM documentos_material_pesos WHERE docmat = %s", (params.get('id',''),))
weighdata = db.fetchall()
utils.storeGlobalValue('pesos_docmat',[{'estiba':row['estiba'], 'bruto':float(row['bruto']), 'tara':float(row['tara'])} for row in weighdata])
    # split the timestamp into separate date and time fields
rowdata['fecha_recepcion'], rowdata['hora_recepcion'] = str(rowdata['fecha']).split(' ')
    # attach the pallet weights as a child node so the HTML form can render them
rowdata['weighdata'] = weighdata
    # Tell the form to UPDATE rather than INSERT (the record already exists)
    # when the user presses the save button
rowdata['method'] = 'PUT'
return template('entrada_material.html',**rowdata)
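# The pallet weights entered in the form are accumulated in the global
# 'pesos_docmat' list (via utils.storeGlobalValue/readGlobalValue) by the two
# handlers below and persisted when the document is saved.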
def guardar_peso_POST(**params):
pesos = utils.readGlobalValue('pesos_docmat')
if pesos:
pesos.append({'estiba': params.get('estiba',''), 'bruto': float(params.get('bruto','')),'tara': float(params.get('tara'))})
else:
pesos = [{'estiba': params.get('estiba',''), 'bruto': float(params.get('bruto','')),'tara': float(params.get('tara'))}]
utils.storeGlobalValue('pesos_docmat',pesos)
return str(float(params.get('bruto','')) - float(params.get('tara')))
def remover_peso_POST(**params):
pesos = utils.readGlobalValue('pesos_docmat')
para_borrar = json.loads(params.get('listado','[]'))
if pesos:
for item in para_borrar:
pesos = [pitem for pitem in pesos if pitem['estiba'] != item]
utils.storeGlobalValue('pesos_docmat',pesos)
return '""'
def entrada_material_POST(**params):
db = utils.ConnectDB()
pesos = utils.readGlobalValue('pesos_docmat')
if not pesos:
pesos = []
try:
otros_pesos_tara = float(params.get('otros_pesos_tara',''))
    except Exception:
otros_pesos_tara = 0.0
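    # Net received weight: total of (gross - tare) over all pallets, minus any
    # extra packaging tare entered in the form.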
cantidad_kg = sum([row['bruto'] - row['tara'] for row in pesos]) - otros_pesos_tara
db.execute("""INSERT INTO documentos_material(
fecha,
idasoc,
idmat,
cantidad_kg,
lote_material_proveedor,
factura_proveedor,
tipo_empaque,
dotacion_personal,
elementos_extranos,
est_unid_empaque,
libre_infest_plag,
insp_mat_prim,
insp_mat_color,
insp_mat_olor,
insp_mat_sabor,
insp_mat_elem_extra,
insp_mat_madurez,
insp_epm_color,
insp_epm_olor,
insp_epm_selle,
insp_epm_calibre,
insp_epm_elem_extra,
autor,
factura_recibida,
producto_aprobado,
cantidad_kg_factura,
precio_factura,
observaciones,
acciones_correctivas,
orden,
tipo_documento,
unidad,
sub_tipo_documento,
idalm,
otros_pesos_tara,
estado_vehiculo_piso,
estado_vehiculo_paredes,
estado_vehiculo_techo
)
VALUES(
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",(
params.get('fecha_recepcion','1980-01-01') + ' ' + params.get('hora_recepcion','00:00:00'),
params.get('proveedor',''),
params.get('producto',''),
cantidad_kg,
params.get('lote_material_proveedor',''),
params.get('factura_proveedor',''),
params.get('tipo_empaque',''),
params.get('dotacion_personal',''),
params.get('elementos_extranos',''),
params.get('est_unid_empaque',''),
params.get('libre_infest_plag',''),
params.get('insp_mat_prim',''),
params.get('insp_mat_color',''),
params.get('insp_mat_olor',''),
params.get('insp_mat_sabor',''),
params.get('insp_mat_elem_extra',''),
params.get('insp_mat_madurez',''),
params.get('insp_epm_color',''),
params.get('insp_epm_olor',''),
params.get('insp_epm_selle',''),
params.get('insp_epm_calibre',''),
params.get('insp_epm_elem_extra',''),
utils.get_actual_user(),
params.get('factura_recibida',''),
params.get('producto_aprobado',''),
params.get('cantidad_kg_factura',''),
0.0 if params.get('precio_factura','0.0') == '' else float(params.get('precio_factura','0.0')),
params.get('observaciones',''),
params.get('acciones_correctivas',''),
params.get('orden',''),
params.get('tipo_documento',''),
params.get('unidad',''),
params.get('sub_tipo_documento',''),
params.get('idalm',''),
params.get('otros_pesos_tara',''),
params.get('estado_vehiculo_piso',''),
params.get('estado_vehiculo_paredes',''),
params.get('estado_vehiculo_techo',''),
)
)
db.execute("""SELECT LAST_INSERT_ID() as last
FROM documentos_material
WHERE docmat = LAST_INSERT_ID()""",())
rowdata = db.fetchone()
docmat = rowdata['last']
for item in pesos:
db.execute('INSERT INTO documentos_material_pesos (docmat, estiba, bruto, tara) VALUES (%s,%s,%s,%s)',
(docmat,item['estiba'],item['bruto'],item['tara']))
db.execute('COMMIT WORK')
try:
if params.get('tipo_documento','') != 'I':
LogicaNegocios.modificar_inventario_material(params.get('idalm',''), params.get('producto',''), cantidad_kg, 'kg')
else:
LogicaNegocios.modificar_inventario_material(params.get('idalm',''), params.get('producto',''), params.get('cantidad_kg_factura',''), 'kg')
    except Exception as e:
abort(422,str(e))
return str(rowdata['last'])
def entrada_material_PUT(**params):
db = utils.ConnectDB()
pesos = utils.readGlobalValue('pesos_docmat')
if not pesos:
pesos = []
    # Treat an empty 'otros_pesos_tara' field as 0, matching the POST handler.
    cantidad_kg = sum([row['bruto'] - row['tara'] for row in pesos]) - float(params.get('otros_pesos_tara','') or 0.0)
db.execute('SELECT cantidad_kg, cantidad_kg_factura FROM documentos_material WHERE docmat = %s',
(params.get('docmat',''),)
)
cantidad_kg_antes, cantidad_kg_factura = db.fetchone().values()
try:
if params.get('tipo_documento','') != 'I':
LogicaNegocios.modificar_inventario_material(params.get('idalm',''), params.get('producto',''), cantidad_kg_antes * -1, 'kg')
else:
LogicaNegocios.modificar_inventario_material(params.get('idalm',''), params.get('producto',''), cantidad_kg_factura * -1, 'kg')
    except Exception as e:
abort(422, str(e))
db.execute("""REPLACE INTO documentos_material(
docmat,
fecha,
idasoc,
idmat,
cantidad_kg,
lote_material_proveedor,
factura_proveedor,
tipo_empaque,
dotacion_personal,
estado_vehiculo,
elementos_extranos,
est_unid_empaque,
libre_infest_plag,
insp_mat_prim,
insp_mat_color,
insp_mat_olor,
insp_mat_sabor,
insp_mat_elem_extra,
insp_mat_madurez,
insp_epm_color,
insp_epm_olor,
insp_epm_selle,
insp_epm_calibre,
insp_epm_elem_extra,
autor,
factura_recibida,
producto_aprobado,
cantidad_kg_factura,
precio_factura,
observaciones,
acciones_correctivas,
orden,
tipo_documento,
unidad,
sub_tipo_documento,
idalm,
otros_pesos_tara
)
VALUES(
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
(params.get('docmat',''),
params.get('fecha_recepcion','1980-01-01') + ' ' + params.get('hora_recepcion','00:00:00'),
params.get('proveedor',''),
params.get('producto',''),
cantidad_kg,
params.get('lote_material_proveedor',''),
params.get('factura_proveedor',''),
params.get('tipo_empaque',''),
params.get('dotacion_personal',''),
params.get('estado_vehiculo',''),
params.get('elementos_extranos',''),
params.get('est_unid_empaque',''),
params.get('libre_infest_plag',''),
params.get('insp_mat_prim',''),
params.get('insp_mat_color',''),
params.get('insp_mat_olor',''),
params.get('insp_mat_sabor',''),
params.get('insp_mat_elem_extra',''),
params.get('insp_mat_madurez',''),
params.get('insp_epm_color',''),
params.get('insp_epm_olor',''),
params.get('insp_epm_selle',''),
params.get('insp_epm_calibre',''),
params.get('insp_epm_elem_extra',''),
utils.get_actual_user(),
params.get('factura_recibida',''),
params.get('producto_aprobado',''),
params.get('cantidad_kg_factura',''),
params.get('precio_factura',''),
params.get('observaciones',''),
params.get('acciones_correctivas',''),
params.get('orden',''),
params.get('tipo_documento',''),
params.get('unidad',''),
params.get('sub_tipo_documento',''),
params.get('idalm',''),
params.get('otros_pesos_tara',''),
)
)
db.execute("""DELETE FROM documentos_material_pesos WHERE docmat = %s""",(params.get('docmat',''),))
for item in pesos:
db.execute('INSERT INTO documentos_material_pesos (docmat, estiba, bruto, tara) VALUES (%s,%s,%s,%s)',
(params.get('docmat',''),item['estiba'],item['bruto'],item['tara']))
db.execute('COMMIT WORK')
if params.get('tipo_documento','') != 'I':
LogicaNegocios.modificar_inventario_material(params.get('idalm',''), params.get('producto',''), cantidad_kg, 'kg')
else:
LogicaNegocios.modificar_inventario_material(params.get('idalm',''), params.get('producto',''), cantidad_kg_factura, 'kg')
return str(params.get('docmat',''))
|
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from lxml import etree
from webob import exc
from nova.api.openstack.compute.contrib import instance_actions
from nova.compute import api as compute_api
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests import fake_server_actions
FAKE_UUID = fake_server_actions.FAKE_UUID
FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
def format_action(action):
'''Remove keys that aren't serialized.'''
to_delete = ('id', 'finish_time', 'created_at', 'updated_at', 'deleted_at',
'deleted')
for key in to_delete:
if key in action:
del(action[key])
if 'start_time' in action:
# NOTE(danms): Without WSGI above us, these will be just stringified
action['start_time'] = str(action['start_time'].replace(tzinfo=None))
for event in action.get('events', []):
format_event(event)
return action
def format_event(event):
'''Remove keys that aren't serialized.'''
to_delete = ('id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
'action_id')
for key in to_delete:
if key in event:
del(event[key])
if 'start_time' in event:
# NOTE(danms): Without WSGI above us, these will be just stringified
event['start_time'] = str(event['start_time'].replace(tzinfo=None))
if 'finish_time' in event:
# NOTE(danms): Without WSGI above us, these will be just stringified
event['finish_time'] = str(event['finish_time'].replace(tzinfo=None))
return event
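# Illustrative sketch (the action dict below is an assumed example, not taken
# from the fixtures): format_action() strips the keys that are never
# serialized, so {'id': 1, 'action': 'reboot', 'deleted': False,
# 'request_id': 'req-1'} would come back as
# {'action': 'reboot', 'request_id': 'req-1'}; nested events are normalized
# the same way by format_event().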
class InstanceActionsPolicyTest(test.NoDBTestCase):
def setUp(self):
super(InstanceActionsPolicyTest, self).setUp()
self.controller = instance_actions.InstanceActionsController()
def test_list_actions_restricted_by_project(self):
rules = {'compute:get': common_policy.parse_rule(''),
'compute_extension:instance_actions':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
use_slave=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
self.assertRaises(exception.Forbidden, self.controller.index, req,
str(uuid.uuid4()))
def test_get_action_restricted_by_project(self):
rules = {'compute:get': common_policy.parse_rule(''),
'compute_extension:instance_actions':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
use_slave=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1')
self.assertRaises(exception.Forbidden, self.controller.show, req,
str(uuid.uuid4()), '1')
class InstanceActionsTest(test.NoDBTestCase):
def setUp(self):
super(InstanceActionsTest, self).setUp()
self.controller = instance_actions.InstanceActionsController()
self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
return {'uuid': instance_uuid}
def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
return {'name': 'fake', 'project_id': context.project_id}
self.stubs.Set(compute_api.API, 'get', fake_get)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
def test_list_actions(self):
def fake_get_actions(context, uuid):
actions = []
for act in self.fake_actions[uuid].itervalues():
action = models.InstanceAction()
action.update(act)
actions.append(action)
return actions
self.stubs.Set(db, 'actions_get', fake_get_actions)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
res_dict = self.controller.index(req, FAKE_UUID)
for res in res_dict['instanceActions']:
fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
self.assertEqual(format_action(fake_action), format_action(res))
def test_get_action_with_events_allowed(self):
def fake_get_action(context, uuid, request_id):
action = models.InstanceAction()
action.update(self.fake_actions[uuid][request_id])
return action
def fake_get_events(context, action_id):
events = []
for evt in self.fake_events[action_id]:
event = models.InstanceActionEvent()
event.update(evt)
events.append(event)
return events
self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
self.stubs.Set(db, 'action_events_get', fake_get_events)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1',
use_admin_context=True)
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
fake_events = self.fake_events[fake_action['id']]
fake_action['events'] = fake_events
self.assertEqual(format_action(fake_action),
format_action(res_dict['instanceAction']))
def test_get_action_with_events_not_allowed(self):
def fake_get_action(context, uuid, request_id):
return self.fake_actions[uuid][request_id]
def fake_get_events(context, action_id):
return self.fake_events[action_id]
self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
self.stubs.Set(db, 'action_events_get', fake_get_events)
rules = {'compute:get': common_policy.parse_rule(''),
'compute_extension:instance_actions':
common_policy.parse_rule(''),
'compute_extension:instance_actions:events':
common_policy.parse_rule('is_admin:True')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1')
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
self.assertEqual(format_action(fake_action),
format_action(res_dict['instanceAction']))
def test_action_not_found(self):
def fake_no_action(context, uuid, action_id):
return None
self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/1')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, FAKE_REQUEST_ID)
def test_index_instance_not_found(self):
def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=instance_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
FAKE_UUID)
def test_show_instance_not_found(self):
def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=instance_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = fakes.HTTPRequest.blank(
'/v2/123/servers/12/os-instance-actions/fake')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, 'fake')
class InstanceActionsSerializerTest(test.NoDBTestCase):
def setUp(self):
super(InstanceActionsSerializerTest, self).setUp()
self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
def _verify_instance_action_attachment(self, attach, tree):
for key in attach.keys():
if key != 'events':
self.assertEqual(attach[key], tree.get(key),
'%s did not match' % key)
def _verify_instance_action_event_attachment(self, attach, tree):
for key in attach.keys():
self.assertEqual(attach[key], tree.get(key),
'%s did not match' % key)
def test_instance_action_serializer(self):
serializer = instance_actions.InstanceActionTemplate()
action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
text = serializer.serialize({'instanceAction': action})
tree = etree.fromstring(text)
action = format_action(action)
self.assertEqual('instanceAction', tree.tag)
self._verify_instance_action_attachment(action, tree)
found_events = False
for child in tree:
if child.tag == 'events':
found_events = True
self.assertFalse(found_events)
def test_instance_action_events_serializer(self):
serializer = instance_actions.InstanceActionTemplate()
action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
event = self.fake_events[action['id']][0]
action['events'] = [dict(event), dict(event)]
text = serializer.serialize({'instanceAction': action})
tree = etree.fromstring(text)
action = format_action(action)
self.assertEqual('instanceAction', tree.tag)
self._verify_instance_action_attachment(action, tree)
event = format_event(event)
found_events = False
for child in tree:
if child.tag == 'events':
found_events = True
for key in event:
self.assertEqual(event[key], child.get(key))
self.assertTrue(found_events)
def test_instance_actions_serializer(self):
serializer = instance_actions.InstanceActionsTemplate()
action_list = self.fake_actions[FAKE_UUID].values()
text = serializer.serialize({'instanceActions': action_list})
tree = etree.fromstring(text)
action_list = [format_action(action) for action in action_list]
self.assertEqual('instanceActions', tree.tag)
self.assertEqual(len(action_list), len(tree))
for idx, child in enumerate(tree):
self.assertEqual('instanceAction', child.tag)
request_id = child.get('request_id')
self._verify_instance_action_attachment(
self.fake_actions[FAKE_UUID][request_id],
child)
|
|
from Visitor import VisitorTransform, CythonTransform
from ModuleNode import ModuleNode
from Nodes import *
from ExprNodes import *
from StringEncoding import EncodedString
from Errors import CompileError
from Code import UtilityCode
import Interpreter
import PyrexTypes
try:
set
except NameError:
from sets import Set as set
import textwrap
def dedent(text, reindent=0):
text = textwrap.dedent(text)
if reindent > 0:
indent = " " * reindent
text = '\n'.join([indent + x for x in text.split('\n')])
return text
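# Illustrative sketch (assumed literals, not from the original source):
# dedent("    a\n    b", reindent=2) first strips the common leading
# whitespace via textwrap.dedent and then prefixes every line with two
# spaces, yielding "  a\n  b".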
class IntroduceBufferAuxiliaryVars(CythonTransform):
#
# Entry point
#
buffers_exists = False
def __call__(self, node):
assert isinstance(node, ModuleNode)
self.max_ndim = 0
result = super(IntroduceBufferAuxiliaryVars, self).__call__(node)
if self.buffers_exists:
use_py2_buffer_functions(node.scope)
use_empty_bufstruct_code(node.scope, self.max_ndim)
return result
#
# Basic operations for transforms
#
def handle_scope(self, node, scope):
# For all buffers, insert extra variables in the scope.
# The variables are also accessible from the buffer_info
# on the buffer entry
bufvars = [entry for name, entry
in scope.entries.iteritems()
if entry.type.is_buffer]
if len(bufvars) > 0:
self.buffers_exists = True
if isinstance(node, ModuleNode) and len(bufvars) > 0:
# for now...note that pos is wrong
raise CompileError(node.pos, "Buffer vars not allowed in module scope")
for entry in bufvars:
if entry.type.dtype.is_ptr:
raise CompileError(node.pos, "Buffers with pointer types not yet supported.")
name = entry.name
buftype = entry.type
if buftype.ndim > self.max_ndim:
self.max_ndim = buftype.ndim
# Declare auxiliary vars
cname = scope.mangle(Naming.bufstruct_prefix, name)
bufinfo = scope.declare_var(name="$%s" % cname, cname=cname,
type=PyrexTypes.c_py_buffer_type, pos=node.pos)
if entry.is_arg:
bufinfo.used = True # otherwise, NameNode will mark whether it is used
def var(prefix, idx, initval):
cname = scope.mangle(prefix, "%d_%s" % (idx, name))
result = scope.declare_var("$%s" % cname, PyrexTypes.c_py_ssize_t_type,
node.pos, cname=cname, is_cdef=True)
result.init = initval
if entry.is_arg:
result.used = True
return result
stridevars = [var(Naming.bufstride_prefix, i, "0") for i in range(entry.type.ndim)]
shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)]
mode = entry.type.mode
if mode == 'full':
suboffsetvars = [var(Naming.bufsuboffset_prefix, i, "-1") for i in range(entry.type.ndim)]
else:
suboffsetvars = None
entry.buffer_aux = Symtab.BufferAux(bufinfo, stridevars, shapevars, suboffsetvars)
scope.buffer_entries = bufvars
self.scope = scope
def visit_ModuleNode(self, node):
self.handle_scope(node, node.scope)
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
self.handle_scope(node, node.local_scope)
self.visitchildren(node)
return node
#
# Analysis
#
buffer_options = ("dtype", "ndim", "mode", "negative_indices", "cast") # ordered!
buffer_defaults = {"ndim": 1, "mode": "full", "negative_indices": True, "cast": False}
buffer_positional_options_count = 1 # anything beyond this needs keyword argument
ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
ERR_BUF_TOO_MANY = 'Too many buffer options'
ERR_BUF_DUP = '"%s" buffer option already supplied'
ERR_BUF_MISSING = '"%s" missing'
ERR_BUF_MODE = 'Only allowed buffer modes are: "c", "fortran", "full", "strided" (as a compile-time string)'
ERR_BUF_NDIM = 'ndim must be a non-negative integer'
ERR_BUF_DTYPE = 'dtype must be "object", numeric type or a struct'
ERR_BUF_BOOL = '"%s" must be a boolean'
def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True):
"""
Must be called during type analysis, as analyse is called
on the dtype argument.
posargs and dictargs should consist of a list and a dict
of tuples (value, pos). Defaults should be a dict of values.
Returns a dict containing all the options a buffer can have and
their values (with the positions stripped).
"""
if defaults is None:
defaults = buffer_defaults
posargs, dictargs = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env, type_args = (0,'dtype'))
if len(posargs) > buffer_positional_options_count:
raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
options = {}
for name, (value, pos) in dictargs.iteritems():
if not name in buffer_options:
raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
options[name] = value
for name, (value, pos) in zip(buffer_options, posargs):
if not name in buffer_options:
raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
if name in options:
raise CompileError(pos, ERR_BUF_DUP % name)
options[name] = value
# Check that they are all there and copy defaults
for name in buffer_options:
if not name in options:
try:
options[name] = defaults[name]
except KeyError:
if need_complete:
raise CompileError(globalpos, ERR_BUF_MISSING % name)
dtype = options.get("dtype")
if dtype and dtype.is_extension_type:
raise CompileError(globalpos, ERR_BUF_DTYPE)
ndim = options.get("ndim")
if ndim and (not isinstance(ndim, int) or ndim < 0):
raise CompileError(globalpos, ERR_BUF_NDIM)
mode = options.get("mode")
if mode and not (mode in ('full', 'strided', 'c', 'fortran')):
raise CompileError(globalpos, ERR_BUF_MODE)
def assert_bool(name):
x = options.get(name)
if not isinstance(x, bool):
raise CompileError(globalpos, ERR_BUF_BOOL % name)
assert_bool('negative_indices')
assert_bool('cast')
return options
#
# Code generation
#
def get_flags(buffer_aux, buffer_type):
flags = 'PyBUF_FORMAT'
mode = buffer_type.mode
if mode == 'full':
flags += '| PyBUF_INDIRECT'
elif mode == 'strided':
flags += '| PyBUF_STRIDES'
elif mode == 'c':
flags += '| PyBUF_C_CONTIGUOUS'
elif mode == 'fortran':
flags += '| PyBUF_F_CONTIGUOUS'
else:
assert False
if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
return flags
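# Illustrative sketch (assumed buffer settings): a 'strided' buffer whose
# writable_needed flag is set would yield the flag expression
# 'PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE'.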
def used_buffer_aux_vars(entry):
buffer_aux = entry.buffer_aux
buffer_aux.buffer_info_var.used = True
for s in buffer_aux.shapevars: s.used = True
for s in buffer_aux.stridevars: s.used = True
if buffer_aux.suboffsetvars:
for s in buffer_aux.suboffsetvars: s.used = True
def put_unpack_buffer_aux_into_scope(buffer_aux, mode, code):
# Generate code to copy the needed struct info into local
# variables.
bufstruct = buffer_aux.buffer_info_var.cname
varspec = [("strides", buffer_aux.stridevars),
("shape", buffer_aux.shapevars)]
if mode == 'full':
varspec.append(("suboffsets", buffer_aux.suboffsetvars))
for field, vars in varspec:
code.putln(" ".join(["%s = %s.%s[%d];" %
(s.cname, bufstruct, field, idx)
for idx, s in enumerate(vars)]))
def put_acquire_arg_buffer(entry, code, pos):
code.globalstate.use_utility_code(acquire_utility_code)
buffer_aux = entry.buffer_aux
getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
# Acquire any new buffer
code.putln("{")
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
code.putln("}")
# An exception raised in arg parsing cannot be caught, so there is no
# need to care about the buffer then.
put_unpack_buffer_aux_into_scope(buffer_aux, entry.type.mode, code)
def put_release_buffer_code(code, entry):
code.globalstate.use_utility_code(acquire_utility_code)
code.putln("__Pyx_SafeReleaseBuffer(&%s);" % entry.buffer_aux.buffer_info_var.cname)
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
ndim = buffer_type.ndim
cast = int(buffer_type.cast)
flags = get_flags(buffer_aux, buffer_type)
bufstruct = buffer_aux.buffer_info_var.cname
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
return ("__Pyx_GetBufferAndValidate(&%(bufstruct)s, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
"%(cast)d, __pyx_stack)" % locals())
def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
is_initialized, pos, code):
"""
Generate code for reassigning a buffer variable. This only deals with getting
the buffer auxiliary structure and variables set up correctly; the assignment
itself and refcounting are the responsibility of the caller.
However, the assignment operation may throw an exception so that the reassignment
never happens.
Depending on the circumstances there are two possible outcomes:
- Old buffer released, new acquired, rhs assigned to lhs
- Old buffer released, new acquired which fails, reacquire old lhs buffer
(which may or may not succeed).
"""
code.globalstate.use_utility_code(acquire_utility_code)
bufstruct = buffer_aux.buffer_info_var.cname
flags = get_flags(buffer_aux, buffer_type)
code.putln("{") # Set up necesarry stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
if is_initialized:
# Release any existing buffer
code.putln('__Pyx_SafeReleaseBuffer(&%s);' % bufstruct)
# Acquire
retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
# If acquisition failed, attempt to reacquire the old buffer
# before raising the exception. A failure of reacquisition
# will cause the reacquisition exception to be reported, one
# can consider working around this later.
type, value, tb = [code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
for i in range(3)]
code.putln('PyErr_Fetch(&%s, &%s, &%s);' % (type, value, tb))
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % (type, value, tb)) # Do not refnanny these!
code.globalstate.use_utility_code(raise_buffer_fallback_code)
code.putln('__Pyx_RaiseBufferFallbackError();')
code.putln('} else {')
code.putln('PyErr_Restore(%s, %s, %s);' % (type, value, tb))
for t in (type, value, tb):
code.funcstate.release_temp(t)
code.putln('}')
code.putln('}')
# Unpack indices
put_unpack_buffer_aux_into_scope(buffer_aux, buffer_type.mode, code)
code.putln(code.error_goto_if_neg(retcode_cname, pos))
code.funcstate.release_temp(retcode_cname)
else:
# Our entry had no previous value, so set to None when acquisition fails.
# In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
# so it suffices to set the buf field to NULL.
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.buf = NULL;' %
(lhs_cname,
PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
bufstruct))
code.putln(code.error_goto(pos))
code.put('} else {')
# Unpack indices
put_unpack_buffer_aux_into_scope(buffer_aux, buffer_type.mode, code)
code.putln('}')
code.putln("}") # Release stack
def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos, code):
"""
Generates code to process indices and calculate an offset into
a buffer. Returns a C string which gives a pointer which can be
read from or written to at will (it is an expression so caller should
store it in a temporary if it is used more than once).
As the bounds checking can have any number of combinations of unsigned
arguments, smart optimizations etc., we insert it directly in the function
body. The lookup however is delegated to an inline function that is instantiated
once per ndim (lookup with suboffsets tends to get quite complicated).
"""
bufaux = entry.buffer_aux
bufstruct = bufaux.buffer_info_var.cname
negative_indices = directives['wraparound'] and entry.type.negative_indices
if directives['boundscheck']:
# Check bounds and fix negative indices.
# We allocate a temporary which is initialized to -1, meaning OK (!).
# If an error occurs, the temp is set to the dimension index the
# error is occurring at.
tmp_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = -1;" % tmp_cname)
for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames,
bufaux.shapevars)):
if signed != 0:
# not unsigned, deal with negative index
code.putln("if (%s < 0) {" % cname)
if negative_indices:
code.putln("%s += %s;" % (cname, shape.cname))
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s < 0" % cname), tmp_cname, dim))
else:
code.putln("%s = %d;" % (tmp_cname, dim))
code.put("} else ")
# check bounds in positive direction
if signed != 0:
cast = ""
else:
cast = "(size_t)"
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s >= %s%s" % (cname, cast, shape.cname)),
tmp_cname, dim))
code.globalstate.use_utility_code(raise_indexerror_code)
code.putln("if (%s) {" % code.unlikely("%s != -1" % tmp_cname))
code.putln('__Pyx_RaiseBufferIndexError(%s);' % tmp_cname)
code.putln(code.error_goto(pos))
code.putln('}')
code.funcstate.release_temp(tmp_cname)
elif negative_indices:
# Only fix negative indices.
for signed, cname, shape in zip(index_signeds, index_cnames,
bufaux.shapevars):
if signed != 0:
code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape.cname))
# Create buffer lookup and return it
# This is done via utility macros/inline functions, which vary
# according to the access mode used.
params = []
nd = entry.type.ndim
mode = entry.type.mode
if mode == 'full':
for i, s, o in zip(index_cnames, bufaux.stridevars, bufaux.suboffsetvars):
params.append(i)
params.append(s.cname)
params.append(o.cname)
funcname = "__Pyx_BufPtrFull%dd" % nd
funcgen = buf_lookup_full_code
else:
if mode == 'strided':
funcname = "__Pyx_BufPtrStrided%dd" % nd
funcgen = buf_lookup_strided_code
elif mode == 'c':
funcname = "__Pyx_BufPtrCContig%dd" % nd
funcgen = buf_lookup_c_code
elif mode == 'fortran':
funcname = "__Pyx_BufPtrFortranContig%dd" % nd
funcgen = buf_lookup_fortran_code
else:
assert False
for i, s in zip(index_cnames, bufaux.stridevars):
params.append(i)
params.append(s.cname)
# Make sure the utility code is available
if funcname not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(funcname)
protocode = code.globalstate['utility_code_proto']
defcode = code.globalstate['utility_code_def']
funcgen(protocode, defcode, name=funcname, nd=nd)
ptr_type = entry.type.buffer_ptr_type
ptrcode = "%s(%s, %s.buf, %s)" % (funcname,
ptr_type.declaration_code(""),
bufstruct,
", ".join(params))
return ptrcode
def use_empty_bufstruct_code(env, max_ndim):
code = dedent("""
Py_ssize_t __Pyx_zeros[] = {%s};
Py_ssize_t __Pyx_minusones[] = {%s};
""") % (", ".join(["0"] * max_ndim), ", ".join(["-1"] * max_ndim))
env.use_utility_code(UtilityCode(proto=code))
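# Illustrative sketch: with max_ndim == 2 the utility code above expands to
#   Py_ssize_t __Pyx_zeros[] = {0, 0};
#   Py_ssize_t __Pyx_minusones[] = {-1, -1};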
def buf_lookup_full_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
of dimensions. The function gives back a void* at the right location.
"""
# _i_ndex, _s_tride, sub_o_ffset
macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs))
funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)])
proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs))
defin.putln(dedent("""
static CYTHON_INLINE void* %s_imp(void* buf, %s) {
char* ptr = (char*)buf;
""") % (name, funcargs) + "".join([dedent("""\
ptr += s%d * i%d;
if (o%d >= 0) ptr = *((char**)ptr) + o%d;
""") % (i, i, i, i) for i in range(nd)]
) + "\nreturn ptr;\n}")
def buf_lookup_strided_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
of dimensions. The function gives back a void* at the right location.
"""
# _i_ndex, _s_tride
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset))
def buf_lookup_c_code(proto, defin, name, nd):
"""
Similar to strided lookup, but can assume that the last dimension
doesn't need a stride multiplication because it is contiguous.
Still we keep the same signature for now.
"""
if nd == 1:
proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
else:
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1))
def buf_lookup_fortran_code(proto, defin, name, nd):
"""
Like C lookup, but the first index is optimized instead.
"""
if nd == 1:
proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
else:
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(1, nd)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, 0))
def use_py2_buffer_functions(env):
# Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
# For >= 2.6 we do double mode -- use the new buffer interface on objects
# which have the right tp_flags set, but emulation otherwise.
# Search all types for __getbuffer__ overloads
types = []
visited_scopes = set()
def find_buffer_types(scope):
if scope in visited_scopes:
return
visited_scopes.add(scope)
for m in scope.cimported_modules:
find_buffer_types(m)
for e in scope.type_entries:
t = e.type
if t.is_extension_type:
release = get = None
for x in t.scope.pyfunc_entries:
if x.name == u"__getbuffer__": get = x.func_cname
elif x.name == u"__releasebuffer__": release = x.func_cname
if get:
types.append((t.typeptr_cname, get, release))
find_buffer_types(env)
code = dedent("""
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
#if PY_VERSION_HEX >= 0x02060000
if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_HAVE_NEWBUFFER)
return PyObject_GetBuffer(obj, view, flags);
#endif
""")
if len(types) > 0:
clause = "if"
for t, get, release in types:
code += " %s (PyObject_TypeCheck(obj, %s)) return %s(obj, view, flags);\n" % (clause, t, get)
clause = "else if"
code += " else {\n"
code += dedent("""\
PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
""", 2)
if len(types) > 0: code += " }"
code += dedent("""
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject* obj = view->obj;
if (obj) {
""")
if len(types) > 0:
clause = "if"
for t, get, release in types:
if release:
code += "%s (PyObject_TypeCheck(obj, %s)) %s(obj, view);" % (clause, t, release)
clause = "else if"
code += dedent("""
Py_DECREF(obj);
view->obj = NULL;
}
}
#endif
""")
env.use_utility_code(UtilityCode(
proto = dedent("""\
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
"""), impl = code))
def mangle_dtype_name(dtype):
# Use prefixes to separate user defined types from builtins
# (consider "typedef float unsigned_int")
if dtype.is_pyobject:
return "object"
elif dtype.is_ptr:
return "ptr"
else:
if dtype.is_typedef or dtype.is_struct_or_union:
prefix = "nn_"
else:
prefix = ""
return prefix + dtype.declaration_code("").replace(" ", "_")
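# Illustrative sketch (assumed dtypes): a Python object dtype mangles to
# "object", a pointer dtype to "ptr", and a plain C dtype such as
# "unsigned int" to "unsigned_int"; typedefs and structs additionally get the
# "nn_" prefix so they cannot collide with builtin type names.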
def get_type_information_cname(code, dtype, maxdepth=None):
# Output the run-time type information (__Pyx_TypeInfo) for given dtype,
# and return the name of the type info struct.
#
# Structs with two floats of the same size are encoded as complex numbers.
# One can distinguish between complex numbers declared as struct or with native
# encoding by inspecting to see if the fields field of the type is
# filled in.
namesuffix = mangle_dtype_name(dtype)
name = "__Pyx_TypeInfo_%s" % namesuffix
structinfo_name = "__Pyx_StructFields_%s" % namesuffix
if dtype.is_error: return "<error>"
# It's critical that walking the type info doesn't use more stack
# depth than dtype.struct_nesting_depth() returns, so use an assertion for this
if maxdepth is None: maxdepth = dtype.struct_nesting_depth()
if maxdepth <= 0:
assert False
if name not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(name)
typecode = code.globalstate['typeinfo']
complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
declcode = dtype.declaration_code("")
if dtype.is_simple_buffer_dtype():
structinfo_name = "NULL"
elif dtype.is_struct:
fields = dtype.scope.var_entries
# Must pre-call all used types in order not to recurse utility code
# writing.
assert len(fields) > 0
types = [get_type_information_cname(code, f.type, maxdepth - 1)
for f in fields]
typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True)
for f, typeinfo in zip(fields, types):
typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' %
(typeinfo, f.name, dtype.declaration_code(""), f.cname), safe=True)
typecode.putln(' {NULL, NULL, 0}', safe=True)
typecode.putln("};", safe=True)
else:
assert False
rep = str(dtype)
if dtype.is_int:
if dtype.signed == 0:
typegroup = 'U'
else:
typegroup = 'I'
elif complex_possible or dtype.is_complex:
typegroup = 'C'
elif dtype.is_float:
typegroup = 'R'
elif dtype.is_struct:
typegroup = 'S'
elif dtype.is_pyobject:
typegroup = 'O'
else:
print dtype
assert False
typecode.putln(('static __Pyx_TypeInfo %s = { "%s", %s, sizeof(%s), \'%s\' };'
) % (name,
rep,
structinfo_name,
declcode,
typegroup,
), safe=True)
return name
# Utility function to set the right exception
# The caller should immediately goto_error
raise_indexerror_code = UtilityCode(
proto = """\
static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/
""",
impl = """\
static void __Pyx_RaiseBufferIndexError(int axis) {
PyErr_Format(PyExc_IndexError,
"Out of bounds on buffer access (axis %d)", axis);
}
""")
parse_typestring_repeat_code = UtilityCode(
proto = """
""",
impl = """
""")
raise_buffer_fallback_code = UtilityCode(
proto = """
static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
""",
impl = """
static void __Pyx_RaiseBufferFallbackError(void) {
PyErr_Format(PyExc_ValueError,
"Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
""")
#
# Buffer format string checking
#
# Buffer type checking. Utility code for checking that acquired
# buffers match our assumptions. We only need to check ndim and
# the format string; the access mode/flags is checked by the
# exporter.
#
# The alignment code is copied from _struct.c in Python.
acquire_utility_code = UtilityCode(proto="""
/* Run-time type information about structs used with buffers */
struct __Pyx_StructField_;
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
""", impl="""
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
unsigned int n = 1;
return *(unsigned char*)(&n) != 0;
}
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
int new_count, enc_count;
int is_complex;
char enc_type;
char packmode;
} __Pyx_BufFmt_Context;
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t < '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
char msg[] = {ch, 0};
PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%s'", msg);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'b': return "'char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_s_long_long;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_s_long_long) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U';
case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R');
case 'O': return 'O';
case 'P': return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset;
if (ctx->enc_type == 0) return 0;
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->packmode == '@' || ctx->packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->packmode == '@') {
int align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
int align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
/* special case -- treat as struct rather than complex number */
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d "
"but %"PY_FORMAT_SIZE_T"d expected", ctx->fmt_offset, offset);
return -1;
}
ctx->fmt_offset += size;
--ctx->enc_count; /* Consume from buffer string */
/* Done checking, move to next field, pushing or popping struct stack if needed */
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break; /* breaks both loops as ctx->enc_count == 0 */
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue; /* empty struct */
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static int __Pyx_BufFmt_FirstPack(__Pyx_BufFmt_Context* ctx) {
if (ctx->enc_type != 0 || ctx->packmode != '@') {
PyErr_SetString(PyExc_ValueError, "Buffer packing mode currently only allowed at beginning of format string (this is a defect)");
return -1;
}
return 0;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case 10:
case 13:
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
if (__Pyx_BufFmt_FirstPack(ctx) == -1) return NULL;
ctx->packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
if (__Pyx_BufFmt_FirstPack(ctx) == -1) return NULL;
ctx->packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
if (__Pyx_BufFmt_FirstPack(ctx) == -1) return NULL;
ctx->packmode = *ts++;
break;
case 'T': /* substruct */
{
int i;
const char* ts_after_sub;
int struct_count = ctx->new_count;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
}
break;
case '}': /* end of substruct; either repeat or move on */
++ts;
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
} /* fall through */
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex) {
/* Continue pooling same type */
ctx->enc_count += ctx->new_count;
} else {
/* New type */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
}
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
default:
{
ctx->new_count = __Pyx_BufFmt_ParseNumber(&ts);
if (ctx->new_count == -1) { /* First char was not a digit */
char msg[2] = { *ts, 0 };
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%s')", msg);
return NULL;
}
}
}
}
}
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) {
if (obj == Py_None) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
""")
|
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_role
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower role.
description:
- Create, update, or destroy Ansible Tower roles. See
U(https://www.ansible.com/tower) for an overview.
options:
user:
description:
- User that receives the permissions specified by the role.
required: False
default: null
team:
description:
- Team that receives the permissions specified by the role.
required: False
default: null
role:
description:
- The role type to grant/revoke.
required: True
choices: ["admin", "read", "member", "execute", "adhoc", "update", "use", "auditor"]
target_team:
description:
- Team that the role acts on.
required: False
default: null
inventory:
description:
- Inventory the role acts on.
required: False
default: null
job_template:
description:
- The job_template the role acts on.
required: False
default: null
credential:
description:
- Credential the role acts on.
required: False
default: null
organization:
description:
- Organization the role acts on.
required: False
default: null
project:
description:
- Project the role acts on.
required: False
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add jdoe to the member role of My Team
tower_role:
user: jdoe
target_team: "My Team"
role: member
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def update_resources(module, p):
'''update_resources attempts to fetch any of the resources given
by name using their unique field (identity)
'''
params = p.copy()
identity_map = {
'user': 'username',
'team': 'name',
'target_team': 'name',
'inventory': 'name',
'job_template': 'name',
'credential': 'name',
'organization': 'name',
'project': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'team' if k == 'target_team' else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update role, {0} not found: {1}'.format(k, excinfo), changed=False)
return params
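# Illustrative sketch (assumed values, not part of the module): given module
# params such as {'user': 'jdoe', 'target_team': 'My Team'},
# update_resources() resolves each name through its tower-cli identity field
# and returns the params with numeric ids substituted, e.g.
# {'user': 12, 'target_team': 7}. The ids shown here are made up.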
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(),
team=dict(),
role=dict(choices=["admin", "read", "member", "execute", "adhoc", "update", "use", "auditor"]),
target_team=dict(),
inventory=dict(),
job_template=dict(),
credential=dict(),
organization=dict(),
project=dict(),
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
state=dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
role_type = module.params.pop('role')
state = module.params.get('state')
json_output = {'role': role_type, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
role = tower_cli.get_resource('role')
params = update_resources(module, module.params)
params['type'] = role_type
try:
if state == 'present':
result = role.grant(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = role.revoke(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update role: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that triggers and waits for tasks on android-compile.skia.org"""
import base64
import hashlib
import json
import math
import optparse
import os
import subprocess
import sys
import time
INFRA_BOTS_DIR = os.path.abspath(os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir)))
sys.path.insert(0, INFRA_BOTS_DIR)
import git_utils
import utils
ANDROID_COMPILE_BUCKET = 'android-compile-tasks'
GS_RETRIES = 5
GS_RETRY_WAIT_BASE = 15
POLLING_FREQUENCY_SECS = 10
DEADLINE_SECS = 2 * 60 * 60 # 2 hours.
INFRA_FAILURE_ERROR_MSG = (
'\n\n'
'Your run failed due to unknown infrastructure failures.\n'
'Please contact rmistry@ or the trooper from '
'http://skia-tree-status.appspot.com/trooper\n'
'Sorry for the inconvenience!\n'
)
class AndroidCompileException(Exception):
pass
def _create_task_dict(options):
"""Creates a dict representation of the requested task."""
params = {}
params['lunch_target'] = options.lunch_target
params['mmma_targets'] = options.mmma_targets
params['issue'] = options.issue
params['patchset'] = options.patchset
params['hash'] = options.hash
return params
def _get_gs_bucket():
"""Returns the Google storage bucket with the gs:// prefix."""
return 'gs://%s' % ANDROID_COMPILE_BUCKET
def _write_to_storage(task):
"""Writes the specified compile task to Google storage."""
with utils.tmp_dir():
json_file = os.path.join(os.getcwd(), _get_task_file_name(task))
with open(json_file, 'w') as f:
json.dump(task, f)
subprocess.check_call(['gsutil', 'cp', json_file, '%s/' % _get_gs_bucket()])
print 'Created %s/%s' % (_get_gs_bucket(), os.path.basename(json_file))
def _get_task_file_name(task):
"""Returns the file name of the compile task. Eg: ${issue}-${patchset}.json"""
return '%s-%s-%s.json' % (task['lunch_target'], task['issue'],
task['patchset'])
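# Illustrative sketch (assumed values): a task like
# {'lunch_target': 'aosp_arm64', 'issue': 12345, 'patchset': 2} maps to the
# storage file name 'aosp_arm64-12345-2.json'.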
# Checks to see if task already exists in Google storage.
# If the task has completed then the Google storage file is deleted.
def _does_task_exist_in_storage(task):
"""Checks to see if the corresponding file of the task exists in storage.
If the file exists and the task has already completed then the storage file is
deleted and False is returned.
"""
gs_file = '%s/%s' % (_get_gs_bucket(), _get_task_file_name(task))
try:
output = subprocess.check_output(['gsutil', 'cat', gs_file])
except subprocess.CalledProcessError:
print 'Task does not exist in Google storage'
return False
taskJSON = json.loads(output)
if taskJSON.get('done'):
print 'Task exists in Google storage and has completed.'
print 'Deleting it so that a new run can be scheduled.'
subprocess.check_call(['gsutil', 'rm', gs_file])
return False
else:
print 'Task exists in Google storage and is still running.'
return True
def _trigger_task(options):
"""Triggers a task on the compile server by creating a file in storage."""
task = _create_task_dict(options)
# Check to see if file already exists in Google Storage.
if not _does_task_exist_in_storage(task):
_write_to_storage(task)
return task
def _add_cl_comment(issue, comment):
# Depot tools needs a checkout to use "git cl" even though we are just adding
# a comment to a change unrelated to the checkout.
with git_utils.NewGitCheckout(repository=utils.SKIA_REPO) as checkout:
add_comment_cmd = ['git', 'cl', 'comments', '-i', str(issue), '-a', comment]
subprocess.check_call(add_comment_cmd)
def trigger_and_wait(options):
"""Triggers a task on the compile server and waits for it to complete."""
task = _trigger_task(options)
print 'Android Compile Task for %d/%d has been successfully added to %s.' % (
options.issue, options.patchset, ANDROID_COMPILE_BUCKET)
print '%s will be polled every %d seconds.' % (ANDROID_COMPILE_BUCKET,
POLLING_FREQUENCY_SECS)
# Now poll the Google storage file till the task completes or till deadline
# is hit.
time_started_polling = time.time()
while True:
if (time.time() - time_started_polling) > DEADLINE_SECS:
raise AndroidCompileException(
'Task did not complete in the deadline of %s seconds.' % (
DEADLINE_SECS))
# Get the status of the task.
gs_file = '%s/%s' % (_get_gs_bucket(), _get_task_file_name(task))
for retry in range(GS_RETRIES):
try:
output = subprocess.check_output(['gsutil', 'cat', gs_file])
ret = json.loads(output)
break
except (ValueError, subprocess.CalledProcessError) as e:
if e.__class__ == ValueError:
print ('Received output "%s" that could not be converted to '
'json: %s' % (output, e))
elif e.__class__ == subprocess.CalledProcessError:
print e
if retry == (GS_RETRIES-1):
print '%d retries did not help' % GS_RETRIES
raise
waittime = GS_RETRY_WAIT_BASE * math.pow(2, retry)
print 'Retry in %d seconds.' % waittime
time.sleep(waittime)
if ret.get('infra_failure'):
if ret.get('error'):
raise AndroidCompileException('Run failed with:\n\n%s\n' % ret['error'])
else:
# Use a general purpose error message.
raise AndroidCompileException(INFRA_FAILURE_ERROR_MSG)
if ret.get('done'):
if not ret.get('is_master_branch', True):
print 'The Android Framework Compile bot only works for patches and'
print 'hashes from the master branch.'
return 0
elif ret['withpatch_success']:
print 'Your run was successfully completed.'
print 'With patch logs are here: %s' % ret['withpatch_log']
return 0
elif ret['nopatch_success']:
raise AndroidCompileException('The build with the patch failed and the '
'build without the patch succeeded. This means that the patch '
'causes Android to fail compilation.\n\n'
'With patch logs are here: %s\n\n'
'No patch logs are here: %s\n\n'
'You can force sync of the checkout if needed here: %s\n\n' % (
ret['withpatch_log'], ret['nopatch_log'],
'https://skia-android-compile.corp.goog/'))
else:
msg = ('FYI: Both with patch and no patch builds of the %s bot '
'failed.\nThis could mean that the Android tree is currently '
'broken and infra is investigating.\nMarking this bot as '
'successful to not block the CQ.\n\n'
'With patch logs are here: %s\n'
'No patch logs are here: %s\n\n') % (
options.builder_name, ret['withpatch_log'],
ret['nopatch_log'])
_add_cl_comment(task['issue'], msg)
print msg
return 0
# Print status of the task.
print 'Task: %s\n' % pretty_task_str(ret)
time.sleep(POLLING_FREQUENCY_SECS)
def pretty_task_str(task):
status = 'Not picked up by server yet'
if task.get('task_id'):
status = 'Running withpatch compilation'
if task.get('withpatch_log'):
status = 'Running nopatch compilation'
return '[id: %s, checkout: %s, status: %s]' % (
task.get('task_id'), task.get('checkout'), status)
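# Illustrative sketch (assumed values): a task dict such as
# {'task_id': '42', 'checkout': 'checkout_1', 'withpatch_log': 'gs://log'}
# renders as '[id: 42, checkout: checkout_1, status: Running nopatch compilation]'.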
def main():
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--lunch_target', type=str, default='',
help='The lunch target the android compile bot should build with.')
option_parser.add_option(
'', '--mmma_targets', type=str, default='',
help='The comma-separated mmma targets the android compile bot should '
'build.')
option_parser.add_option(
'', '--issue', type=int, default=0,
help='The Gerrit change number to get the patch from.')
option_parser.add_option(
'', '--patchset', type=int, default=0,
help='The Gerrit change patchset to use.')
option_parser.add_option(
'', '--hash', type=str, default='',
help='The Skia repo hash to compile against.')
option_parser.add_option(
'', '--builder_name', type=str, default='',
help='The builder that triggered this run.')
options, _ = option_parser.parse_args()
sys.exit(trigger_and_wait(options))
if __name__ == '__main__':
main()
|
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
class Confusion(object):
'''Confusion matrix class including incremental confusion computation.
Instances of this class can be used to compute the confusion matrix
    and other typical scores for semantic segmentation problems, either
    incrementally or in one call. All labels should be positive integers,
    with the exception of a negative void label. Methods for plotting and
printing are included.
'''
def __init__(self, label_names, void_label=-1, label_count=None):
'''Inits a Confusion matrix with label names and the void label.
Parameters
----------
label_names : list of strings or None
A list of all label names. The void label name should not be
            included.
void_label : int (default: -1)
This label will be ignored. It has to be negative.
label_count : int or None (default: None)
If label_names is None, this will be used to define the shape of
the confusion matrix.
Raises
------
ValueError
            When both `label_names` and `label_count` are ``None``, or if
            `void_label` is not negative, a `ValueError` is raised.
'''
if label_names is not None:
self.label_names = (np.array(label_names).copy()).tolist()
else:
if label_count is None:
raise ValueError('Either label_names or label_count has to be '
'specified.')
else:
self.label_names = [str(i) for i in range(label_count)]
if void_label >= 0:
raise ValueError('The void label needs to be a negative number.')
else:
self.void_label = void_label
        self.class_count = len(self.label_names)
self.reset()
def reset(self):
'''Reset all values to allow for a fresh computation.
'''
self.confusion = np.zeros((self.class_count,self.class_count), np.int64)
self.confusion_normalized_row = None
self.confusion_normalized_col = None
self.global_score = 0
self.avg_score = 0
self.avg_iou_score = 0
self.finished_computation = False
def finish(self):
'''Computes all scores given the accumulated data.
'''
total = np.sum(self.confusion)
self.gt_sum_per_class = np.sum(self.confusion, 1)
self.sum_per_class = np.sum(self.confusion, 0)
self.global_score = np.sum(np.diag(self.confusion))/total
diag = np.diag(self.confusion)
union = self.gt_sum_per_class + self.sum_per_class - diag
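        # Per-class recall is diag / gt_sum_per_class and per-class IoU is
        # diag / (gt_sum + pred_sum - diag); np.nanmean skips entries whose
        # denominator is zero (0/0 -> nan).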
self.avg_score = np.nanmean(diag/self.gt_sum_per_class)
self.avg_iou_score = np.nanmean(diag/union)
self.confusion_normalized_row = (
self.confusion.copy().T/self.gt_sum_per_class.astype(np.float32)).T
self.confusion_normalized_col = (
self.confusion.copy()/self.sum_per_class.astype(np.float32))
self.finished_computation = True
def incremental_update(self, gt, pred, allow_void_prediction=False,
update_finished=True):
'''Update the confusion matrix with the provided data.
Given the ground truth and predictions the stored confusion matrix is
updated. If all scores have been computed before they become invalid
after this operation and need to be recomputed. Updates can be done
with a single image, a batch, or the complete dataset at once.
        Parameters
        ----------
        gt : np.ndarray
The ground truth image(s). Either a single image (WxH) or a tensor
of several images (BxWxH).
pred : np.ndarray
The prediction image(s). Either a single image (WxH) or a tensor
of several images (BxWxH). Needs the same shape as gt.
allow_void_prediction : bool (default: False)
Specifies if void predictions are allowed or not. Typically this is
not desired and an exception is raised when predictions have void
labels. When set to True, these labels are ignored during the
computation.
update_finished : bool (default: True)
            When set to False this method raises an exception if scores have
been computed before. If left at True, nothing happens.
Raises
------
ValueError
When `gt` and `pred` don't have matching shapes, when the labels
are too large, or when `pred` contains void labels and
`allow_void_prediction` is set to False a `ValueError` is raised.
Exception
            When `update_finished` is set to False and this method is called
            after the scores have been computed, an `Exception` is raised.
'''
if gt.shape != pred.shape:
            raise ValueError('Ground truth and prediction shape mismatch.')
if not allow_void_prediction and self.void_label in pred:
raise ValueError('Void labels found in the predictions. Fix the '
'predictions, or set `allow_void_prediction` to '
'True.')
if np.max(gt) >= self.class_count:
            raise ValueError('Labels in the ground truth exceed the class count.')
if np.max(pred) >= self.class_count:
raise ValueError('Labels in the prediction exceed the class count.')
if self.finished_computation and not update_finished:
raise Exception('You specified not to allow updates after computing'
' scores.')
gt_flat = gt.flatten().astype(np.int32)
pred_flat = pred.flatten().astype(np.int32)
non_void = gt_flat != self.void_label
if allow_void_prediction:
non_void *= pred_flat != self.void_label
gt_flat = gt_flat[non_void]
pred_flat = pred_flat[non_void]
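        # Encode every (gt, pred) pair as one flat index into the
        # class_count x class_count matrix, count the unique pairs, and add
        # the counts to the confusion matrix in a single vectorized update.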
pairs = gt_flat*self.class_count + pred_flat
pairs, pair_counts = np.unique(pairs, return_counts=True)
self.confusion.flat[pairs] += pair_counts
self.finished_computation = False
def plot(self, colormap=None, number_format=None, only_return_fig=False):
'''Create and plot a figure summarizing all information.
        Parameters
        ----------
        colormap : mpl.cm (default: None)
The colormap used to colorize the matrices. None results in mpl
default to be used.
number_format : string or None (default: None)
The format used to print percentages into the confusion matrix. When
not provided the numbers are not printed. For example `{0:>7.2%}`.
only_return_fig : bool (default: False)
            When set to True the figure is closed and returned instead of
            being shown, for example for saving it to disk or for use outside
            of Jupyter notebooks.
'''
#Compute the values in case this has not been done yet.
if not self.finished_computation:
self.finish()
#Setup the plots
fig, ax = plt.subplots(1,2, figsize=(15,5.5), sharey=True, sharex=True)
#Show the confusion matrices
ax[0].imshow(self.confusion_normalized_row*100, interpolation='nearest',
cmap=colormap, vmin=0, vmax=100, aspect='auto')
im = ax[1].imshow(
self.confusion_normalized_col*100, interpolation='nearest',
cmap=colormap, vmin=0, vmax=100, aspect='auto')
#Make a colorbar
cax,kw = mpl.colorbar.make_axes([a for a in ax.flat])
plt.colorbar(im, cax=cax, **kw)
ax[0].set_yticks(range(self.class_count))
ax[0].set_xticks(range(self.class_count))
#Possibly add the numbers
if number_format is not None:
for r in range(0,self.class_count):
for c in range(0,self.class_count):
ax[0].text(c, r, number_format.format(
self.confusion_normalized_row[r,c]),
horizontalalignment='center',
verticalalignment='center', fontsize=10)
ax[1].text(c, r, number_format.format(
self.confusion_normalized_col[r,c]),
horizontalalignment='center',
verticalalignment='center', fontsize=10)
# Add the names
ax[0].set_yticklabels(self.label_names)
ax[0].xaxis.tick_top()
ax[0].set_xticklabels(self.label_names, rotation='vertical')
ax[1].xaxis.tick_top()
ax[1].set_xticklabels(self.label_names, rotation='vertical')
# Labels for Row vs Column normalized
ax[0].set_title('Row normalized', horizontalalignment='center', y=-0.1)
ax[1].set_title(
'Column normalized', horizontalalignment='center', y=-0.1)
# A final line showing our three favorite scores.
fig.suptitle('Global:{0:.2%}, Average:{1:.2%}, IoU:{2:.2%}'.format(
self.global_score, self.avg_score, self.avg_iou_score), fontsize=14,
fontweight='bold', x = 0.4, y = 0.03)
if only_return_fig:
plt.close()
return fig
def print_confusion_matrix(self, max_name_length=None):
'''Print the row normalized confusion matrix in a human readable form.
Parameters
----------
max_name_length : int or None (default:None)
The maximum number of characters printed for the class names.
If left as None the longest class name defines this value.
'''
if max_name_length is None:
max_name_length = np.max([len(n) for n in self.label_names])
label_names_cropped = [n[:max_name_length] for n in self.label_names]
#Compute the values in case this has not been done yet.
if not self.finished_computation:
self.finish()
line = ('{:>' + str(max_name_length) + 's}, ' +
', '.join(['{:>7.2%}'] * self.class_count))
for l, conf in zip(label_names_cropped, self.confusion_normalized_row):
print(line.format(l, *(conf.tolist())))
print('Global: {:>7.2%}'.format(self.global_score))
print('Average: {:>7.2%}'.format(self.avg_score))
print('IoU: {:>7.2%}'.format(self.avg_iou_score))
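# Minimal usage sketch (illustrative only; the label names and the random
# ground truth / predictions below are made up for demonstration):
if __name__ == '__main__':
    confusion = Confusion(['road', 'car', 'person'])
    gt = np.random.randint(0, 3, size=(2, 4, 4))
    pred = np.random.randint(0, 3, size=(2, 4, 4))
    confusion.incremental_update(gt, pred)
    confusion.print_confusion_matrix()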
|
|
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.views.audit_scores
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <[email protected]>
"""
from security_monkey.views import AuthenticatedService
from security_monkey.views import AUDIT_SCORE_FIELDS
from security_monkey.views import ACCOUNT_PATTERN_AUDIT_SCORE_FIELDS
from security_monkey.datastore import ItemAuditScore
from security_monkey import db, rbac
from flask.ext.restful import marshal, reqparse
class AuditScoresGet(AuthenticatedService):
decorators = [
rbac.allow(["View"], ["GET"]),
rbac.allow(["Admin"], ["POST"])
]
def __init__(self):
super(AuditScoresGet, self).__init__()
def get(self):
"""
.. http:get:: /api/1/auditscores
Get a list of override scores for audit items.
**Example Request**:
.. sourcecode:: http
GET /api/1/auditscores HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
count: 1,
items: [
{
"id": 123,
"method": "check_xxx",
"technology": "policy",
"score": 1
},
],
total: 1,
page: 1,
auth: {
authenticated: true,
user: "[email protected]"
}
}
:statuscode 200: no error
:statuscode 401: Authentication failure. Please login.
"""
self.reqparse.add_argument(
'count', type=int, default=30, location='args')
self.reqparse.add_argument(
'page', type=int, default=1, location='args')
args = self.reqparse.parse_args()
page = args.pop('page', None)
count = args.pop('count', None)
result = ItemAuditScore.query.order_by(ItemAuditScore.technology).paginate(page, count, error_out=False)
items = []
for entry in result.items:
auditscore_marshaled = marshal(entry.__dict__, AUDIT_SCORE_FIELDS)
items.append(auditscore_marshaled)
marshaled_dict = {
'total': result.total,
'count': len(items),
'page': result.page,
'items': items,
'auth': self.auth_dict
}
return marshaled_dict, 200
def post(self):
"""
.. http:post:: /api/1/auditscores
Create a new override audit score.
**Example Request**:
.. sourcecode:: http
POST /api/1/auditscores HTTP/1.1
Host: example.com
Accept: application/json
{
"method": "check_xxx",
"technology": "policy",
"score": 1
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 201 Created
Vary: Accept
Content-Type: application/json
            {
                "id": 123,
                "method": "check_xxx",
                "technology": "policy",
                "score": 1
            }
:statuscode 201: created
:statuscode 401: Authentication Error. Please Login.
"""
self.reqparse.add_argument('method', required=True, type=unicode, help='Must provide method name',
location='json')
self.reqparse.add_argument('technology', required=True, type=unicode, help='Technology required.',
location='json')
self.reqparse.add_argument('score', required=False, type=unicode, help='Override score required',
location='json')
self.reqparse.add_argument('disabled', required=True, type=unicode, help='Disabled flag',
location='json')
args = self.reqparse.parse_args()
method = args['method']
technology = args['technology']
score = args['score']
if score is None:
score = 0
disabled = args['disabled']
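        # Upsert: reuse an existing override for this (technology, method)
        # pair if one exists, otherwise create a new one.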
query = ItemAuditScore.query.filter(ItemAuditScore.technology == technology)
query = query.filter(ItemAuditScore.method == method)
auditscore = query.first()
if not auditscore:
auditscore = ItemAuditScore()
auditscore.method = method
auditscore.technology = technology
auditscore.score = int(score)
auditscore.disabled = bool(disabled)
db.session.add(auditscore)
db.session.commit()
db.session.refresh(auditscore)
auditscore_marshaled = marshal(auditscore.__dict__, AUDIT_SCORE_FIELDS)
auditscore_marshaled['auth'] = self.auth_dict
return auditscore_marshaled, 201
class AuditScoreGetPutDelete(AuthenticatedService):
decorators = [
rbac.allow(["View"], ["GET"]),
rbac.allow(["Admin"], ["PUT", "DELETE"])
]
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(AuditScoreGetPutDelete, self).__init__()
def get(self, id):
"""
.. http:get:: /api/1/auditscores/<int:id>
            Get the override audit score with the given ID.
**Example Request**:
.. sourcecode:: http
GET /api/1/auditscores/123 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"id": 123,
"method": "check_xxx",
"technology": "policy",
"score": "1",
auth: {
authenticated: true,
user: "[email protected]"
}
}
:statuscode 200: no error
:statuscode 404: item with given ID not found
:statuscode 401: Authentication failure. Please login.
"""
result = ItemAuditScore.query.filter(ItemAuditScore.id == id).first()
if not result:
return {"status": "Override Audit Score with the given ID not found."}, 404
auditscore_marshaled = marshal(result.__dict__, AUDIT_SCORE_FIELDS)
auditscore_marshaled['auth'] = self.auth_dict
account_pattern_scores_marshaled = []
for account_pattern_score in result.account_pattern_scores:
account_pattern_score_marshaled = marshal(account_pattern_score, ACCOUNT_PATTERN_AUDIT_SCORE_FIELDS)
account_pattern_scores_marshaled.append(account_pattern_score_marshaled)
auditscore_marshaled['account_pattern_scores'] = account_pattern_scores_marshaled
return auditscore_marshaled, 200
def put(self, id):
"""
        .. http:put:: /api/1/auditscores/<int:id>
Update override audit score with the given ID.
**Example Request**:
.. sourcecode:: http
PUT /api/1/auditscores/123 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"id": 123,
"method": "check_xxx",
"technology": "policy",
"Score": "1"
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"id": 123,
"score": "1",
auth: {
authenticated: true,
user: "[email protected]"
}
}
:statuscode 200: no error
:statuscode 404: item with given ID not found
:statuscode 401: Authentication failure. Please login.
"""
self.reqparse.add_argument('method', required=True, type=unicode, help='Must provide method name',
location='json')
self.reqparse.add_argument('technology', required=True, type=unicode, help='Technology required.',
location='json')
self.reqparse.add_argument('score', required=False, type=unicode, help='Must provide score.',
location='json')
        self.reqparse.add_argument('disabled', required=True, type=unicode, help='Must provide disabled flag.',
location='json')
args = self.reqparse.parse_args()
score = args['score']
if score is None:
score = 0
result = ItemAuditScore.query.filter(ItemAuditScore.id == id).first()
if not result:
return {"status": "Override audit score with the given ID not found."}, 404
result.method = args['method']
result.technology = args['technology']
result.disabled = args['disabled']
result.score = int(score)
db.session.add(result)
db.session.commit()
db.session.refresh(result)
auditscore_marshaled = marshal(result.__dict__, AUDIT_SCORE_FIELDS)
auditscore_marshaled['auth'] = self.auth_dict
return auditscore_marshaled, 200
def delete(self, id):
"""
.. http:delete:: /api/1/auditscores/123
Delete an override audit score
**Example Request**:
.. sourcecode:: http
DELETE /api/1/auditscores/123 HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 202 Accepted
Vary: Accept
Content-Type: application/json
{
'status': 'deleted'
}
:statuscode 202: accepted
:statuscode 401: Authentication Error. Please Login.
"""
        result = ItemAuditScore.query.filter(ItemAuditScore.id == id).first()
        if not result:
            return {"status": "Override audit score with the given ID not found."}, 404
        db.session.delete(result)
db.session.commit()
return {'status': 'deleted'}, 202
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova instances.
"""
import datetime
import logging
from django import http
from django import shortcuts
from django import template
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django_openstack import api
from django_openstack import forms
from django_openstack import utils
import openstack.compute.servers
import openstackx.api.exceptions as api_exceptions
import StringIO
LOG = logging.getLogger('django_openstack.dash')
class TerminateInstance(forms.SelfHandlingForm):
instance = forms.CharField(required=True)
def handle(self, request, data):
instance_id = data['instance']
instance = api.server_get(request, instance_id)
try:
api.server_delete(request, instance)
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while terminating instance "%s"') %
instance_id)
messages.error(request,
_('Unable to terminate %(inst)s: %(message)s') %
{"inst": instance_id, "message": e.message})
else:
msg = _('Instance %s has been terminated.') % instance_id
LOG.info(msg)
messages.success(request, msg)
return shortcuts.redirect(request.build_absolute_uri())
class RebootInstance(forms.SelfHandlingForm):
instance = forms.CharField(required=True)
def handle(self, request, data):
instance_id = data['instance']
try:
server = api.server_reboot(request, instance_id)
messages.success(request, _("Instance rebooting"))
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while rebooting instance "%s"') %
instance_id)
messages.error(request,
_('Unable to reboot instance: %s') % e.message)
else:
msg = _('Instance %s has been rebooted.') % instance_id
LOG.info(msg)
messages.success(request, msg)
return shortcuts.redirect(request.build_absolute_uri())
class UpdateInstance(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput())
instance = forms.CharField(widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
name = forms.CharField(required=True)
description = forms.CharField(required=False)
def handle(self, request, data):
tenant_id = data['tenant_id']
description = data.get('description', '')
try:
api.server_update(request,
data['instance'],
data['name'],
description)
messages.success(request, _("Instance '%s' updated") %
data['name'])
except api_exceptions.ApiException, e:
messages.error(request,
_('Unable to update instance: %s') % e.message)
return shortcuts.redirect('dash_instances', tenant_id)
@login_required
def index(request, tenant_id):
for f in (TerminateInstance, RebootInstance):
form, handled = f.maybe_handle(request)
if handled:
return handled
instances = []
try:
instances = api.server_list(request)
except api_exceptions.ApiException as e:
LOG.exception(_('Exception in instance index'))
messages.error(request, _('Unable to get instance list: %s') % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return shortcuts.render_to_response(
'django_openstack/dash/instances/index.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
@login_required
def refresh(request, tenant_id):
instances = []
try:
instances = api.server_list(request)
except Exception as e:
messages.error(request,
_('Unable to get instance list: %s') % e.message)
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
terminate_form = TerminateInstance()
reboot_form = RebootInstance()
return shortcuts.render_to_response(
'django_openstack/dash/instances/_instance_list.html', {
'instances': instances,
'terminate_form': terminate_form,
'reboot_form': reboot_form,
}, context_instance=template.RequestContext(request))
@login_required
def usage(request, tenant_id=None):
today = utils.today()
date_start = datetime.date(today.year, today.month, 1)
datetime_start = datetime.datetime.combine(date_start, utils.time())
datetime_end = utils.utcnow()
show_terminated = request.GET.get('show_terminated', False)
usage = {}
if not tenant_id:
tenant_id = request.user.tenant_id
try:
usage = api.usage_get(request, tenant_id, datetime_start, datetime_end)
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException in instance usage'))
messages.error(request, _('Unable to get usage info: %s') % e.message)
ram_unit = "MB"
total_ram = 0
if hasattr(usage, 'total_active_ram_size'):
total_ram = usage.total_active_ram_size
if total_ram > 999:
ram_unit = "GB"
total_ram /= float(1024)
running_instances = []
terminated_instances = []
if hasattr(usage, 'instances'):
now = datetime.datetime.now()
for i in usage.instances:
# this is just a way to phrase uptime in a way that is compatible
# with the 'timesince' filter. Use of local time intentional
i['uptime_at'] = now - datetime.timedelta(seconds=i['uptime'])
if i['ended_at']:
terminated_instances.append(i)
else:
running_instances.append(i)
instances = running_instances
if show_terminated:
instances += terminated_instances
if request.GET.get('format', 'html') == 'csv':
template_name = 'django_openstack/dash/instances/usage.csv'
mimetype = "text/csv"
else:
template_name = 'django_openstack/dash/instances/usage.html'
mimetype = "text/html"
return shortcuts.render_to_response(template_name, {
'usage': usage,
'ram_unit': ram_unit,
'total_ram': total_ram,
# there are no date selection caps yet so keeping csv_link simple
'csv_link': '?format=csv',
'show_terminated': show_terminated,
'datetime_start': datetime_start,
'datetime_end': datetime_end,
'instances': instances
}, context_instance=template.RequestContext(request), mimetype=mimetype)
@login_required
def console(request, tenant_id, instance_id):
try:
# TODO(jakedahn): clean this up once the api supports tailing.
length = request.GET.get('length', '')
console = api.console_create(request, instance_id, 'text')
response = http.HttpResponse(mimetype='text/plain')
if length:
response.write('\n'.join(console.output.split('\n')[-int(length):]))
else:
response.write(console.output)
response.flush()
return response
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while fetching instance console'))
messages.error(request,
_('Unable to get log for instance %s: %s') %
(instance_id, e.message))
return shortcuts.redirect('dash_instances', tenant_id)
@login_required
def vnc(request, tenant_id, instance_id):
try:
console = api.console_create(request, instance_id, 'vnc')
instance = api.server_get(request, instance_id)
return shortcuts.redirect(console.output +
("&title=%s(%s)" % (instance.name, instance_id)))
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while fetching instance vnc connection'))
messages.error(request,
_('Unable to get vnc console for instance %(inst)s: %(message)s') %
{"inst": instance_id, "message": e.message})
return shortcuts.redirect('dash_instances', tenant_id)
@login_required
def update(request, tenant_id, instance_id):
try:
instance = api.server_get(request, instance_id)
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while fetching instance info'))
messages.error(request,
_('Unable to get information for instance %(inst)s: %(message)s') %
{"inst": instance_id, "message": e.message})
return shortcuts.redirect('dash_instances', tenant_id)
form, handled = UpdateInstance.maybe_handle(request, initial={
'instance': instance_id,
'tenant_id': tenant_id,
'name': instance.name,
'description': instance.attrs['description']})
if handled:
return handled
return shortcuts.render_to_response(
'django_openstack/dash/instances/update.html', {
'instance': instance,
'form': form,
}, context_instance=template.RequestContext(request))
@login_required
def detail(request, tenant_id, instance_id):
try:
instance = api.server_get(request, instance_id)
try:
console = api.console_create(request, instance_id, 'vnc')
vnc_url = "%s&title=%s(%s)" % (console.output,
instance.name,
instance_id)
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while fetching instance vnc \
connection'))
messages.error(request,
_('Unable to get vnc console for instance %s: %s') %
(instance_id, e.message))
return shortcuts.redirect('dash_instances', tenant_id)
except api_exceptions.ApiException, e:
LOG.exception(_('ApiException while fetching instance info'))
messages.error(request,
_('Unable to get information for instance %s: %s') %
(instance_id, e.message))
return shortcuts.redirect('dash_instances', tenant_id)
return shortcuts.render_to_response(
'django_openstack/dash/instances/detail.html', {
'instance': instance,
'vnc_url': vnc_url,
}, context_instance=template.RequestContext(request))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import collections
import textwrap
import cffi
from cryptography.hazmat.primitives import padding
from eventlet import semaphore
from oslo_config import cfg
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.openstack.common import jsonutils as json
from barbican.plugin.crypto import crypto as plugin
Attribute = collections.namedtuple("Attribute", ["type", "value"])
CKAttributes = collections.namedtuple("CKAttributes", ["template", "cffivals"])
CKMechanism = collections.namedtuple("CKMechanism", ["mech", "cffivals"])
CKR_OK = 0
CKF_RW_SESSION = (1 << 1)
CKF_SERIAL_SESSION = (1 << 2)
CKU_SO = 0
CKU_USER = 1
CKO_SECRET_KEY = 4
CKK_AES = 0x1f
CKA_CLASS = 0
CKA_TOKEN = 1
CKA_PRIVATE = 2
CKA_LABEL = 3
CKA_APPLICATION = 0x10
CKA_VALUE = 0x11
CKA_OBJECT_ID = 0x12
CKA_CERTIFICATE_TYPE = 0x80
CKA_ISSUER = 0x81
CKA_SERIAL_NUMBER = 0x82
CKA_AC_ISSUER = 0x83
CKA_OWNER = 0x84
CKA_ATTR_TYPES = 0x85
CKA_TRUSTED = 0x86
CKA_CERTIFICATE_CATEGORY = 0x87
CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x88
CKA_URL = 0x89
CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x8a
CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x8b
CKA_CHECK_VALUE = 0x90
CKA_KEY_TYPE = 0x100
CKA_SUBJECT = 0x101
CKA_ID = 0x102
CKA_SENSITIVE = 0x103
CKA_ENCRYPT = 0x104
CKA_DECRYPT = 0x105
CKA_WRAP = 0x106
CKA_UNWRAP = 0x107
CKA_SIGN = 0x108
CKA_SIGN_RECOVER = 0x109
CKA_VERIFY = 0x10a
CKA_VERIFY_RECOVER = 0x10b
CKA_DERIVE = 0x10c
CKA_START_DATE = 0x110
CKA_END_DATE = 0x111
CKA_MODULUS = 0x120
CKA_MODULUS_BITS = 0x121
CKA_PUBLIC_EXPONENT = 0x122
CKA_PRIVATE_EXPONENT = 0x123
CKA_PRIME_1 = 0x124
CKA_PRIME_2 = 0x125
CKA_EXPONENT_1 = 0x126
CKA_EXPONENT_2 = 0x127
CKA_COEFFICIENT = 0x128
CKA_PRIME = 0x130
CKA_SUBPRIME = 0x131
CKA_BASE = 0x132
CKA_PRIME_BITS = 0x133
CKA_SUB_PRIME_BITS = 0x134
CKA_VALUE_BITS = 0x160
CKA_VALUE_LEN = 0x161
CKA_EXTRACTABLE = 0x162
CKA_LOCAL = 0x163
CKA_NEVER_EXTRACTABLE = 0x164
CKA_ALWAYS_SENSITIVE = 0x165
CKA_KEY_GEN_MECHANISM = 0x166
CKA_MODIFIABLE = 0x170
CKA_ECDSA_PARAMS = 0x180
CKA_EC_PARAMS = 0x180
CKA_EC_POINT = 0x181
CKA_SECONDARY_AUTH = 0x200
CKA_AUTH_PIN_FLAGS = 0x201
CKA_ALWAYS_AUTHENTICATE = 0x202
CKA_WRAP_WITH_TRUSTED = 0x210
CKA_HW_FEATURE_TYPE = 0x300
CKA_RESET_ON_INIT = 0x301
CKA_HAS_RESET = 0x302
CKA_PIXEL_X = 0x400
CKA_PIXEL_Y = 0x401
CKA_RESOLUTION = 0x402
CKA_CHAR_ROWS = 0x403
CKA_CHAR_COLUMNS = 0x404
CKA_COLOR = 0x405
CKA_BITS_PER_PIXEL = 0x406
CKA_CHAR_SETS = 0x480
CKA_ENCODING_METHODS = 0x481
CKA_MIME_TYPES = 0x482
CKA_MECHANISM_TYPE = 0x500
CKA_REQUIRED_CMS_ATTRIBUTES = 0x501
CKA_DEFAULT_CMS_ATTRIBUTES = 0x502
CKA_SUPPORTED_CMS_ATTRIBUTES = 0x503
CKM_SHA256_HMAC = 0x251
CKM_AES_KEY_GEN = 0x1080
CKM_AES_CBC_PAD = 0x1085
CKM_AES_KEY_WRAP = 0x1090
VENDOR_SAFENET_CKM_AES_GCM = 0x8000011c
CONF = cfg.CONF
LOG = utils.getLogger(__name__)
p11_crypto_plugin_group = cfg.OptGroup(name='p11_crypto_plugin',
title="PKCS11 Crypto Plugin Options")
p11_crypto_plugin_opts = [
cfg.StrOpt('library_path',
help=u._('Path to vendor PKCS11 library')),
cfg.StrOpt('login',
help=u._('Password to login to PKCS11 session')),
cfg.StrOpt('mkek_label',
help=u._('Master KEK label (used in the HSM)')),
cfg.IntOpt('mkek_length',
help=u._('Master KEK length in bytes.')),
cfg.StrOpt('hmac_label',
help=u._('HMAC label (used in the HSM)')),
]
CONF.register_group(p11_crypto_plugin_group)
CONF.register_opts(p11_crypto_plugin_opts, group=p11_crypto_plugin_group)
def _build_ffi():
ffi = cffi.FFI()
ffi.cdef(textwrap.dedent("""
typedef unsigned char CK_BYTE;
typedef unsigned long CK_ULONG;
typedef unsigned long CK_RV;
typedef unsigned long CK_SESSION_HANDLE;
typedef unsigned long CK_OBJECT_HANDLE;
typedef unsigned long CK_SLOT_ID;
typedef unsigned long CK_FLAGS;
typedef unsigned long CK_USER_TYPE;
typedef unsigned char * CK_UTF8CHAR_PTR;
typedef ... *CK_NOTIFY;
typedef unsigned long ck_attribute_type_t;
struct ck_attribute {
ck_attribute_type_t type;
void *value;
unsigned long value_len;
};
typedef struct ck_attribute CK_ATTRIBUTE;
typedef CK_ATTRIBUTE *CK_ATTRIBUTE_PTR;
typedef unsigned long ck_mechanism_type_t;
struct ck_mechanism {
ck_mechanism_type_t mechanism;
void *parameter;
unsigned long parameter_len;
};
typedef struct ck_mechanism CK_MECHANISM;
typedef CK_MECHANISM *CK_MECHANISM_PTR;
typedef CK_BYTE *CK_BYTE_PTR;
typedef CK_ULONG *CK_ULONG_PTR;
typedef struct CK_AES_GCM_PARAMS {
char * pIv;
unsigned long ulIvLen;
unsigned long ulIvBits;
char * pAAD;
unsigned long ulAADLen;
unsigned long ulTagBits;
} CK_AES_GCM_PARAMS;
"""))
# FUNCTIONS
ffi.cdef(textwrap.dedent("""
CK_RV C_Initialize(void *);
CK_RV C_OpenSession(CK_SLOT_ID, CK_FLAGS, void *, CK_NOTIFY,
CK_SESSION_HANDLE *);
CK_RV C_CloseSession(CK_SESSION_HANDLE);
CK_RV C_Login(CK_SESSION_HANDLE, CK_USER_TYPE, CK_UTF8CHAR_PTR,
CK_ULONG);
CK_RV C_FindObjectsInit(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG);
CK_RV C_FindObjects(CK_SESSION_HANDLE, CK_OBJECT_HANDLE *, CK_ULONG,
CK_ULONG *);
CK_RV C_FindObjectsFinal(CK_SESSION_HANDLE);
CK_RV C_GenerateKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_ATTRIBUTE *,
CK_ULONG, CK_OBJECT_HANDLE *);
CK_RV C_UnwrapKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE,
CK_BYTE *, CK_ULONG, CK_ATTRIBUTE *, CK_ULONG,
CK_OBJECT_HANDLE *);
CK_RV C_WrapKey(CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE,
CK_OBJECT_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR);
CK_RV C_EncryptInit(CK_SESSION_HANDLE, CK_MECHANISM_PTR,
CK_OBJECT_HANDLE);
CK_RV C_Encrypt(CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG,
CK_BYTE_PTR, CK_ULONG_PTR);
CK_RV C_DecryptInit(CK_SESSION_HANDLE, CK_MECHANISM_PTR,
CK_OBJECT_HANDLE);
CK_RV C_Decrypt(CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR,
CK_ULONG_PTR);
CK_RV C_SignInit(CK_SESSION_HANDLE, CK_MECHANISM_PTR,
CK_OBJECT_HANDLE);
CK_RV C_Sign(CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR,
CK_ULONG_PTR);
CK_RV C_VerifyInit(CK_SESSION_HANDLE, CK_MECHANISM_PTR,
CK_OBJECT_HANDLE);
CK_RV C_Verify(CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR,
CK_ULONG);
CK_RV C_GenerateRandom(CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG);
"""))
return ffi
class P11CryptoPluginKeyException(exception.BarbicanException):
message = u._("More than one key found for label")
class P11CryptoPluginException(exception.BarbicanException):
message = u._("General exception")
class P11CryptoPlugin(plugin.CryptoPluginBase):
"""PKCS11 supporting implementation of the crypto plugin.
Generates a single master key and a single HMAC key that remain in the
HSM, then generates a key per project in the HSM, wraps the key, computes
an HMAC, and stores it in the DB. The project key is never unencrypted
outside the HSM.
"""
def __init__(self, conf=cfg.CONF, ffi=None):
self.enc_sem = semaphore.Semaphore()
self.dec_sem = semaphore.Semaphore()
self.verify_sem = semaphore.Semaphore()
self.block_size = 16 # in bytes
# TODO(reaperhulk): abstract this so alternate algorithms/vendors
# are possible.
self.algorithm = VENDOR_SAFENET_CKM_AES_GCM
if conf.p11_crypto_plugin.library_path is None:
raise ValueError(u._("library_path is required"))
self.ffi = _build_ffi() if not ffi else ffi
self.lib = self.ffi.dlopen(conf.p11_crypto_plugin.library_path)
self._check_error(self.lib.C_Initialize(self.ffi.NULL))
self.session = self._open_session(1)
self.rw_session = self._open_session(1)
self._login(conf.p11_crypto_plugin.login, self.session)
self._login(conf.p11_crypto_plugin.login, self.rw_session)
self._perform_rng_self_test()
self.current_mkek_label = conf.p11_crypto_plugin.mkek_label
self.current_hmac_label = conf.p11_crypto_plugin.hmac_label
LOG.debug("Current mkek label: %s", self.current_mkek_label)
LOG.debug("Current hmac label: %s", self.current_hmac_label)
self.key_handles = {}
# cache current MKEK handle in the dictionary
self._get_or_generate_mkek(
self.current_mkek_label,
conf.p11_crypto_plugin.mkek_length
)
self._get_or_generate_hmac_key(self.current_hmac_label)
def _perform_rng_self_test(self):
test_random = self._generate_random(100)
if self.ffi.buffer(test_random, 100)[:] == b"\x00" * 100:
raise P11CryptoPluginException("Apparent RNG self-test failure.")
def _open_session(self, slot):
session_ptr = self.ffi.new("CK_SESSION_HANDLE *")
rv = self.lib.C_OpenSession(
slot,
CKF_RW_SESSION | CKF_SERIAL_SESSION,
self.ffi.NULL,
self.ffi.NULL,
session_ptr
)
self._check_error(rv)
session = session_ptr[0]
return session
def _login(self, password, session):
rv = self.lib.C_Login(
session,
CKU_USER,
password,
len(password)
)
self._check_error(rv)
def _check_error(self, value):
if value != CKR_OK:
raise P11CryptoPluginException(
"HSM returned response code: {0}".format(value)
)
def _build_attributes(self, attrs):
attributes = self.ffi.new("CK_ATTRIBUTE[{0}]".format(len(attrs)))
val_list = []
for index, attr in enumerate(attrs):
attributes[index].type = attr.type
if isinstance(attr.value, bool):
if attr.value:
val_list.append(self.ffi.new("unsigned char *", 1))
else:
val_list.append(self.ffi.new("unsigned char *", 0))
attributes[index].value_len = 1 # sizeof(char) is 1
elif isinstance(attr.value, int):
# second because bools are also considered ints
val_list.append(self.ffi.new("CK_ULONG *", attr.value))
attributes[index].value_len = 8
elif isinstance(attr.value, str):
val_list.append(self.ffi.new("char []", attr.value))
attributes[index].value_len = len(attr.value)
else:
raise TypeError("Unknown attribute type provided.")
attributes[index].value = val_list[-1]
return CKAttributes(attributes, val_list)
def _get_or_generate_mkek(self, mkek_label, mkek_length):
mkek = self._get_key_handle(mkek_label)
if not mkek:
# Generate a key that is persistent and not extractable
ck_attributes = self._build_attributes([
Attribute(CKA_CLASS, CKO_SECRET_KEY),
Attribute(CKA_KEY_TYPE, CKK_AES),
Attribute(CKA_VALUE_LEN, mkek_length),
Attribute(CKA_LABEL, mkek_label),
Attribute(CKA_PRIVATE, True),
Attribute(CKA_SENSITIVE, True),
Attribute(CKA_ENCRYPT, True),
Attribute(CKA_DECRYPT, True),
Attribute(CKA_SIGN, True),
Attribute(CKA_VERIFY, True),
Attribute(CKA_TOKEN, True),
Attribute(CKA_WRAP, True),
Attribute(CKA_UNWRAP, True),
Attribute(CKA_EXTRACTABLE, False)
])
mkek = self._generate_kek(ck_attributes.template)
self.key_handles[mkek_label] = mkek
return mkek
def _get_or_generate_hmac_key(self, hmac_label):
hmac_key = self._get_key_handle(hmac_label)
if not hmac_key:
# Generate a key that is persistent and not extractable
ck_attributes = self._build_attributes([
Attribute(CKA_CLASS, CKO_SECRET_KEY),
Attribute(CKA_KEY_TYPE, CKK_AES),
Attribute(CKA_VALUE_LEN, 32),
Attribute(CKA_LABEL, hmac_label),
Attribute(CKA_PRIVATE, True),
Attribute(CKA_SENSITIVE, True),
Attribute(CKA_SIGN, True),
Attribute(CKA_VERIFY, True),
Attribute(CKA_TOKEN, True),
Attribute(CKA_EXTRACTABLE, False)
])
hmac_key = self._generate_kek(ck_attributes.template)
self.key_handles[hmac_label] = hmac_key
return hmac_key
def _get_key_handle(self, mkek_label):
if mkek_label in self.key_handles:
return self.key_handles[mkek_label]
ck_attributes = self._build_attributes([
Attribute(CKA_CLASS, CKO_SECRET_KEY),
Attribute(CKA_KEY_TYPE, CKK_AES),
Attribute(CKA_LABEL, mkek_label)
])
rv = self.lib.C_FindObjectsInit(
self.session, ck_attributes.template, len(ck_attributes.template)
)
self._check_error(rv)
returned_count = self.ffi.new("CK_ULONG *")
object_handle_ptr = self.ffi.new("CK_OBJECT_HANDLE *")
rv = self.lib.C_FindObjects(
self.session, object_handle_ptr, 2, returned_count
)
self._check_error(rv)
if returned_count[0] == 1:
key = object_handle_ptr[0]
rv = self.lib.C_FindObjectsFinal(self.session)
self._check_error(rv)
if returned_count[0] == 1:
return key
elif returned_count[0] == 0:
return None
else:
raise P11CryptoPluginKeyException()
def _generate_random(self, length):
buf = self.ffi.new("CK_BYTE[{0}]".format(length))
rv = self.lib.C_GenerateRandom(self.session, buf, length)
self._check_error(rv)
return buf
def _build_gcm_mech(self, iv):
mech = self.ffi.new("CK_MECHANISM *")
mech.mechanism = self.algorithm
gcm = self.ffi.new("CK_AES_GCM_PARAMS *")
gcm.pIv = iv
gcm.ulIvLen = 16
gcm.ulIvBits = 128
gcm.ulTagBits = 128
mech.parameter = gcm
mech.parameter_len = 48 # sizeof(CK_AES_GCM_PARAMS)
return CKMechanism(mech, gcm)
def _generate_kek(self, template):
"""Generates both master and project KEKs
:param template: A tuple of tuples in (CKA_TYPE, VALUE) form
"""
mech = self.ffi.new("CK_MECHANISM *")
mech.mechanism = CKM_AES_KEY_GEN
object_handle_ptr = self.ffi.new("CK_OBJECT_HANDLE *")
rv = self.lib.C_GenerateKey(
self.rw_session, mech, template, len(template), object_handle_ptr
)
self._check_error(rv)
return object_handle_ptr[0]
def _generate_wrapped_kek(self, kek_label, key_length):
# generate a non-persistent key that is extractable
ck_attributes = self._build_attributes([
Attribute(CKA_CLASS, CKO_SECRET_KEY),
Attribute(CKA_KEY_TYPE, CKK_AES),
Attribute(CKA_VALUE_LEN, key_length),
Attribute(CKA_LABEL, kek_label),
Attribute(CKA_PRIVATE, True),
Attribute(CKA_SENSITIVE, True),
Attribute(CKA_ENCRYPT, True),
Attribute(CKA_DECRYPT, True),
Attribute(CKA_TOKEN, False), # not persistent
Attribute(CKA_WRAP, True),
Attribute(CKA_UNWRAP, True),
Attribute(CKA_EXTRACTABLE, True)
])
kek = self._generate_kek(ck_attributes.template)
mech = self.ffi.new("CK_MECHANISM *")
mech.mechanism = CKM_AES_CBC_PAD
iv = self._generate_random(16)
mech.parameter = iv
mech.parameter_len = 16
mkek = self.key_handles[self.current_mkek_label]
# Since we're using CKM_AES_CBC_PAD the maximum length of the
# padded key will be the key length + one block. We allocate the
# worst case scenario as a CK_BYTE array.
padded_length = key_length + self.block_size
buf = self.ffi.new("CK_BYTE[{0}]".format(padded_length))
buf_len = self.ffi.new("CK_ULONG *", padded_length)
rv = self.lib.C_WrapKey(self.rw_session, mech, mkek, kek, buf, buf_len)
self._check_error(rv)
wrapped_key = self.ffi.buffer(buf, buf_len[0])[:]
hmac = self._compute_hmac(wrapped_key)
return {
'iv': base64.b64encode(self.ffi.buffer(iv)[:]),
'wrapped_key': base64.b64encode(wrapped_key),
'hmac': base64.b64encode(hmac),
'mkek_label': self.current_mkek_label,
'hmac_label': self.current_hmac_label
}
def _compute_hmac(self, wrapped_key):
mech = self.ffi.new("CK_MECHANISM *")
mech.mechanism = CKM_SHA256_HMAC
hmac_key = self.key_handles[self.current_hmac_label]
rv = self.lib.C_SignInit(self.rw_session, mech, hmac_key)
self._check_error(rv)
ck_bytes = self.ffi.new("CK_BYTE[]", wrapped_key)
buf = self.ffi.new("CK_BYTE[32]")
buf_len = self.ffi.new("CK_ULONG *", 32)
rv = self.lib.C_Sign(
self.rw_session, ck_bytes, len(wrapped_key), buf, buf_len
)
self._check_error(rv)
return self.ffi.buffer(buf, buf_len[0])[:]
def _verify_hmac(self, hmac_key, sig, wrapped_key):
mech = self.ffi.new("CK_MECHANISM *")
mech.mechanism = CKM_SHA256_HMAC
with self.verify_sem:
rv = self.lib.C_VerifyInit(self.rw_session, mech, hmac_key)
self._check_error(rv)
ck_bytes = self.ffi.new("CK_BYTE[]", wrapped_key)
ck_sig = self.ffi.new("CK_BYTE[]", sig)
rv = self.lib.C_Verify(
self.rw_session, ck_bytes, len(wrapped_key), ck_sig, len(sig)
)
self._check_error(rv)
def _unwrap_key(self, plugin_meta):
"""Unwraps byte string to key handle in HSM.
:param plugin_meta: kek_meta_dto plugin meta (json string)
:returns: Key handle from HSM. No unencrypted bytes.
"""
meta = json.loads(plugin_meta)
iv = base64.b64decode(meta['iv'])
hmac = base64.b64decode(meta['hmac'])
wrapped_key = base64.b64decode(meta['wrapped_key'])
mkek = self._get_key_handle(meta['mkek_label'])
hmac_key = self._get_key_handle(meta['hmac_label'])
LOG.debug("Unwrapping key with %s mkek label", meta['mkek_label'])
LOG.debug("Verifying key with %s hmac label", meta['hmac_label'])
self._verify_hmac(hmac_key, hmac, wrapped_key)
unwrapped = self.ffi.new("CK_OBJECT_HANDLE *")
mech = self.ffi.new("CK_MECHANISM *")
mech.mechanism = CKM_AES_CBC_PAD
iv = self.ffi.new("CK_BYTE[]", iv)
mech.parameter = iv
mech.parameter_len = 16
ck_attributes = self._build_attributes([
Attribute(CKA_CLASS, CKO_SECRET_KEY),
Attribute(CKA_KEY_TYPE, CKK_AES),
Attribute(CKA_ENCRYPT, True),
Attribute(CKA_DECRYPT, True),
Attribute(CKA_TOKEN, False),
Attribute(CKA_WRAP, True),
Attribute(CKA_UNWRAP, True),
Attribute(CKA_EXTRACTABLE, True)
])
rv = self.lib.C_UnwrapKey(
self.rw_session, mech, mkek, wrapped_key, len(wrapped_key),
ck_attributes.template, len(ck_attributes.template), unwrapped
)
self._check_error(rv)
return unwrapped[0]
def _pad(self, unencrypted):
padder = padding.PKCS7(self.block_size * 8).padder()
return padder.update(unencrypted) + padder.finalize()
def _unpad(self, unencrypted):
unpadder = padding.PKCS7(self.block_size * 8).unpadder()
return unpadder.update(unencrypted) + unpadder.finalize()
def encrypt(self, encrypt_dto, kek_meta_dto, project_id):
key = self._unwrap_key(kek_meta_dto.plugin_meta)
iv = self._generate_random(16)
ck_mechanism = self._build_gcm_mech(iv)
with self.enc_sem:
rv = self.lib.C_EncryptInit(self.session, ck_mechanism.mech, key)
self._check_error(rv)
# GCM does not require padding, but sometimes HSMs don't seem to
# know that and then you need to pad things for no reason.
pt_padded = self._pad(encrypt_dto.unencrypted)
pt_len = len(pt_padded)
# The GCM mechanism adds a 16 byte tag to the front of the
# cyphertext (which is the same length as the (annoyingly) padded
# plaintext) so adding 16 bytes guarantees sufficient space.
ct_len = self.ffi.new("CK_ULONG *", pt_len + 16)
ct = self.ffi.new("CK_BYTE[{0}]".format(pt_len + 16))
rv = self.lib.C_Encrypt(
self.session, pt_padded, pt_len, ct, ct_len
)
self._check_error(rv)
cyphertext = self.ffi.buffer(ct, ct_len[0])[:]
kek_meta_extended = json.dumps({
'iv': base64.b64encode(self.ffi.buffer(iv)[:])
})
return plugin.ResponseDTO(cyphertext, kek_meta_extended)
def decrypt(self, decrypt_dto, kek_meta_dto, kek_meta_extended,
project_id):
key = self._unwrap_key(kek_meta_dto.plugin_meta)
meta_extended = json.loads(kek_meta_extended)
iv = base64.b64decode(meta_extended['iv'])
iv = self.ffi.new("CK_BYTE[]", iv)
ck_mechanism = self._build_gcm_mech(iv)
with self.dec_sem:
rv = self.lib.C_DecryptInit(self.session, ck_mechanism.mech, key)
self._check_error(rv)
pt = self.ffi.new(
"CK_BYTE[{0}]".format(len(decrypt_dto.encrypted))
)
pt_len = self.ffi.new("CK_ULONG *", len(decrypt_dto.encrypted))
rv = self.lib.C_Decrypt(
self.session,
decrypt_dto.encrypted,
len(decrypt_dto.encrypted),
pt,
pt_len
)
self._check_error(rv)
return self._unpad(self.ffi.buffer(pt, pt_len[0])[:])
def bind_kek_metadata(self, kek_meta_dto):
# Enforce idempotency: If we've already generated a key leave now.
if not kek_meta_dto.plugin_meta:
kek_length = 32
kek_meta_dto.plugin_meta = json.dumps(
self._generate_wrapped_kek(kek_meta_dto.kek_label, kek_length)
)
# To be persisted by Barbican:
kek_meta_dto.algorithm = 'AES'
kek_meta_dto.bit_length = kek_length * 8
kek_meta_dto.mode = 'CBC'
return kek_meta_dto
def generate_symmetric(self, generate_dto, kek_meta_dto, project_id):
byte_length = generate_dto.bit_length / 8
buf = self._generate_random(byte_length)
rand = self.ffi.buffer(buf)[:]
assert len(rand) == byte_length
return self.encrypt(plugin.EncryptDTO(rand), kek_meta_dto, project_id)
def generate_asymmetric(self, generate_dto, kek_meta_dto, project_id):
raise NotImplementedError(u._("Feature not implemented for PKCS11"))
def supports(self, type_enum, algorithm=None, bit_length=None, mode=None):
if type_enum == plugin.PluginSupportTypes.ENCRYPT_DECRYPT:
return True
elif type_enum == plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION:
return True
elif type_enum == plugin.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION:
return False
else:
return False
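# Example barbican.conf snippet for this plugin (the values shown are only
# illustrative; the option names match those registered above):
# [p11_crypto_plugin]
# library_path = /usr/lib/libCryptoki2_64.so
# login = <HSM partition password>
# mkek_label = an_mkek_label
# mkek_length = 32
# hmac_label = an_hmac_label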
|
|
import re
from cast.Lexer import PatternMatchingLexer, StatelessPatternMatchingLexer
from cast.Token import cToken, TokenList
from cast.c_Parser import c_Parser
def parseIdentifier( string, lineno, colno, terminalId, lexer ):
addedToken = token( string, lineno, colno, terminalId, lexer)
if addedToken.source_string in lexer.typedefs:
tId = c_Parser.TERMINAL_TYPEDEF_IDENTIFIER
addedToken.id = tId
addedToken.terminal_str = c_Parser.terminals[tId]
else:
lexer.lastIdentifier = addedToken
def parseLabelIdentifier( string, lineno, colno, terminalId, lexer ):
hintId = c_Parser.TERMINAL_LABEL_HINT
ctx = lexer.getContext()
lexer.addToken(cToken(hintId, lexer.resource, c_Parser.terminals[hintId], '', lineno, colno, context=ctx))
lexer.addToken(cToken(terminalId, lexer.resource, c_Parser.terminals[terminalId], string, lineno, colno, context=ctx))
def parseTypedef( string, lineno, colno, terminalId, lexer ):
lexer.typedefBlocks = lexer.typedefBlocks.union({(lexer.braceLevel, lexer.parenLevel)})
token( string, lineno, colno, terminalId, lexer )
def parseLbrace( string, lineno, colno, terminalId, lexer ):
lexer.braceLevel += 1
token( string, lineno, colno, terminalId, lexer )
def parseRbrace( string, lineno, colno, terminalId, lexer ):
lexer.braceLevel -= 1
token( string, lineno, colno, terminalId, lexer )
if (c_Parser.TERMINAL_RBRACE, lexer.braceLevel) in lexer.endifTokens:
lexer.endifTokens = lexer.endifTokens.difference({(c_Parser.TERMINAL_RBRACE, lexer.braceLevel)})
token('', lineno, colno, c_Parser.TERMINAL_ENDIF, lexer)
def parseLparenCast( string, lineno, colno, terminalId, lexer ):
lexer.parenLevel += 1
if lexer.braceLevel == 0:
token( string, lineno, colno, c_Parser.TERMINAL_LPAREN, lexer )
else:
token( string, lineno, colno, c_Parser.TERMINAL_LPAREN_CAST, lexer )
def parseLparen( string, lineno, colno, terminalId, lexer ):
lexer.parenLevel += 1
token( string, lineno, colno, terminalId, lexer )
def parseRparen( string, lineno, colno, terminalId, lexer ):
lexer.parenLevel -= 1
token( string, lineno, colno, terminalId, lexer )
if lexer.isIf():
lexer.addEndif()
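# Synthetic ENDIF injection: parseIf marks a pending 'if' on the lexer, the
# matching ')' (parseRparen) or an 'else' (parseElse) registers which token
# will close the body via lexer.addEndif(), and parseSemi/parseRbrace emit
# TERMINAL_ENDIF once that ';' or '}' is reached at the recorded brace level.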
def parseIf( string, lineno, colno, terminalId, lexer ):
token( string, lineno, colno, terminalId, lexer )
lexer.markIf()
def parseElse( string, lineno, colno, terminalId, lexer ):
token( string, lineno, colno, terminalId, lexer )
lexer.addEndif()
def parseSemi( string, lineno, colno, terminalId, lexer ):
token( string, lineno, colno, terminalId, lexer )
if (c_Parser.TERMINAL_SEMI, lexer.braceLevel,) in lexer.endifTokens:
lexer.endifTokens = lexer.endifTokens.difference({(c_Parser.TERMINAL_SEMI, lexer.braceLevel)})
token('', lineno, colno, c_Parser.TERMINAL_ENDIF, lexer)
if (lexer.braceLevel, lexer.parenLevel) in lexer.typedefBlocks:
lexer.typedefBlocks = lexer.typedefBlocks.difference({(lexer.braceLevel, lexer.parenLevel)})
tId = c_Parser.TERMINAL_TYPEDEF_IDENTIFIER
if lexer.lastIdentifier:
lexer.typedefs[lexer.lastIdentifier.source_string] = cToken(tId, lexer.resource, c_Parser.terminals[tId], lexer.lastIdentifier.source_string, lineno, colno, lexer.getContext())
else:
raise Exception('no last identifier')
def parseComma( string, lineno, colno, terminalId, lexer ):
token( string, lineno, colno, terminalId, lexer )
if (lexer.braceLevel, lexer.parenLevel) in lexer.typedefBlocks:
tId = c_Parser.TERMINAL_TYPEDEF_IDENTIFIER
if lexer.lastIdentifier:
lexer.typedefs[lexer.lastIdentifier.source_string] = cToken(tId, lexer.resource, c_Parser.terminals[tId], lexer.lastIdentifier.source_string, lineno, colno, lexer.getContext())
else:
raise Exception('no last identifier')
decls = None
def declaration_specifiers():
global decls
if not decls:
c = lambda x: c_Parser.terminals[x]
# decls = set(map(c, ['typedef','extern','static','auto','register','void','char','short','int','long','float','double','signed','unsigned','bool','complex','imaginary','struct','union','enum','typedef_identifier','const','restrict','volatile','inline']))
decls = {
c('typedef'), c('extern'), c('static'), c('auto'), \
c('register'), c('void'), c('char'), c('short'), c('int'), \
c('long'), c('float'), c('double'), c('signed'), c('unsigned'), \
c('bool'), c('complex'), c('imaginary'), c('struct'), c('union'), c('enum'), \
c('typedef_identifier'), c('const'), c('restrict'), c('volatile'), \
c('inline')
}
return decls
def token(string, lineno, colno, terminalId, lexer):
matchedToken = cToken(terminalId, lexer.resource, c_Parser.terminals[terminalId], string, lineno, colno, lexer.getContext())
lexer.addToken(matchedToken)
return matchedToken
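# Matches C identifiers, allowing universal character names (a backslash
# followed by u/U and 4 or 8 hex digits) in the first and subsequent
# characters.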
identifierRegex = r'([a-zA-Z_]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)([a-zA-Z_0-9]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)*'
class Factory():
def createLexer(self, sourceCode, pp_expander=None, context=None):
return cLexer(sourceCode, pp_expander, context)
def createStatelessLexer(self, sourceCode):
return StatelessCLexer(sourceCode)
class cLexer(PatternMatchingLexer):
type_specifier = ['void', 'char', 'short', 'int', 'long', 'float', 'double', 'signed', 'unsigned', '_Bool', '_Complex']
cRegex = [
# Comments
( re.compile(r'/\*.*?\*/', re.S), None, None ),
( re.compile(r'//.*', 0), None, None ),
# Keywords
( re.compile(r'auto(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_AUTO, token ),
( re.compile(r'_Bool(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_BOOL, token ),
( re.compile(r'break(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_BREAK, token ),
( re.compile(r'case(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_CASE, token ),
( re.compile(r'char(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_CHAR, token ),
( re.compile(r'_Complex(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_COMPLEX, token ),
( re.compile(r'const(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_CONST, token ),
( re.compile(r'continue(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_CONTINUE, token ),
( re.compile(r'default(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_DEFAULT, token ),
( re.compile(r'do(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_DO, token ),
( re.compile(r'double(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_DOUBLE, token ),
( re.compile(r'else\s+if(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_ELSE_IF, parseIf ),
( re.compile(r'else(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_ELSE, parseElse ),
( re.compile(r'enum(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_ENUM, token ),
( re.compile(r'extern(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_EXTERN, token ),
( re.compile(r'float(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_FLOAT, token ),
( re.compile(r'for(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_FOR, token ),
( re.compile(r'goto(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_GOTO, token ),
( re.compile(r'if(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_IF, parseIf ),
( re.compile(r'_Imaginary(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_IMAGINARY, token ),
( re.compile(r'inline(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_INLINE, token ),
( re.compile(r'int(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_INT, token ),
( re.compile(r'long(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_LONG, token ),
( re.compile(r'register(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_REGISTER, token ),
( re.compile(r'restrict(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_RESTRICT, token ),
( re.compile(r'return(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_RETURN, token ),
( re.compile(r'short(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_SHORT, token ),
( re.compile(r'signed(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_SIGNED, token ),
( re.compile(r'sizeof(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_SIZEOF, token ),
( re.compile(r'static(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_STATIC, token ),
( re.compile(r'struct(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_STRUCT, token ),
( re.compile(r'switch(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_SWITCH, token ),
( re.compile(r'typedef(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_TYPEDEF, parseTypedef ),
( re.compile(r'union(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_UNION, token ),
( re.compile(r'unsigned(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_UNSIGNED, token ),
( re.compile(r'void(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_VOID, token ),
( re.compile(r'volatile(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_VOLATILE, token ),
( re.compile(r'while(?=[^a-zA-Z_]|$)'), c_Parser.TERMINAL_WHILE, token ),
# Identifiers
    ( re.compile(r'%s(?=\s*:)' % (identifierRegex)), c_Parser.TERMINAL_IDENTIFIER, parseLabelIdentifier ),
( re.compile(identifierRegex), c_Parser.TERMINAL_IDENTIFIER, parseIdentifier ),
# Unicode Characters
( re.compile(r'\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?'), c_Parser.TERMINAL_UNIVERSAL_CHARACTER_NAME, token ),
# Digraphs
( re.compile(r'<%'), c_Parser.TERMINAL_LBRACE, token ),
( re.compile(r'%>'), c_Parser.TERMINAL_RBRACE, token ),
( re.compile(r'<:'), c_Parser.TERMINAL_LSQUARE, token ),
( re.compile(r':>'), c_Parser.TERMINAL_RSQUARE, token ),
( re.compile(r'%:%:'), c_Parser.TERMINAL_POUNDPOUND, token ),
( re.compile(r'%:'), c_Parser.TERMINAL_POUND, token ),
# Punctuators
( re.compile(r'\['), c_Parser.TERMINAL_LSQUARE, token ),
( re.compile(r'\]'), c_Parser.TERMINAL_RSQUARE, token ),
    ( re.compile(r'\((?=\s*' + r'void[\s]*\))'), c_Parser.TERMINAL_LPAREN, parseLparen ),
( re.compile(r'\('), c_Parser.TERMINAL_LPAREN, parseLparen ),
( re.compile(r'\)'), c_Parser.TERMINAL_RPAREN, parseRparen ),
( re.compile(r'\{'), c_Parser.TERMINAL_LBRACE, parseLbrace ),
( re.compile(r'\}'), c_Parser.TERMINAL_RBRACE, parseRbrace ),
( re.compile(r'\.\.\.'), c_Parser.TERMINAL_ELIPSIS, token ),
( re.compile(r'\.'), c_Parser.TERMINAL_DOT, token ),
( re.compile(r'->'), c_Parser.TERMINAL_ARROW, token ),
( re.compile(r'\+\+'), c_Parser.TERMINAL_INCR, token ),
( re.compile(r'--'), c_Parser.TERMINAL_DECR, token ),
( re.compile(r'&(?!&)'), c_Parser.TERMINAL_BITAND, token ),
( re.compile(r'\*(?!=)'), c_Parser.TERMINAL_ASTERISK, token ),
( re.compile(r'\+(?!=)'), c_Parser.TERMINAL_ADD, token ),
( re.compile(r'-(?!=)'), c_Parser.TERMINAL_SUB, token ),
( re.compile(r'~'), c_Parser.TERMINAL_TILDE, token ),
( re.compile(r'!(?!=)'), c_Parser.TERMINAL_EXCLAMATION_POINT, token ),
( re.compile(r'/(?!=)'), c_Parser.TERMINAL_DIV, token ),
( re.compile(r'%(?!=)'), c_Parser.TERMINAL_MOD, token ),
( re.compile(r'<<(?!=)'), c_Parser.TERMINAL_LSHIFT, token ),
( re.compile(r'>>(?!=)'), c_Parser.TERMINAL_RSHIFT, token ),
( re.compile(r'<(?!=)'), c_Parser.TERMINAL_LT, token ),
( re.compile(r'>(?!=)'), c_Parser.TERMINAL_GT, token ),
( re.compile(r'<='), c_Parser.TERMINAL_LTEQ, token ),
( re.compile(r'>='), c_Parser.TERMINAL_GTEQ, token ),
( re.compile(r'=='), c_Parser.TERMINAL_EQ, token ),
( re.compile(r'!='), c_Parser.TERMINAL_NEQ, token ),
( re.compile(r'\^(?!=)'), c_Parser.TERMINAL_BITXOR, token ),
( re.compile(r'\|(?!\|)'), c_Parser.TERMINAL_BITOR, token ),
( re.compile(r'&&'), c_Parser.TERMINAL_AND, token ),
( re.compile(r'\|\|'), c_Parser.TERMINAL_OR, token ),
( re.compile(r'\?'), c_Parser.TERMINAL_QUESTIONMARK, token ),
( re.compile(r':'), c_Parser.TERMINAL_COLON, token ),
( re.compile(r';'), c_Parser.TERMINAL_SEMI, parseSemi ),
( re.compile(r'=(?!=)'), c_Parser.TERMINAL_ASSIGN, token ),
( re.compile(r'\*='), c_Parser.TERMINAL_MULEQ, token ),
( re.compile(r'/='), c_Parser.TERMINAL_DIVEQ, token ),
( re.compile(r'%='), c_Parser.TERMINAL_MODEQ, token ),
( re.compile(r'\+='), c_Parser.TERMINAL_ADDEQ, token ),
( re.compile(r'-='), c_Parser.TERMINAL_SUBEQ, token ),
( re.compile(r'<<='), c_Parser.TERMINAL_LSHIFTEQ, token ),
( re.compile(r'>>='), c_Parser.TERMINAL_RSHIFTEQ, token ),
( re.compile(r'&='), c_Parser.TERMINAL_BITANDEQ, token ),
( re.compile(r'\^='), c_Parser.TERMINAL_BITXOREQ, token ),
( re.compile(r'\|='), c_Parser.TERMINAL_BITOREQ, token ),
( re.compile(r',(?=\s*})'), c_Parser.TERMINAL_TRAILING_COMMA, token ),
( re.compile(r',(?=\s*\.\.\.)'), c_Parser.TERMINAL_COMMA_VA_ARGS, token ),
( re.compile(r','), c_Parser.TERMINAL_COMMA, parseComma ),
( re.compile(r'##'), c_Parser.TERMINAL_POUNDPOUND, token ),
( re.compile(r'#(?!#)'), c_Parser.TERMINAL_POUND, token ),
# Constants, Literals
( re.compile(r'([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)([uU](ll|LL)|[uU][lL]?|(ll|LL)[uU]?|[lL][uU])?'), c_Parser.TERMINAL_INTEGER_CONSTANT, token ),
( re.compile(r'(([0-9]+)?\.([0-9]+)|[0-9]+\.|[0-9]+)([eE][-+]?[0-9]+)?([flFL]|[dD][fFdDlL])?'), c_Parser.TERMINAL_DECIMAL_FLOATING_CONSTANT, token ),
( re.compile(r'[L]?"([^\\\"\n]|\\[\\"\'nrbtfav\?]|\\[0-7]{1,3}|\\x[0-9a-fA-F]+|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)*"'), c_Parser.TERMINAL_STRING_LITERAL, token ),
( re.compile(r'0[xX](([0-9a-fA-F]+)?\.([0-9a-fA-F]+)|[0-9a-fA-F]+\.)[pP][-+]?[0-9]+[flFL]?'), c_Parser.TERMINAL_HEXADECIMAL_FLOATING_CONSTANT, token ),
( re.compile(r"[L]?'([^\\'\n]|\\[\\\"\'nrbtfav\?]|\\[0-7]{1,3}|\\x[0-9a-fA-F]+|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)+'"), c_Parser.TERMINAL_CHARACTER_CONSTANT, token ),
# Whitespace
( re.compile(r'\s+', 0), None, None )
]
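    # Illustrative aside (not part of the token table above): the keyword
    # entries rely on a lookahead so a keyword only matches when it is not the
    # prefix of a longer identifier. A minimal standalone check using the same
    # pattern as TERMINAL_RETURN:
    #
    #     import re
    #     kw = re.compile(r'return(?=[^a-zA-Z_]|$)')
    #     assert kw.match('return;') is not None  # 'return' then a non-identifier char
    #     assert kw.match('returns') is None      # prefix of a longer identifier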
def __init__(self, sourceCode, pp_expander=None, context=None):
if context:
self.__dict__ = context
else:
self.braceLevel = 0
self.parenLevel = 0
self.ifBlocks = set()
self.typedefBlocks = set()
self.typedefs = dict()
self.lastIdentifier = None
self.endifTokens = set()
self.hint_braceLevel = 0
self.hint_parenLevel = 0
self.hint_structDecl = set()
self.hint_lock = False
super().__init__(sourceCode, self.cRegex)
mtokens = list(self)
if pp_expander:
mtokens = pp_expander(mtokens)
mtokens = self.addParserHints(TokenList(mtokens))
self.addTokens(mtokens)
def update_hint_context(self, token):
if token.id == c_Parser.TERMINAL_LPAREN:
self.hint_parenLevel += 1
elif token.id == c_Parser.TERMINAL_RPAREN:
self.hint_parenLevel -= 1
elif token.id == c_Parser.TERMINAL_LBRACE:
self.hint_braceLevel += 1
elif token.id == c_Parser.TERMINAL_RBRACE:
self.hint_braceLevel -= 1
def parse_parameter_list(self, tokenIterator):
param = []
params = []
hint = c_Parser.TERMINAL_ABSTRACT_PARAMETER_HINT
startParenLevel = self.hint_parenLevel
start = True
while True:
try:
token = next(tokenIterator)
self.update_hint_context(token)
except StopIteration:
break
if start and token.id == c_Parser.TERMINAL_RPAREN:
return [token]
start = False
if token.id == c_Parser.TERMINAL_LPAREN and \
( tokenIterator.check('+1', declaration_specifiers()) or \
tokenIterator.check('+1', [c_Parser.TERMINAL_RPAREN]) ):
param.append(token)
param.extend(self.parse_parameter_list(tokenIterator))
continue
elif (token.id == c_Parser.TERMINAL_COMMA) or \
(token.id == c_Parser.TERMINAL_RPAREN and self.hint_parenLevel == startParenLevel - 1):
params.append(cToken(hint, self.resource, c_Parser.terminals[hint], '', param[0].lineno, param[0].colno, self.getContext()))
params.extend(param)
params.append(token)
param = []
hint = c_Parser.TERMINAL_ABSTRACT_PARAMETER_HINT
if token.id == c_Parser.TERMINAL_RPAREN:
break
continue
else:
param.append(token)
if token.id == c_Parser.TERMINAL_IDENTIFIER:
hint = c_Parser.TERMINAL_NAMED_PARAMETER_HINT
if len(param):
params.append(cToken(hint, self.resource, c_Parser.terminals[hint], '', param[0].lineno, param[0].colno, self.getContext()))
params.extend(param)
params.append(token)
return params
def parse_until(self, tokenIterator, terminal_id):
tokens = []
while True:
try:
n = next(tokenIterator)
self.update_hint_context(n)
tokens.append(n)
if n.id == terminal_id:
break
except StopIteration:
break
return tokens
def parse_parameters(self, tokenIterator):
tokens = []
hintId = False
if not tokenIterator.check('+1', [c_Parser.TERMINAL_IDENTIFIER]):
tokens.extend( self.parse_parameter_list(tokenIterator) )
else:
tokens.extend( self.parse_until(tokenIterator, c_Parser.TERMINAL_RPAREN) )
return tokens
def parseExternalDeclaration(self, tokenIterator):
# returns as soon as a hint is determined or token stream ends
ytokens = []
xtokens = []
        self.hint_lock = True
self.keepGoing = True
collectDeclarationSpecifiers = True
while self.keepGoing:
self.keepGoing = parseParams = funcFound = rparenFound = identFound = parametersParsed = False
hintId = False
ztokens = []
declarationSpecifiers = []
while True:
try:
token2 = next(tokenIterator)
except StopIteration:
break
self.update_hint_context(token2)
if collectDeclarationSpecifiers:
if self.hint_braceLevel in self.hint_structDecl:
declarationSpecifiers.append(token2)
if parseParams and token2.id == c_Parser.TERMINAL_LPAREN and \
( tokenIterator.check('+1', declaration_specifiers()) or \
tokenIterator.check('+1', [c_Parser.TERMINAL_RPAREN, c_Parser.TERMINAL_IDENTIFIER]) ):
paramTokens = self.parse_parameters(tokenIterator)
declarationSpecifiers.extend(paramTokens)
parseParams = False
if token2.id == c_Parser.TERMINAL_RBRACE:
self.hint_structDecl = self.hint_structDecl.difference({self.hint_braceLevel})
if token2.id == c_Parser.TERMINAL_IDENTIFIER and self.hint_parenLevel > 0:
parseParams = True
continue
if len(self.hint_structDecl) == 0:
collectDeclarationSpecifiers = False
continue
elif token2.id in {c_Parser.TERMINAL_STRUCT, c_Parser.TERMINAL_UNION}:
declarationSpecifiers.append(token2)
while True:
try:
n = next(tokenIterator)
self.update_hint_context(n)
declarationSpecifiers.append(n)
if n.id == c_Parser.TERMINAL_LBRACE:
break
except StopIteration:
break
self.hint_structDecl = self.hint_structDecl.union({self.hint_braceLevel})
continue
else:
declarationSpecifiers.append(token2)
if not tokenIterator.check('+1', declaration_specifiers()):
collectDeclarationSpecifiers = False
continue
ztokens.append(token2)
if self.hint_braceLevel == 0 and \
token2.id == c_Parser.TERMINAL_IDENTIFIER and \
(self.hint_parenLevel > 0 or tokenIterator.check('+1', [c_Parser.TERMINAL_LPAREN])):
parseParams = True
if tokenIterator.check('+1', [c_Parser.TERMINAL_LPAREN]):
funcFound = True
continue
if parseParams and token2.id == c_Parser.TERMINAL_LPAREN and \
( tokenIterator.check('+1', declaration_specifiers()) or \
tokenIterator.check('+1', [c_Parser.TERMINAL_RPAREN, c_Parser.TERMINAL_IDENTIFIER]) ):
paramTokens = self.parse_parameters(tokenIterator)
ztokens.extend(paramTokens)
if tokenIterator.check('+1', [c_Parser.TERMINAL_LBRACE]):
hintId = c_Parser.TERMINAL_FUNCTION_DEFINITION_HINT
elif tokenIterator.check('+1', declaration_specifiers()):
hintId = c_Parser.TERMINAL_FUNCTION_DEFINITION_HINT
ztokens.extend( self.parse_until(tokenIterator, c_Parser.TERMINAL_LBRACE) )
if funcFound and hintId:
break
continue
if token2.id in [c_Parser.TERMINAL_SEMI, c_Parser.TERMINAL_COMMA]:
if self.hint_braceLevel == 0 and self.hint_parenLevel == 0:
if funcFound:
hintId = c_Parser.TERMINAL_FUNCTION_PROTOTYPE_HINT
else:
hintId = c_Parser.TERMINAL_DECLARATOR_HINT
if token2.id == c_Parser.TERMINAL_COMMA:
self.keepGoing = True
break
ytokens.extend(declarationSpecifiers)
if hintId != False:
first = declarationSpecifiers[0] if len(declarationSpecifiers) else ztokens[0]
hint = cToken(hintId, self.resource, c_Parser.terminals[hintId], '', first.lineno, first.colno, self.getContext())
ytokens.append(hint)
ytokens.extend(ztokens)
# endwhile
first = ytokens[0] if len(ytokens) else ztokens[0]
edHintId = c_Parser.TERMINAL_EXTERNAL_DECLARATION_HINT
        edHint = cToken(edHintId, self.resource, c_Parser.terminals[edHintId], '', first.lineno, first.colno, self.getContext())
xtokens.append(edHint)
xtokens.extend(ytokens)
self.hint_lock = False
return xtokens
def addParserHints(self, tokenIterator):
xtokens = []
tokenIterator = iter(tokenIterator)
while True: #for token in tokenIterator:
try:
token = next(tokenIterator)
except StopIteration:
break
if self.hint_lock:
self.update_hint_context(token)
xtokens.append(token)
elif self.hint_braceLevel == 0 and token.id in declaration_specifiers():
xtokens.extend(self.parseExternalDeclaration(tokenIterator.go('-1')))
else:
self.update_hint_context(token)
xtokens.append(token)
return xtokens
def markIf(self):
self.ifBlocks = self.ifBlocks.union({(self.braceLevel, self.parenLevel)})
def unmarkIf(self):
self.ifBlocks = self.ifBlocks.difference({(self.braceLevel, self.parenLevel)})
def isIf(self):
return (self.braceLevel, self.parenLevel) in self.ifBlocks
def addEndif(self):
self.unmarkIf()
nextTokens = set(map(lambda x: x[0], self.peek(2)))
t = set()
if c_Parser.TERMINAL_LBRACE in nextTokens:
t = {(c_Parser.TERMINAL_RBRACE, self.braceLevel,)}
elif not len(nextTokens.intersection({c_Parser.TERMINAL_FOR, c_Parser.TERMINAL_IF, c_Parser.TERMINAL_WHILE, c_Parser.TERMINAL_DO})):
t = {(c_Parser.TERMINAL_SEMI, self.braceLevel,)}
else:
self.markIf()
self.endifTokens = self.endifTokens.union(t)
def __next__(self):
token = super().__next__()
return cToken(token.id, self.resource, token.terminal_str, token.source_string, token.lineno, token.colno, context=self.getContext())
def getContext(self):
return self.__dict__
class StatelessCLexer(StatelessPatternMatchingLexer):
def __init__(self):
super().__init__(cLexer.cRegex)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
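    # Usage sketch (assumption, not shown in this file: the usual
    # azure-mgmt-network wiring): instances of this class are normally obtained
    # from a configured NetworkManagementClient rather than constructed
    # directly, e.g.
    #
    #     from azure.mgmt.network import NetworkManagementClient
    #     client = NetworkManagementClient(credentials, subscription_id)
    #     ops = client.network_interfaces  # a NetworkInterfacesOperations
    #
    # The method-level sketches below assume such an `ops` instance.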
def _delete_initial(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
api_version = "2017-06-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
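    # Sketch (assumes an `ops` instance as outlined above): the poller returned
    # by delete() deserializes to None, so callers typically just wait on it.
    #
    #     poller = ops.delete('my-resource-group', 'my-nic')
    #     poller.wait()  # blocks until the long-running delete completes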
def get(
self, resource_group_name, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkInterface or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_06_01.models.NetworkInterface or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-06-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
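    # Sketch (assumes an `ops` instance as outlined above): get() returns the
    # deserialized NetworkInterface, or a ClientRawResponse wrapper when
    # raw=True.
    #
    #     nic = ops.get('my-resource-group', 'my-nic')
    #     raw = ops.get('my-resource-group', 'my-nic', raw=True)
    #     raw.response.status_code  # underlying HTTP response is preserved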
def _create_or_update_initial(
self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
api_version = "2017-06-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkInterface')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network
interface operation.
:type parameters:
~azure.mgmt.network.v2017_06_01.models.NetworkInterface
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
NetworkInterface or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.NetworkInterface]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
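    # Sketch (assumes an `ops` instance as outlined above and a NetworkInterface
    # model called `nic_params`): result() blocks on the poller and returns the
    # deserialized NetworkInterface once the PUT has completed.
    #
    #     poller = ops.create_or_update('my-resource-group', 'my-nic', nic_params)
    #     nic = poller.result()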
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2017_06_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-06-01"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
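    # Sketch (assumes an `ops` instance as outlined above): the returned
    # NetworkInterfacePaged object follows next_link transparently, so it can be
    # consumed with a plain for-loop.
    #
    #     for nic in ops.list_all():
    #         print(nic.name)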
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2017_06_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-06-01"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def _get_effective_route_table_initial(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
api_version = "2017-06-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_effective_route_table(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
EffectiveRouteListResult or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.EffectiveRouteListResult]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('EffectiveRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def _list_effective_network_security_groups_initial(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
api_version = "2017-06-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_effective_network_security_groups(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
EffectiveNetworkSecurityGroupListResult or ClientRawResponse if
raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityGroupListResult]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_virtual_machine_scale_set_vm_network_interfaces(
self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, custom_headers=None, raw=False, **operation_config):
"""Gets information about all network interfaces in a virtual machine in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2017_06_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-03-30"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_virtual_machine_scale_set_network_interfaces(
self, resource_group_name, virtual_machine_scale_set_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2017_06_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-03-30"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_virtual_machine_scale_set_network_interface(
self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkInterface or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_06_01.models.NetworkInterface or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-03-30"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
from oslo_config import cfg
import stubout
import webob
from senlin.common import exception
from senlin.common import wsgi
from senlin.tests.common import base
class RequestTest(base.SenlinTestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
super(RequestTest, self).setUp()
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123')
self.assertRaises(exception.InvalidContentType,
request.get_content_type, ('application/xml'))
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "text/html"
self.assertRaises(exception.InvalidContentType,
request.get_content_type, ('application/xml'))
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type(('application/json'))
self.assertEqual("application/json", result)
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept_xml_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept_json_xml_quality(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
accepted = 'unknown-lang'
request.headers = {'Accept-Language': accepted}
def fake_best_match(self, offers, default_match=None):
# Best match on an unknown locale returns None
return None
self.stubs.SmartSet(request.accept_language,
'best_match', fake_best_match)
self.assertIsNone(request.best_match_language())
# If Accept-Language is missing or empty, match should be None
request.headers = {'Accept-Language': ''}
self.assertIsNone(request.best_match_language())
request.headers.pop('Accept-Language')
self.assertIsNone(request.best_match_language())
class ResourceTest(base.SenlinTestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
super(ResourceTest, self).setUp()
def test_get_action_args(self):
env = {
'wsgiorg.routing_args': [
None,
{
'controller': None,
'format': None,
'action': 'update',
'id': 12,
},
],
}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_get_action_args_invalid_index(self):
env = {'wsgiorg.routing_args': []}
expected = {}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_get_action_args_del_controller_error(self):
actions = {'format': None,
'action': 'update',
'id': 12}
env = {'wsgiorg.routing_args': [None, actions]}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_get_action_args_del_format_error(self):
actions = {'action': 'update', 'id': 12}
env = {'wsgiorg.routing_args': [None, actions]}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(expected, actual)
def test_dispatch(self):
class Controller(object):
def index(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
expected = ('on', 'off')
self.assertEqual(expected, actual)
def test_dispatch_default(self):
class Controller(object):
def default(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
expected = ('on', 'off')
self.assertEqual(expected, actual)
def test_dispatch_no_default(self):
class Controller(object):
def show(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
self.assertRaises(AttributeError, resource.dispatch, Controller(),
'index', 'on', pants='off')
def test_resource_call_error_handle(self):
class Controller(object):
def delete(self, req, identity):
return (req, identity)
actions = {'action': 'delete', 'id': 12, 'body': 'data'}
env = {'wsgiorg.routing_args': [None, actions]}
request = wsgi.Request.blank('/tests/123', environ=env)
request.body = '{"foo" : "value"}'
resource = wsgi.Resource(Controller(),
wsgi.JSONRequestDeserializer(),
None)
        # The Resource does not raise webob.HTTPExceptions directly, since wsgi
        # would treat them as responses and end the request flow. Instead they
        # are wrapped so they can reach the fault application, where they are
        # converted to a JSON response.
e = self.assertRaises(exception.HTTPExceptionDisguise,
resource, request)
self.assertIsInstance(e.exc, webob.exc.HTTPBadRequest)
def test_resource_call_error_handle_localized(self):
class Controller(object):
def delete(self, req, identity):
return (req, identity)
actions = {'action': 'delete', 'id': 12, 'body': 'data'}
env = {'wsgiorg.routing_args': [None, actions]}
request = wsgi.Request.blank('/tests/123', environ=env)
request.body = '{"foo" : "value"}'
message_es = "No Encontrado"
translated_ex = webob.exc.HTTPBadRequest(message_es)
resource = wsgi.Resource(Controller(),
wsgi.JSONRequestDeserializer(),
None)
def fake_translate_exception(ex, locale):
return translated_ex
self.stubs.SmartSet(wsgi, 'translate_exception',
fake_translate_exception)
e = self.assertRaises(exception.HTTPExceptionDisguise,
resource, request)
self.assertEqual(message_es, six.text_type(e.exc))
class ResourceExceptionHandlingTest(base.SenlinTestCase):
scenarios = [
('client_exceptions', dict(
exception=exception.ClusterNotSpecified,
exception_catch=exception.ClusterNotSpecified)),
('webob_bad_request', dict(
exception=webob.exc.HTTPBadRequest,
exception_catch=exception.HTTPExceptionDisguise)),
('webob_not_found', dict(
exception=webob.exc.HTTPNotFound,
exception_catch=exception.HTTPExceptionDisguise)),
]
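    # Note: the 'scenarios' list is expanded by the testscenarios machinery
    # (assumption: it is wired in via the senlin test base or a module-level
    # load_tests hook), so the test below runs once per entry with
    # self.exception / self.exception_catch taken from that entry's dict.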
def test_resource_client_exceptions_dont_log_error(self):
class Controller(object):
            def __init__(self, exception_to_raise):
                self.exception_to_raise = exception_to_raise
            def raise_exception(self, req, body):
                raise self.exception_to_raise()
actions = {'action': 'raise_exception', 'body': 'data'}
env = {'wsgiorg.routing_args': [None, actions]}
request = wsgi.Request.blank('/tests/123', environ=env)
request.body = '{"foo" : "value"}'
resource = wsgi.Resource(Controller(self.exception),
wsgi.JSONRequestDeserializer(),
None)
e = self.assertRaises(self.exception_catch, resource, request)
e = e.exc if hasattr(e, 'exc') else e
self.assertNotIn(six.text_type(e), self.LOG.output)
class JSONRequestDeserializerTest(base.SenlinTestCase):
def test_has_body_no_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'asdf'
request.headers.pop('Content-Length')
request.headers['Content-Type'] = 'application/json'
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_zero_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'asdf'
request.headers['Content-Length'] = 0
request.headers['Content-Type'] = 'application/json'
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_content_length_no_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_content_length_plain_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'text/plain'
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_content_type_malformed(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'asdf'
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/json'
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/json'
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_wrong_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/xml'
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_aws_content_type_only(self):
request = wsgi.Request.blank('/?ContentType=JSON')
request.method = 'GET'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_respect_aws_content_type(self):
request = wsgi.Request.blank('/?ContentType=JSON')
request.method = 'GET'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/xml'
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_content_type_with_get(self):
request = wsgi.Request.blank('/')
request.method = 'GET'
request.body = '{"key": "value"}'
self.assertIn('Content-Length', request.headers)
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_no_body_no_content_length(self):
request = wsgi.Request.blank('/')
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_from_json(self):
fixture = '{"key": "value"}'
expected = {"key": "value"}
actual = wsgi.JSONRequestDeserializer().from_json(fixture)
self.assertEqual(expected, actual)
def test_from_json_malformed(self):
fixture = 'kjasdklfjsklajf'
self.assertRaises(webob.exc.HTTPBadRequest,
wsgi.JSONRequestDeserializer().from_json, fixture)
def test_default_no_body(self):
request = wsgi.Request.blank('/')
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {}
self.assertEqual(expected, actual)
def test_default_with_body(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = '{"key": "value"}'
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_default_with_get_with_body(self):
request = wsgi.Request.blank('/')
request.method = 'GET'
request.body = '{"key": "value"}'
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_default_with_get_with_body_with_aws(self):
request = wsgi.Request.blank('/?ContentType=JSON')
request.method = 'GET'
request.body = '{"key": "value"}'
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_from_json_exceeds_max_json_mb(self):
cfg.CONF.set_override('max_json_body_size', 10)
body = json.dumps(['a'] * cfg.CONF.max_json_body_size)
self.assertTrue(len(body) > cfg.CONF.max_json_body_size)
error = self.assertRaises(exception.RequestLimitExceeded,
wsgi.JSONRequestDeserializer().from_json,
body)
msg = 'Request limit exceeded: JSON body size ' + \
'(%s bytes) exceeds maximum allowed size (%s bytes).' % \
(len(body), cfg.CONF.max_json_body_size)
self.assertEqual(msg, six.text_type(error))
|
|
from bs4 import BeautifulSoup
from flask import current_app
from flask import request, make_response, render_template, url_for, Blueprint
from redwind import hooks
from redwind import util
from redwind.extensions import db
from redwind.models import Post, Mention, get_settings
from redwind.tasks import get_queue, async_app_context
from werkzeug.exceptions import NotFound
import datetime
import mf2py
import mf2util
import requests
import urllib.parse
import urllib.request
wm_receiver = Blueprint('wm_receiver', __name__)
class MentionResult:
def __init__(self, mention, create):
self.mention = mention
self.create = create
class ProcessResult:
def __init__(self, post=None, error=None, delete=False):
self.post = post
self.error = error
self.delete = delete
self.mention_results = []
def add_mention(self, mention, create):
self.mention_results.append(MentionResult(mention, create))
@property
def mentions(self):
return [r.mention for r in self.mention_results]
def register(app):
app.register_blueprint(wm_receiver)
@wm_receiver.route('/webmention', methods=['GET', 'POST'])
def receive_webmention():
if request.method == 'GET':
return render_template('webmention.jinja2')
source = request.form.get('source')
target = request.form.get('target')
callback = request.form.get('callback')
if not source:
return make_response(
'webmention missing required source parameter', 400)
if not target:
return make_response(
'webmention missing required target parameter', 400)
current_app.logger.debug(
"Webmention from %s to %s received", source, target)
job = get_queue().enqueue(
do_process_webmention, source, target, callback, current_app.config)
status_url = url_for('.webmention_status', key=job.id, _external=True)
return make_response(
render_template('wm_received.jinja2', status_url=status_url), 202)
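# Delivery sketch (illustrative only; URLs below are hypothetical): a sender
# notifies this endpoint with a form-encoded POST carrying the required
# ``source`` and ``target`` parameters, for example:
#
#     curl -d source=https://sender.example/reply \
#          -d target=https://this-site.example/2015/01/some-post \
#          https://this-site.example/webmention
#
# The 202 response above points the sender at the status URL it can poll.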
@wm_receiver.route('/webmention/status/<key>')
def webmention_status(key):
job = get_queue().fetch_job(key)
if not job:
rv = {
'response_code': 400,
'status': 'unknown',
'reason': 'Job does not exist or its status has expired',
}
elif job.result == 'queued':
rv = {
'response_code': 202,
'status': 'queued',
'reason': 'Mention has been queued for processing',
}
else:
rv = job.result or {}
return make_response(
render_template('wm_status.jinja2', **rv),
rv.get('response_code', 400))
def do_process_webmention(source, target, callback, app_config):
def call_callback(result):
if callback:
requests.post(callback, data=result)
with async_app_context(app_config):
try:
result = interpret_mention(source, target)
if result.error:
current_app.logger.warn(
'Failed to process webmention: %s', result.error)
response = {
'source': source,
'target': target,
'response_code': 400,
'status': 'error',
'reason': result.error
}
call_callback(response)
return response
if result.post and result.delete:
result.post.mentions = [m for m in result.post.mentions if
m.url != source]
elif result.post:
result.post.mentions.extend(result.mentions)
db.session.commit()
current_app.logger.debug("saved mentions to %s", result.post.path)
hooks.fire('mention-received', post=result.post)
if result.post:
for mres in result.mention_results:
if mres.create:
send_push_notification(result.post, mres.mention,
app_config)
response = {
'source': source,
'target': target,
'response_code': 200,
'status': 'success',
'reason': 'Deleted' if result.delete
else 'Created' if any(mres.create for mres
in result.mention_results)
else 'Updated'
}
call_callback(response)
return response
except Exception as e:
current_app.logger.exception(
"exception while processing webmention")
response = {
'source': source,
'target': target,
'response_code': 400,
'status': 'error',
'reason': "exception while processing webmention {}".format(e)
}
call_callback(response)
return response
def send_push_notification(post, mention, app_config):
# ignore mentions from bridgy
if mention.url and mention.url.startswith('https://brid-gy.appspot.com/'):
return
if 'PUSHOVER_TOKEN' in app_config and 'PUSHOVER_USER' in app_config:
token = app_config['PUSHOVER_TOKEN']
user = app_config['PUSHOVER_USER']
message = '{} from {}{}'.format(
mention.reftype, mention.author_name,
(': ' + mention.content_plain[:256])
if mention.content_plain else '')
requests.post('https://api.pushover.net/1/messages.json', data={
'token': token,
'user': user,
'message': message,
'url': post.permalink,
})
def interpret_mention(source, target):
current_app.logger.debug(
'processing webmention from %s to %s', source, target)
if target and target.strip('/') == get_settings().site_url.strip('/'):
# received a domain-level mention
current_app.logger.debug(
'received domain-level webmention from %s', source)
target_post = None
target_urls = (target,)
# TODO save domain-level webmention somewhere
return ProcessResult(
error="Receiving domain-level webmentions is not yet implemented")
else:
# confirm that target is a valid link to a post
target_post = find_target_post(target)
if not target_post:
current_app.logger.warn(
"Webmention could not find target post: %s. Giving up", target)
return ProcessResult(
error="Webmention could not find target post: {}"
.format(target))
target_urls = (target, target_post.permalink,)
if source in target_urls:
return ProcessResult(
error='{} and {} refer to the same post'.format(source, target))
# confirm that source actually refers to the post
source_response = util.fetch_html(source)
current_app.logger.debug(
'received response from source %s', source_response)
if source_response.status_code == 410:
current_app.logger.debug("Webmention indicates original was deleted")
return ProcessResult(post=target_post, delete=True)
if source_response.status_code // 100 != 2:
current_app.logger.warn(
"Webmention could not read source post: %s. Giving up", source)
return ProcessResult(
post=target_post,
error="Bad response when reading source post: {}, {}".format(
source, source_response))
source_length = source_response.headers.get('Content-Length')
if source_length and int(source_length) > 2097152:
current_app.logger.warn("Very large source. length=%s", source_length)
return ProcessResult(
post=target_post,
error="Source is very large. Length={}".format(source_length))
link_to_target = find_link_to_target(source, source_response, target_urls)
if not link_to_target:
current_app.logger.warn(
"Webmention source %s does not appear to link to target %s. "
"Giving up", source, target)
return ProcessResult(
post=target_post,
error="Could not find any links from source to target")
mentions = create_mentions(target_post, source, source_response)
if not mentions:
return ProcessResult(
post=target_post,
error="Could not parse a mention from the source")
result = ProcessResult(post=target_post)
for mention in mentions:
result.add_mention(mention, create=not mention.id)
return result
def find_link_to_target(source_url, source_response, target_urls):
if source_response.status_code // 100 != 2:
current_app.logger.warn(
"Received unexpected response from webmention source: %s",
source_response.text)
return None
# Don't worry about Microformats for now; just see if there is a
# link anywhere that points back to the target
soup = BeautifulSoup(source_response.text)
for link in soup.find_all(['a', 'link']):
link_target = link.get('href')
if link_target in target_urls:
return link
def find_target_post(target_url):
current_app.logger.debug("looking for target post at %s", target_url)
# follow redirects if necessary
redirect_url = urllib.request.urlopen(target_url).geturl()
if redirect_url and redirect_url != target_url:
current_app.logger.debug("followed redirection to %s", redirect_url)
target_url = redirect_url
parsed_url = urllib.parse.urlparse(target_url)
if not parsed_url:
current_app.logger.warn(
"Could not parse target_url of received webmention: %s",
target_url)
return None
try:
# FIXME this is a less-than-perfect fix for hosting from a
# subdirectory. The url_map may have some clever work-around.
parsed_site_root = urllib.parse.urlparse(get_settings().site_url)
site_prefix = parsed_site_root.path
if site_prefix.endswith('/'):
site_prefix = site_prefix[:-1]
if not parsed_url.path.startswith(parsed_site_root.path):
raise NotFound
urls = current_app.url_map.bind(get_settings().site_url)
path = parsed_url.path[len(site_prefix):]
current_app.logger.debug('target path with no prefix %s', path)
endpoint, args = urls.match(path)
current_app.logger.debug(
'found match for target url %r: %r', endpoint, args)
except NotFound:
current_app.logger.warn(
'Webmention could not find target for %s', parsed_url.path)
return None
post = None
if endpoint == 'views.post_by_path':
year = args.get('year')
month = args.get('month')
slug = args.get('slug')
post = Post.load_by_path(
'{}/{:02d}/{}'.format(year, month, slug))
elif endpoint == 'views.post_by_date':
post_type = args.get('post_type')
year = args.get('year')
month = args.get('month')
day = args.get('day')
index = args.get('index')
post = Post.load_by_date(post_type, year, month, day, index)
elif endpoint == 'views.post_by_old_date':
post_type = args.get('post_type')
yymmdd = args.get('yymmdd')
year = int('20' + yymmdd[0:2])
month = int(yymmdd[2:4])
day = int(yymmdd[4:6])
index = args.get('index')
post = Post.load_by_date(post_type, year, month, day, index)
elif endpoint == 'views.post_by_id':
dbid = args.get('dbid')
post = Post.load_by_id(dbid)
if not post:
current_app.logger.warn(
"Webmention target points to unknown post: {}".format(args)),
return post
def create_mentions(post, url, source_response):
target_urls = []
if post:
base_target_urls = [post.permalink]
for base_url in base_target_urls:
target_urls.append(base_url)
target_urls.append(base_url.replace('https://', 'http://')
if base_url.startswith('https://')
else base_url.replace('http://', 'https://'))
blob = mf2py.parse(doc=source_response.text, url=url)
if not blob:
current_app.logger.debug('create_mention: no mf2 in source_response')
return
entry = mf2util.interpret_comment(blob, url, target_urls)
current_app.logger.debug('interpreted comment: %r', entry)
if not entry:
current_app.logger.debug(
'create_mention: mf2util found no comment entry')
return
comment_type = entry.get('comment_type', [])
to_process = [(entry, url)]
# process 2nd level "downstream" comments
if 'reply' in comment_type:
downstream_cmts = entry.get('comment', [])
current_app.logger.debug('adding in downstream comments:%d',
len(downstream_cmts))
for dc in downstream_cmts:
if dc.get('url'):
to_process.append((dc, dc.get('url')))
results = []
for entry, url in to_process:
current_app.logger.debug('processing %s %r', url, entry)
content = util.clean_foreign_html(entry.get('content', ''))
content_plain = util.format_as_text(content)
published = entry.get('published')
if not published:
published = datetime.datetime.utcnow()
# update an existing mention
mention = next((m for m in post.mentions if m.url == url), None)
# or create a new one
if not mention:
mention = Mention()
mention.url = url
mention.permalink = entry.get('url') or url
mention.reftype = comment_type[0] if comment_type else 'reference'
mention.author_name = entry.get('author', {}).get('name', '')
mention.author_url = entry.get('author', {}).get('url', '')
mention.author_image = entry.get('author', {}).get('photo')
mention.content = content
mention.content_plain = content_plain
mention.published = published
mention.title = entry.get('name')
mention.syndication = entry.get('syndication', [])
mention.rsvp = entry.get('rsvp')
results.append(mention)
return results
|
|
"""SCons.Tool.JavaCommon
Common routines for processing Java source files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/JavaCommon.py 4369 2009/09/19 15:58:29 scons"
import os
import os.path
import re
import string
java_parsing = 1
default_java_version = '1.4'
if java_parsing:
# Parse Java files for class names.
#
# This is a really cool parser from Charles Crain
# that finds appropriate class names in Java source.
# A regular expression that will find, in a java file:
# newlines;
# double-backslashes;
# a single-line comment "//";
# single or double quotes preceded by a backslash;
# single quotes, double quotes, open or close braces, semi-colons,
# periods, open or close parentheses;
# floating-point numbers;
# any alphanumeric token (keyword, class name, specifier);
# any alphanumeric token surrounded by angle brackets (generics);
# the multi-line comment begin and end tokens /* and */;
# array declarations "[]".
_reToken = re.compile(r'(\n|\\\\|//|\\[\'"]|[\'"\{\}\;\.\(\)]|' +
r'\d*\.\d*|[A-Za-z_][\w\$\.]*|<[A-Za-z_]\w+>|' +
r'/\*|\*/|\[\])')
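# Rough tokenisation sketch (assumed behaviour, not exercised by this module):
# for a snippet such as "class Foo { int x = 1.0; } // done",
# _reToken.findall() yields tokens like 'class', 'Foo', '{', 'int', 'x',
# '1.0', ';', '}' and '//', which the state machine below consumes one at a
# time via parseToken().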
class OuterState:
"""The initial state for parsing a Java file for classes,
interfaces, and anonymous inner classes."""
def __init__(self, version=default_java_version):
if not version in ('1.1', '1.2', '1.3','1.4', '1.5', '1.6',
'5', '6'):
msg = "Java version %s not supported" % version
raise NotImplementedError, msg
self.version = version
self.listClasses = []
self.listOutputs = []
self.stackBrackets = []
self.brackets = 0
self.nextAnon = 1
self.localClasses = []
self.stackAnonClassBrackets = []
self.anonStacksStack = [[0]]
self.package = None
def trace(self):
pass
def __getClassState(self):
try:
return self.classState
except AttributeError:
ret = ClassState(self)
self.classState = ret
return ret
def __getPackageState(self):
try:
return self.packageState
except AttributeError:
ret = PackageState(self)
self.packageState = ret
return ret
def __getAnonClassState(self):
try:
return self.anonState
except AttributeError:
self.outer_state = self
ret = SkipState(1, AnonClassState(self))
self.anonState = ret
return ret
def __getSkipState(self):
try:
return self.skipState
except AttributeError:
ret = SkipState(1, self)
self.skipState = ret
return ret
def __getAnonStack(self):
return self.anonStacksStack[-1]
def openBracket(self):
self.brackets = self.brackets + 1
def closeBracket(self):
self.brackets = self.brackets - 1
if len(self.stackBrackets) and \
self.brackets == self.stackBrackets[-1]:
self.listOutputs.append(string.join(self.listClasses, '$'))
self.localClasses.pop()
self.listClasses.pop()
self.anonStacksStack.pop()
self.stackBrackets.pop()
if len(self.stackAnonClassBrackets) and \
self.brackets == self.stackAnonClassBrackets[-1]:
self.__getAnonStack().pop()
self.stackAnonClassBrackets.pop()
def parseToken(self, token):
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '{':
self.openBracket()
elif token == '}':
self.closeBracket()
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == "new":
# anonymous inner class
if len(self.listClasses) > 0:
return self.__getAnonClassState()
return self.__getSkipState() # Skip the class name
elif token in ['class', 'interface', 'enum']:
if len(self.listClasses) == 0:
self.nextAnon = 1
self.stackBrackets.append(self.brackets)
return self.__getClassState()
elif token == 'package':
return self.__getPackageState()
elif token == '.':
# Skip the attribute, it might be named "class", in which
# case we don't want to treat the following token as
# an inner class name...
return self.__getSkipState()
return self
def addAnonClass(self):
"""Add an anonymous inner class"""
if self.version in ('1.1', '1.2', '1.3', '1.4'):
clazz = self.listClasses[0]
self.listOutputs.append('%s$%d' % (clazz, self.nextAnon))
elif self.version in ('1.5', '1.6', '5', '6'):
self.stackAnonClassBrackets.append(self.brackets)
className = []
className.extend(self.listClasses)
self.__getAnonStack()[-1] = self.__getAnonStack()[-1] + 1
for anon in self.__getAnonStack():
className.append(str(anon))
self.listOutputs.append(string.join(className, '$'))
self.nextAnon = self.nextAnon + 1
self.__getAnonStack().append(0)
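# Naming sketch (inferred from the logic above, 1.5+ behaviour): an anonymous
# class defined directly inside class Foo is recorded as 'Foo$1'; another
# anonymous class nested inside that one is recorded as 'Foo$1$1'.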
def setPackage(self, package):
self.package = package
class AnonClassState:
"""A state that looks for anonymous inner classes."""
def __init__(self, old_state):
# outer_state is always an instance of OuterState
self.outer_state = old_state.outer_state
self.old_state = old_state
self.brace_level = 0
def parseToken(self, token):
# This is an anonymous class if and only if the next
# non-whitespace token is a bracket. Everything between
# braces should be parsed as normal java code.
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '\n':
return self
elif token[0] == '<' and token[-1] == '>':
return self
elif token == '(':
self.brace_level = self.brace_level + 1
return self
if self.brace_level > 0:
if token == 'new':
# look further for anonymous inner class
return SkipState(1, AnonClassState(self))
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == ')':
self.brace_level = self.brace_level - 1
return self
if token == '{':
self.outer_state.addAnonClass()
return self.old_state.parseToken(token)
class SkipState:
"""A state that will skip a specified number of tokens before
reverting to the previous state."""
def __init__(self, tokens_to_skip, old_state):
self.tokens_to_skip = tokens_to_skip
self.old_state = old_state
def parseToken(self, token):
self.tokens_to_skip = self.tokens_to_skip - 1
if self.tokens_to_skip < 1:
return self.old_state
return self
class ClassState:
"""A state we go into when we hit a class or interface keyword."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
# the next non-whitespace token should be the name of the class
if token == '\n':
return self
# If that's an inner class which is declared in a method, it
# requires an index prepended to the class-name, e.g.
# 'Foo$1Inner' (Tigris Issue 2087)
if self.outer_state.localClasses and \
self.outer_state.stackBrackets[-1] > \
self.outer_state.stackBrackets[-2]+1:
locals = self.outer_state.localClasses[-1]
try:
idx = locals[token]
locals[token] = locals[token]+1
except KeyError:
locals[token] = 1
token = str(locals[token]) + token
self.outer_state.localClasses.append({})
self.outer_state.listClasses.append(token)
self.outer_state.anonStacksStack.append([0])
return self.outer_state
class IgnoreState:
"""A state that will ignore all tokens until it gets to a
specified token."""
def __init__(self, ignore_until, old_state):
self.ignore_until = ignore_until
self.old_state = old_state
def parseToken(self, token):
if self.ignore_until == token:
return self.old_state
return self
class PackageState:
"""The state we enter when we encounter the package keyword.
We assume the next token will be the package name."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
self.outer_state.setPackage(token)
return self.outer_state
def parse_java_file(fn, version=default_java_version):
return parse_java(open(fn, 'r').read(), version)
def parse_java(contents, version=default_java_version, trace=None):
"""Parse a .java file and return a double of package directory,
plus a list of .class files that compiling that .java file will
produce"""
package = None
initial = OuterState(version)
currstate = initial
for token in _reToken.findall(contents):
# The regex produces a bunch of groups, but only one will
# have anything in it.
currstate = currstate.parseToken(token)
if trace: trace(token, currstate)
if initial.package:
package = string.replace(initial.package, '.', os.sep)
return (package, initial.listOutputs)
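# Worked example (assumed output, for orientation only): for source text
# "package com.example; public class Foo { class Bar {} }", parse_java()
# should return roughly ('com' + os.sep + 'example', ['Foo$Bar', 'Foo']) --
# inner classes are emitted as their closing brace is seen, before the
# enclosing class.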
else:
# Don't actually parse Java files for class names.
#
# We might make this a configurable option in the future if
# Java-file parsing takes too long (although it shouldn't relative
# to how long the Java compiler itself seems to take...).
def parse_java_file(fn):
""" "Parse" a .java file.
This actually just splits the file name, so the assumption here
is that the file name matches the public class name, and that
the path to the file is the same as the package name.
"""
return os.path.split(fn)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
from __future__ import unicode_literals
import hashlib
import json
import os
import posixpath
import re
from collections import OrderedDict
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.six import iteritems
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import (
unquote, urldefrag, urlsplit, urlunsplit,
)
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
# FileSystemStorage falls back to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
default_template = """url("%s")"""
max_post_process_passes = 5
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(HashedFilesMixin, self).__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None, filename=None):
# `filename` is the name of file to hash if `content` isn't given.
# `name` is the base name to construct the new hashed filename from.
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if filename:
filename = urlsplit(unquote(filename)).path.strip()
filename = filename or clean_name
opened = False
if content is None:
if not self.exists(filename):
raise ValueError("The file '%s' could not be found with %r." % (filename, self))
try:
content = self.open(filename)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
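# Illustrative example (the hash value is made up): hashed_name("css/base.css")
# with content hashing to "27e20196a850" returns "css/base.27e20196a850.css";
# a name like "fonts/myfont.eot?#iefix" additionally keeps its trailing "?"
# so the @font-face hack described above still works after rewriting.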
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
Return the non-hashed URL in DEBUG mode, and the hashed URL otherwise.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
args = (clean_name,)
if hashed_files is not None:
args += (hashed_files,)
hashed_name = hashed_name_func(*args)
final_url = super(HashedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url(self, name, force=False):
"""
Return the non-hashed URL in DEBUG mode.
"""
return self._url(self.stored_name, name, force)
def url_converter(self, name, hashed_files, template=None):
"""
Return the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
adjustable_paths = [
path for path in paths
if matches_patterns(path, self._patterns.keys())
]
# Do a single pass first. Post-process all files once, then repeat for
# adjustable files.
for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
paths = {path: paths[path] for path in adjustable_paths}
for i in range(self.max_post_process_passes):
substitutions = False
for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
substitutions = substitutions or subst
if not substitutions:
break
if substitutions:
yield 'All', None, RuntimeError('Max post-process passes exceeded.')
# Store the processed paths
self.hashed_files.update(hashed_files)
def _post_process(self, paths, adjustable_paths, hashed_files):
# Sort the files by directory level
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
substitutions = True
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
# generate the hash with the original content, even for
# adjustable files.
if hash_key not in hashed_files:
hashed_name = self.hashed_name(name, original_file)
else:
hashed_name = hashed_files[hash_key]
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
old_hashed_name = hashed_name
content = original_file.read().decode(settings.FILE_CHARSET)
for extension, patterns in iteritems(self._patterns):
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(name, hashed_files, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc, False
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
# Save intermediate file for reference
saved_name = self._save(hashed_name, content_file)
hashed_name = self.hashed_name(name, content_file)
if self.exists(hashed_name):
self.delete(hashed_name)
saved_name = self._save(hashed_name, content_file)
hashed_name = force_text(self.clean_name(saved_name))
# If the file hash stayed the same, this file didn't change
if old_hashed_name == hashed_name:
substitutions = False
processed = True
if not processed:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_text(self.clean_name(saved_name))
# and then set the cache accordingly
hashed_files[hash_key] = hashed_name
yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
# Normalize the path to avoid multiple names for the same file like
# ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same
# path.
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
def stored_name(self, name):
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name:
return cache_name
# No cached name found, recalculate it from the files.
intermediate_name = name
for i in range(self.max_post_process_passes + 1):
cache_name = self.clean_name(
self.hashed_name(name, content=None, filename=intermediate_name)
)
if intermediate_name == cache_name:
# Store the hashed name if there was a miss.
self.hashed_files[hash_key] = cache_name
return cache_name
else:
# Move on to the next intermediate file.
intermediate_name = cache_name
# If the cache name can't be determined after the max number of passes,
# the intermediate files on disk may be corrupt; avoid an infinite loop.
raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
manifest_strict = True
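# The manifest written by save_manifest() is a small JSON document roughly of
# the form (the file names here are illustrative):
#   {"version": "1.0",
#    "paths": {"css/base.css": "css/base.27e20196a850.css"}}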
def __init__(self, *args, **kwargs):
super(ManifestFilesMixin, self).__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode('utf-8')
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
all_post_processed = super(ManifestFilesMixin,
self).post_process(*args, **kwargs)
for post_processed in all_post_processed:
yield post_processed
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode('utf-8')
self._save(self.manifest_name, ContentFile(contents))
def stored_name(self, name):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
hash_key = self.hash_key(clean_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
if self.manifest_strict:
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
cache_name = self.clean_name(self.hashed_name(name))
unparsed_name = list(parsed_name)
unparsed_name[2] = cache_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
class _MappingCache(object):
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic base exception handling.
SHOULD include dedicated exception logging.
"""
import collections
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from ironic.common.i18n import _, _LE
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
def _ensure_exception_kwargs_serializable(exc_class_name, kwargs):
"""Ensure that kwargs are serializable
Ensure that all kwargs passed to exception constructor can be passed over
RPC, by trying to convert them to JSON, or, as a last resort, to string.
If it is not possible, unserializable kwargs will be removed, letting the
receiver handle the exception string as it is configured to.
:param exc_class_name: an IronicException class name.
:param kwargs: a dictionary of keyword arguments passed to the exception
constructor.
:returns: a dictionary of serializable keyword arguments.
"""
serializers = [(jsonutils.dumps, _('when converting to JSON')),
(six.text_type, _('when converting to string'))]
exceptions = collections.defaultdict(list)
serializable_kwargs = {}
for k, v in kwargs.items():
for serializer, msg in serializers:
try:
serializable_kwargs[k] = serializer(v)
exceptions.pop(k, None)
break
except Exception as e:
exceptions[k].append(
'(%(serializer_type)s) %(e_type)s: %(e_contents)s' %
{'serializer_type': msg, 'e_contents': e,
'e_type': e.__class__.__name__})
if exceptions:
LOG.error(
_LE("One or more arguments passed to the %(exc_class)s "
"constructor as kwargs can not be serialized. The serialized "
"arguments: %(serialized)s. These unserialized kwargs were "
"dropped because of the exceptions encountered during their "
"serialization:\n%(errors)s"),
dict(errors=';\n'.join("%s: %s" % (k, '; '.join(v))
for k, v in exceptions.items()),
exc_class=exc_class_name, serialized=serializable_kwargs)
)
# We might be able to actually put the following keys' values into
# format string, but there is no guarantee, drop it just in case.
for k in exceptions:
del kwargs[k]
return serializable_kwargs
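# Behaviour sketch (illustrative): a plain string kwarg comes back
# JSON-encoded (e.g. 'uuid-1234' -> '"uuid-1234"'); a value that JSON cannot
# handle falls back to six.text_type(); anything that defeats both
# serializers is dropped from the returned dict and reported via LOG.error.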
class IronicException(Exception):
"""Base Ironic Exception
To correctly use this class, inherit from it and define
a '_msg_fmt' property. That message will get printf'd
with the keyword arguments provided to the constructor.
If you need to access the message from an exception you should use
six.text_type(exc)
"""
_msg_fmt = _("An unknown exception occurred.")
code = http_client.INTERNAL_SERVER_ERROR
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = _ensure_exception_kwargs_serializable(
self.__class__.__name__, kwargs)
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self._msg_fmt % kwargs
except Exception as e:
# kwargs doesn't match a variable in self._msg_fmt
# log the issue and the kwargs
prs = ', '.join('%s: %s' % pair for pair in kwargs.items())
LOG.exception(_LE('Exception in string format operation '
'(arguments %s)'), prs)
if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core self._msg_fmt out if something
# happened
message = self._msg_fmt
super(IronicException, self).__init__(message)
def __str__(self):
"""Encode to utf-8 then wsme api can consume it as well."""
if not six.PY3:
return six.text_type(self.args[0]).encode('utf-8')
return self.args[0]
def __unicode__(self):
"""Return a unicode representation of the exception message."""
return six.text_type(self.args[0])
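# Usage sketch (illustrative): the subclasses below supply a '_msg_fmt' and
# are raised with matching kwargs, e.g.
#     raise NodeNotFound(node=node_uuid)
# which renders "Node <uuid> could not be found." and carries an HTTP 404
# code for the API layer.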
class NotAuthorized(IronicException):
_msg_fmt = _("Not authorized.")
code = http_client.FORBIDDEN
class OperationNotPermitted(NotAuthorized):
_msg_fmt = _("Operation not permitted.")
class Invalid(IronicException):
_msg_fmt = _("Unacceptable parameters.")
code = http_client.BAD_REQUEST
class Conflict(IronicException):
_msg_fmt = _('Conflict.')
code = http_client.CONFLICT
class TemporaryFailure(IronicException):
_msg_fmt = _("Resource temporarily unavailable, please retry.")
code = http_client.SERVICE_UNAVAILABLE
class NotAcceptable(IronicException):
# TODO(deva): We need to set response headers in the API for this exception
_msg_fmt = _("Request not acceptable.")
code = http_client.NOT_ACCEPTABLE
class InvalidState(Conflict):
_msg_fmt = _("Invalid resource state.")
class NodeAlreadyExists(Conflict):
_msg_fmt = _("A node with UUID %(uuid)s already exists.")
class MACAlreadyExists(Conflict):
_msg_fmt = _("A port with MAC address %(mac)s already exists.")
class ChassisAlreadyExists(Conflict):
_msg_fmt = _("A chassis with UUID %(uuid)s already exists.")
class PortAlreadyExists(Conflict):
_msg_fmt = _("A port with UUID %(uuid)s already exists.")
class PortgroupAlreadyExists(Conflict):
_msg_fmt = _("A portgroup with UUID %(uuid)s already exists.")
class PortgroupDuplicateName(Conflict):
_msg_fmt = _("A portgroup with name %(name)s already exists.")
class PortgroupMACAlreadyExists(Conflict):
_msg_fmt = _("A portgroup with MAC address %(mac)s already exists.")
class InstanceAssociated(Conflict):
_msg_fmt = _("Instance %(instance_uuid)s is already associated with a "
"node, it cannot be associated with this other node %(node)s")
class DuplicateName(Conflict):
_msg_fmt = _("A node with name %(name)s already exists.")
class VolumeConnectorAlreadyExists(Conflict):
_msg_fmt = _("A volume connector with UUID %(uuid)s already exists.")
class VolumeConnectorTypeAndIdAlreadyExists(Conflict):
_msg_fmt = _("A volume connector with type %(type)s and connector ID "
"%(connector_id)s already exists.")
class VolumeTargetAlreadyExists(Conflict):
_msg_fmt = _("A volume target with UUID %(uuid)s already exists.")
class VolumeTargetBootIndexAlreadyExists(Conflict):
_msg_fmt = _("A volume target with boot index '%(boot_index)s' "
"for the same node already exists.")
class VifAlreadyAttached(Conflict):
_msg_fmt = _("Unable to attach VIF because VIF %(vif)s is already "
"attached to Ironic %(object_type)s %(object_uuid)s")
class NoFreePhysicalPorts(Invalid):
_msg_fmt = _("Unable to attach VIF %(vif)s, not "
"enough free physical ports.")
class VifNotAttached(Invalid):
_msg_fmt = _("Unable to detach VIF %(vif)s from node %(node)s "
"because it is not attached to it.")
class InvalidUUID(Invalid):
_msg_fmt = _("Expected a UUID but received %(uuid)s.")
class InvalidUuidOrName(Invalid):
_msg_fmt = _("Expected a logical name or UUID but received %(name)s.")
class InvalidName(Invalid):
_msg_fmt = _("Expected a logical name but received %(name)s.")
class InvalidIdentity(Invalid):
_msg_fmt = _("Expected a UUID or int but received %(identity)s.")
class InvalidMAC(Invalid):
_msg_fmt = _("Expected a MAC address but received %(mac)s.")
class InvalidSwitchID(Invalid):
_msg_fmt = _("Expected a MAC address or OpenFlow datapath ID but "
"received %(switch_id)s.")
class InvalidDatapathID(Invalid):
_msg_fmt = _("Expected an OpenFlow datapath ID but received "
"%(datapath_id)s.")
class InvalidStateRequested(Invalid):
_msg_fmt = _('The requested action "%(action)s" can not be performed '
'on node "%(node)s" while it is in state "%(state)s".')
class PatchError(Invalid):
_msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
class InstanceDeployFailure(IronicException):
_msg_fmt = _("Failed to deploy instance: %(reason)s")
class ImageUnacceptable(IronicException):
_msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageConvertFailed(IronicException):
_msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
_msg_fmt = "%(err)s"
class MissingParameterValue(InvalidParameterValue):
_msg_fmt = "%(err)s"
class Duplicate(IronicException):
_msg_fmt = _("Resource already exists.")
class NotFound(IronicException):
_msg_fmt = _("Resource could not be found.")
code = http_client.NOT_FOUND
class DHCPLoadError(IronicException):
_msg_fmt = _("Failed to load DHCP provider %(dhcp_provider_name)s, "
"reason: %(reason)s")
# TODO(dtantsur): word "driver" is overused in class names here, and generally
# means stevedore driver, not ironic driver. Rename them in the future.
class DriverNotFound(NotFound):
_msg_fmt = _("Could not find the following driver(s) or hardware type(s): "
"%(driver_name)s.")
class DriverNotFoundInEntrypoint(DriverNotFound):
_msg_fmt = _("Could not find the following items in the "
"'%(entrypoint)s' entrypoint: %(names)s.")
class InterfaceNotFoundInEntrypoint(InvalidParameterValue):
_msg_fmt = _("Could not find the following interface in the "
"'%(entrypoint)s' entrypoint: %(iface)s. Valid interfaces "
"are %(valid)s.")
class IncompatibleInterface(InvalidParameterValue):
_msg_fmt = _("%(interface_type)s interface implementation "
"'%(interface_impl)s' is not supported by hardware type "
"%(hardware_type)s.")
class NoValidDefaultForInterface(InvalidParameterValue):
# NOTE(rloo): in the line below, there is no blank space after 'For'
# because node_info could be an empty string. If node_info
# is not empty, it should start with a space.
_msg_fmt = _("For%(node_info)s %(driver_type)s '%(driver)s', no default "
"value found for %(interface_type)s interface.")
class MustBeNone(InvalidParameterValue):
_msg_fmt = _("For node %(node)s with driver %(driver)s, these node "
"fields must be set to None: %(node_fields)s.")
class ImageNotFound(NotFound):
_msg_fmt = _("Image %(image_id)s could not be found.")
class NoValidHost(NotFound):
_msg_fmt = _("No valid host was found. Reason: %(reason)s")
class InstanceNotFound(NotFound):
_msg_fmt = _("Instance %(instance)s could not be found.")
class InputFileError(IronicException):
_msg_fmt = _("Error with file %(file_name)s. Reason: %(reason)s")
class NodeNotFound(NotFound):
_msg_fmt = _("Node %(node)s could not be found.")
class PortgroupNotFound(NotFound):
_msg_fmt = _("Portgroup %(portgroup)s could not be found.")
class PortgroupNotEmpty(Invalid):
_msg_fmt = _("Cannot complete the requested action because portgroup "
"%(portgroup)s contains ports.")
class NodeAssociated(InvalidState):
_msg_fmt = _("Node %(node)s is associated with instance %(instance)s.")
class PortNotFound(NotFound):
_msg_fmt = _("Port %(port)s could not be found.")
class FailedToUpdateDHCPOptOnPort(IronicException):
_msg_fmt = _("Update DHCP options on port: %(port_id)s failed.")
class FailedToCleanDHCPOpts(IronicException):
_msg_fmt = _("Clean up DHCP options on node: %(node)s failed.")
class FailedToGetIPAddressOnPort(IronicException):
_msg_fmt = _("Retrieve IP address on port: %(port_id)s failed.")
class InvalidIPv4Address(IronicException):
_msg_fmt = _("Invalid IPv4 address %(ip_address)s.")
class FailedToUpdateMacOnPort(IronicException):
_msg_fmt = _("Update MAC address on port: %(port_id)s failed.")
class ChassisNotFound(NotFound):
_msg_fmt = _("Chassis %(chassis)s could not be found.")
class VolumeConnectorNotFound(NotFound):
_msg_fmt = _("Volume connector %(connector)s could not be found.")
class VolumeTargetNotFound(NotFound):
_msg_fmt = _("Volume target %(target)s could not be found.")
class DriverNameConflict(IronicException):
_msg_fmt = _("Classic and dynamic drivers cannot have the "
"same names '%(names)s'.")
class NoDriversLoaded(IronicException):
_msg_fmt = _("Conductor %(conductor)s cannot be started "
"because no drivers were loaded.")
class ConductorNotFound(NotFound):
_msg_fmt = _("Conductor %(conductor)s could not be found.")
class ConductorAlreadyRegistered(IronicException):
_msg_fmt = _("Conductor %(conductor)s already registered.")
class ConductorHardwareInterfacesAlreadyRegistered(IronicException):
_msg_fmt = _("At least one of these (hardware type %(hardware_type)s, "
"interface type %(interface_type)s, interfaces "
"%(interfaces)s) combinations are already registered for "
"this conductor.")
class PowerStateFailure(InvalidState):
_msg_fmt = _("Failed to set node power state to %(pstate)s.")
class ExclusiveLockRequired(NotAuthorized):
_msg_fmt = _("An exclusive lock is required, "
"but the current context has a shared lock.")
class NodeMaintenanceFailure(Invalid):
_msg_fmt = _("Failed to toggle maintenance-mode flag "
"for node %(node)s: %(reason)s")
class NodeConsoleNotEnabled(Invalid):
_msg_fmt = _("Console access is not enabled on node %(node)s")
class NodeInMaintenance(Invalid):
_msg_fmt = _("The %(op)s operation can't be performed on node "
"%(node)s because it's in maintenance mode.")
class ChassisNotEmpty(Invalid):
_msg_fmt = _("Cannot complete the requested action because chassis "
"%(chassis)s contains nodes.")
class IPMIFailure(IronicException):
_msg_fmt = _("IPMI call failed: %(cmd)s.")
class SSHConnectFailed(IronicException):
_msg_fmt = _("Failed to establish SSH connection to host %(host)s.")
class SSHCommandFailed(IronicException):
_msg_fmt = _("Failed to execute command via SSH: %(cmd)s.")
class UnsupportedDriverExtension(Invalid):
_msg_fmt = _('Driver %(driver)s does not support %(extension)s '
'(disabled or not implemented).')
class GlanceConnectionFailed(IronicException):
_msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class ImageNotAuthorized(NotAuthorized):
_msg_fmt = _("Not authorized for image %(image_id)s.")
class InvalidImageRef(Invalid):
_msg_fmt = _("Invalid image href %(image_href)s.")
class ImageRefValidationFailed(IronicException):
_msg_fmt = _("Validation of image href %(image_href)s failed, "
"reason: %(reason)s")
class ImageDownloadFailed(IronicException):
_msg_fmt = _("Failed to download image %(image_href)s, reason: %(reason)s")
class KeystoneUnauthorized(IronicException):
_msg_fmt = _("Not authorized in Keystone.")
class KeystoneFailure(IronicException):
pass
class CatalogNotFound(IronicException):
_msg_fmt = _("Service type %(service_type)s with endpoint type "
"%(endpoint_type)s not found in keystone service catalog.")
class ServiceUnavailable(IronicException):
_msg_fmt = _("Connection failed")
class Forbidden(IronicException):
_msg_fmt = _("Requested OpenStack Images API is forbidden")
class BadRequest(IronicException):
pass
class InvalidEndpoint(IronicException):
_msg_fmt = _("The provided endpoint is invalid")
class CommunicationError(IronicException):
_msg_fmt = _("Unable to communicate with the server.")
class HTTPForbidden(NotAuthorized):
_msg_fmt = _("Access was denied to the following resource: %(resource)s")
class Unauthorized(IronicException):
pass
class HTTPNotFound(NotFound):
pass
class ConfigNotFound(IronicException):
_msg_fmt = _("Could not find config at %(path)s")
class NodeLocked(Conflict):
_msg_fmt = _("Node %(node)s is locked by host %(host)s, please retry "
"after the current operation is completed.")
class NodeNotLocked(Invalid):
_msg_fmt = _("Node %(node)s found not to be locked on release")
class NoFreeConductorWorker(TemporaryFailure):
_msg_fmt = _('Requested action cannot be performed due to lack of free '
'conductor workers.')
code = http_client.SERVICE_UNAVAILABLE
class VendorPassthruException(IronicException):
pass
class ConfigInvalid(IronicException):
_msg_fmt = _("Invalid configuration file. %(error_msg)s")
class DriverLoadError(IronicException):
_msg_fmt = _("Driver, hardware type or interface %(driver)s could not be "
"loaded. Reason: %(reason)s.")
class ConsoleError(IronicException):
pass
class NoConsolePid(ConsoleError):
_msg_fmt = _("Could not find pid in pid file %(pid_path)s")
class ConsoleSubprocessFailed(ConsoleError):
_msg_fmt = _("Console subprocess failed to start. %(error)s")
class PasswordFileFailedToCreate(IronicException):
_msg_fmt = _("Failed to create the password file. %(error)s")
class IloOperationError(IronicException):
_msg_fmt = _("%(operation)s failed, error: %(error)s")
class IloOperationNotSupported(IronicException):
_msg_fmt = _("%(operation)s not supported. error: %(error)s")
class DracOperationError(IronicException):
_msg_fmt = _('DRAC operation failed. Reason: %(error)s')
class FailedToGetSensorData(IronicException):
_msg_fmt = _("Failed to get sensor data for node %(node)s. "
"Error: %(error)s")
class FailedToParseSensorData(IronicException):
_msg_fmt = _("Failed to parse sensor data for node %(node)s. "
"Error: %(error)s")
class InsufficientDiskSpace(IronicException):
_msg_fmt = _("Disk volume where '%(path)s' is located doesn't have "
"enough disk space. Required %(required)d MiB, "
"only %(actual)d MiB available space present.")
class ImageCreationFailed(IronicException):
_msg_fmt = _('Creating %(image_type)s image failed: %(error)s')
class SwiftOperationError(IronicException):
_msg_fmt = _("Swift operation '%(operation)s' failed: %(error)s")
class SwiftObjectNotFoundError(SwiftOperationError):
_msg_fmt = _("Swift object %(obj)s from container %(container)s "
"not found. Operation '%(operation)s' failed.")
class SNMPFailure(IronicException):
_msg_fmt = _("SNMP operation '%(operation)s' failed: %(error)s")
class FileSystemNotSupported(IronicException):
_msg_fmt = _("Failed to create a file system. "
"File system %(fs)s is not supported.")
class IRMCOperationError(IronicException):
_msg_fmt = _('iRMC %(operation)s failed. Reason: %(error)s')
class IRMCSharedFileSystemNotMounted(IronicException):
_msg_fmt = _("iRMC shared file system '%(share)s' is not mounted.")
class HardwareInspectionFailure(IronicException):
_msg_fmt = _("Failed to inspect hardware. Reason: %(error)s")
class NodeCleaningFailure(IronicException):
_msg_fmt = _("Failed to clean node %(node)s: %(reason)s")
class PathNotFound(IronicException):
_msg_fmt = _("Path %(dir)s does not exist.")
class DirectoryNotWritable(IronicException):
_msg_fmt = _("Directory %(dir)s is not writable.")
class UcsOperationError(IronicException):
_msg_fmt = _("Cisco UCS client: operation %(operation)s failed for node"
" %(node)s. Reason: %(error)s")
class UcsConnectionError(IronicException):
_msg_fmt = _("Cisco UCS client: connection failed for node "
"%(node)s. Reason: %(error)s")
class ImageUploadFailed(IronicException):
_msg_fmt = _("Failed to upload %(image_name)s image to web server "
"%(web_server)s, reason: %(reason)s")
class CIMCException(IronicException):
_msg_fmt = _("Cisco IMC exception occurred for node %(node)s: %(error)s")
class OneViewError(IronicException):
_msg_fmt = _("OneView exception occurred. Error: %(error)s")
class OneViewInvalidNodeParameter(OneViewError):
_msg_fmt = _("Error while obtaining OneView info from node %(node_uuid)s. "
"Error: %(error)s")
class NodeTagNotFound(IronicException):
_msg_fmt = _("Node %(node_id)s doesn't have a tag '%(tag)s'")
class NetworkError(IronicException):
_msg_fmt = _("Network operation failure.")
class IncompleteLookup(Invalid):
_msg_fmt = _("At least one of 'addresses' and 'node_uuid' parameters "
"is required")
class NotificationSchemaObjectError(IronicException):
_msg_fmt = _("Expected object %(obj)s when populating notification payload"
" but got object %(source)s")
class NotificationSchemaKeyError(IronicException):
_msg_fmt = _("Object %(obj)s doesn't have the field \"%(field)s\" "
"required for populating notification schema key "
"\"%(key)s\"")
class NotificationPayloadError(IronicException):
_msg_fmt = _("Payload not populated when trying to send notification "
"\"%(class_name)s\"")
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.common import topics
from neutron import context as q_context
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.db import api as ndb
from neutron.plugins.nec import nec_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
PLUGIN_NAME = 'neutron.plugins.nec.nec_plugin.NECPluginV2'
OFC_MANAGER = 'neutron.plugins.nec.nec_plugin.ofc_manager.OFCManager'
OFC_DRIVER = 'neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver'
class NecPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
PACKET_FILTER_ENABLE = False
def setUp(self):
self.addCleanup(mock.patch.stopall)
ofc_manager_cls = mock.patch(OFC_MANAGER).start()
ofc_driver = ofc_manager_cls.return_value.driver
ofc_driver.filter_supported.return_value = self.PACKET_FILTER_ENABLE
super(NecPluginV2TestCase, self).setUp(self._plugin_name)
self.context = q_context.get_admin_context()
self.plugin = manager.NeutronManager.get_plugin()
class TestNecBasicGet(test_plugin.TestBasicGet, NecPluginV2TestCase):
pass
class TestNecV2HTTPResponse(test_plugin.TestV2HTTPResponse,
NecPluginV2TestCase):
pass
class TestNecPortsV2(test_plugin.TestPortsV2, NecPluginV2TestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = True
class TestNecNetworksV2(test_plugin.TestNetworksV2, NecPluginV2TestCase):
pass
class TestNecPortBinding(test_bindings.PortBindingsTestCase,
NecPluginV2TestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = True
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER
def setUp(self):
test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
super(TestNecPortBinding, self).setUp()
class TestNecPortBindingNoSG(TestNecPortBinding):
HAS_PORT_FILTER = False
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
class TestNecPortsV2Callback(NecPluginV2TestCase):
def setUp(self):
super(TestNecPortsV2Callback, self).setUp()
self.callbacks = nec_plugin.NECPluginV2RPCCallbacks(self.plugin)
self.ofc = self.plugin.ofc
self.ofc_port_exists = False
self._setup_side_effects()
def _setup_side_effects(self):
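        # Emulate the OFC backend with stateful side effects: create/delete
        # toggle a flag so that exists_ofc_port reflects earlier calls.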
def _create_ofc_port_called(*args, **kwargs):
self.ofc_port_exists = True
def _delete_ofc_port_called(*args, **kwargs):
self.ofc_port_exists = False
def _exists_ofc_port_called(*args, **kwargs):
return self.ofc_port_exists
self.ofc.create_ofc_port.side_effect = _create_ofc_port_called
self.ofc.delete_ofc_port.side_effect = _delete_ofc_port_called
self.ofc.exists_ofc_port.side_effect = _exists_ofc_port_called
def _rpcapi_update_ports(self, agent_id='nec-q-agent.fake',
datapath_id="0xabc", added=[], removed=[]):
kwargs = {'topic': topics.AGENT,
'agent_id': agent_id,
'datapath_id': datapath_id,
'port_added': added, 'port_removed': removed}
self.callbacks.update_ports(self.context, **kwargs)
def _get_portinfo(self, port_id):
return ndb.get_portinfo(self.context.session, port_id)
def test_portinfo_create(self):
with self.port() as port:
port_id = port['port']['id']
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'DOWN')
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
self.assertIsNone(self._get_portinfo(port_id))
portinfo = {'id': port_id, 'port_no': 123}
self._rpcapi_update_ports(added=[portinfo])
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'ACTIVE')
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertIsNotNone(self._get_portinfo(port_id))
expected = [
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
]
self.ofc.assert_has_calls(expected)
def test_portinfo_delete_before_port_deletion(self):
self._test_portinfo_delete()
def test_portinfo_delete_after_port_deletion(self):
self._test_portinfo_delete(portinfo_delete_first=False)
def _test_portinfo_delete(self, portinfo_delete_first=True):
with self.port() as port:
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 456}
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
self.assertIsNone(self._get_portinfo(port_id))
self._rpcapi_update_ports(added=[portinfo])
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
self.assertIsNotNone(self._get_portinfo(port_id))
# Before port-deletion, switch port removed message is sent.
if portinfo_delete_first:
self._rpcapi_update_ports(removed=[port_id])
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNone(self._get_portinfo(port_id))
            # The port is expected to be deleted when exiting the with-block.
if not portinfo_delete_first:
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNotNone(self._get_portinfo(port_id))
self._rpcapi_update_ports(removed=[port_id])
# Ensure port deletion is called once.
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNone(self._get_portinfo(port_id))
expected = [
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.delete_ofc_port(mock.ANY, port_id, mock.ANY),
]
self.ofc.assert_has_calls(expected)
def test_portinfo_added_unknown_port(self):
portinfo = {'id': 'dummy-p1', 'port_no': 123}
self._rpcapi_update_ports(added=[portinfo])
self.assertIsNotNone(ndb.get_portinfo(self.context.session,
'dummy-p1'))
self.assertEqual(self.ofc.exists_ofc_port.call_count, 0)
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
def _test_portinfo_change(self, portinfo_change_first=True):
with self.port() as port:
port_id = port['port']['id']
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
portinfo = {'id': port_id, 'port_no': 123}
self._rpcapi_update_ports(added=[portinfo])
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
self.assertEqual(ndb.get_portinfo(self.context.session,
port_id).port_no, 123)
if portinfo_change_first:
portinfo = {'id': port_id, 'port_no': 456}
self._rpcapi_update_ports(added=[portinfo])
# OFC port is recreated.
self.assertEqual(self.ofc.create_ofc_port.call_count, 2)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertEqual(ndb.get_portinfo(self.context.session,
port_id).port_no, 456)
if not portinfo_change_first:
            # The port is expected to be deleted when exiting the with-block.
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
portinfo = {'id': port_id, 'port_no': 456}
self._rpcapi_update_ports(added=[portinfo])
# No OFC operations are expected.
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertEqual(ndb.get_portinfo(self.context.session,
port_id).port_no, 456)
def test_portinfo_change(self):
self._test_portinfo_change()
def test_portinfo_change_for_nonexisting_port(self):
self._test_portinfo_change(portinfo_change_first=False)
def test_port_migration(self):
agent_id_a, datapath_id_a, port_no_a = 'nec-q-agent.aa', '0xaaa', 10
agent_id_b, datapath_id_b, port_no_b = 'nec-q-agent.bb', '0xbbb', 11
with self.port() as port:
port_id = port['port']['id']
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'DOWN')
portinfo_a = {'id': port_id, 'port_no': port_no_a}
self._rpcapi_update_ports(agent_id=agent_id_a,
datapath_id=datapath_id_a,
added=[portinfo_a])
portinfo_b = {'id': port_id, 'port_no': port_no_b}
self._rpcapi_update_ports(agent_id=agent_id_b,
datapath_id=datapath_id_b,
added=[portinfo_b])
self._rpcapi_update_ports(agent_id=agent_id_a,
datapath_id=datapath_id_a,
removed=[port_id])
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'ACTIVE')
self.assertTrue(self.ofc_port_exists)
expected = [
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.delete_ofc_port(mock.ANY, port_id, mock.ANY),
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(2, self.ofc.create_ofc_port.call_count)
self.assertEqual(1, self.ofc.delete_ofc_port.call_count)
class TestNecPluginDbTest(NecPluginV2TestCase):
def test_update_resource(self):
with self.network() as network:
self.assertEqual("ACTIVE", network['network']['status'])
net_id = network['network']['id']
for status in ["DOWN", "BUILD", "ERROR", "ACTIVE"]:
self.plugin._update_resource_status(
self.context, 'network', net_id,
getattr(nec_plugin.OperationalStatus, status))
n = self.plugin._get_network(self.context, net_id)
self.assertEqual(status, n.status)
class TestNecPluginOfcManager(NecPluginV2TestCase):
def setUp(self):
super(TestNecPluginOfcManager, self).setUp()
self.ofc = self.plugin.ofc
def _create_resource(self, resource, data):
collection = resource + 's'
data = {resource: data}
req = self.new_create_request(collection, data)
res = self.deserialize(self.fmt, req.get_response(self.api))
return res[resource]
def _update_resource(self, resource, id, data):
collection = resource + 's'
data = {resource: data}
req = self.new_update_request(collection, data, id)
res = self.deserialize(self.fmt, req.get_response(self.api))
return res[resource]
def _show_resource(self, resource, id):
collection = resource + 's'
req = self.new_show_request(collection, id)
res = self.deserialize(self.fmt, req.get_response(self.api))
return res[resource]
def _list_resource(self, resource):
collection = resource + 's'
req = self.new_list_request(collection)
res = req.get_response(self.api)
return res[collection]
def _delete_resource(self, resource, id):
collection = resource + 's'
req = self.new_delete_request(collection, id)
res = req.get_response(self.api)
return res.status_int
def test_create_network(self):
self.ofc.exists_ofc_tenant.return_value = False
net = None
ctx = mock.ANY
with self.network() as network:
net = network['network']
self.assertEqual(network['network']['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_network_with_admin_state_down(self):
self.ofc.exists_ofc_tenant.return_value = False
net = None
ctx = mock.ANY
with self.network(admin_state_up=False) as network:
net = network['network']
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_two_network(self):
self.ofc.exists_ofc_tenant.side_effect = [False, True]
nets = []
ctx = mock.ANY
with self.network() as net1:
nets.append(net1['network'])
self.assertEqual(net1['network']['status'], 'ACTIVE')
with self.network() as net2:
nets.append(net2['network'])
self.assertEqual(net2['network']['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, nets[0]['id'],
nets[0]['name']),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, nets[1]['id'],
nets[1]['name']),
mock.call.delete_ofc_network(ctx, nets[1]['id'], mock.ANY),
mock.call.delete_ofc_network(ctx, nets[0]['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_network_fail(self):
self.ofc.exists_ofc_tenant.return_value = False
self.ofc.create_ofc_network.side_effect = nexc.OFCException(
reason='hoge')
net = None
ctx = mock.ANY
with self.network() as network:
net = network['network']
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_update_network(self):
self.ofc.exists_ofc_tenant.return_value = False
net = None
ctx = mock.ANY
with self.network() as network:
net = network['network']
self.assertEqual(network['network']['status'], 'ACTIVE')
# Set admin_state_up to False
res = self._update_resource('network', net['id'],
{'admin_state_up': False})
self.assertFalse(res['admin_state_up'])
# Set admin_state_up to True
res = self._update_resource('network', net['id'],
{'admin_state_up': True})
self.assertTrue(res['admin_state_up'])
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def _rpcapi_update_ports(self, agent_id='nec-q-agent.fake',
datapath_id="0xabc", added=[], removed=[]):
kwargs = {'topic': topics.AGENT,
'agent_id': agent_id,
'datapath_id': datapath_id,
'port_added': added, 'port_removed': removed}
self.plugin.callback_nec.update_ports(self.context, **kwargs)
def test_create_port_no_ofc_creation(self):
self.ofc.exists_ofc_tenant.return_value = False
self.ofc.exists_ofc_port.return_value = False
net = None
p1 = None
ctx = mock.ANY
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
p1 = port['port']
net_id = port['port']['network_id']
net = self._show_resource('network', net_id)
self.assertEqual(net['status'], 'ACTIVE')
self.assertEqual(p1['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_port_with_ofc_creation(self):
self.ofc.exists_ofc_tenant.return_value = False
self.ofc.exists_ofc_port.side_effect = [False, True]
net = None
p1 = None
ctx = mock.ANY
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
p1 = port['port']
net_id = port['port']['network_id']
net = self._show_resource('network', net_id)
self.assertEqual(net['status'], 'ACTIVE')
self.assertEqual(p1['status'], 'ACTIVE')
# Check the port is not created on OFC
self.assertFalse(self.ofc.create_ofc_port.call_count)
# Register portinfo, then the port is created on OFC
portinfo = {'id': p1['id'], 'port_no': 123}
self._rpcapi_update_ports(added=[portinfo])
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.create_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_delete_network_with_dhcp_port(self):
self.ofc.exists_ofc_tenant.return_value = False
self.ofc.exists_ofc_port.side_effect = [False, True]
ctx = mock.ANY
with self.network() as network:
with self.subnet(network=network):
net = network['network']
p = self._create_resource('port',
{'network_id': net['id'],
'tenant_id': net['tenant_id'],
'device_owner': 'network:dhcp',
'device_id': 'dhcp-port1'})
# Make sure that the port is created on OFC.
portinfo = {'id': p['id'], 'port_no': 123}
self._rpcapi_update_ports(added=[portinfo])
                # In the case of a DHCP port, the port is deleted
                # automatically when the network is deleted.
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id,
net['id'], net['name']),
mock.call.exists_ofc_port(ctx, p['id']),
mock.call.create_ofc_port(ctx, p['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p['id']),
mock.call.delete_ofc_port(ctx, p['id'], mock.ANY),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_update_port(self):
self._test_update_port_with_admin_state(resource='port')
def test_update_network_with_ofc_port(self):
self._test_update_port_with_admin_state(resource='network')
def _test_update_port_with_admin_state(self, resource='port'):
self.ofc.exists_ofc_tenant.return_value = False
self.ofc.exists_ofc_port.side_effect = [False, True, False]
net = None
p1 = None
ctx = mock.ANY
if resource == 'network':
net_ini_admin_state = False
port_ini_admin_state = True
else:
net_ini_admin_state = True
port_ini_admin_state = False
with self.network(admin_state_up=net_ini_admin_state) as network:
with self.subnet(network=network) as subnet:
with self.port(subnet=subnet,
admin_state_up=port_ini_admin_state) as port:
p1 = port['port']
net_id = port['port']['network_id']
res_id = net_id if resource == 'network' else p1['id']
net = self._show_resource('network', net_id)
# Check the port is not created on OFC
self.assertFalse(self.ofc.create_ofc_port.call_count)
# Register portinfo, then the port is created on OFC
portinfo = {'id': p1['id'], 'port_no': 123}
self._rpcapi_update_ports(added=[portinfo])
self.assertFalse(self.ofc.create_ofc_port.call_count)
self._update_resource(resource, res_id,
{'admin_state_up': True})
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertFalse(self.ofc.delete_ofc_port.call_count)
self._update_resource(resource, res_id,
{'admin_state_up': False})
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.create_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
|
|
"""
Container for different advection schemes for grids and particles.
Examples:
* semi_lagrangian (grid)
* mac_cormack (grid)
* runge_kutta_4 (particle)
"""
import warnings
from phi import math
from phi.field import SampledField, ConstantField, Field, PointCloud, extrapolate_valid, Grid, sample, reduce_sample
from phi.field._field_math import GridType
from phi.geom import Geometry
def euler(elements: Geometry, velocity: Field, dt: float, v0: math.Tensor = None) -> Geometry:
""" Euler integrator. """
if v0 is None:
v0 = sample(velocity, elements)
return elements.shifted(v0 * dt)
def rk4(elements: Geometry, velocity: Field, dt: float, v0: math.Tensor = None) -> Geometry:
""" Runge-Kutta-4 integrator. """
if v0 is None:
v0 = sample(velocity, elements)
vel_half = sample(velocity, elements.shifted(0.5 * dt * v0))
vel_half2 = sample(velocity, elements.shifted(0.5 * dt * vel_half))
vel_full = sample(velocity, elements.shifted(dt * vel_half2))
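    # Standard RK4 weighting of the four samples: (k1 + 2*k2 + 2*k3 + k4) / 6.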
vel_rk4 = (1 / 6.) * (v0 + 2 * (vel_half + vel_half2) + vel_full)
return elements.shifted(dt * vel_rk4)
def advect(field: SampledField,
velocity: Field,
dt: float or math.Tensor,
integrator=euler) -> Field:
"""
Advect `field` along the `velocity` vectors using the specified integrator.
The behavior depends on the type of `field`:
* `phi.field.PointCloud`: Points are advected forward, see `points`.
* `phi.field.Grid`: Sample points are traced backward, see `semi_lagrangian`.
Args:
field: Field to be advected as `phi.field.SampledField`.
velocity: Any `phi.field.Field` that can be sampled in the elements of `field`.
dt: Time increment
integrator: ODE integrator for solving the movement.
Returns:
Advected field of same type as `field`
"""
if isinstance(field, PointCloud):
return points(field, velocity, dt=dt, integrator=integrator)
elif isinstance(field, Grid):
return semi_lagrangian(field, velocity, dt=dt, integrator=integrator)
elif isinstance(field, ConstantField):
return field
raise NotImplementedError(field)
def points(field: PointCloud, velocity: Field, dt: float, integrator=euler):
"""
Advects the sample points of a point cloud using a simple Euler step.
Each point moves by an amount equal to the local velocity times `dt`.
Args:
field: point cloud to be advected
velocity: velocity sampled at the same points as the point cloud
dt: Euler step time increment
integrator: ODE integrator for solving the movement.
Returns:
Advected point cloud
"""
new_elements = integrator(field.elements, velocity, dt)
return field.with_elements(new_elements)
def semi_lagrangian(field: GridType,
velocity: Field,
dt: float,
integrator=euler) -> GridType:
"""
Semi-Lagrangian advection with simple backward lookup.
This method samples the `velocity` at the grid points of `field`
to determine the lookup location for each grid point by walking backwards along the velocity vectors.
The new values are then determined by sampling `field` at these lookup locations.
Args:
field: quantity to be advected, stored on a grid (CenteredGrid or StaggeredGrid)
        velocity: vector field; it need not be compatible with `field`.
dt: time increment
integrator: ODE integrator for solving the movement.
Returns:
Field with same sample points as `field`
"""
lookup = integrator(field.elements, velocity, -dt)
interpolated = reduce_sample(field, lookup)
return field.with_values(interpolated)
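def _semi_lagrangian_1d_sketch():
    """Stand-alone illustrative sketch (hypothetical helper, not part of the
    phi API): semi-Lagrangian advection of a 1D periodic profile with plain
    NumPy, mirroring the backward-lookup idea implemented above."""
    import numpy as np
    n, dt = 64, 0.5
    x = np.arange(n, dtype=float)                    # cell centres
    values = np.exp(-0.5 * ((x - n / 2) / 4) ** 2)   # quantity to advect
    velocity = np.full(n, 1.5)                       # velocity sampled at the cells
    lookup = x - dt * velocity                       # walk backwards along the velocity
    lo = np.floor(lookup).astype(int)                # linear interpolation of `values`
    frac = lookup - lo                               # at the periodic lookup positions
    return (1 - frac) * values[lo % n] + frac * values[(lo + 1) % n]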
def mac_cormack(field: GridType,
velocity: Field,
dt: float,
correction_strength=1.0,
integrator=euler) -> GridType:
"""
MacCormack advection uses a forward and backward lookup to determine the first-order error of semi-Lagrangian advection.
It then uses that error estimate to correct the field values.
To avoid overshoots, the resulting value is bounded by the neighbouring grid cells of the backward lookup.
Args:
field: Field to be advected, one of `(CenteredGrid, StaggeredGrid)`
velocity: Vector field, need not be sampled at same locations as `field`.
dt: Time increment
correction_strength: The estimated error is multiplied by this factor before being applied.
            The case correction_strength=0 is equivalent to semi-Lagrangian advection. Set it below 1.0 to avoid oscillations.
integrator: ODE integrator for solving the movement.
Returns:
Advected field of type `type(field)`
"""
v0 = sample(velocity, field.elements)
points_bwd = integrator(field.elements, velocity, -dt, v0=v0)
points_fwd = integrator(field.elements, velocity, dt, v0=v0)
# Semi-Lagrangian advection
field_semi_la = field.with_values(reduce_sample(field, points_bwd))
# Inverse semi-Lagrangian advection
field_inv_semi_la = field.with_values(reduce_sample(field_semi_la, points_fwd))
# correction
new_field = field_semi_la + correction_strength * 0.5 * (field - field_inv_semi_la)
# Address overshoots
limits = field.closest_values(points_bwd)
lower_limit = math.min(limits, [f'closest_{dim}' for dim in field.shape.spatial.names])
upper_limit = math.max(limits, [f'closest_{dim}' for dim in field.shape.spatial.names])
values_clamped = math.clip(new_field.values, lower_limit, upper_limit)
return new_field.with_values(values_clamped)
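def _mac_cormack_1d_sketch():
    """Stand-alone illustrative sketch (hypothetical helper, not part of the
    phi API): the MacCormack correction in 1D with plain NumPy. A backward and
    a forward semi-Lagrangian pass yield an error estimate that corrects the
    advected values, which are then clamped to the neighbours of the backward
    lookup to avoid overshoots (correction_strength = 1)."""
    import numpy as np

    def interp(values, pos):                         # periodic linear interpolation
        lo = np.floor(pos).astype(int)
        frac = pos - lo
        n = len(values)
        return (1 - frac) * values[lo % n] + frac * values[(lo + 1) % n]

    n, dt = 64, 0.5
    x = np.arange(n, dtype=float)
    values = np.exp(-0.5 * ((x - n / 2) / 4) ** 2)
    velocity = np.full(n, 1.5)
    backward, forward = x - dt * velocity, x + dt * velocity
    semi_la = interp(values, backward)               # semi-Lagrangian advection
    inverse = interp(semi_la, forward)               # inverse advection -> error estimate
    corrected = semi_la + 0.5 * (values - inverse)
    lo = np.floor(backward).astype(int)              # clamp to backward-lookup neighbours
    neighbours = np.stack([values[lo % n], values[(lo + 1) % n]])
    return np.clip(corrected, neighbours.min(axis=0), neighbours.max(axis=0))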
def runge_kutta_4(cloud: SampledField, velocity: Field, dt: float, accessible: Field = None, occupied: Field = None):
"""
    Lagrangian advection of particles using a fourth-order Runge-Kutta scheme. If `accessible` and `occupied` are specified,
the advection uses velocity-dependent extrapolation of `velocity`.
Args:
cloud: PointCloud holding the particle positions as elements
velocity: velocity Grid which should get used for advection
dt: Time step for runge-kutta
accessible: Boundary conditions for restricting extrapolation to accessible positions
occupied: Binary Grid indicating particle positions on the grid for extrapolation
Returns:
PointCloud with advected particle positions and their corresponding values.
"""
warnings.warn("runge_kutta_4 is deprecated. Use points() with integrator=rk4 instead.", DeprecationWarning)
assert isinstance(velocity, Grid), 'runge_kutta advection with extrapolation works for Grids only.'
def extrapolation_helper(elements, t_shift, v_field, mask):
shift = math.ceil(math.max(math.abs(elements.center - points.center))) - t_shift
t_shift += shift
v_field, mask = extrapolate_valid(v_field, mask, int(shift))
v_field *= accessible
return v_field, mask, t_shift
points = cloud.elements
total_shift = 0
extrapolate = accessible is not None and occupied is not None
# --- Sample velocity at intermediate points and adjust velocity-dependent
# extrapolation to maximum shift of corresponding component ---
if extrapolate:
assert isinstance(occupied, type(velocity)), 'occupation mask must have same type as velocity.'
velocity, occupied = extrapolate_valid(velocity, occupied, 2)
velocity *= accessible
vel_k1 = sample(velocity, points)
shifted_points = points.shifted(0.5 * dt * vel_k1)
if extrapolate:
velocity, occupied, total_shift = extrapolation_helper(shifted_points, total_shift, velocity, occupied)
vel_k2 = sample(velocity, shifted_points)
shifted_points = points.shifted(0.5 * dt * vel_k2)
if extrapolate:
velocity, occupied, total_shift = extrapolation_helper(shifted_points, total_shift, velocity, occupied)
vel_k3 = sample(velocity, shifted_points)
shifted_points = points.shifted(dt * vel_k3)
if extrapolate:
velocity, _, _ = extrapolation_helper(shifted_points, total_shift, velocity, occupied)
vel_k4 = sample(velocity, shifted_points)
# --- Combine points with RK4 scheme ---
vel = (1/6.) * (vel_k1 + 2 * (vel_k2 + vel_k3) + vel_k4)
new_points = points.shifted(dt * vel)
return cloud.with_elements(new_points)
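# Migration note (sketch, based on the deprecation warning above): without the
# optional extrapolation, the same advection can be expressed through the
# generic particle integrator, e.g.
#     cloud = points(cloud, velocity, dt, integrator=rk4)
# Velocity extrapolation into obstacles, if required, is then left to the caller.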
|
|
#! /usr/bin/env python
# License: 3-clause BSD
import sys
import os
import platform
import shutil
import tempfile
from distutils.command.build_py import build_py
from distutils.command.sdist import sdist
from distutils.command.clean import clean as Clean
from distutils.errors import CompileError, LinkError
from pkg_resources import parse_version
import traceback
# Get version and release info, which is all stored in pulse2percept/version.py
ver_file = os.path.join('pulse2percept', 'version.py')
with open(ver_file) as f:
exec(f.read())
VERSION = __version__
NUMPY_MIN_VERSION = '1.9.0'
SCIPY_MIN_VERSION = '1.0'
CYTHON_MIN_VERSION = '0.28'
JOBLIB_MIN_VERSION = '0.11'
DISTNAME = 'pulse2percept'
DESCRIPTION = 'A Python-based simulation framework for bionic vision'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Michael Beyeler, Ariel Rokem'
MAINTAINER_EMAIL = '[email protected], [email protected]'
URL = 'https://github.com/pulse2percept/pulse2percept'
DOWNLOAD_URL = 'https://pypi.org/project/pulse2percept/#files'
LICENSE = 'new BSD'
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/pulse2percept/pulse2percept/issues',
'Documentation': 'https://pulse2percept.github.io/pulse2percept',
'Source Code': 'https://github.com/pulse2percept/pulse2percept'
}
CLASSIFIERS = ['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
('Programming Language :: Python :: '
'Implementation :: CPython'),
('Programming Language :: Python :: '
'Implementation :: PyPy')
]
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = {
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
}
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
f'numpy >= {NUMPY_MIN_VERSION}',
f'scipy >= {SCIPY_MIN_VERSION}'
),
},
)
else:
extra_setuptools_args = dict()
class CleanCommand(Clean):
"""Custom clean command to remove build artifacts"""
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('pulse2percept'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
def openmp_build_ext():
"""Add support for OpenMP"""
from numpy.distutils.command.build_ext import build_ext
code = """#include <omp.h>
int main(int argc, char** argv) { return(0); }"""
class ConditionalOpenMP(build_ext):
def can_compile_link(self, compile_flags, link_flags):
cc = self.compiler
fname = 'test.c'
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
with open(fname, 'wt') as fobj:
fobj.write(code)
try:
objects = cc.compile([fname],
extra_postargs=compile_flags)
except CompileError:
return False
try:
                    # Link shared lib rather than executable to avoid
# http://bugs.python.org/issue4431 with MSVC 10+
cc.link_shared_lib(objects, "testlib",
extra_postargs=link_flags)
except (LinkError, TypeError):
return False
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
return True
def build_extensions(self):
""" Hook into extension building to check compiler flags """
        compile_flags, link_flags = [], []
if self.compiler.compiler_type == 'msvc':
compile_flags += ['/openmp']
link_flags += ['/openmp']
else:
compile_flags += ['-fopenmp']
link_flags += ['-fopenmp']
if self.can_compile_link(compile_flags, link_flags):
for ext in self.extensions:
ext.extra_compile_args += compile_flags
ext.extra_link_args += link_flags
build_ext.build_extensions(self)
return ConditionalOpenMP
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('pulse2percept')
return config
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(NUMPY_MIN_VERSION)
numpy_status['version'] = numpy_version
except ImportError:
traceback.print_exc()
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
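# Illustrative usage (hypothetical output, shown only to document the shape of
# the dictionary returned by get_numpy_status and the analogous
# get_cython_status below):
#
#     >>> get_numpy_status()      # doctest: +SKIP
#     {'up_to_date': True, 'version': '1.21.0'}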
def get_cython_status():
"""
Returns a dictionary containing a boolean specifying whether Cython is
up-to-date, along with the version string (empty string if not installed).
"""
cython_status = {}
try:
import Cython
from Cython.Build import cythonize
cython_version = Cython.__version__
cython_status['up_to_date'] = parse_version(
cython_version) >= parse_version(CYTHON_MIN_VERSION)
cython_status['version'] = cython_version
except ImportError:
traceback.print_exc()
cython_status['up_to_date'] = False
cython_status['version'] = ""
return cython_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
version=VERSION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
cmdclass={
'clean': CleanCommand,
'build_py': build_py,
'build_ext': openmp_build_ext(),
'sdist': sdist
},
python_requires=">=3.7",
install_requires=[
f'numpy>={NUMPY_MIN_VERSION}',
f'scipy>={SCIPY_MIN_VERSION}',
f'joblib>={JOBLIB_MIN_VERSION}'
],
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required
#
# They are required to succeed without NumPy for example when
# pip is used to install pulse2percept when NumPy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
        if sys.version_info < (3, 7):
            raise RuntimeError(
                f"pulse2percept requires Python 3.7 or later. The current"
f" Python version is {platform.python_version()} installed in {sys.executable}.")
instructions = ("Installation instructions are available on GitHub: "
"http://github.com/pulse2percept/pulse2percept\n")
# Make sure NumPy is installed:
numpy_status = get_numpy_status()
numpy_req_str = f"pulse2percept erquires NumPy >= {NUMPY_MIN_VERSION}\n"
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError(f"Your installation of Numerical Python "
f"(NumPy) {numpy_status[version]} is "
f"out-of-date.\n{numpy_req_str}{instructions}")
else:
raise ImportError(f"Numerical Python (NumPy) is not "
f"installed.\n{numpy_req_str}{instructions}")
from numpy.distutils.core import setup
# Make sure Cython is installed:
cython_status = get_cython_status()
cython_req_str = f"pulse2percept requires Cython >= {CYTHON_MIN_VERSION}.\n"
if cython_status['up_to_date'] is False:
if cython_status['version']:
raise ImportError(f"Your installation of C-Extensions for "
f"Python (Cython) {cython_status['version']} "
f"is out-of-date.\n{cython_req_str}{instructions}")
else:
raise ImportError(f"C-Extensions for Python (Cython) is not "
f"installed.\n{cython_req_str}{instructions}")
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2014, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import re
import copy
import time
from boto.ec2.regioninfo import RegionInfo
import boto
from concurrent.futures import ThreadPoolExecutor
import urllib2
import cookielib
import requests
from eutester import TestConnection
from boto.ec2.elb import ELBConnection
from boto.ec2.elb.listener import Listener
from boto.ec2.elb.healthcheck import HealthCheck
from os.path import join, abspath
ELBRegionData = {
'us-east-1': 'elasticloadbalancing.us-east-1.amazonaws.com',
'us-west-1': 'elasticloadbalancing.us-west-1.amazonaws.com',
'us-west-2': 'elasticloadbalancing.us-west-2.amazonaws.com',
'eu-west-1': 'elasticloadbalancing.eu-west-1.amazonaws.com',
'ap-northeast-1': 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticloadbalancing.ap-southeast-1.amazonaws.com'}
class ELBops(ELBConnection, TestConnection):
EUCARC_URL_NAME = 'aws_elb_url'
def __init__(self, eucarc=None, credpath=None,
aws_access_key_id=None, aws_secret_access_key=None,
is_secure=False, port=None, host=None, region=None, endpoint=None,
boto_debug=0, path=None, APIVersion=None, validate_certs=None,
test_resources=None, logger=None):
# Init test connection first to sort out base parameters...
TestConnection.__init__(self,
eucarc=eucarc,
credpath=credpath,
test_resources=test_resources,
logger=logger,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
is_secure=is_secure,
port=port,
host=host,
APIVersion=APIVersion,
validate_certs=validate_certs,
boto_debug=boto_debug,
                                path=path)
if self.boto_debug:
self.show_connection_kwargs()
# Init IAM connection...
try:
ELBConnection.__init__(self, **self._connection_kwargs)
except:
self.show_connection_kwargs()
raise
def setup_elb_connection(self, endpoint=None, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True,
host=None, region=None, path="/", port=443, boto_debug=0):
"""
:param endpoint:
:param aws_access_key_id:
:param aws_secret_access_key:
:param is_secure:
:param host:
:param region:
:param path:
:param port:
:param boto_debug:
:raise:
"""
elb_region = RegionInfo()
if region:
self.debug("Check region: " + str(region))
try:
if not endpoint:
elb_region.endpoint = ELBRegionData[region]
else:
elb_region.endpoint = endpoint
except KeyError:
raise Exception('Unknown region: %s' % region)
else:
elb_region.name = 'eucalyptus'
if not host:
if endpoint:
elb_region.endpoint = endpoint
else:
elb_region.endpoint = self.get_elb_ip()
connection_args = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'is_secure': is_secure,
'debug': boto_debug,
'port': port,
'path': path,
'region': elb_region}
if re.search('2.6', boto.__version__):
connection_args['validate_certs'] = False
try:
elb_connection_args = copy.copy(connection_args)
elb_connection_args['path'] = path
elb_connection_args['region'] = elb_region
self.debug(
"Attempting to create load balancer connection to " + elb_region.endpoint + ':' + str(port) + path)
self.connection = boto.connect_elb(**elb_connection_args)
except Exception, e:
self.critical("Was unable to create elb connection because of exception: " + str(e))
def setup_elb_resource_trackers(self):
"""
Setup keys in the test_resources hash in order to track artifacts created
"""
self.test_resources["load_balancers"] = []
def create_listener(self, load_balancer_port=80, protocol="HTTP", instance_port=80, load_balancer=None):
self.debug(
"Creating ELB Listener for protocol " + protocol + " and port " + str(load_balancer_port) + "->" + str(
instance_port))
        listener = Listener(load_balancer=load_balancer,
                            protocol=protocol,
                            load_balancer_port=load_balancer_port,
                            instance_port=instance_port)
        return listener
def create_healthcheck(self, target="HTTP:80/instance-name", interval=5, timeout=2, healthy_threshold=2,
unhealthy_threshold=10):
self.debug("Creating healthcheck: " + target + " interval=" + str(interval) + " timeout=" + str(timeout) +
" healthy threshold=" + str(healthy_threshold) + " unhealthy threshold=" + str(unhealthy_threshold))
healthcheck = HealthCheck(target=target,
timeout=timeout,
interval=interval,
healthy_threshold=healthy_threshold,
unhealthy_threshold=unhealthy_threshold)
return healthcheck
def generate_http_requests(self, url, count=100, worker_threads=20):
self.debug("Generating {0} http requests against {1}".format(count, url))
jar = cookielib.FileCookieJar("cookies")
handler = urllib2.HTTPCookieProcessor(jar)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
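        # Note: requests does not use the urllib2 opener installed above, so
        # the cookie jar has no effect on the requests.get calls below.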
response_futures = []
with ThreadPoolExecutor(max_workers=worker_threads) as executor:
for _ in range(count):
response_futures.append(executor.submit(requests.get, url))
responses = []
for response in response_futures:
http_response = response.result()
try:
http_error_code = http_response.status_code
if http_error_code == 200:
result = "{0}".format(http_response.content.rstrip())
self.debug("Request response: " + result)
responses.append(result)
else:
raise Exception("Error code " + http_error_code + " found when sending " +
str(worker_threads) + " concurrent requests to " + url)
finally:
http_response.close()
return responses
def register_lb_instances(self, name, instances, timeout=360, poll_count=15):
inst_ids = [inst.id for inst in instances]
self.debug("Registering instances {0} with lb {1}".format(inst_ids, name))
self.connection.register_instances(name, inst_ids)
poll_sleep = timeout / poll_count
for _ in range(poll_count):
self.debug("Checking instance health for {0}".format(inst_ids))
inst_states = self.connection.describe_instance_health(name, instances=inst_ids)
states = [state.state for state in inst_states]
if not states or 'OutOfService' in states:
time.sleep(poll_sleep)
elif 'InService' in states:
self.debug("Instances {0} for lb {1} are InService".format(inst_ids, name))
return
else:
# This should never happen
pass
raise Exception("Instances {0} failed to enter InService state before timeout".format(inst_ids))
def create_load_balancer(self, zones, name="test", load_balancer_port=80, instances=None):
self.debug("Creating load balancer: " + name + " on port " + str(load_balancer_port))
listener = self.create_listener(load_balancer_port=load_balancer_port)
self.connection.create_load_balancer(name, zones=zones, listeners=[listener])
healthcheck = self.create_healthcheck()
self.connection.configure_health_check(name, healthcheck)
if instances:
self.register_instances(name, instances)
### Validate the creation of the load balancer
lbs = self.connection.get_all_load_balancers(load_balancer_names=[name])
if not "load_balancers" in self.test_resources:
self.test_resources["load_balancers"] = []
if len(lbs) == 1:
self.test_resources["load_balancers"].append(lbs[0])
return lbs[0]
else:
raise Exception("Unable to retrieve load balancer after creation")
def delete_load_balancers(self, lbs, timeout=60):
for lb in lbs:
self.delete_load_balancer(lb, timeout)
def delete_load_balancer(self, lb, timeout=60, poll_sleep=10):
self.debug("Deleting Loadbalancer: {0}".format(lb.name))
self.connection.delete_load_balancer(lb.name)
poll_count = timeout / poll_sleep
for _ in range(poll_count):
lbs = self.connection.get_all_load_balancers(load_balancer_names=[lb.name])
if lb in lbs:
time.sleep(poll_sleep)
if lb in self.test_resources["load_balancers"]:
self.test_resources["load_balancers"].remove(lb)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
self.debug("Create app cookie stickiness policy: " + str(policy_name))
self.connection.create_app_cookie_stickiness_policy(name=name,
lb_name=lb_name,
policy_name=policy_name)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period, lb_name, policy_name):
self.debug("Create lb cookie stickiness policy: " + str(policy_name))
self.connection.create_lb_cookie_stickiness_policy(cookie_expiration_period=cookie_expiration_period,
lb_name=lb_name,
policy_name=policy_name)
def create_lb_policy(self, lb_name, policy_name, policy_type, policy_attributes):
self.debug("Create lb policy: " + str(policy_name))
self.connection.create_lb_policy(lb_name=lb_name,
policy_name=policy_name,
policy_type=policy_type,
policy_attributes=policy_attributes)
def set_lb_policy(self, lb_name, lb_port, policy_name=None):
self.debug("Set policy " + str(policy_name) + " for " + lb_name)
self.connection.set_lb_policies_of_listener(lb_name=lb_name,
lb_port=lb_port,
policies=policy_name)
def delete_lb_policy(self, lb_name, policy_name):
self.debug("Deleting lb policy " + str(policy_name) + " from " + str(lb_name))
self.connection.delete_lb_policy(lb_name=lb_name,
policy_name=policy_name)
def describe_lb_policies(self, lb):
lbs = self.connection.get_all_load_balancers(load_balancer_names=[lb])
return lbs[0].policies
def add_lb_listener(self, lb_name, listener):
self.debug("adding listener")
self.connection.create_load_balancer_listeners(name=lb_name, listeners=[listener])
def remove_lb_listener(self, lb_name, port):
self.debug("removing listener")
self.connection.delete_load_balancer_listeners(name=lb_name, ports=[port])
def add_server_cert(self, cert_name, cert_dir="./testcases/cloud_user/elb/test_data",
cert_file="ssl_server_certs_basics.crt",
key_file="ssl_server_certs_basics.pem"):
cert_body = open(join(cert_dir, cert_file)).read()
cert_key = open(join(cert_dir, key_file)).read()
self.upload_server_cert(cert_name=cert_name, cert_body=cert_body, private_key=cert_key)
|
|
import hashlib
import unittest
from mock import patch
import pytest
@patch('fabtools.require.files._mode')
@patch('fabtools.require.files._owner')
@patch('fabtools.require.files.umask')
@patch('fabtools.require.files.put')
@patch('fabtools.require.files.md5sum')
@patch('fabtools.require.files.is_file')
class FilesTestCase(unittest.TestCase):
def _file(self, *args, **kwargs):
""" Proxy to ensure ImportErrors actually cause test failures rather
than trashing the test run entirely """
from fabtools import require
require.files.file(*args, **kwargs)
def test_verify_remote_false(self, is_file, md5sum, put, umask, owner, mode):
""" If verify_remote is set to False, then we should find that
only is_file is used to check for the file's existence. Hashlib's
md5 should not have been called.
"""
is_file.return_value = True
self._file(contents='This is a test', verify_remote=False)
self.assertTrue(is_file.called)
self.assertFalse(md5sum.called)
def test_verify_remote_true(self, is_file, md5sum, put, umask, owner, mode):
""" If verify_remote is True, then we should find that an MD5 hash is
used to work out whether the file is different.
"""
is_file.return_value = True
md5sum.return_value = hashlib.md5('This is a test').hexdigest()
self._file(contents='This is a test', verify_remote=True)
self.assertTrue(is_file.called)
self.assertTrue(md5sum.called)
def test_temp_dir(self, is_file, md5sum, put, umask, owner, mode):
owner.return_value = 'root'
umask.return_value = '0002'
mode.return_value = '0664'
from fabtools import require
require.file('/var/tmp/foo', source=__file__, use_sudo=True, temp_dir='/somewhere')
put.assert_called_with(__file__, '/var/tmp/foo', use_sudo=True, temp_dir='/somewhere')
def test_home_as_temp_dir(self, is_file, md5sum, put, umask, owner, mode):
owner.return_value = 'root'
umask.return_value = '0002'
mode.return_value = '0664'
from fabtools import require
require.file('/var/tmp/foo', source=__file__, use_sudo=True, temp_dir='')
put.assert_called_with(__file__, '/var/tmp/foo', use_sudo=True, temp_dir='')
def test_default_temp_dir(self, is_file, md5sum, put, umask, owner, mode):
owner.return_value = 'root'
umask.return_value = '0002'
mode.return_value = '0664'
from fabtools import require
require.file('/var/tmp/foo', source=__file__, use_sudo=True)
put.assert_called_with(__file__, '/var/tmp/foo', use_sudo=True, temp_dir='/tmp')
class TestUploadTemplate(unittest.TestCase):
@patch('fabtools.files.run')
@patch('fabtools.files._upload_template')
def test_mkdir(self, mock_upload_template, mock_run):
from fabtools.files import upload_template
upload_template('filename', '/path/to/destination', mkdir=True)
args, kwargs = mock_run.call_args
self.assertEqual(args[0], 'mkdir -p /path/to')
@patch('fabtools.files.sudo')
@patch('fabtools.files._upload_template')
def test_mkdir_sudo(self, mock_upload_template, mock_sudo):
from fabtools.files import upload_template
upload_template('filename', '/path/to/destination', mkdir=True, use_sudo=True)
args, kwargs = mock_sudo.call_args
self.assertEqual(args[0], 'mkdir -p /path/to')
self.assertEqual(kwargs['user'], None)
@patch('fabtools.files.sudo')
@patch('fabtools.files._upload_template')
def test_mkdir_sudo_user(self, mock_upload_template, mock_sudo):
from fabtools.files import upload_template
upload_template('filename', '/path/to/destination', mkdir=True, use_sudo=True, user='alice')
args, kwargs = mock_sudo.call_args
self.assertEqual(args[0], 'mkdir -p /path/to')
self.assertEqual(kwargs['user'], 'alice')
@patch('fabtools.files.run_as_root')
@patch('fabtools.files._upload_template')
def test_chown(self, mock_upload_template, mock_run_as_root):
from fabric.api import env
from fabtools.files import upload_template
upload_template('filename', 'destination', chown=True)
args, kwargs = mock_run_as_root.call_args
self.assertEqual(args[0], 'chown %s: destination' % env.user)
@patch('fabtools.files.run_as_root')
@patch('fabtools.files._upload_template')
def test_chown_user(self, mock_upload_template, mock_run_as_root):
from fabtools.files import upload_template
upload_template('filename', 'destination', chown=True, user='alice')
args, kwargs = mock_run_as_root.call_args
self.assertEqual(args[0], 'chown alice: destination')
@patch('fabtools.files._upload_template')
def test_use_jinja_true(self, mock_upload_template):
from fabtools.files import upload_template
upload_template('filename', 'destination', use_jinja=True)
args, kwargs = mock_upload_template.call_args
self.assertEqual(kwargs['use_jinja'], True)
@patch('fabtools.files._upload_template')
def test_use_jinja_false(self, mock_upload_template):
from fabtools.files import upload_template
upload_template('filename', 'destination', use_jinja=False)
args, kwargs = mock_upload_template.call_args
self.assertEqual(kwargs['use_jinja'], False)
@pytest.yield_fixture(scope='module')
def mock_run():
with patch('fabtools.files.run') as mock:
yield mock
def test_copy(mock_run):
from fabtools.files import copy
copy('/tmp/src', '/tmp/dst')
mock_run.assert_called_with('/bin/cp /tmp/src /tmp/dst')
def test_copy_recursive(mock_run):
from fabtools.files import copy
copy('/tmp/src', '/tmp/dst', recursive=True)
mock_run.assert_called_with('/bin/cp -r /tmp/src /tmp/dst')
def test_copy_force(mock_run):
from fabtools.files import copy
copy('/tmp/src', '/tmp/dst', force=True)
mock_run.assert_called_with('/bin/cp -f /tmp/src /tmp/dst')
def test_copy_recursive_force(mock_run):
from fabtools.files import copy
copy('/tmp/src', '/tmp/dst', recursive=True, force=True)
mock_run.assert_called_with('/bin/cp -r -f /tmp/src /tmp/dst')
def test_move(mock_run):
from fabtools.files import move
move('/tmp/src', '/tmp/dst')
mock_run.assert_called_with('/bin/mv /tmp/src /tmp/dst')
def test_symlink(mock_run):
from fabtools.files import symlink
symlink('/tmp/src', '/tmp/dst')
mock_run.assert_called_with('/bin/ln -s /tmp/src /tmp/dst')
def test_symlink_force(mock_run):
from fabtools.files import symlink
symlink('/tmp/src', '/tmp/dst', force=True)
mock_run.assert_called_with('/bin/ln -s -f /tmp/src /tmp/dst')
def test_remove(mock_run):
from fabtools.files import remove
remove('/tmp/src')
mock_run.assert_called_with('/bin/rm /tmp/src')
def test_remove_recursive(mock_run):
from fabtools.files import remove
remove('/tmp/src', recursive=True)
mock_run.assert_called_with('/bin/rm -r /tmp/src')
def test_remove_force(mock_run):
from fabtools.files import remove
remove('/tmp/src.txt', force=True)
mock_run.assert_called_with('/bin/rm -f /tmp/src.txt')
def test_remove_recursive_force(mock_run):
from fabtools.files import remove
remove('/tmp/src', recursive=True, force=True)
mock_run.assert_called_with('/bin/rm -r -f /tmp/src')
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
from mock import patch
from testscenarios.scenarios import multiply_scenarios
import jenkins
from jenkins import plugins
from tests.base import JenkinsTestBase
class JenkinsPluginsBase(JenkinsTestBase):
plugin_info_json = {
u"plugins":
[
{
u"active": u'true',
u"backupVersion": u'null',
u"bundled": u'true',
u"deleted": u'false',
u"dependencies": [],
u"downgradable": u'false',
u"enabled": u'true',
u"hasUpdate": u'true',
u"longName": u"Jenkins Mailer Plugin",
u"pinned": u'false',
u"shortName": u"mailer",
u"supportsDynamicLoad": u"MAYBE",
u"url": u"http://wiki.jenkins-ci.org/display/JENKINS/Mailer",
u"version": u"1.5"
}
]
}
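    # Same plugin entry with the version bumped, used to simulate the plugin
    # information changing between two successive queries.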
updated_plugin_info_json = {
u"plugins":
[
dict(plugin_info_json[u"plugins"][0],
**{u"version": u"1.6"})
]
}
class JenkinsPluginsInfoTest(JenkinsPluginsBase):
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_simple(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected to return a list of plugins
plugins_info = self.j.get_plugins_info()
self.assertEqual(plugins_info, self.plugin_info_json['plugins'])
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=2'))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_return_none(self, jenkins_mock):
empty_plugin_info_json = {u"plugins": []}
jenkins_mock.return_value = json.dumps(empty_plugin_info_json)
plugins_info = self.j.get_plugins_info()
self.assertEqual(plugins_info, [])
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_depth(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
self.j.get_plugins_info(depth=1)
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=1'))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_raise_BadStatusLine(self, jenkins_mock):
jenkins_mock.side_effect = jenkins.BadStatusLine('not a valid status line')
with self.assertRaises(jenkins.BadHTTPException) as context_manager:
self.j.get_plugins_info()
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=2'))
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]'.format(self.base_url))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_return_invalid_json(self, jenkins_mock):
jenkins_mock.return_value = 'not valid JSON'
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.get_plugins_info()
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=2'))
self.assertEqual(
str(context_manager.exception),
'Could not parse JSON info for server[{0}/]'.format(self.base_url))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_raise_HTTPError(self, jenkins_mock):
jenkins_mock.side_effect = jenkins.HTTPError(
self.make_url('job/pluginManager/api/json?depth=2'),
code=401,
msg="basic auth failed",
hdrs=[],
fp=None)
with self.assertRaises(jenkins.BadHTTPException) as context_manager:
self.j.get_plugins_info(depth=52)
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]'.format(self.base_url))
self._check_requests(jenkins_mock.call_args_list)
class JenkinsPluginInfoTest(JenkinsPluginsBase):
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_shortname(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected to return info on a single plugin
plugin_info = self.j.get_plugin_info("mailer")
self.assertEqual(plugin_info, self.plugin_info_json['plugins'][0])
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_longname(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected to return info on a single plugin
plugin_info = self.j.get_plugin_info("Jenkins Mailer Plugin")
self.assertEqual(plugin_info, self.plugin_info_json['plugins'][0])
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_get_plugin_info_updated(self, jenkins_mock):
jenkins_mock.side_effect = [
json.dumps(self.plugin_info_json),
json.dumps(self.updated_plugin_info_json)
]
j = jenkins.Jenkins(self.make_url(''), 'test', 'test')
plugins_info = j.get_plugins()
self.assertEqual(plugins_info["mailer"]["version"],
self.plugin_info_json['plugins'][0]["version"])
self.assertNotEqual(
plugins_info["mailer"]["version"],
self.updated_plugin_info_json['plugins'][0]["version"])
plugins_info = j.get_plugins()
self.assertEqual(
plugins_info["mailer"]["version"],
self.updated_plugin_info_json['plugins'][0]["version"])
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_return_none(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected not to find bogus so should return None
plugin_info = self.j.get_plugin_info("bogus")
self.assertEqual(plugin_info, None)
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_depth(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
self.j.get_plugin_info('test', depth=1)
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=1'))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_raise_BadStatusLine(self, jenkins_mock):
jenkins_mock.side_effect = jenkins.BadStatusLine('not a valid status line')
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.get_plugin_info('test')
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=2'))
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]'.format(self.base_url))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_return_invalid_json(self, jenkins_mock):
jenkins_mock.return_value = 'not valid JSON'
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.get_plugin_info('test')
self.assertEqual(
jenkins_mock.call_args[0][0].get_full_url(),
self.make_url('pluginManager/api/json?depth=2'))
self.assertEqual(
str(context_manager.exception),
'Could not parse JSON info for server[{0}/]'.format(self.base_url))
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_raise_HTTPError(self, jenkins_mock):
jenkins_mock.side_effect = jenkins.HTTPError(
self.make_url('job/pluginManager/api/json?depth=2'),
code=401,
msg="basic auth failed",
hdrs=[],
fp=None)
with self.assertRaises(jenkins.JenkinsException) as context_manager:
self.j.get_plugin_info(u'TestPlugin', depth=52)
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]'.format(self.base_url))
self._check_requests(jenkins_mock.call_args_list)
class PluginsTestScenarios(JenkinsPluginsBase):
scenarios = multiply_scenarios(
JenkinsPluginsBase.scenarios,
[
('s1', dict(v1='1.0.0', op='__gt__', v2='0.8.0')),
('s2', dict(v1='1.0.1alpha', op='__gt__', v2='1.0.0')),
('s3', dict(v1='1.0', op='__eq__', v2='1.0.0')),
('s4', dict(v1='1.0', op='__eq__', v2='1.0')),
('s5', dict(v1='1.0', op='__lt__', v2='1.8.0')),
('s6', dict(v1='1.0.1alpha', op='__lt__', v2='1.0.1')),
('s7', dict(v1='1.0alpha', op='__lt__', v2='1.0.0')),
('s8', dict(v1='1.0-alpha', op='__lt__', v2='1.0.0')),
('s9', dict(v1='1.1-alpha', op='__gt__', v2='1.0')),
('s10', dict(v1='1.0-SNAPSHOT', op='__lt__', v2='1.0')),
('s11', dict(v1='1.0.preview', op='__lt__', v2='1.0')),
('s12', dict(v1='1.1-SNAPSHOT', op='__gt__', v2='1.0')),
('s13', dict(v1='1.0a-SNAPSHOT', op='__lt__', v2='1.0a')),
])
def setUp(self):
super(PluginsTestScenarios, self).setUp()
plugin_info_json = dict(self.plugin_info_json)
plugin_info_json[u"plugins"][0][u"version"] = self.v1
patcher = patch.object(jenkins.Jenkins, 'jenkins_open')
self.jenkins_mock = patcher.start()
self.addCleanup(patcher.stop)
self.jenkins_mock.return_value = json.dumps(plugin_info_json)
def test_plugin_version_comparison(self):
"""Verify that valid versions are ordinally correct.
That is, for each given scenario, v1.op(v2)==True where 'op' is the
equality operator defined for the scenario.
"""
plugin_name = "Jenkins Mailer Plugin"
j = jenkins.Jenkins(self.base_url, 'test', 'test')
plugin_info = j.get_plugins()[plugin_name]
v1 = plugin_info.get("version")
op = getattr(v1, self.op)
self.assertTrue(op(self.v2),
msg="Unexpectedly found {0} {2} {1} == False "
"when comparing versions!"
.format(v1, self.v2, self.op))
def test_plugin_version_object_comparison(self):
"""Verify use of PluginVersion for comparison
Verify that converting the version to be compared to the same object
type of PluginVersion before comparing provides the same result.
"""
plugin_name = "Jenkins Mailer Plugin"
j = jenkins.Jenkins(self.base_url, 'test', 'test')
plugin_info = j.get_plugins()[plugin_name]
v1 = plugin_info.get("version")
op = getattr(v1, self.op)
v2 = plugins.PluginVersion(self.v2)
self.assertTrue(op(v2),
msg="Unexpectedly found {0} {2} {1} == False "
"when comparing versions!"
.format(v1, v2, self.op))
class PluginsTest(JenkinsPluginsBase):
def test_plugin_equal(self):
p1 = plugins.Plugin(self.plugin_info_json)
p2 = plugins.Plugin(self.plugin_info_json)
self.assertEqual(p1, p2)
def test_plugin_not_equal(self):
p1 = plugins.Plugin(self.plugin_info_json)
p2 = plugins.Plugin(self.plugin_info_json)
p2[u'version'] = u"1.6"
self.assertNotEqual(p1, p2)
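# The classes above exercise Jenkins.get_plugins()/get_plugin_info() and the
# ordinal comparison provided by plugins.PluginVersion. The sketch below is
# illustrative only and is never invoked by the suite: the URL and credentials
# are placeholders, not part of the fixtures above.
def _example_plugin_version_check():
    """Minimal sketch: is the mailer plugin newer than 1.0?"""
    server = jenkins.Jenkins('http://localhost:8080', 'user', 'password')
    plugins_info = server.get_plugins()
    # Versions come back as plugins.PluginVersion objects, so the comparison is
    # ordinal (as PluginsTestScenarios verifies) rather than a plain string test.
    return plugins_info["mailer"]["version"] > plugins.PluginVersion('1.0')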
|
|
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Various tests for querying the library database.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from test import _common
from test._common import unittest
import beets.library
from beets import dbcore
from beets import config
# A test case class providing a library with some dummy data and some
# assertions involving that data.
class DummyDataTestCase(_common.TestCase):
def setUp(self):
super(DummyDataTestCase, self).setUp()
self.lib = beets.library.Library(':memory:')
albums = [_common.album() for _ in range(3)]
albums[0].album = "album A"
albums[0].genre = "Rock"
albums[0].year = "2001"
albums[0].flex1 = "flex1-1"
albums[0].flex2 = "flex2-A"
albums[0].albumartist = "foo"
albums[0].albumartist_sort = None
albums[1].album = "album B"
albums[1].genre = "Rock"
albums[1].year = "2001"
albums[1].flex1 = "flex1-2"
albums[1].flex2 = "flex2-A"
albums[1].albumartist = "bar"
albums[1].albumartist_sort = None
albums[2].album = "album C"
albums[2].genre = "Jazz"
albums[2].year = "2005"
albums[2].flex1 = "flex1-1"
albums[2].flex2 = "flex2-B"
albums[2].albumartist = "baz"
albums[2].albumartist_sort = None
for album in albums:
self.lib.add(album)
items = [_common.item() for _ in range(4)]
items[0].title = 'foo bar'
items[0].artist = 'one'
items[0].album = 'baz'
items[0].year = 2001
items[0].comp = True
items[0].flex1 = "flex1-0"
items[0].flex2 = "flex2-A"
items[0].album_id = albums[0].id
items[0].artist_sort = None
items[1].title = 'baz qux'
items[1].artist = 'two'
items[1].album = 'baz'
items[1].year = 2002
items[1].comp = True
items[1].flex1 = "flex1-1"
items[1].flex2 = "flex2-A"
items[1].album_id = albums[0].id
items[1].artist_sort = None
items[2].title = 'beets 4 eva'
items[2].artist = 'three'
items[2].album = 'foo'
items[2].year = 2003
items[2].comp = False
items[2].flex1 = "flex1-2"
items[2].flex2 = "flex1-B"
items[2].album_id = albums[1].id
items[2].artist_sort = None
items[3].title = 'beets 4 eva'
items[3].artist = 'three'
items[3].album = 'foo2'
items[3].year = 2004
items[3].comp = False
items[3].flex1 = "flex1-2"
items[3].flex2 = "flex1-C"
items[3].album_id = albums[2].id
items[3].artist_sort = None
for item in items:
self.lib.add(item)
class SortFixedFieldTest(DummyDataTestCase):
def test_sort_asc(self):
q = ''
sort = dbcore.query.FixedFieldSort("year", True)
results = self.lib.items(q, sort)
self.assertLessEqual(results[0]['year'], results[1]['year'])
self.assertEqual(results[0]['year'], 2001)
# same thing with query string
q = 'year+'
results2 = self.lib.items(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_desc(self):
q = ''
sort = dbcore.query.FixedFieldSort("year", False)
results = self.lib.items(q, sort)
self.assertGreaterEqual(results[0]['year'], results[1]['year'])
self.assertEqual(results[0]['year'], 2004)
# same thing with query string
q = 'year-'
results2 = self.lib.items(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_two_field_asc(self):
q = ''
s1 = dbcore.query.FixedFieldSort("album", True)
s2 = dbcore.query.FixedFieldSort("year", True)
sort = dbcore.query.MultipleSort()
sort.add_sort(s1)
sort.add_sort(s2)
results = self.lib.items(q, sort)
self.assertLessEqual(results[0]['album'], results[1]['album'])
self.assertLessEqual(results[1]['album'], results[2]['album'])
self.assertEqual(results[0]['album'], 'baz')
self.assertEqual(results[1]['album'], 'baz')
self.assertLessEqual(results[0]['year'], results[1]['year'])
# same thing with query string
q = 'album+ year+'
results2 = self.lib.items(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
class SortFlexFieldTest(DummyDataTestCase):
def test_sort_asc(self):
q = ''
sort = dbcore.query.SlowFieldSort("flex1", True)
results = self.lib.items(q, sort)
self.assertLessEqual(results[0]['flex1'], results[1]['flex1'])
self.assertEqual(results[0]['flex1'], 'flex1-0')
# same thing with query string
q = 'flex1+'
results2 = self.lib.items(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_desc(self):
q = ''
sort = dbcore.query.SlowFieldSort("flex1", False)
results = self.lib.items(q, sort)
self.assertGreaterEqual(results[0]['flex1'], results[1]['flex1'])
self.assertGreaterEqual(results[1]['flex1'], results[2]['flex1'])
self.assertGreaterEqual(results[2]['flex1'], results[3]['flex1'])
self.assertEqual(results[0]['flex1'], 'flex1-2')
# same thing with query string
q = 'flex1-'
results2 = self.lib.items(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_two_field(self):
q = ''
s1 = dbcore.query.SlowFieldSort("flex2", False)
s2 = dbcore.query.SlowFieldSort("flex1", True)
sort = dbcore.query.MultipleSort()
sort.add_sort(s1)
sort.add_sort(s2)
results = self.lib.items(q, sort)
self.assertGreaterEqual(results[0]['flex2'], results[1]['flex2'])
self.assertGreaterEqual(results[1]['flex2'], results[2]['flex2'])
self.assertEqual(results[0]['flex2'], 'flex2-A')
self.assertEqual(results[1]['flex2'], 'flex2-A')
self.assertLessEqual(results[0]['flex1'], results[1]['flex1'])
# same thing with query string
q = 'flex2- flex1+'
results2 = self.lib.items(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
class SortAlbumFixedFieldTest(DummyDataTestCase):
def test_sort_asc(self):
q = ''
sort = dbcore.query.FixedFieldSort("year", True)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['year'], results[1]['year'])
self.assertEqual(results[0]['year'], 2001)
# same thing with query string
q = 'year+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_desc(self):
q = ''
sort = dbcore.query.FixedFieldSort("year", False)
results = self.lib.albums(q, sort)
self.assertGreaterEqual(results[0]['year'], results[1]['year'])
self.assertEqual(results[0]['year'], 2005)
# same thing with query string
q = 'year-'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_two_field_asc(self):
q = ''
s1 = dbcore.query.FixedFieldSort("genre", True)
s2 = dbcore.query.FixedFieldSort("album", True)
sort = dbcore.query.MultipleSort()
sort.add_sort(s1)
sort.add_sort(s2)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['genre'], results[1]['genre'])
self.assertLessEqual(results[1]['genre'], results[2]['genre'])
self.assertEqual(results[1]['genre'], 'Rock')
self.assertEqual(results[2]['genre'], 'Rock')
self.assertLessEqual(results[1]['album'], results[2]['album'])
# same thing with query string
q = 'genre+ album+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
class SortAlbumFlexFieldTest(DummyDataTestCase):
def test_sort_asc(self):
q = ''
sort = dbcore.query.SlowFieldSort("flex1", True)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['flex1'], results[1]['flex1'])
self.assertLessEqual(results[1]['flex1'], results[2]['flex1'])
# same thing with query string
q = 'flex1+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_desc(self):
q = ''
sort = dbcore.query.SlowFieldSort("flex1", False)
results = self.lib.albums(q, sort)
self.assertGreaterEqual(results[0]['flex1'], results[1]['flex1'])
self.assertGreaterEqual(results[1]['flex1'], results[2]['flex1'])
# same thing with query string
q = 'flex1-'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_two_field_asc(self):
q = ''
s1 = dbcore.query.SlowFieldSort("flex2", True)
s2 = dbcore.query.SlowFieldSort("flex1", True)
sort = dbcore.query.MultipleSort()
sort.add_sort(s1)
sort.add_sort(s2)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['flex2'], results[1]['flex2'])
self.assertLessEqual(results[1]['flex2'], results[2]['flex2'])
self.assertEqual(results[0]['flex2'], 'flex2-A')
self.assertEqual(results[1]['flex2'], 'flex2-A')
self.assertLessEqual(results[0]['flex1'], results[1]['flex1'])
# same thing with query string
q = 'flex2+ flex1+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
class SortAlbumComputedFieldTest(DummyDataTestCase):
def test_sort_asc(self):
q = ''
sort = dbcore.query.SlowFieldSort("path", True)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['path'], results[1]['path'])
self.assertLessEqual(results[1]['path'], results[2]['path'])
# same thing with query string
q = 'path+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_sort_desc(self):
q = ''
sort = dbcore.query.SlowFieldSort("path", False)
results = self.lib.albums(q, sort)
self.assertGreaterEqual(results[0]['path'], results[1]['path'])
self.assertGreaterEqual(results[1]['path'], results[2]['path'])
# same thing with query string
q = 'path-'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
class SortCombinedFieldTest(DummyDataTestCase):
def test_computed_first(self):
q = ''
s1 = dbcore.query.SlowFieldSort("path", True)
s2 = dbcore.query.FixedFieldSort("year", True)
sort = dbcore.query.MultipleSort()
sort.add_sort(s1)
sort.add_sort(s2)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['path'], results[1]['path'])
self.assertLessEqual(results[1]['path'], results[2]['path'])
q = 'path+ year+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
def test_computed_second(self):
q = ''
s1 = dbcore.query.FixedFieldSort("year", True)
s2 = dbcore.query.SlowFieldSort("path", True)
sort = dbcore.query.MultipleSort()
sort.add_sort(s1)
sort.add_sort(s2)
results = self.lib.albums(q, sort)
self.assertLessEqual(results[0]['year'], results[1]['year'])
self.assertLessEqual(results[1]['year'], results[2]['year'])
self.assertLessEqual(results[0]['path'], results[1]['path'])
q = 'year+ path+'
results2 = self.lib.albums(q)
for r1, r2 in zip(results, results2):
self.assertEqual(r1.id, r2.id)
class ConfigSortTest(DummyDataTestCase):
def test_default_sort_item(self):
results = list(self.lib.items())
self.assertLess(results[0].artist, results[1].artist)
def test_config_opposite_sort_item(self):
config['sort_item'] = 'artist-'
results = list(self.lib.items())
self.assertGreater(results[0].artist, results[1].artist)
def test_default_sort_album(self):
results = list(self.lib.albums())
self.assertLess(results[0].albumartist, results[1].albumartist)
def test_config_opposite_sort_album(self):
config['sort_album'] = 'albumartist-'
results = list(self.lib.albums())
self.assertGreater(results[0].albumartist, results[1].albumartist)
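# The tests above demonstrate the two equivalent ways of requesting ordering:
# explicit dbcore.query sort objects, or '+'/'-' suffixes in the query string.
# A minimal sketch for reference (illustrative only; not used by the tests):
def _example_sorted_items(lib):
    """Return items sorted by year ascending, then path ascending."""
    sort = dbcore.query.MultipleSort()
    sort.add_sort(dbcore.query.FixedFieldSort('year', True))  # fixed DB column
    sort.add_sort(dbcore.query.SlowFieldSort('path', True))   # computed field
    # Equivalent query-string form: lib.items('year+ path+')
    return lib.items('', sort)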
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
|
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import pep8
import six
# Guidelines for writing new hacking checks
#
# - Use only for Neutron specific tests. OpenStack general tests
# should be submitted to the common 'hacking' module.
# - Pick numbers in the range N3xx. Find the current test with
# the highest allocated number and then pick the next value.
# - Keep the test method code in the source file ordered based
# on the N3xx value.
# - List the new rule in the top level HACKING.rst file
# - Add test cases for each new rule to
# neutron/tests/unit/hacking/test_checks.py
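#
# For reference, a new check following these guidelines is just a generator
# that yields (offset, "N3xx: message") pairs for offending lines and is then
# registered in factory() at the bottom of this module. Sketched here only as
# a comment (N3xx is a placeholder, not an allocated number):
#
#     def check_something(logical_line):
#         if "bad pattern" in logical_line:
#             yield (0, "N3xx: explain what is wrong and what to use instead")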
_all_log_levels = {
'error': '_LE',
'info': '_LI',
'warn': '_LW',
'warning': '_LW',
'critical': '_LC',
'exception': '_LE',
}
_all_hints = set(_all_log_levels.values())
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
def _regex_for_level(level, hint):
return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
'level': level,
'wrong_hints': '|'.join(_all_hints - set([hint])),
}
log_translation_hint = re.compile(
'|'.join('(?:%s)' % _regex_for_level(level, hint)
for level, hint in six.iteritems(_all_log_levels)))
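# The combined pattern flags a log call whose argument is either a bare string
# literal or a translation marker that does not match the level, e.g.
# LOG.error("boom") or LOG.error(_LI("boom")); LOG.error(_LE("boom")) passes.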
oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+")
oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]")
oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
if "neutron/tests" in filename:
return
if pep8.noqa(physical_line):
return
msg = "N320: Log messages require translation hints!"
if log_translation_hint.match(logical_line):
yield (0, msg)
def use_jsonutils(logical_line, filename):
msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
# Some files in the tree are not meant to be run from inside Neutron
# itself, so we should not complain about them not using jsonutils
json_check_skipped_patterns = [
"neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/"
"plugins/netwrap",
]
for pattern in json_check_skipped_patterns:
if pattern in filename:
return
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
N319
"""
for hint in _all_hints:
if logical_line.startswith("LOG.debug(%s(" % hint):
yield(0, "N319 Don't translate debug level logs")
def check_assert_called_once_with(logical_line, filename):
# Try to detect unintended calls of nonexistent mock methods like:
# assert_called_once
# assertCalledOnceWith
# assert_has_called
if 'neutron/tests/' in filename:
if '.assert_called_once_with(' in logical_line:
return
uncased_line = logical_line.lower().replace('_', '')
if '.assertcalledonce' in uncased_line:
msg = ("N322: Possible use of no-op mock method. "
"please use assert_called_once_with.")
yield (0, msg)
if '.asserthascalled' in uncased_line:
msg = ("N322: Possible use of no-op mock method. "
"please use assert_has_calls.")
yield (0, msg)
def check_oslo_namespace_imports(logical_line):
if re.match(oslo_namespace_imports_from_dot, logical_line):
msg = ("N323: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
elif re.match(oslo_namespace_imports_from_root, logical_line):
msg = ("N323: '%s' must be used instead of '%s'.") % (
logical_line.replace('from oslo import ', 'import oslo_'),
logical_line)
yield(0, msg)
elif re.match(oslo_namespace_imports_dot, logical_line):
msg = ("N323: '%s' must be used instead of '%s'.") % (
logical_line.replace('import', 'from').replace('.', ' import '),
logical_line)
yield(0, msg)
def check_no_contextlib_nested(logical_line, filename):
msg = ("N324: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested for "
"more information.")
if contextlib_nested.match(logical_line):
yield(0, msg)
def check_python3_xrange(logical_line):
if re.search(r"\bxrange\s*\(", logical_line):
yield(0, "N325: Do not use xrange. Use range, or six.moves.range for "
"large loops.")
def check_no_basestring(logical_line):
if re.search(r"\bbasestring\b", logical_line):
msg = ("N326: basestring is not Python3-compatible, use "
"six.string_types instead.")
yield(0, msg)
def check_python3_no_iteritems(logical_line):
if re.search(r".*\.iteritems\(\)", logical_line):
msg = ("N327: Use six.iteritems() instead of dict.iteritems().")
yield(0, msg)
def check_asserttrue(logical_line, filename):
if 'neutron/tests/' in filename:
if re.search(r"assertEqual\(True,.*\)", logical_line):
msg = ("N328: Use assertTrue(observed) instead of"
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\(.*, True\)", logical_line):
msg = ("N328: Use assertTrue(observed) instead of"
"assertEqual(True, observed)")
yield (0, msg)
def no_mutable_default_args(logical_line):
msg = "N329: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
def factory(register):
register(validate_log_translations)
register(use_jsonutils)
register(check_assert_called_once_with)
register(no_translate_debug_logs)
register(check_oslo_namespace_imports)
register(check_no_contextlib_nested)
register(check_python3_xrange)
register(check_no_basestring)
register(check_python3_no_iteritems)
register(check_asserttrue)
register(no_mutable_default_args)
|
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.urls import reverse
from ci import models
from ci.tests import utils
from os import path
from mock import patch
import json
from django.test import override_settings
from ci.tests import DBTester
from requests_oauthlib import OAuth2Session
@override_settings(INSTALLED_GITSERVERS=[utils.github_config()])
class Tests(DBTester.DBTester):
def setUp(self):
super(Tests, self).setUp()
self.create_default_recipes()
def get_data(self, fname):
p = '{}/{}'.format(path.dirname(__file__), fname)
with open(p, 'r') as f:
contents = f.read()
return contents
def client_post_json(self, url, data):
json_data = json.dumps(data)
return self.client.post(url, json_data, content_type='application/json')
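    # client_post_json() posts the payload the way these tests need it: as a JSON
    # body with Content-Type application/json. test_webhook() below shows that a
    # plain form-encoded post is rejected with a 400.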
def test_webhook(self):
url = reverse('ci:github:webhook', args=[10000])
# only post allowed
response = self.client.get(url)
self.assertEqual(response.status_code, 405) # not allowed
# no user
data = {'key': 'value'}
response = self.client_post_json(url, data)
self.assertEqual(response.status_code, 400)
# not json
user = utils.get_test_user()
url = reverse('ci:github:webhook', args=[user.build_key])
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400)
# user with no recipes
response = self.client_post_json(url, data)
self.assertEqual(response.status_code, 400)
# unknown json
utils.create_recipe(user=user)
response = self.client_post_json(url, data)
self.assertEqual(response.status_code, 400)
@patch.object(OAuth2Session, 'post')
@patch.object(OAuth2Session, 'get')
@patch.object(OAuth2Session, 'delete')
def test_pull_request(self, mock_del, mock_get, mock_post):
url = reverse('ci:github:webhook', args=[self.build_user.build_key])
changed_files = utils.Response([{"filename": "foo"}])
mock_get.return_value = changed_files
mock_del.return_value = utils.Response()
mock_post.return_value = utils.Response()
data = self.get_data('pr_open_01.json')
py_data = json.loads(data)
py_data['pull_request']['base']['repo']['owner']['login'] = self.owner.name
py_data['pull_request']['base']['repo']['name'] = self.repo.name
py_data['pull_request']['title'] = '[WIP] testTitle'
# no events or jobs on a work in progress
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts()
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# no events or jobs on a work in progress
py_data['pull_request']['title'] = 'WIP: testTitle'
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts()
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# should produce a job and an event
py_data['pull_request']['title'] = 'testTitle'
self.set_counts()
mock_get.call_count = 0
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts(jobs=2,
ready=1,
events=1,
commits=2,
users=1,
repos=1,
branches=1,
prs=1,
active=2,
active_repos=1,
)
ev = models.Event.objects.latest()
self.assertEqual(ev.trigger_user, py_data['pull_request']['user']['login'])
self.assertEqual(mock_get.call_count, 1) # for changed files
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# should just close the event
py_data['action'] = 'closed'
mock_get.call_count = 0
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts(pr_closed=True)
self.assertEqual(mock_get.call_count, 1) # for changed files
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# should just open the same event
py_data['action'] = 'reopened'
mock_get.call_count = 0
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts()
self.assertEqual(mock_get.call_count, 1) # for changed files
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# nothing should change
py_data['action'] = 'labeled'
mock_get.call_count = 0
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts()
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# nothing should change
py_data['action'] = 'bad_action'
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 400)
self.assertIn(b"bad_action", response.content)
self.compare_counts()
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
# on synchronize we also remove labels on the PR
py_data['action'] = 'synchronize'
with self.settings(INSTALLED_GITSERVERS=[utils.github_config(remote_update=True)]):
label_name = self.server.server_config()["remove_pr_label_prefix"][0]
mock_get.return_value = None
remove_label = utils.Response([{"name": label_name}])
mock_get.side_effect = [remove_label, changed_files]
mock_del.return_value = utils.Response()
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts()
self.assertEqual(mock_get.call_count, 2) # 1 for changed files, 1 in remove_pr_todo_labels
self.assertEqual(mock_del.call_count, 1) # for remove_pr_todo_labels
self.assertEqual(mock_post.call_count, 0)
# new sha, new event
py_data['pull_request']['head']['sha'] = '2345'
mock_get.side_effect = [remove_label, changed_files]
mock_get.call_count = 0
mock_del.call_count = 0
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts(jobs=2,
ready=1,
events=1,
commits=1,
active=2,
canceled=2,
events_canceled=1,
num_changelog=2,
num_events_completed=1,
num_jobs_completed=2,
)
self.assertEqual(mock_del.call_count, 1)
self.assertEqual(mock_get.call_count, 2) # 1 for changed files, 1 in remove_pr_todo_labels
self.assertEqual(mock_post.call_count, 2) # 2 new jobs pending status
@patch.object(OAuth2Session, 'post')
@patch.object(OAuth2Session, 'get')
@patch.object(OAuth2Session, 'delete')
def test_push(self, mock_del, mock_get, mock_post):
url = reverse('ci:github:webhook', args=[self.build_user.build_key])
data = self.get_data('push_01.json')
py_data = json.loads(data)
py_data['repository']['owner']['name'] = self.owner.name
py_data['repository']['name'] = self.repo.name
py_data['ref'] = 'refs/heads/{}'.format(self.branch.name)
# Everything OK
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts(jobs=2, ready=1, events=1, commits=2, active=2, active_repos=1)
ev = models.Event.objects.latest()
self.assertEqual(ev.cause, models.Event.PUSH)
self.assertEqual(ev.description, "Update README.md")
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
py_data['head_commit']['message'] = "Merge commit '123456789'"
py_data['after'] = '123456789'
py_data['before'] = '1'
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"OK")
self.compare_counts(jobs=2, ready=1, events=1, commits=2, active=2)
ev = models.Event.objects.latest()
self.assertEqual(ev.description, "Merge commit 123456")
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
def test_zen(self):
url = reverse('ci:github:webhook', args=[self.build_user.build_key])
data = self.get_data('ping.json')
py_data = json.loads(data)
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.compare_counts()
@patch.object(OAuth2Session, 'post')
@patch.object(OAuth2Session, 'get')
@patch.object(OAuth2Session, 'delete')
def test_release(self, mock_del, mock_get, mock_post):
jdata = [{"name": "1.0",
"commit": {"sha": "1234"},
}]
mock_get.return_value = utils.Response(jdata)
url = reverse('ci:github:webhook', args=[self.build_user.build_key])
data = self.get_data('release.json')
py_data = json.loads(data)
py_data['repository']['owner']['login'] = self.owner.name
py_data['repository']['name'] = self.repo.name
py_data['release']['target_commitish'] = self.branch.name
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.compare_counts()
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_get.call_count, 1) # getting SHA
self.assertEqual(mock_post.call_count, 0)
        # The target commitish could be a raw SHA; in that case the branch is assumed to be master
py_data['release']['target_commitish'] = "1"*40
mock_get.call_count = 0
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.compare_counts()
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_get.call_count, 0)
self.assertEqual(mock_post.call_count, 0)
rel = utils.create_recipe(name="Release1",
user=self.build_user,
repo=self.repo,
branch=self.branch,
cause=models.Recipe.CAUSE_RELEASE,
)
rel1 = utils.create_recipe(name="Release with dep",
user=self.build_user,
repo=self.repo,
branch=self.branch,
cause=models.Recipe.CAUSE_RELEASE,
)
rel1.depends_on.add(rel)
py_data['release']['target_commitish'] = self.branch.name
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 200)
self.compare_counts(events=1, commits=1, jobs=2, ready=1, active=2)
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_get.call_count, 1) # getting SHA
self.assertEqual(mock_post.call_count, 0)
mock_get.call_count = 0
mock_get.side_effect = Exception("Bam!")
self.set_counts()
response = self.client_post_json(url, py_data)
self.assertEqual(response.status_code, 400)
self.compare_counts()
self.assertEqual(mock_del.call_count, 0)
self.assertEqual(mock_get.call_count, 1) # getting SHA
self.assertEqual(mock_post.call_count, 0)
|
|
import imdb
from colorama import Fore, Style
from plugin import plugin, require
from functools import lru_cache
app = imdb.IMDb()
def main(jarvis, movie):
movie_id = search_movie(jarvis, movie)
if movie_id is None:
return None
return get_movie_by_id(movie_id)
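# Both lookups below are memoized with functools.lru_cache, so repeated queries
# for the same title skip the network round trip to IMDb. Note that lru_cache
# keys on every positional argument, including the jarvis instance passed to
# search_movie(), so that object has to remain hashable.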
@lru_cache(maxsize=50, typed=False)
def search_movie(jarvis, movie, all_results=False):
if movie == '':
jarvis.say("Please add movie name!", Fore.RED)
return None
results = app.search_movie(movie, results=10)
if not results:
jarvis.say("Error: Did not find movie!", Fore.RED)
return None
if not all_results:
first = results[0]
return first.movieID
return results
@lru_cache(maxsize=20, typed=False)
def get_movie_by_id(movie_id):
return app.get_movie(movie_id)
@require(network=True)
@plugin('movie cast')
def movie_cast(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['cast']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie director')
def movie_director(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['director']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie plot')
def movie_plot(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
if 'plot outline' in data:
jarvis.say('Plot outline:', Fore.GREEN)
jarvis.say(data['plot outline'])
jarvis.say('')
if 'plot' in data:
jarvis.say('Plot:', Fore.GREEN)
for d in data['plot']:
jarvis.say(d)
@require(network=True)
@plugin('movie producer')
def movie_producer(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['producers']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie rating')
def movie_rating(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['rating']))
@require(network=True)
@plugin('movie year')
def movie_year(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['year']))
@require(network=True)
@plugin('movie runtime')
def movie_runtime(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
if 'runtimes' in data:
jarvis.say(str(data['runtimes'][0]) + ' minutes')
else:
jarvis.say("No runtime data present")
@require(network=True)
@plugin('movie countries')
def movie_countries(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['countries']:
jarvis.say(str(d))
@require(network=True)
@plugin('movie genres')
def movie_genres(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['genres']:
jarvis.say(d)
@require(network=True)
@plugin('movie info')
def movie_info(jarvis, movie):
"""
    Prompt for the desired attributes and print the corresponding movie information
"""
data = main(jarvis, movie)
if data is not None:
get_movie_info(jarvis, data)
@require(network=True)
@plugin('movie search')
def movie_search(jarvis, movie):
""" search for a movie on IMDB"""
results = search_movie(jarvis, movie, all_results=True)
# if results is None or empty
if not results:
return None
# get only movies from the results, filtering out TV series, etc
movie_results = []
for item in results:
if item['kind'] == 'movie':
movie_results.append(item)
if len(movie_results) > 5:
count = 5
else:
count = len(movie_results)
jarvis.say('')
space = ' '
text = 'ID'
text += space * 3 + 'Movie title'
jarvis.say(text, Fore.GREEN)
for i in range(count):
item = movie_results[i]
text = Fore.GREEN + str(i + 1) + space * 4
text += Fore.RESET + item['smart long imdb canonical title']
jarvis.say(text)
jarvis.say('')
jarvis.say('Please enter ID to know more(q - quit):')
input_id = jarvis.input()
# If nothing is entered, just return
if input_id == '':
return None
    if len(input_id) != 1:
        return jarvis.say(Fore.RED + 'Please enter valid value')
    elif input_id == 'q':
        return None
    elif input_id in '123456789':
        input_id = int(input_id)
    else:
        # anything that is not a single digit or 'q' is rejected instead of
        # falling through to int() and raising ValueError
        return jarvis.say(Fore.RED + 'Please enter valid value')
    # if the entered ID is out of the given list of IDs
    if (input_id > count) or (input_id < 1):
        return jarvis.say(Fore.RED + 'Please enter id from the given list')
movie_id = movie_results[input_id - 1].movieID
data = get_movie_by_id(movie_id)
get_movie_info(jarvis, data)
def colorized_output(key, value):
"""
pretty print key value pair
"""
green_text = Fore.GREEN + "{:<14}".format(key)
normal_text = Style.RESET_ALL + ": " + str(value)
return green_text + normal_text
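# Example (ANSI colors elided): colorized_output('Rating', 8.1) renders roughly
# as "Rating        : 8.1" -- the key is left-aligned and padded to 14 columns.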
def get_movie_info(jarvis, data):
"""
Takes a movie attributes as input and prints them accordingly
"""
jarvis.say('')
jarvis.say(
'What type of information do you want: cast, producers, genres, etc.?')
jarvis.say('Write one after another separated by space, please:')
movie_attributes = jarvis.input()
movie_attributes = movie_attributes.split()
jarvis.say('')
for attribute in movie_attributes:
if attribute in data:
value = data[attribute]
if attribute == 'genres':
value = ', '.join(value)
if attribute == 'cast':
lst = [person['name'] for person in value]
value = ', '.join(lst[0:3])
if isinstance(value, list):
value = value[0]
jarvis.say(colorized_output(attribute.capitalize(), str(value)))
else:
jarvis.say(
colorized_output(
attribute.capitalize(),
'no information retrieved'))
# print IMDB url of the movie
movie_url = app.urls['movie_base'] + 'tt' + data.movieID
jarvis.say(colorized_output('IMDB url', movie_url))
jarvis.say('')
|
|
import MySQLdb
import time
import logging
from mysql_basic_c import mysql_basic_c as database
from ..check.PortCheck import check_port
logger = logging.getLogger(__name__)
class tab_container_runtime(object):
'''
Manage tab_container_runtime db table.
'''
def __init__(self, host, user, passwd, db):
'''
init class.
Args:
host:mysql database server host
user:mysql database server user
passwd:mysql database server password
db:which database will be used
Return:
Raise:
'''
self._db = database(host, user, passwd, db)
def del_user_zone(self, user, zonename):
'''
delete the record in db with the user and the zonename.
Args:
user:user field
zonename:zonename field
Return:
True:ok
False:database error
Raise:
'''
sql = "delete from tab_container_runtime where user='%s' and zonename='%s'" % (user,zonename)
return self._db.execute(sql)
def del_user(self, user):
'''
delete the record in db with the user.
Args:
user:user field
Return:
True:ok
False:database error
Raise:
'''
sql = "delete from tab_container_runtime where user='%s'" % user
return self._db.execute(sql)
def get_zone_list_for_user(self, user):
'''
get zone list for user.
Args:
user:user
Return:
(False,None):database error.
(True,[]):no data
(True,[zone1,zone2]):the normal case
Raise:
'''
sql = "select zonename from tab_container_runtime where user='%s'" % user
err,result = self._db.query(sql)
if False == err:
return (err,result)
zone_list = []
for row in result:
for r in row:
zone_list.append(r)
return (err, zone_list)
def get_user_list_for_zone(self, zonename):
'''
get user list for zone.
Args:
zonename:zonename
Return:
(False,None):database error.
(True,[]):no data
(True,[user1,user2]):the normal case
Raise:
'''
sql = "select user from tab_container_runtime where zonename='%s'" % zonename
err,result = self._db.query(sql)
if False == err:
return (err,result)
user_list = []
for row in result:
for r in row:
user_list.append(r)
return (err, user_list)
def get_screennum_list_for_user(self, user):
'''
get screennum list for user.
Args:
user:username
Return:
(False,None):database error.
(True,[]):no data
(True,[1,2]):the normal case
Raise:
'''
sql = "select screennum from tab_container_runtime where user='%s'" % user
err,result = self._db.query(sql)
if False == err:
return (err,result)
screennum_list = []
for row in result:
for r in row:
screennum_list.append(r)
return (err,screennum_list)
def get_screennum_for_user_zone(self, user, zonename):
'''
get the screen number for the user and zone.
Args:
user:username
zonename:zonename
Return:
(False,None):database error
(True,()): no record
(True,...):the screen number of the user and zone.
Raise:
'''
sql = "select screennum from tab_container_runtime where user='%s' and zonename='%s'" % (user, zonename)
err,result = self._db.query(sql)
if False == err:
return (err,result)
screennum = ()
for row in result:
for r in row:
screennum = r
return (err,screennum)
def get_info_for_user_fileinout(self, user):
'''
        get the information (user, zonename, screennum) where filein is True or fileout is True for the user.
Args:
user:username
Return:
(False,None):database error.
(True,()):no record
(True,other):has record
'''
sql = "select user,zonename,screennum from tab_container_runtime where user='%s' and (filein=1 or fileout=1)" % (user,)
err,result = self._db.query(sql)
if False == err:
return (err,result)
ret = ()
for row in result:
ret = row
return (err,ret)
def get_screennum(self, hostname, user, zonename):
'''
get free screen number for the user and zonename.
Args:
hostname:hostname
user:username
zonename:zonename
Return:
(False,None):database error
(True,{"status":"old","screennum":screennum}): the old screennum
(True,{"status":"new","screennum":screennum}): the new screennum
Raise:
'''
sql = "select screennum from tab_container_runtime where user='%s' and zonename='%s'" % (user, zonename)
err,result = self._db.query(sql)
if False == err:
return (err,result)
screennum = ()
for row in result:
for r in row:
screennum = r
if () != screennum:
return (err,{"status":"old","screennum":screennum})
else:
screennum_list = []
sql = "select screennum from tab_container_runtime"
err,result = self._db.query(sql)
if False == err:
return (err,result)
for row in result:
for r in row:
screennum_list.append(r)
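            # Scan display numbers 1..1999 for one that is neither already recorded
            # in the table nor answering on the conventional VNC port (5900 + display).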
for i in range(1,2000):
if not i in screennum_list:
if 0 != check_port(hostname, 5900+i):
logger.info("host:%s, port:%s is not used.", hostname, 5900+i)
break
else:
logger.info("host:%s, port:%s is used.", hostname, 5900+i)
screennum = i
return (err,{"status":"new","screennum":screennum,"user":user,"zonename":zonename,"screenum_list":screennum_list})
def save_info(self, user, screennum, geometry, zonename, netname, filein, fileout, internet, transfer):
'''
insert a active container record.
Args:
user:username
screennum: the screen number
geometry:geometry
zonename:zone name
netname:netname,such as br0
filein:if import
fileout:if export
internet:if access external internet
transfer:transfer between zones
Return:
False:database error
True:success
Raise:
'''
now = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
sql = "insert into tab_container_runtime(user,zonename,netname,start_time,geometry,screennum,filein,fileout,internet,transfer) values('%s','%s','%s','%s','%s',%d,%d,%d,%d,%s)" % (user,zonename,netname,now,geometry,screennum,filein,fileout,internet,transfer)
return self._db.execute(sql)
def update_info(self, user, screennum, geometry, zonename, netname, filein, fileout, internet, transfer, update_time=True):
'''
update a active container record.
Args:
user:username
screennum: the screen number
geometry:geometry
zonename:zone name
netname:netname,such as br0
filein:if import
fileout:if export
internet:if access external internet
transfer:transfer between zones
Return:
False:database error
True:success
Raise:
'''
now = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
if True == update_time:
sql = "update tab_container_runtime set user='%s', zonename='%s', netname='%s', start_time='%s', geometry='%s', screennum=%s, filein=%s, fileout=%s, internet=%s , transfer=%s where user='%s' and zonename='%s'" % (user,zonename,netname,now,geometry,screennum,filein,fileout,internet,transfer,user,zonename)
else:
sql = "update tab_container_runtime set user='%s', zonename='%s', netname='%s', geometry='%s', screennum=%s, filein=%s, fileout=%s, internet=%s, transfer=%s where user='%s' and zonename='%s'" % (user,zonename,netname,geometry,screennum,filein,fileout,internet,transfer,user,zonename)
return self._db.execute(sql)
def delete_all(self):
'''
delete all record.
Args:
Return:
False:database error
True:success
Raise:
'''
sql = "delete from tab_container_runtime"
return self._db.execute(sql)
def is_force_offline_for_user_zone(self, user, zonename):
'''
get the force_offline for the user and zone.
Args:
user:username
zonename:zonename
Return:
(False,None):database error
(True,()):the empty value
(True,0/1):the force offline of the user and zone.
Raise:
'''
sql = "select force_offline from tab_container_runtime where user='%s' and zonename='%s'" % (user, zonename)
err,result = self._db.query(sql)
if False == err:
return (err,result)
force_offline = ()
for row in result:
for r in row:
force_offline = r
return (err,force_offline)
def get_screennum_list(self):
'''
        get all (screennum, user, zonename) rows currently recorded.
        Args:
        Return:
            (False,None):database error
            (True,[(1,'user1','zone1'),...]): the normal case
Raise:
'''
sql = "select screennum,user,zonename from tab_container_runtime"
err,result = self._db.query(sql)
if False == err:
return (err,result)
screennum_list = []
for row in result:
screennum_list.append(row)
return (True,screennum_list)
if __name__ == '__main__':
ac = tab_container_runtime('192.168.1.130','sboxweb','Sbox123456xZ','sbox_db')
    # get_screennum() takes (hostname, user, zonename); the database host is
    # reused here as the container host purely for this smoke test.
    print ac.get_screennum('192.168.1.130', 'user2', 'zone1')
    print ac.get_screennum('192.168.1.130', 'jmzhang', 'zone1')
print ac.save_info('qqli',3,'1920x1080','zone1','br0',0,0,0,'192.168.1.155')
ac.get_info_for_user_fileinout('jmzhang')
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the reconfigure command."""
import os
import re
from datetime import datetime
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
from broker.grntest import VerifyGrnsMixin
from broker.notificationtest import VerifyNotificationsMixin
class TestReconfigure(VerifyGrnsMixin, VerifyNotificationsMixin,
TestBrokerCommand):
linux_version_prev = None
linux_version_curr = None
# Note that some tests for reconfigure --list appear in
# test_make_aquilon.py.
@classmethod
def setUpClass(cls):
super(TestReconfigure, cls).setUpClass()
cls.linux_version_prev = cls.config.get("unittest",
"linux_version_prev")
cls.linux_version_curr = cls.config.get("unittest",
"linux_version_curr")
def test_1000_edit_machine_plenary(self):
# "aq reconfigure" should refresh the machine plenary. We verify that by
# intentionally breaking it first.
path = self.plenary_name("machine", "americas", "ut", "ut9",
"ut9s03p45")
with open(path, "a") as fp:
fp.write('\n"broken" = "template";\n')
command = ["cat", "--machine=ut9s03p45"]
out = self.commandtest(command)
self.matchoutput(out, '"broken" = "template";', command)
def test_1001_reconfigure_aquilon95(self):
command = ["reconfigure", "--hostname=aquilon95.aqd-unittest.ms.com"]
self.successtest(command)
def test_1002_verify_machine_plenary(self):
command = ["cat", "--machine=ut9s03p45"]
out = self.commandtest(command)
self.matchclean(out, "broken", command)
self.matchoutput(out, '"sysloc/room" = "utroom2";', command)
self.matchoutput(out, '"sysloc/bunker" = "bucket2.ut";', command)
self.matchoutput(out, '"sysloc/building" = "ut";', command)
self.matchoutput(out, '"sysloc/city" = "ny";', command)
self.matchoutput(out, '"sysloc/continent" = "na";', command)
self.matchoutput(out, '"sysloc/country" = "us";', command)
def test_1010_reconfigurelist_grn_pre(self):
hosts = ["aquilon95.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest", command)
def test_1011_list_grn(self):
hosts = ["aquilon95.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("grnlist", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--grn=grn:/ms/ei/aquilon/aqd"]
self.successtest(command)
def test_1015_reconfigurelist_grn_post(self):
hosts = ["aquilon95.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
def test_1020_reconfigurelist_cleargrn_pre(self):
hosts = ["aquilon95.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
def test_1021_reconfigurelist_cleargrn(self):
hosts = ["aquilon95.aqd-unittest.ms.com"]
scratchfile = self.writescratch("grnlist", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile, "--cleargrn"]
self.statustest(command)
def test_1025_reconfigurelist_cleargrn_post(self):
hosts = ["aquilon95.aqd-unittest.ms.com"]
for h in hosts:
command = "show host --hostname %s" % h
out = self.commandtest(command.split(" "))
self.searchclean(out, "^ Owned by GRN", command)
def test_1030_reconfigure_cleargrn(self):
command = "show host --hostname aquilon91.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
command = ["reconfigure", "--hostname", "aquilon91.aqd-unittest.ms.com",
"--cleargrn"]
out = self.successtest(command)
command = "show host --hostname aquilon91.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.searchclean(out, "^ Owned by GRN", command)
def test_1040_reconfigure_membersof_cluster(self):
# This will exercise the cluster-aligned services code,
# which does not kick in at 'make' time because the hosts
# have not been bound to clusters yet.
command = ["reconfigure", "--membersof", "utecl1"]
out = self.statustest(command)
self.matchoutput(out, "/3 template(s) being processed",
command)
def test_1040_reconfigure_membersof_metacluster(self):
command = ["reconfigure", "--membersof", "utmc1"]
out = self.statustest(command)
self.matchoutput(out, "/5 template(s) being processed",
command)
def test_1050_cat_unittest02_pre(self):
command = "cat --hostname unittest02.one-nyp.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"system/build" = "build";', command)
self.matchoutput(out, '"system/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/unittest"], command)
# The rebind test has changed the service bindings for afs,
# it should now be set to q.ln.ms.com. The reconfigure will
# force it *back* to using a correct service map entry, in
# this case q.ny.ms.com.
def test_1051_reconfigure_unittest02(self):
basetime = datetime.now()
command = ["reconfigure", "--hostname", "unittest02.one-nyp.ms.com",
"--buildstatus", "ready", "--grn", "grn:/ms/ei/aquilon/aqd",
"--comments", "New host comments"]
err = self.statustest(command)
self.matchoutput(err,
"unittest02.one-nyp.ms.com adding binding for "
"service instance afs/q.ny.ms.com",
command)
self.matchoutput(err,
"unittest02.one-nyp.ms.com removing binding for "
"service instance afs/q.ln.ms.com",
command)
self.matchoutput(err, "Index rebuild and notifications will happen in "
"the background.", command)
self.wait_notification(basetime, 1)
def test_1055_show_unittest02(self):
command = "show host --hostname unittest02.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Build Status: ready", command)
self.matchoutput(out, "Advertise Status: True", command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
self.matchoutput(out, "Host Comments: New host comments", command)
def test_1055_cat_unittest02_data(self):
command = "cat --hostname unittest02.one-nyp.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"structure template hostdata/unittest02.one-nyp.ms.com;",
command)
self.matchoutput(out,
'"hardware" = create("machine/americas/ut/ut3/ut3c5n10");',
command)
self.searchoutput(out,
r'"system/network/interfaces/eth0" = nlist\(\s*'
r'"bootproto", "static",\s*'
r'"broadcast", "%s",\s*'
r'"fqdn", "unittest02.one-nyp.ms.com",\s*'
r'"gateway", "%s",\s*'
r'"ip", "%s",\s*'
r'"netmask", "%s",\s*'
r'"network_environment", "internal",\s*'
r'"network_type", "unknown",\s*'
r'"route", list\(\s*'
r'nlist\(\s*'
r'"address", "250.250.0.0",\s*'
r'"gateway", "%s",\s*'
r'"netmask", "255.255.0.0"\s*\)\s*'
r'\)\s*\)' %
(self.net["unknown0"].broadcast,
self.net["unknown0"].gateway,
self.net["unknown0"].usable[0],
self.net["unknown0"].netmask,
self.net["unknown0"].gateway),
command)
self.matchoutput(out, '"system/advertise_status" = true;', command)
self.matchoutput(out, '"system/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/aqd"], command)
command = "cat --hostname unittest02.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"object template unittest02.one-nyp.ms.com;",
command)
self.searchoutput(out,
r'variable LOADPATH = list\(\s*"aquilon"\s*\);',
command)
self.matchoutput(out,
"""include { "archetype/base" };""",
command)
self.matchoutput(out,
"""\"/\" = create(\"hostdata/unittest02.one-nyp.ms.com\"""",
command)
self.matchoutput(out,
'include { "os/linux/%s/config" };' %
self.linux_version_prev,
command)
self.matchoutput(out,
"""include { "service/afs/q.ny.ms.com/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/bootserver/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/dns/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/ntp/pa.ny.na/client/config" };""",
command)
self.matchoutput(out,
"""include { "personality/compileserver/config" };""",
command)
self.matchoutput(out,
"""include { "archetype/final" };""",
command)
def test_1056_clear_comments(self):
command = ["reconfigure", "--hostname", "unittest02.one-nyp.ms.com",
"--comments", ""]
self.statustest(command)
def test_1057_verify_comments(self):
command = ["show_host", "--hostname", "unittest02.one-nyp.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Host Comments", command)
# These settings have not changed - the command should still succeed.
    def test_1060_reconfigure_unittest00(self):
basetime = datetime.now()
command = ["reconfigure", "--hostname", "unittest00.one-nyp.ms.com"]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
self.matchoutput(err, "Index rebuild and notifications will happen in "
"the background.", command)
self.wait_notification(basetime, 1)
def test_1065_cat_unittest00_data(self):
command = "cat --hostname unittest00.one-nyp.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"structure template hostdata/unittest00.one-nyp.ms.com;",
command)
self.matchoutput(out,
'"hardware" = create("machine/americas/ut/ut3/ut3c1n3");',
command)
self.searchoutput(out,
r'"system/network/interfaces/eth0" = nlist\(\s*'
r'"bootproto", "static",\s*'
r'"broadcast", "%s",\s*'
r'"fqdn", "unittest00.one-nyp.ms.com",\s*'
r'"gateway", "%s",\s*'
r'"ip", "%s",\s*'
r'"netmask", "%s",\s*'
r'"network_environment", "internal",\s*'
r'"network_type", "unknown",\s*'
r'"route", list\(\s*'
r'nlist\(\s*'
r'"address", "250.250.0.0",\s*'
r'"gateway", "%s",\s*'
r'"netmask", "255.255.0.0"\s*\)\s*'
r'\)\s*\)' %
(self.net["unknown0"].broadcast,
self.net["unknown0"].gateway,
self.net["unknown0"].usable[2],
self.net["unknown0"].netmask,
self.net["unknown0"].gateway),
command)
self.searchoutput(out,
r'"system/network/interfaces/eth1" = nlist\(\s*'
r'"bootproto", "static",\s*'
r'"broadcast", "%s",\s*'
r'"fqdn", "unittest00-e1.one-nyp.ms.com",\s*'
r'"gateway", "%s",\s*'
r'"ip", "%s",\s*'
r'"netmask", "%s",\s*'
r'"network_environment", "internal",\s*'
r'"network_type", "unknown",\s*'
r'"route", list\(\s*'
r'nlist\(\s*'
r'"address", "250.250.0.0",\s*'
r'"gateway", "%s",\s*'
r'"netmask", "255.255.0.0"\s*\)\s*'
r'\)\s*\)' %
(self.net["unknown0"].broadcast,
self.net["unknown0"].gateway,
self.net["unknown0"].usable[3],
self.net["unknown0"].netmask,
self.net["unknown0"].gateway),
command)
self.matchoutput(out, '"system/advertise_status" = false;', command)
def test_1065_cat_unittest00(self):
command = "cat --hostname unittest00.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"""include { "archetype/base" };""",
command)
self.matchoutput(out,
"""\"/\" = create(\"hostdata/unittest00.one-nyp.ms.com\"""",
command)
self.matchoutput(out,
'include { "os/linux/%s/config" };' %
self.linux_version_prev,
command)
self.matchoutput(out,
"""include { "service/afs/q.ny.ms.com/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/bootserver/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/dns/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/ntp/pa.ny.na/client/config" };""",
command)
self.matchoutput(out,
"""include { "personality/compileserver/config" };""",
command)
self.matchoutput(out,
"""include { "archetype/final" };""",
command)
def test_1070_reconfigure_windows_status(self):
# Not a compileable archetype, so there should be no messages from the
# compiler
command = ["reconfigure",
"--hostname", "unittest01.one-nyp.ms.com",
"--buildstatus", "ready"]
out = self.statustest(command)
self.matchoutput(out, "No object profiles: nothing to do.", command)
self.assertFalse(os.path.exists(
self.build_profile_name("unittest01.one-nyp.ms.com",
domain="ut-prod")))
def test_1071_reconfigure_windows_personality(self):
# Not a compileable archetype, so there should be no messages from the
# compiler
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--personality", "desktop"]
out = self.statustest(command)
self.matchoutput(out, "No object profiles: nothing to do.", command)
def test_1072_reconfigure_windows_os(self):
# Not a compileable archetype, so there should be no messages from the
# compiler
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--osversion", "nt61e"]
out = self.statustest(command)
self.matchoutput(out, "No object profiles: nothing to do.", command)
def test_1073_make_compileable(self):
# We need a domain which is guaranteed to be compileable...
self.statustest(["manage", "--hostname", "unittest01.one-nyp.ms.com",
"--domain", "unittest", "--force"])
self.statustest(["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--archetype", "aurora", "--personality", "generic",
"--osname", "linux", "--osversion", self.linux_version_prev])
self.assertTrue(os.path.exists(
self.build_profile_name("unittest01.one-nyp.ms.com",
domain="unittest")))
def test_1074_make_noncompileable(self):
self.statustest(["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--archetype", "windows", "--personality", "desktop",
"--osname", "windows", "--osversion", "nt61e"])
self.assertFalse(os.path.exists(
self.build_profile_name("unittest01.one-nyp.ms.com",
domain="unittest")))
self.statustest(["manage", "--hostname", "unittest01.one-nyp.ms.com",
"--domain", "ut-prod", "--force"])
def test_1075_show_unittest01(self):
command = "show host --hostname unittest01.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Primary Name: unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "Archetype: windows", command)
self.matchoutput(out, "Personality: desktop", command)
self.matchoutput(out, "Build Status: ready", command)
self.matchoutput(out, "Operating System: windows", command)
self.matchoutput(out, "Version: nt61e", command)
self.matchoutput(out, "Advertise Status: True", command)
self.matchoutput(out, "Domain: ut-prod", command)
def test_1080_reconfigure_os(self):
command = ["reconfigure",
"--hostname", "aquilon61.aqd-unittest.ms.com",
"--osname", "linux", "--osversion", self.linux_version_curr]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
def test_1085_reconfigure_os_split_args(self):
command = ["reconfigure",
"--hostname", "unittest17.aqd-unittest.ms.com",
"--osname", "linux", "--osversion", self.linux_version_curr]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
def test_1090_keepbindings(self):
command = ["reconfigure", "--keepbindings",
"--hostname", "aquilon86.aqd-unittest.ms.com",
"--personality", "inventory"]
err = self.statustest(command)
self.matchoutput(err, "1/1 template", command)
self.matchclean(err, "removing binding", command)
self.matchclean(err, "adding binding", command)
def test_1100_remove_bindings(self):
command = ["reconfigure",
"--hostname", "aquilon87.aqd-unittest.ms.com",
"--personality", "inventory"]
err = self.statustest(command)
self.matchoutput(err, "removing binding for service instance chooser1", command)
self.matchoutput(err, "removing binding for service instance chooser2", command)
self.matchoutput(err, "removing binding for service instance chooser3", command)
self.matchclean(err, "adding binding", command)
def test_1105_verify_services(self):
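        # The previous test removed the chooser bindings, so these searches
        # should return nothing for this host.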
for service in ["chooser1", "chooser2", "chooser3"]:
command = ["search_host", "--service", service,
"--hostname", "aquilon87.aqd-unittest.ms.com"]
self.noouttest(command)
def test_1105_verify_plenary_data(self):
command = "cat --hostname aquilon87.aqd-unittest.ms.com --data"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"structure template hostdata/aquilon87.aqd-unittest.ms.com;",
command)
self.matchoutput(out,
'"hardware" = create("machine/americas/ut/ut9/ut9s03p37");',
command)
def test_1105_verify_plenary(self):
osversion = self.config.get("archetype_aquilon", "default_osversion")
command = "cat --hostname aquilon87.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchclean(out, "chooser1", command)
self.matchclean(out, "chooser2", command)
self.matchclean(out, "chooser3", command)
self.matchoutput(out,
"""include { "archetype/base" };""",
command)
self.matchoutput(out,
"""\"/\" = create(\"hostdata/aquilon87.aqd-unittest.ms.com\"""",
command)
self.matchoutput(out,
'include { "os/linux/%s/config" };' % osversion,
command)
self.matchoutput(out,
"""include { "service/aqd/ny-prod/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/ntp/pa.ny.na/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/bootserver/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/afs/q.ny.ms.com/client/config" };""",
command)
self.matchoutput(out,
"""include { "service/dns/unittest/client/config" };""",
command)
self.matchoutput(out,
"""include { "personality/inventory/config" };""",
command)
self.matchoutput(out,
"""include { "archetype/final" };""",
command)
def test_1110_reconfigure_debug(self):
command = ["reconfigure", "--debug",
"--hostname", "aquilon88.aqd-unittest.ms.com",
"--personality", "inventory"]
_, err = self.successtest(command)
self.matchoutput(err, "Creating service chooser", command)
def test_1120_reconfigure_aligned(self):
for i in range(1, 5):
command = ["reconfigure",
"--hostname", "evh%s.aqd-unittest.ms.com" % i]
self.statustest(command)
def test_1125_verify_aligned(self):
# Check that utecl1 is now aligned to a service and that
# all of its members are aligned to the same service.
# evh[234] should be bound to utecl1
command = "show esx cluster --cluster utecl1"
out = self.commandtest(command.split(" "))
m = re.search(r'Member Alignment: Service esx_management_server '
r'Instance (\S+)', out)
self.assertTrue(m, "Aligned instance not found in output:\n%s" % out)
instance = m.group(1)
# A better test might be to search for all hosts in the cluster
# and make sure they're all in this list. That search command
# does not exist yet, though.
command = ["search_host", "--service=esx_management_server",
"--instance=%s" % instance]
out = self.commandtest(command)
self.matchoutput(out, "evh2.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh3.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh4.aqd-unittest.ms.com", command)
def test_1130_list_camelcase(self):
hosts = ["Aquilon91.Aqd-Unittest.ms.com"]
scratchfile = self.writescratch("camelcase", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile]
self.successtest(command)
def test_1140_list_no_osversion(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingosversion", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile, "--osname=linux"]
self.successtest(command)
def test_1150_list_no_osname(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingosname", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--osversion=%s" % self.linux_version_prev]
self.successtest(command)
def test_1160_list_no_os_archetype(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingosarchetype", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--osname=linux", "--osversion=%s" % self.linux_version_prev]
self.successtest(command)
def test_1170_os_required_service(self):
command = ["reconfigure", "--hostname", "aquilon69.aqd-unittest.ms.com",
"--osname", "solaris", "--osversion", "11.1-x86_64"]
out = self.statustest(command)
self.matchoutput(out,
"aquilon69.aqd-unittest.ms.com adding binding for "
"service instance ips/northamerica",
command)
def test_1175_cat_aquilon69(self):
command = ["cat", "--hostname", "aquilon69.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
'include { "service/ips/northamerica/client/config" };',
command)
def test_2000_windows_wrong_os(self):
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--osname", "linux", "--osversion", self.linux_version_prev]
err = self.notfoundtest(command)
self.matchoutput(err,
"Operating System linux, version %s, archetype "
"windows not found." % self.linux_version_prev,
command)
def test_2000_os_archetype_mismatch(self):
# Trying to change archetype, but there's no suitable OS
command = ["reconfigure", "--hostname", "unittest01.one-nyp.ms.com",
"--archetype", "aquilon", "--personality", "unixeng-test"]
err = self.notfoundtest(command)
self.matchoutput(err,
"Operating System windows, version nt61e, "
"archetype aquilon not found.",
command)
def test_2000_os_archetype_mismatch_list(self):
hosts = ["unittest01.one-nyp.ms.com"]
scratchfile = self.writescratch("hostlist", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype", "aquilon", "--personality=unixeng-test"]
out = self.badrequesttest(command)
self.matchoutput(out,
"unittest01.one-nyp.ms.com: Operating System "
"windows, version nt61e, archetype aquilon not found.",
command)
def test_2000_missing_personality(self):
command = ["reconfigure",
"--hostname", "aquilon62.aqd-unittest.ms.com",
"--archetype", "windows"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality inventory, archetype windows not found.",
command)
def test_2000_personality_not_allowed(self):
command = ["reconfigure", "--hostname=evh2.aqd-unittest.ms.com",
"--personality=esx_server"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality vmhost/esx_server is not allowed by "
"ESX Cluster utecl1. Specify one of: "
"vmhost/vulcan-10g-server-prod.",
command)
def test_2000_personality_not_allowed_list(self):
hosts = ["evh2.aqd-unittest.ms.com"]
scratchfile = self.writescratch("persnotallowed", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype=vmhost", "--personality=esx_server"]
out = self.badrequesttest(command)
self.matchoutput(out,
"evh2.aqd-unittest.ms.com: Personality "
"vmhost/esx_server is not allowed by ESX Cluster "
"utecl1. Specify one of: vmhost/vulcan-10g-server-prod.",
command)
def test_2000_hostlist_multiple_domains(self):
hosts = ["unittest02.one-nyp.ms.com",
"server1.aqd-unittest.ms.com",
"server2.aqd-unittest.ms.com",
"evh1.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("diffdomains", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile]
out = self.badrequesttest(command)
self.matchoutput(out, "All objects must be in the same domain or sandbox:", command)
self.matchoutput(out, "3 objects in sandbox %s/utsandbox" % self.user, command)
self.matchoutput(out, "2 objects in domain unittest", command)
def test_2000_missing_required_service(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingmap", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype", "aquilon",
"--personality", "badpersonality2"]
out = self.badrequesttest(command)
self.matchoutput(out, "Could not find a relevant service map", command)
self.matchoutput(out, "The following hosts failed service binding:",
command)
self.matchoutput(out, "aquilon91.aqd-unittest.ms.com", command)
def test_2000_list_personality_no_archetype(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingarchetype", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality=generic"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality generic, archetype aquilon not found.",
command)
def test_2000_missing_personality_stage(self):
hosts = ["aquilon91.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingpersst", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality", "nostage",
"--personality_stage", "previous"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality aquilon/nostage does not have stage "
"previous.",
command)
def test_2000_empty_hostlist(self):
hosts = ["#host", "#does", "", " #not ", "#exist"]
scratchfile = self.writescratch("empty", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile]
out = self.badrequesttest(command)
self.matchoutput(out, "Empty list.", command)
def test_2000_bad_hosts_in_list(self):
hosts = ["host-does-not-exist.aqd-unittest.ms.com",
"another-host-does-not-exist.aqd-unittest.ms.com",
"aquilon91.aqd-unittest.ms.com",
"host.domain-does-not-exist.ms.com"]
scratchfile = self.writescratch("missinghost", "\n".join(hosts))
# Use the deprecated option name here
command = ["reconfigure", "--hostlist", scratchfile]
out = self.badrequesttest(command)
self.matchoutput(out, "The --hostlist option is deprecated.", command)
self.matchoutput(out, "Invalid hosts in list:", command)
self.matchoutput(out,
"Host host-does-not-exist.aqd-unittest.ms.com not found.",
command)
self.matchoutput(out,
"Host another-host-does-not-exist.aqd-unittest.ms.com not found.",
command)
self.matchoutput(out,
"Host host.domain-does-not-exist.ms.com not found.",
command)
self.matchoutput(out,
"DNS Domain domain-does-not-exist.ms.com not found.",
command)
self.matchclean(out, "aquilon91.aqd-unittest.ms.com:", command)
def test_2000_over_list_limit(self):
hostlimit = self.config.getint("broker", "reconfigure_max_list_size")
hosts = []
for i in range(1, 20):
hosts.append("thishostdoesnotexist%d.aqd-unittest.ms.com" % i)
scratchfile = self.writescratch("reconfigurelistlimit", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile, "--personality=generic"]
out = self.badrequesttest(command)
self.matchoutput(out, "The number of hosts in list {0:d} can not be more "
"than {1:d}".format(len(hosts), hostlimit), command)
def test_2000_cluster_req(self):
command = ["reconfigure", "--hostname", "aquilon62.aqd-unittest.ms.com",
"--personality", "clustered"]
out = self.badrequesttest(command)
self.matchoutput(out, "Personality aquilon/clustered requires cluster "
"membership", command)
def test_2000_cluster_req_list(self):
hosts = ["aquilon62.aqd-unittest.ms.com"]
scratchfile = self.writescratch("cluster_req", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality", "clustered"]
out = self.badrequesttest(command)
self.matchoutput(out, "Personality aquilon/clustered requires cluster "
"membership", command)
def test_2000_promote_mixed_personality(self):
hosts = ["unittest00.one-nyp.ms.com",
"unittest12.aqd-unittest.ms.com"]
scratchfile = self.writescratch("promote_mixed_personality",
"\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--personality_stage", "next"]
out = self.badrequesttest(command)
self.matchoutput(out, "Promoting hosts in multiple personalities is "
"not supported.", command)
def test_3000_missing_required_params(self):
command = ["reconfigure",
"--hostname", "aquilon62.aqd-unittest.ms.com",
"--personality", "badpersonality"]
out = self.badrequesttest(command)
self.matchoutput(out, "cannot locate template named 'personality/badpersonality/espinfo'", command)
buildfile = self.build_profile_name("aquilon62.aqd-unittest.ms.com",
domain="utsandbox")
results = self.grepcommand(["-l", "badpersonality", buildfile])
self.assertFalse(results, "Found bad personality data in plenary "
"template for aquilon62.aqd-unittest.ms.com")
def test_3010_missing_personality_template_hostlist(self):
hosts = ["aquilon93.aqd-unittest.ms.com"]
scratchfile = self.writescratch("missingtemplate", "\n".join(hosts))
command = ["reconfigure", "--list", scratchfile,
"--archetype", "aquilon", "--personality", "badpersonality"]
out = self.badrequesttest(command)
self.matchoutput(out, "cannot locate template named 'personality/badpersonality/espinfo'", command)
self.assertFalse(os.path.exists(
self.build_profile_name("aquilon93.aqd-unittest.ms.com",
domain="utsandbox")))
servicedir = os.path.join(self.config.get("broker", "plenarydir"),
"servicedata")
results = self.grepcommand(["-rl", "aquilon93.aqd-unittest.ms.com",
servicedir])
self.assertFalse(results, "Found service plenary data that includes "
"aquilon93.aqd-unittest.ms.com")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestReconfigure)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs IO somewhat specific to TensorFlow.
This includes reading/writing `tf.Example`s to/from TF record files and opening
files via `tf.gfile`.
"""
import collections
import gzip
from absl import logging
import tensorflow.compat.v1 as tf
import data
import preproc
import tokenization
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
# This needs to be kept in sync with `FeatureWriter`.
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
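    # Gold answer positions and answer types are only present in training data.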
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["answer_types"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
class FeatureWriter(object):
  """Writes InputFeatures to a TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
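    # Features are serialized and written one record at a time as
    # process_feature is called.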
self._writer = tf.python_io.TFRecordWriter(filename)
  def process_feature(self, feature):
    """Writes an InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
# This needs to be kept in sync with `input_fn_builder`.
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["language_id"] = create_int_feature([feature.language_id])
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
features["answer_types"] = create_int_feature([feature.answer_type])
else:
features["wp_start_offset"] = create_int_feature(feature.wp_start_offset)
features["wp_end_offset"] = create_int_feature(feature.wp_end_offset)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
class CreateTFExampleFn(object):
"""Functor for creating TyDi tf.Examples to be written to a TFRecord file."""
def __init__(self, is_training, max_question_length, max_seq_length,
doc_stride, include_unknowns, vocab_file):
self.is_training = is_training
self.tokenizer = tokenization.TyDiTokenizer(vocab_file=vocab_file)
self.max_question_length = max_question_length
self.max_seq_length = max_seq_length
self.doc_stride = doc_stride
self.include_unknowns = include_unknowns
self.vocab = self.tokenizer.vocab # used by callers
  def process(self, entry, errors, debug_info=None):
    """Converts a single TyDi entry into tf.train.Examples.
Args:
      entry: A "TyDi entry" dict, as returned by `create_entry_from_json`.
errors: A list that this function appends to if errors are created. A
non-empty list indicates problems.
debug_info: A dict of information that may be useful during debugging.
These elements should be used for logging and debugging only. For
example, we log how the text was tokenized into WordPieces.
Yields:
`tf.train.Example` with the features needed for training or inference
(depending on how `is_training` was set in the constructor).
"""
if not debug_info:
debug_info = {}
tydi_example = data.to_tydi_example(entry, self.is_training)
debug_info["tydi_example"] = tydi_example
input_features = preproc.convert_single_example(
tydi_example,
tokenizer=self.tokenizer,
is_training=self.is_training,
max_question_length=self.max_question_length,
max_seq_length=self.max_seq_length,
doc_stride=self.doc_stride,
include_unknowns=self.include_unknowns,
errors=errors,
debug_info=debug_info)
for input_feature in input_features:
input_feature.example_index = int(entry["id"])
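      # The unique id is formed by adding the doc span index to the example
      # index (an integer sum, not a concatenation).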
input_feature.unique_id = (
input_feature.example_index + input_feature.doc_span_index)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([input_feature.unique_id])
features["example_index"] = create_int_feature(
[input_feature.example_index])
features["input_ids"] = create_int_feature(input_feature.input_ids)
features["input_mask"] = create_int_feature(input_feature.input_mask)
features["segment_ids"] = create_int_feature(input_feature.segment_ids)
features["language_id"] = create_int_feature([input_feature.language_id])
if self.is_training:
features["start_positions"] = create_int_feature(
[input_feature.start_position])
features["end_positions"] = create_int_feature(
[input_feature.end_position])
features["answer_types"] = create_int_feature(
[input_feature.answer_type])
else:
features["wp_start_offset"] = create_int_feature(
input_feature.wp_start_offset)
features["wp_end_offset"] = create_int_feature(
input_feature.wp_end_offset)
yield tf.train.Example(features=tf.train.Features(feature=features))
def gopen(path):
"""Opens a file object given a (possibly gzipped) `path`."""
logging.info("*** Loading from: %s ***", path)
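  # Note: ".gz" is matched anywhere in the path, not only as a file extension.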
if ".gz" in path:
return gzip.GzipFile(fileobj=tf.gfile.Open(path, "rb")) # pytype: disable=wrong-arg-types
else:
return tf.gfile.Open(path, "r")
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from datetime import datetime, timedelta
from functools import partial
from hashlib import sha256
import hmac
import awssig.sigv4 as sigv4
from os import walk
from os.path import basename, dirname, splitext
from re import sub
from six import binary_type, iteritems, string_types
from six.moves import cStringIO, range
from string import ascii_letters, digits
from sys import stderr
from unittest import skip, TestCase
region = "us-east-1"
service = "service"
access_key = "AKIDEXAMPLE"
secret_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
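# Every test request uses the same credential pair, so the key mapping below
# ignores its arguments and always returns the shared secret key.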
key_mapping = lambda access_key, session_token=None: secret_key
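# Names of request mutations applied by run_sigv4_case; each one is expected
# to make signature verification fail.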
remove_auth = "remove_auth"
wrong_authtype = "wrong_authtype"
clobber_sig_equals = "clobber_sig_equals"
delete_credential = "delete_credential"
delete_signature = "delete_signature"
dup_signature = "dup_signature"
delete_date = "delete_date"
# Allowed characters in quoted-printable strings
allowed_qp = ascii_letters + digits + "-_.~"
class AWSSigV4TestCaseRunner(TestCase):
basedir = dirname(__file__) + "/aws-sig-v4-test-suite/"
tweaks = (
"", remove_auth, wrong_authtype, clobber_sig_equals, delete_credential,
delete_signature, dup_signature, delete_date,
)
def run_sigv4_case(self, filebase, tweak=""):
filebase = self.basedir + filebase
with open(filebase + ".sreq", "rb") as fd:
method_line = fd.readline().strip()
if isinstance(method_line, binary_type):
method_line = method_line.decode("utf-8")
headers = {}
last_header = None
while True:
line = fd.readline()
if line in (b"\n", b"",):
break
line = line.decode("utf-8")
if line.startswith(" ") or line.startswith("\t"):
assert last_header is not None
header = last_header
value = line.strip()
else:
try:
header, value = line.split(":", 1)
except ValueError as e:
raise ValueError("Invalid header line: %s" % line)
key = header.lower()
value = value.strip()
last_header = header
if key == "authorization":
if tweak == remove_auth:
continue
elif tweak == wrong_authtype:
value = "XX" + value
elif tweak == clobber_sig_equals:
value = value.replace("Signature=", "Signature")
elif tweak == delete_credential:
value = value.replace("Credential=", "Foo=")
elif tweak == delete_signature:
value = value.replace("Signature=", "Foo=")
elif tweak == dup_signature:
value += ", Signature=foo"
elif key in ("date", "x-amz-date",):
if tweak == delete_date:
continue
if key in headers:
headers[key].append(value)
else:
headers[key] = [value]
body = fd.read()
first_space = method_line.find(" ")
last_space = method_line.rfind(" ")
method = method_line[:first_space]
uri_path = method_line[first_space + 1:last_space]
qpos = uri_path.find("?")
if qpos == -1:
query_string = ""
else:
query_string = uri_path[qpos+1:]
uri_path = uri_path[:qpos]
with open(filebase + ".creq", "r") as fd:
canonical_request = fd.read().replace("\r", "")
with open(filebase + ".sts", "r") as fd:
string_to_sign = fd.read().replace("\r", "")
v = sigv4.AWSSigV4Verifier(
request_method=method, uri_path=uri_path, query_string=query_string,
headers=headers, body=body, region=region, service=service,
key_mapping=key_mapping, timestamp_mismatch=None)
if tweak:
try:
v.verify()
self.fail("Expected verify() to throw an InvalidSignature "
"error for tweak %s" % tweak)
except sigv4.InvalidSignatureError:
pass
else:
self.assertEqual(
v.canonical_request, canonical_request,
"Canonical request mismatch in %s\nExpected: %r\nReceived: %r" %
(filebase, canonical_request, v.canonical_request))
self.assertEqual(
v.string_to_sign, string_to_sign,
"String to sign mismatch in %s\nExpected: %r\nReceived: %r" %
(filebase, string_to_sign, v.string_to_sign))
v.verify()
def test_get_vanilla_utf8_query(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-utf8-query/get-vanilla-utf8-query", tweak)
def test_get_vanilla_query_order_key_case(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-query-order-key-case/get-vanilla-query-order-key-case", tweak)
def test_get_header_value_trim(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-header-value-trim/get-header-value-trim", tweak)
def test_get_vanilla_query_unreserved(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-query-unreserved/get-vanilla-query-unreserved", tweak)
def test_get_vanilla_query_order_key(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-query-order-key/get-vanilla-query-order-key", tweak)
def test_get_vanilla(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla/get-vanilla", tweak)
def test_post_sts_token_post_sts_header_after(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-sts-token/post-sts-header-after/post-sts-header-after", tweak)
def test_post_sts_token_post_sts_header_before(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-sts-token/post-sts-header-before/post-sts-header-before", tweak)
def test_get_unreserved(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-unreserved/get-unreserved", tweak)
def test_get_header_value_multiline(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-header-value-multiline/get-header-value-multiline", tweak)
def test_post_x_www_form_urlencoded_parameters(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-x-www-form-urlencoded-parameters/post-x-www-form-urlencoded-parameters", tweak)
def test_post_vanilla(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-vanilla/post-vanilla", tweak)
@skip("Testcase from AWS appears to be broken")
def test_post_x_www_form_urlencoded(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-x-www-form-urlencoded/post-x-www-form-urlencoded", tweak)
def test_post_header_key_case(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-header-key-case/post-header-key-case", tweak)
def test_get_vanilla_empty_query_key(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-empty-query-key/get-vanilla-empty-query-key", tweak)
def test_post_header_key_sort(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-header-key-sort/post-header-key-sort", tweak)
def test_post_vanilla_empty_query_value(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-vanilla-empty-query-value/post-vanilla-empty-query-value", tweak)
def test_get_utf8(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-utf8/get-utf8", tweak)
def test_get_vanilla_query(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-query/get-vanilla-query", tweak)
def test_get_header_value_order(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-header-value-order/get-header-value-order", tweak)
def test_post_vanilla_query(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-vanilla-query/post-vanilla-query", tweak)
def test_get_vanilla_query_order_value(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-vanilla-query-order-value/get-vanilla-query-order-value", tweak)
def test_post_header_value_case(self):
for tweak in self.tweaks:
self.run_sigv4_case("post-header-value-case/post-header-value-case", tweak)
def test_normalize_path_get_slash(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-slash/get-slash", tweak)
def test_normalize_path_get_slashes(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-slashes/get-slashes", tweak)
def test_normalize_path_get_space(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-space/get-space", tweak)
def test_normalize_path_get_relative(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-relative/get-relative", tweak)
def test_normalize_path_get_slash_pointless_dot(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-slash-pointless-dot/get-slash-pointless-dot", tweak)
def test_normalize_path_get_slash_dot_slash(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-slash-dot-slash/get-slash-dot-slash", tweak)
def test_normalize_path_get_relative_relative(self):
for tweak in self.tweaks:
self.run_sigv4_case("normalize-path/get-relative-relative/get-relative-relative", tweak)
def test_get_header_key_duplicate(self):
for tweak in self.tweaks:
self.run_sigv4_case("get-header-key-duplicate/get-header-key-duplicate", tweak)
class QuerySignatures(TestCase):
def __init__(self, *args, **kw):
TestCase.__init__(self, *args, **kw)
self.maxDiff = 1024
def runTest(self):
now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
ten_minutes_ago = (
datetime.utcnow() - timedelta(minutes=10)).strftime("%Y%m%dT%H%M%SZ")
today = datetime.utcnow().strftime("%Y%m%d")
two_days_ago = (datetime.utcnow() - timedelta(days=2)).strftime("%Y%m%d")
tests = [
{
'method': "GET",
'url': "/?foo=bar",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
},
{
'method': "GET",
'url': "/?foo=bar&&baz=yay",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
},
{
'method': "POST",
'url': "////",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
}
},
{
'method': "POST",
'url': "/",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
'quote_chars': True
},
{
'method': "GET",
'url': "/?foo=bar",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
'timestamp_mismatch': 120,
},
{
'method': "GET",
'url': "/question%3Fmark%3Furl",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
'timestamp_mismatch': 120,
'quote_chars': False
},
{
'method': "GET",
'url': "/?foo=bar%20ok",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
'timestamp_mismatch': 120,
'fix_qp': False
}
]
bad = [
{
'method': "POST",
'url': "////",
'body': b"foo=bar",
'timestamp': now,
                # Signed headers deliberately listed out of canonical (sorted) order
'signed_headers': ["host", "content-type"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
}
},
{
'method': "POST",
'url': "////",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
# Invalid credential scope format
'scope': "foo"
},
{
'method': "POST",
# Bad path encoding
'url': "/%zz",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
},
{
'method': "POST",
# Relative path
'url': "../foo",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
},
{
'method': "POST",
# Go up too far.
'url': "/a/b/../../..",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
},
{
'method': "POST",
'url': "////",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
# Incorrect region
'scope': (access_key + "/" + today + "/x-foo-bar/" + service +
"/aws4_request")
},
{
'method': "POST",
'url': "////",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
# Incorrect date
'scope': (access_key + "/" + two_days_ago + "/" + region + "/" + service +
"/aws4_request")
},
{
'method': "POST",
'url': "////",
# Invalid percent encoding
'body': b"foo=%zz",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/octet-stream"],
},
'fix_qp': False
},
{
'method': "GET",
'url': "/?foo=bar",
'body': b"",
# Old
'timestamp': ten_minutes_ago,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
'timestamp_mismatch': 120,
},
{
'method': "GET",
'url': "/?foo=bar",
'body': b"",
# Bad format
'timestamp': "20151008T999999Z",
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
},
},
]
for test in tests:
self.verify(**test)
for test in bad:
with self.assertRaises(sigv4.InvalidSignatureError):
self.verify(bad=True, **test)
return
def verify(self, method, url, body, timestamp, headers, signed_headers,
timestamp_mismatch=60, bad=False, scope=None,
quote_chars=False, fix_qp=True):
date = timestamp[:8]
credential_scope = "/".join([date, region, service, "aws4_request"])
if scope is None:
scope = access_key + "/" + credential_scope
if "?" in url:
uri, query_string = url.split("?", 1)
else:
uri = url
query_string = ""
if not fix_qp:
scope = scope.replace("/", "%2F")
normalized_uri = sub("//+", "/", uri)
query_params = [
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
"X-Amz-Credential=" + scope,
"X-Amz-Date=" + timestamp,
"X-Amz-SignedHeaders=" + ";".join(signed_headers)]
if query_string:
query_params.extend(query_string.split("&"))
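        # Percent-encode query values outside the unreserved character set so
        # the expected canonical query string matches what the verifier produces.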
def fixup_qp(qp):
result = cStringIO()
key, value = qp.split("=", 1)
for c in value:
if c in allowed_qp:
result.write(c)
else:
result.write("%%%02X" % ord(c))
return key + "=" + result.getvalue()
if fix_qp:
canonical_query_string = "&".join(
sorted(map(fixup_qp, [qp for qp in query_params if qp])))
else:
canonical_query_string = "&".join(sorted(query_params))
canonical_headers = "".join([
(header + ":" + ",".join(headers[header]) + "\n")
for header in sorted(signed_headers)])
canonical_req = (
method + "\n" +
normalized_uri + "\n" +
canonical_query_string + "\n" +
canonical_headers + "\n" +
";".join(signed_headers) + "\n" +
sha256(body).hexdigest())
string_to_sign = (
"AWS4-HMAC-SHA256\n" +
timestamp + "\n" +
credential_scope + "\n" +
sha256(canonical_req.encode("utf-8")).hexdigest())
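        # Derive the SigV4 signing key by chaining HMAC-SHA256 over the date,
        # region, service and the literal string "aws4_request".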
def sign(secret, value):
return hmac.new(secret, value.encode("utf-8"), sha256).digest()
k_date = sign(b"AWS4" + secret_key.encode("utf-8"), date)
k_region = sign(k_date, region)
k_service = sign(k_region, service)
k_signing = sign(k_service, "aws4_request")
signature = hmac.new(
k_signing, string_to_sign.encode("utf-8"), sha256).hexdigest()
query_params.append("X-Amz-Signature=" + signature)
if quote_chars:
bad_qp = []
for qp in query_params:
result = cStringIO()
for c in qp:
if c.isalpha():
result.write("%%%02X" % ord(c))
else:
result.write(c)
bad_qp.append(result.getvalue())
query_params = bad_qp
v = sigv4.AWSSigV4Verifier(
request_method=method, uri_path=uri,
query_string="&".join(query_params), headers=headers, body=body,
region=region, service=service, key_mapping=key_mapping,
timestamp_mismatch=timestamp_mismatch)
if not bad:
self.assertEqual(v.canonical_request, canonical_req)
self.assertEqual(v.string_to_sign, string_to_sign)
v.verify()
return
class QueryS3Signatures(TestCase):
def test_good_cases(self):
now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
ten_minutes_ago = (
datetime.utcnow() - timedelta(minutes=10)).strftime("%Y%m%dT%H%M%SZ")
today = datetime.utcnow().strftime("%Y%m%d")
two_days_ago = (datetime.utcnow() - timedelta(days=2)).strftime("%Y%m%d")
tests = [
{
'method': "GET",
'url': "/a/b",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
'x-amz-content-sha256': [sha256(b"").hexdigest()],
},
},
{
'method': "GET",
'url': "/a/b",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
'x-amz-content-sha256': ["UNSIGNED-PAYLOAD"],
},
},
{
'method': "GET",
'url': "/a/b",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
'x-amz-content-sha256': ["STREAMING-AWS4-HMAC-SHA256-PAYLOAD"],
},
},
{
'method': "GET",
'url': "/a/b?foo=bar&&baz=yay",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
'x-amz-content-sha256': [sha256(b"").hexdigest()],
},
},
{
'method': "POST",
'url': "////",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/x-www-form-urlencoded; charset=UTF-8"],
'x-amz-content-sha256': [sha256(b"foo=bar").hexdigest()],
}
},
{
'method': "POST",
'url': "/",
'body': b"foo=bar",
'timestamp': now,
'signed_headers': ["content-type", "host"],
'headers': {
'host': ["host.example.com"],
'content-type': ["application/x-www-form-urlencoded; charset=UTF-8"],
'x-amz-content-sha256': [sha256(b"foo=bar").hexdigest()],
},
'quote_chars': True
},
{
'method': "GET",
'url': "/?foo=bar",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.amazonaws.com"],
'x-amz-content-sha256': [sha256(b"").hexdigest()],
},
'timestamp_mismatch': 120,
},
]
for test in tests:
self.verify(**test)
return
def test_missing_content_sha256_header(self):
now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
ten_minutes_ago = (
datetime.utcnow() - timedelta(minutes=10)).strftime("%Y%m%dT%H%M%SZ")
today = datetime.utcnow().strftime("%Y%m%d")
two_days_ago = (datetime.utcnow() - timedelta(days=2)).strftime("%Y%m%d")
tests = [
{
'method': "GET",
'url': "/a/b",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
},
},
{
'method': "GET",
'url': "/a/b?foo=bar&&baz=yay",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
},
},
]
for test in tests:
with self.assertRaises(sigv4.InvalidSignatureError):
self.verify(bad=True, **test)
def test_bad_content_sha256_header(self):
now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
ten_minutes_ago = (
datetime.utcnow() - timedelta(minutes=10)).strftime("%Y%m%dT%H%M%SZ")
today = datetime.utcnow().strftime("%Y%m%d")
two_days_ago = (datetime.utcnow() - timedelta(days=2)).strftime("%Y%m%d")
tests = [
{
'method': "GET",
'url': "/a/b",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
'x-amz-content-sha256': ["hello world"],
},
},
{
'method': "GET",
'url': "/a/b?foo=bar&&baz=yay",
'body': b"",
'timestamp': now,
'signed_headers': ["host"],
'headers': {
'host': ["host.us-east-1.s3.amazonaws.com"],
'x-amz-content-sha256': ["abcd1234"],
},
},
]
for test in tests:
with self.assertRaises(sigv4.InvalidSignatureError):
self.verify(bad=True, **test)
def verify(self, method, url, body, timestamp, headers, signed_headers,
timestamp_mismatch=60, bad=False, scope=None,
quote_chars=False, fix_qp=True):
date = timestamp[:8]
credential_scope = "/".join([date, region, service, "aws4_request"])
if scope is None:
scope = access_key + "/" + credential_scope
if "?" in url:
uri, query_string = url.split("?", 1)
else:
uri = url
query_string = ""
query_params = [
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
"X-Amz-Credential=" + scope,
"X-Amz-Date=" + timestamp,
"X-Amz-SignedHeaders=" + ";".join(signed_headers)]
if query_string:
query_params.extend(query_string.split("&"))
def fixup_qp(qp):
result = cStringIO()
key, value = qp.split("=", 1)
for c in value:
if c in allowed_qp:
result.write(c)
else:
result.write("%%%02X" % ord(c))
return key + "=" + result.getvalue()
if fix_qp:
canonical_query_string = "&".join(
sorted(map(fixup_qp, [qp for qp in query_params if qp])))
else:
canonical_query_string = "&".join(sorted(query_params))
canonical_headers = "".join([
(header + ":" + ",".join(headers[header]) + "\n")
for header in sorted(signed_headers)])
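        # For S3-style requests the payload hash comes from the
        # x-amz-content-sha256 header when present (it may also be
        # UNSIGNED-PAYLOAD or a streaming marker); otherwise the SHA-256 of
        # the body is used.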
canonical_req = (
method + "\n" +
uri + "\n" +
canonical_query_string + "\n" +
canonical_headers + "\n" +
";".join(signed_headers) + "\n" +
headers.get("x-amz-content-sha256", [sha256(body).hexdigest()])[0])
string_to_sign = (
"AWS4-HMAC-SHA256\n" +
timestamp + "\n" +
credential_scope + "\n" +
sha256(canonical_req.encode("utf-8")).hexdigest())
def sign(secret, value):
return hmac.new(secret, value.encode("utf-8"), sha256).digest()
k_date = sign(b"AWS4" + secret_key.encode("utf-8"), date)
k_region = sign(k_date, region)
k_service = sign(k_region, service)
k_signing = sign(k_service, "aws4_request")
signature = hmac.new(
k_signing, string_to_sign.encode("utf-8"), sha256).hexdigest()
query_params.append("X-Amz-Signature=" + signature)
if quote_chars:
bad_qp = []
for qp in query_params:
result = cStringIO()
for c in qp:
if c.isalpha():
result.write("%%%02X" % ord(c))
else:
result.write(c)
bad_qp.append(result.getvalue())
query_params = bad_qp
v = sigv4.AWSSigV4S3Verifier(
request_method=method, uri_path=uri,
query_string="&".join(query_params), headers=headers, body=body,
region=region, service=service, key_mapping=key_mapping,
timestamp_mismatch=timestamp_mismatch)
if not bad:
self.assertEqual(v.canonical_request, canonical_req)
self.assertEqual(v.string_to_sign, string_to_sign)
v.verify()
return
class BadInitializer(TestCase):
def test_request_method(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(request_method=None)
def test_uri_path(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(uri_path=None)
def test_query_string(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(query_string=None)
def test_headers(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(headers=None)
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(headers={"Host": 0})
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(headers={0: "Foo"})
def test_body(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(body=None)
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(body=u"Hello")
def test_region(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(region=None)
def test_service(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(service=None)
def test_timestamp_mismatch(self):
with self.assertRaises(TypeError):
sigv4.AWSSigV4Verifier(timestamp_mismatch="Hello")
with self.assertRaises(ValueError):
sigv4.AWSSigV4Verifier(timestamp_mismatch=-1)
|
|
import json
from nose.tools import eq_, ok_
from django.core.urlresolvers import reverse
from airmozilla.main.models import Event
from airmozilla.surveys.models import Survey, Question, next_question_order
from .base import ManageTestCase
class TestCase(ManageTestCase):
def test_event_survey(self):
survey = Survey.objects.create(
name='My Survey',
active=True
)
Question.objects.create(
survey=survey,
question={},
order=next_question_order(),
)
other_survey = Survey.objects.create(
name='Other Survey',
active=False
)
Question.objects.create(
survey=other_survey,
question={"question": "Something?"},
order=next_question_order(),
)
Question.objects.create(
survey=other_survey,
question={"question": "Something else?"},
order=next_question_order(),
)
event = Event.objects.get(title='Test event')
event_edit_url = reverse('manage:event_edit', args=(event.id,))
response = self.client.get(event_edit_url)
eq_(response.status_code, 200)
url = reverse('manage:event_survey', args=(event.id,))
ok_(url in response.content)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('My Survey' in response.content)
ok_('1 question' in response.content)
ok_('Other Survey' in response.content)
ok_('2 questions' in response.content)
ok_('none' in response.content)
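        # No survey is associated with the event yet.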
eq_(Survey.events.through.objects.filter(event=event).count(), 0)
response = self.client.post(url, {'survey': survey.id})
eq_(response.status_code, 302)
self.assertRedirects(response, event_edit_url)
eq_(Survey.events.through.objects.filter(event=event).count(), 1)
response = self.client.get(url)
eq_(response.status_code, 200)
# change it back to none
response = self.client.post(url, {'survey': 0})
eq_(response.status_code, 302)
self.assertRedirects(response, event_edit_url)
eq_(Survey.events.through.objects.filter(event=event).count(), 0)
def test_list_surveys(self):
survey = Survey.objects.create(
name='My Survey',
active=True
)
for i in range(3):
Question.objects.create(
survey=survey,
question={},
order=next_question_order(),
)
event = Event.objects.get(title='Test event')
survey.events.add(event)
url = reverse('manage:surveys')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('My Survey' in response.content)
ok_('>3</td>' in response.content)
ok_('>1</td>' in response.content)
ok_("Yes, it's active" in response.content)
def test_event_edit_link_to_surveys(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
url = reverse('manage:surveys')
ok_(url in response.content)
# click that button
response = self.client.get(url)
eq_(response.status_code, 200)
def test_create_survey(self):
url = reverse('manage:survey_new')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('name="name"' in response.content)
response = self.client.post(url, {
'name': 'Name',
'active': True
})
eq_(response.status_code, 302)
survey = Survey.objects.get(name='Name')
self.assertRedirects(
response,
reverse('manage:survey_edit', args=(survey.id,))
)
def test_edit_survey(self):
survey = Survey.objects.create(name='Name')
url = reverse('manage:survey_edit', args=(survey.id,))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('value="Name"' in response.content)
# check for error trying to activate with no questions
response = self.client.post(url, {
'active': True
})
eq_(response.status_code, 200)
ok_("Survey must have at least one question in order to be active"
in response.content
)
# add a question and check for successful activation
Question.objects.create(
survey=survey,
question={},
order=next_question_order(),
)
response = self.client.post(url, {
'name': 'New Name',
'active': True
})
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('manage:surveys')
)
survey = Survey.objects.get(id=survey.id)
eq_(survey.name, 'New Name')
ok_(survey.active)
def test_delete_survey(self):
survey = Survey.objects.create(name='Name')
url = reverse('manage:survey_delete', args=(survey.id,))
response = self.client.get(url)
eq_(response.status_code, 405)
response = self.client.post(url)
eq_(response.status_code, 302)
ok_(not Survey.objects.all())
def test_create_and_delete_question(self):
survey = Survey.objects.create(name='Name')
url = reverse(
'manage:survey_question_new', args=(survey.id,)
)
response = self.client.post(url, {})
eq_(response.status_code, 302)
question = Question.objects.get(survey=survey)
url = reverse(
'manage:survey_question_delete',
args=(survey.id, question.id)
)
response = self.client.post(url)
eq_(response.status_code, 302)
ok_(not Question.objects.filter(survey=survey))
def test_edit_question(self):
survey = Survey.objects.create(name='Name')
question = Question.objects.create(
survey=survey,
order=next_question_order(),
)
url = reverse(
'manage:survey_question_edit',
args=(survey.id, question.id)
)
q = {
'question': '?',
'choices': ['a', 'b']
}
payload = json.dumps(q)
response = self.client.post(url, {'question': payload})
eq_(response.status_code, 200)
eq_(
json.loads(response.content),
{'question': json.dumps(q, indent=2)}
)
# reload
question = Question.objects.get(id=question.id)
eq_(question.question, q)
def test_ordering_questions(self):
survey = Survey.objects.create(name='Name')
question_1 = Question.objects.create(
survey=survey,
question={'one': 1},
order=next_question_order(),
)
question_2 = Question.objects.create(
survey=survey,
question={'two': 2},
order=next_question_order(),
)
question_3 = Question.objects.create(
survey=survey,
question={'three': 3},
order=next_question_order(),
)
questions = list(Question.objects.filter(survey=survey))
eq_(questions, [question_1, question_2, question_3])
# let's move question_2 up one
url = reverse(
'manage:survey_question_edit',
args=(survey.id, question_2.id)
)
response = self.client.post(url, {'ordering': 'up'})
survey_questions_url = reverse(
'manage:survey_questions',
args=(survey.id,)
)
self.assertRedirects(response, survey_questions_url)
questions = list(Question.objects.filter(survey=survey))
eq_(questions, [question_2, question_1, question_3])
# let's move question_1 down one
url = reverse(
'manage:survey_question_edit',
args=(survey.id, question_1.id)
)
response = self.client.post(url, {'ordering': 'down'})
self.assertRedirects(response, survey_questions_url)
questions = list(Question.objects.filter(survey=survey))
eq_(questions, [question_2, question_3, question_1])
|
|
import h2.exceptions
import time
import enum
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol.websocket import WebSocketLayer
from mitmproxy.net import websockets
class _HttpTransmissionLayer(base.Layer):
def read_request_headers(self, flow):
raise NotImplementedError()
def read_request_body(self, request):
raise NotImplementedError()
def send_request(self, request):
raise NotImplementedError()
def read_response_headers(self):
raise NotImplementedError()
def read_response_body(self, request, response):
raise NotImplementedError()
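        # The unreachable ``yield`` below makes this stub a generator
        # function, so calling it returns a generator (which raises
        # NotImplementedError on first iteration) instead of raising
        # immediately; read_response below joins the chunks that concrete
        # implementations yield.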
yield "this is a generator" # pragma: no cover
def read_response(self, request):
response = self.read_response_headers()
response.data.content = b"".join(
self.read_response_body(request, response)
)
return response
def send_response(self, response):
if response.data.content is None:
raise exceptions.HttpException("Cannot assemble flow with missing content")
self.send_response_headers(response)
self.send_response_body(response, [response.data.content])
def send_response_headers(self, response):
raise NotImplementedError()
def send_response_body(self, response, chunks):
raise NotImplementedError()
def check_close_connection(self, f):
raise NotImplementedError()
class ConnectServerConnection:
"""
"Fake" ServerConnection to represent state after a CONNECT request to an upstream proxy.
"""
def __init__(self, address, ctx):
self.address = address
self._ctx = ctx
@property
def via(self):
return self._ctx.server_conn
def __getattr__(self, item):
return getattr(self.via, item)
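    # Note: any attribute not defined on this wrapper (for example
    # ``tls_established`` on the real server connection) is resolved against
    # the upstream connection through the ``via`` property and ``__getattr__``
    # above.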
def connected(self):
return self.via.connected()
class UpstreamConnectLayer(base.Layer):
def __init__(self, ctx, connect_request):
super().__init__(ctx)
self.connect_request = connect_request
self.server_conn = ConnectServerConnection(
(connect_request.host, connect_request.port),
self.ctx
)
def __call__(self):
layer = self.ctx.next_layer(self)
layer()
def _send_connect_request(self):
self.log("Sending CONNECT request", "debug", [
"Proxy Server: {}".format(self.ctx.server_conn.address),
"Connect to: {}:{}".format(self.connect_request.host, self.connect_request.port)
])
self.send_request(self.connect_request)
resp = self.read_response(self.connect_request)
if resp.status_code != 200:
raise exceptions.ProtocolException("Reconnect: Upstream server refuses CONNECT request")
def connect(self):
if not self.server_conn.connected():
self.ctx.connect()
self._send_connect_request()
else:
pass # swallow the message
def change_upstream_proxy_server(self, address):
self.log("Changing upstream proxy to {} (CONNECTed)".format(repr(address)), "debug")
if address != self.server_conn.via.address:
self.ctx.set_server(address)
def set_server(self, address):
if self.ctx.server_conn.connected():
self.ctx.disconnect()
self.connect_request.host = address[0]
self.connect_request.port = address[1]
self.server_conn.address = address
def is_ok(status):
return 200 <= status < 300
class HTTPMode(enum.Enum):
regular = 1
transparent = 2
upstream = 3
# At this point, we see only a subset of the proxy modes
MODE_REQUEST_FORMS = {
HTTPMode.regular: ("authority", "absolute"),
HTTPMode.transparent: ("relative",),
HTTPMode.upstream: ("authority", "absolute"),
}
def validate_request_form(mode, request):
if request.first_line_format == "absolute" and request.scheme != "http":
raise exceptions.HttpException(
"Invalid request scheme: %s" % request.scheme
)
allowed_request_forms = MODE_REQUEST_FORMS[mode]
if request.first_line_format not in allowed_request_forms:
if mode == HTTPMode.transparent:
err_message = (
"""
Mitmproxy received an {} request even though it is not running
in regular mode. This usually indicates a misconfiguration,
please see the mitmproxy mode documentation for details.
"""
).format("HTTP CONNECT" if request.first_line_format == "authority" else "absolute-form")
else:
err_message = "Invalid HTTP request form (expected: %s, got: %s)" % (
" or ".join(allowed_request_forms), request.first_line_format
)
raise exceptions.HttpException(err_message)
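# Illustrative examples (not part of the original module): in regular mode an
# absolute-form request ("GET http://example.com/ HTTP/1.1") or an
# authority-form request ("CONNECT example.com:443 HTTP/1.1") passes
# validation, while a relative-form request ("GET / HTTP/1.1") raises an
# HttpException; in transparent mode only relative-form requests are accepted.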
class HttpLayer(base.Layer):
if False:
# mypy type hints
server_conn = None # type: connections.ServerConnection
def __init__(self, ctx, mode):
super().__init__(ctx)
self.mode = mode
self.__initial_server_conn = None
"Contains the original destination in transparent mode, which needs to be restored"
"if an inline script modified the target server for a single http request"
# We cannot rely on server_conn.tls_established,
# see https://github.com/mitmproxy/mitmproxy/issues/925
self.__initial_server_tls = None
# Requests happening after CONNECT do not need Proxy-Authorization headers.
self.connect_request = False
def __call__(self):
if self.mode == HTTPMode.transparent:
self.__initial_server_tls = self.server_tls
self.__initial_server_conn = self.server_conn
while True:
flow = http.HTTPFlow(
self.client_conn,
self.server_conn,
live=self,
mode=self.mode.name
)
if not self._process_flow(flow):
return
def handle_regular_connect(self, f):
self.connect_request = True
try:
self.set_server((f.request.host, f.request.port))
if f.response:
resp = f.response
else:
resp = http.make_connect_response(f.request.data.http_version)
self.send_response(resp)
if is_ok(resp.status_code):
layer = self.ctx.next_layer(self)
layer()
except (
exceptions.ProtocolException, exceptions.NetlibException
) as e:
# HTTPS tasting means that ordinary errors like resolution
# and connection errors can happen here.
self.send_error_response(502, repr(e))
f.error = flow.Error(str(e))
self.channel.ask("error", f)
return False
return False
def handle_upstream_connect(self, f):
self.establish_server_connection(
f.request.host,
f.request.port,
f.request.scheme
)
self.send_request(f.request)
f.response = self.read_response_headers()
f.response.data.content = b"".join(
self.read_response_body(f.request, f.response)
)
self.send_response(f.response)
if is_ok(f.response.status_code):
layer = UpstreamConnectLayer(self, f.request)
return layer()
return False
def _process_flow(self, f):
try:
try:
request = self.read_request_headers(f)
except exceptions.HttpReadDisconnect:
# don't throw an error for disconnects that happen
# before/between requests.
return False
f.request = request
if request.first_line_format == "authority":
# The standards are silent on what we should do with a CONNECT
# request body, so although it's not common, it's allowed.
f.request.data.content = b"".join(
self.read_request_body(f.request)
)
f.request.timestamp_end = time.time()
self.channel.ask("http_connect", f)
if self.mode is HTTPMode.regular:
return self.handle_regular_connect(f)
elif self.mode is HTTPMode.upstream:
return self.handle_upstream_connect(f)
else:
msg = "Unexpected CONNECT request."
self.send_error_response(400, msg)
raise exceptions.ProtocolException(msg)
validate_request_form(self.mode, request)
self.channel.ask("requestheaders", f)
# Re-validate request form in case the user has changed something.
validate_request_form(self.mode, request)
if request.headers.get("expect", "").lower() == "100-continue":
# TODO: We may have to use send_response_headers for HTTP2
# here.
self.send_response(http.expect_continue_response)
request.headers.pop("expect")
request.data.content = b"".join(self.read_request_body(request))
request.timestamp_end = time.time()
except exceptions.HttpException as e:
# We optimistically guess there might be an HTTP client on the
# other end
self.send_error_response(400, repr(e))
# Request may be malformed at this point, so we unset it.
f.request = None
f.error = flow.Error(str(e))
self.channel.ask("error", f)
raise exceptions.ProtocolException(
"HTTP protocol error in client request: {}".format(e)
)
self.log("request", "debug", [repr(request)])
# set first line format to relative in regular mode,
# see https://github.com/mitmproxy/mitmproxy/issues/1759
if self.mode is HTTPMode.regular and request.first_line_format == "absolute":
request.first_line_format = "relative"
# update host header in reverse proxy mode
if self.config.options.mode.startswith("reverse:") and not self.config.options.keep_host_header:
f.request.host_header = self.config.upstream_server.address[0]
# Determine .scheme, .host and .port attributes for inline scripts. For
# absolute-form requests, they are directly given in the request. For
# authority-form requests, we only need to determine the request
# scheme. For relative-form requests, we need to determine host and
# port as well.
if self.mode is HTTPMode.transparent:
# Setting request.host also updates the host header, which we want
# to preserve
host_header = f.request.host_header
f.request.host = self.__initial_server_conn.address[0]
f.request.port = self.__initial_server_conn.address[1]
f.request.host_header = host_header # set again as .host overwrites this.
f.request.scheme = "https" if self.__initial_server_tls else "http"
self.channel.ask("request", f)
try:
if websockets.check_handshake(request.headers) and websockets.check_client_version(request.headers):
# We only support RFC6455 with WebSocket version 13
# allow inline scripts to manipulate the client handshake
self.channel.ask("websocket_handshake", f)
if not f.response:
self.establish_server_connection(
f.request.host,
f.request.port,
f.request.scheme
)
def get_response():
self.send_request(f.request)
f.response = self.read_response_headers()
try:
get_response()
except exceptions.NetlibException as e:
self.log(
"server communication error: %s" % repr(e),
level="debug"
)
                        # In any case, we try to reconnect at least once. This is
                        # necessary because we may already have initiated an upstream
                        # connection after clientconnect that has since expired, e.g.
                        # consider the following event log:
# > clientconnect (transparent mode destination known)
# > serverconnect (required for client tls handshake)
# > read n% of large request
# > server detects timeout, disconnects
# > read (100-n)% of large request
# > send large request upstream
if isinstance(e, exceptions.Http2ProtocolException):
# do not try to reconnect for HTTP2
raise exceptions.ProtocolException(
"First and only attempt to get response via HTTP2 failed."
)
self.disconnect()
self.connect()
get_response()
# call the appropriate script hook - this is an opportunity for
# an inline script to set f.stream = True
self.channel.ask("responseheaders", f)
if f.response.stream:
f.response.data.content = None
else:
f.response.data.content = b"".join(
self.read_response_body(f.request, f.response)
)
f.response.timestamp_end = time.time()
                    # No further manipulation of self.server_conn beyond this point;
                    # we can safely set it as the final attribute value here.
f.server_conn = self.server_conn
else:
# response was set by an inline script.
# we now need to emulate the responseheaders hook.
self.channel.ask("responseheaders", f)
self.log("response", "debug", [repr(f.response)])
self.channel.ask("response", f)
if not f.response.stream:
# no streaming:
# we already received the full response from the server and can
# send it to the client straight away.
self.send_response(f.response)
else:
# streaming:
# First send the headers and then transfer the response incrementally
self.send_response_headers(f.response)
chunks = self.read_response_body(
f.request,
f.response
)
if callable(f.response.stream):
chunks = f.response.stream(chunks)
self.send_response_body(f.response, chunks)
f.response.timestamp_end = time.time()
if self.check_close_connection(f):
return False
# Handle 101 Switching Protocols
if f.response.status_code == 101:
# Handle a successful HTTP 101 Switching Protocols Response,
# received after e.g. a WebSocket upgrade request.
# Check for WebSocket handshake
is_websocket = (
websockets.check_handshake(f.request.headers) and
websockets.check_handshake(f.response.headers)
)
if is_websocket and not self.config.options.websocket:
self.log(
"Client requested WebSocket connection, but the protocol is disabled.",
"info"
)
if is_websocket and self.config.options.websocket:
layer = WebSocketLayer(self, f)
else:
layer = self.ctx.next_layer(self)
layer()
return False # should never be reached
except (exceptions.ProtocolException, exceptions.NetlibException) as e:
self.send_error_response(502, repr(e))
if not f.response:
f.error = flow.Error(str(e))
self.channel.ask("error", f)
return False
else:
raise exceptions.ProtocolException(
"Error in HTTP connection: %s" % repr(e)
)
finally:
if f:
f.live = False
return True
def send_error_response(self, code, message, headers=None) -> None:
try:
response = http.make_error_response(code, message, headers)
self.send_response(response)
except (exceptions.NetlibException, h2.exceptions.H2Error, exceptions.Http2ProtocolException):
self.log("Failed to send error response to client: {}".format(message), "debug")
def change_upstream_proxy_server(self, address):
        # Make change_upstream_proxy_server always available,
        # even if there's no UpstreamConnectLayer
if hasattr(self.ctx, "change_upstream_proxy_server"):
self.ctx.change_upstream_proxy_server(address)
elif address != self.server_conn.address:
self.log("Changing upstream proxy to {} (not CONNECTed)".format(repr(address)), "debug")
self.set_server(address)
def establish_server_connection(self, host: str, port: int, scheme: str):
tls = (scheme == "https")
if self.mode is HTTPMode.regular or self.mode is HTTPMode.transparent:
# If there's an existing connection that doesn't match our expectations, kill it.
address = (host, port)
if address != self.server_conn.address or tls != self.server_tls:
self.set_server(address)
self.set_server_tls(tls, address[0])
            # Establish connection if necessary.
if not self.server_conn.connected():
self.connect()
else:
if not self.server_conn.connected():
self.connect()
if tls:
raise exceptions.HttpProtocolException("Cannot change scheme in upstream proxy mode.")
|
|
"""
AMAK: 20050515: This module is a brand new test_select module, which gives much wider coverage.
"""
import errno
import time
from test import test_support
import unittest
import socket
import select
SERVER_ADDRESS = ("localhost", 0)
DATA_CHUNK_SIZE = 1000
DATA_CHUNK = "." * DATA_CHUNK_SIZE
#
# The timing of these tests depends on how the underlying OS socket library
# handles buffering. These values may need tweaking for different platforms
#
# The fundamental problem is that there is no reliable way to fill a socket with bytes
# To address this for running on Netty, we arbitrarily send 10000 bytes
SELECT_TIMEOUT = 0
READ_TIMEOUT = 5
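# For reference, a zero select timeout makes the call a non-blocking poll:
# for a hypothetical socket ``sock``, select.select([sock], [], [], 0)[0]
# returns immediately and is empty when no data is waiting to be read.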
class AsynchronousServer:
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setblocking(0)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind(SERVER_ADDRESS)
self.server_socket.listen(5)
self.server_addr = self.server_socket.getsockname()
try:
self.server_socket.accept()
except socket.error, e:
pass # at this point, always gets EWOULDBLOCK - nothing to accept
def select_acceptable(self):
return select.select([self.server_socket], [self.server_socket], [], SELECT_TIMEOUT)[0]
def verify_acceptable(self):
start = time.time()
while True:
if self.select_acceptable():
return
elif (time.time() - start) > READ_TIMEOUT:
raise Exception('Server socket did not accept in time')
time.sleep(0.1)
def verify_not_acceptable(self):
assert not self.select_acceptable(), "Server socket should not be acceptable"
def accept(self):
self.verify_acceptable()
new_socket, address = self.server_socket.accept()
return AsynchronousHandler(new_socket)
def close(self):
self.server_socket.close()
class AsynchronousHandler:
def __init__(self, new_socket):
self.socket = new_socket
self.socket.setblocking(0)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def write(self):
"""
Writes on this channel until select no longer reports it as writable.
Returns the number of bytes written
"""
total_bytes = 0
while 1:
try:
if self.select_writable():
bytes_sent = self.socket.send(DATA_CHUNK)
total_bytes += bytes_sent
if test_support.is_jython and total_bytes > 10000:
# Netty will buffer indefinitely, so just pick an arbitrary cutoff
return total_bytes
else:
return total_bytes
except socket.error, se:
if se.value == 10035:
continue
raise se
def read(self, expected):
"""
Reads expected bytes from this socket
An Exception is raised if expected bytes aren't read before READ_TIMEOUT
is reached.
"""
results = ""
start = time.time()
while 1:
if self.select_readable():
recvd_bytes = self.socket.recv(expected - len(results))
if len(recvd_bytes):
results += recvd_bytes
if len(results) == expected:
return results
else:
stop = time.time()
if (stop - start) > READ_TIMEOUT:
raise Exception("Got %d bytes but %d bytes were written." %
(len(results), expected))
def select_readable(self):
return select.select([self.socket], [], [], SELECT_TIMEOUT)[0]
def verify_readable(self):
assert self.select_readable(), "Socket should be ready for reading"
def verify_not_readable(self):
assert not self.select_readable(), "Socket should not be ready for reading"
def select_writable(self):
return select.select([], [self.socket], [], SELECT_TIMEOUT)[1]
def verify_writable(self):
assert self.select_writable(), "Socket should be ready for writing"
def verify_not_writable(self):
assert not self.select_writable(), "Socket should not be ready for writing"
def verify_only_writable(self):
self.verify_writable()
self.verify_not_readable()
def close(self):
self.socket.close()
class AsynchronousClient(AsynchronousHandler):
def __init__(self, server_addr):
self.server_addr = server_addr
AsynchronousHandler.__init__(self, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        self.connected = False
def start_connect(self):
result = self.socket.connect_ex(self.server_addr)
if result == errno.EISCONN:
self.connected = True
else:
assert result in [errno.EINPROGRESS, errno.ENOTCONN], \
"connect_ex returned %s (%s)" % (result, errno.errorcode.get(result, "Unknown errno"))
def finish_connect(self):
if self.connected:
return
start = time.time()
while True:
self.start_connect()
if self.connected:
break
elif (time.time() - start) > READ_TIMEOUT:
raise Exception('Client socket incomplete connect')
time.sleep(0.1)
class TestSelectOnAccept(unittest.TestCase):
def setUp(self):
self.server = AsynchronousServer()
self.client = AsynchronousClient(self.server.server_addr)
self.handler = None
@test_support.retry(Exception)
def testSelectOnAccept(self):
self.server.verify_not_acceptable()
self.client.start_connect()
self.server.verify_acceptable()
self.handler = self.server.accept()
self.client.finish_connect()
self.server.verify_not_acceptable()
def tearDown(self):
self.client.close()
if self.handler:
self.handler.close()
self.server.close()
class TestSelect(unittest.TestCase):
@test_support.retry(Exception)
def setUp(self):
self.server = AsynchronousServer()
self.client = AsynchronousClient(self.server.server_addr)
self.client.start_connect()
self.handler = self.server.accept()
self.client.finish_connect()
def tearDown(self):
self.client.close()
self.handler.close()
self.server.close()
@test_support.retry(Exception)
def testClientOut(self):
self.client.verify_only_writable()
self.handler.verify_only_writable()
written = self.client.write()
self.handler.verify_readable()
self.handler.read(written/2)
self.handler.verify_readable()
self.handler.read(written/2)
self.handler.verify_not_readable()
@test_support.retry(Exception)
def testHandlerOut(self):
written = self.handler.write()
self.client.verify_readable()
self.client.read(written/2)
self.client.verify_readable()
self.client.read(written/2)
self.client.verify_not_readable()
@test_support.retry(Exception)
def testBothOut(self):
client_written = self.client.write()
handler_written = self.handler.write()
self.client.verify_readable()
self.handler.verify_readable()
self.client.read(handler_written/2)
self.handler.read(client_written/2)
self.client.verify_readable()
self.handler.verify_readable()
self.client.read(handler_written/2)
self.handler.read(client_written/2)
self.client.verify_only_writable()
self.handler.verify_only_writable()
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Alert.status'
db.add_column('sentry_alert', 'status',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Alert.status'
db.delete_column('sentry_alert', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
#!/usr/bin/env python
#
# Copyright 2014 Knowledge Economy Developments Ltd
# Copyright 2014 David Wells
#
# Henry Gomersall
# [email protected]
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
'''
A set of utility functions for use with the builders. Users should
not need to use the functions directly, but they are included here for
completeness and to aid with understanding of what is happening behind
the scenes.
Certainly, users may encounter instances of
:class:`~pyfftw.builders._utils._FFTWWrapper`.
Everything documented in this module is *not* part of the public API
and may change in future versions.
'''
import multiprocessing
import pyfftw
import numpy
import warnings
from .. import _threading_type
from .. import config
__all__ = ['_FFTWWrapper', '_rc_dtype_pairs', '_default_dtype', '_Xfftn',
'_setup_input_slicers', '_compute_array_shapes', '_precook_1d_args',
'_cook_nd_args']
_valid_efforts = ('FFTW_ESTIMATE', 'FFTW_MEASURE',
'FFTW_PATIENT', 'FFTW_EXHAUSTIVE')
_real_to_real_dtypes = [numpy.dtype('float32'), numpy.dtype('float64'),
numpy.dtype('longdouble')]
# Looking up a real dtype in here returns the complex complement of the same
# precision, and vice versa.
# It is necessary to use .char as the keys due to MSVC mapping long
# double to double and the way that numpy handles this.
_rc_dtype_pairs = {}
_default_dtype = None
# Double precision is the preferred default precision. Prefer casting to
# higher precision if possible. If double support is missing, long double is
# mapped to double and so we lose less precision than by converting to single.
if '64' in pyfftw._supported_types:
_default_dtype = numpy.dtype('float64')
_rc_dtype_pairs.update({
numpy.dtype('float64').char: numpy.dtype('complex128'),
numpy.dtype('complex128').char: numpy.dtype('float64')})
if 'ld' in pyfftw._supported_types:
if _default_dtype is None:
_default_dtype = numpy.dtype('longdouble')
_rc_dtype_pairs.update({
numpy.dtype('longdouble').char: numpy.dtype('clongdouble'),
numpy.dtype('clongdouble').char: numpy.dtype('longdouble')})
if '32' in pyfftw._supported_types:
if _default_dtype is None:
_default_dtype = numpy.dtype('float32')
_rc_dtype_pairs.update({
numpy.dtype('float32').char: numpy.dtype('complex64'),
numpy.dtype('complex64').char: numpy.dtype('float32')})
if _default_dtype is None:
raise NotImplementedError("No default precision available")
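# Example lookups (assuming the corresponding precisions were compiled in):
#     _rc_dtype_pairs[numpy.dtype('float64').char]   -> numpy.dtype('complex128')
#     _rc_dtype_pairs[numpy.dtype('complex64').char] -> numpy.dtype('float32')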
def _default_effort(effort):
if effort is None:
return config.PLANNER_EFFORT
else:
return effort
def _default_threads(threads):
if threads is None:
if config.NUM_THREADS <= 0:
return multiprocessing.cpu_count()
return config.NUM_THREADS
else:
if threads > 1 and _threading_type is None:
raise ValueError("threads > 1 requested, but pyFFTW was not built "
"with multithreaded FFTW.")
elif threads <= 0:
return multiprocessing.cpu_count()
return threads
def _unitary(norm):
"""_unitary() utility copied from numpy"""
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def _norm_args(norm):
""" pass the proper normalization-related keyword arguments. """
if _unitary(norm):
ortho = True
normalise_idft = False
else:
ortho = False
normalise_idft = True
return dict(normalise_idft=normalise_idft, ortho=ortho)
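# For example, _norm_args('ortho') returns
# {'normalise_idft': False, 'ortho': True}, while _norm_args(None) returns
# {'normalise_idft': True, 'ortho': False}.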
def _Xfftn(a, s, axes, overwrite_input,
planner_effort, threads, auto_align_input, auto_contiguous,
avoid_copy, inverse, real, normalise_idft=True, ortho=False,
real_direction_flag=None):
'''Generic transform interface for all the transforms. No
defaults exist. The transform must be specified exactly.
The argument ``real_direction_flag`` is a slight exception to this
rule: for backwards compatibility this function defaults to standard
Fourier transforms (and not the specialized real to real variants).
If this flag is set to one of the standard real transform types
(e.g., 'FFTW_RODFT00') then the arguments ``inverse`` and ``real``
are ignored.
'''
a_orig = a
invreal = inverse and real
if real_direction_flag is not None:
direction = real_direction_flag
real_to_real = True
elif inverse:
direction = 'FFTW_BACKWARD'
real_to_real = False
else:
direction = 'FFTW_FORWARD'
real_to_real = False
if planner_effort not in _valid_efforts:
        raise ValueError('Invalid planner effort: %s' % planner_effort)
s, axes = _cook_nd_args(a, s, axes, invreal)
input_shape, output_shape = _compute_array_shapes(
a, s, axes, inverse, real)
a_is_complex = numpy.iscomplexobj(a)
# Make the input dtype correct by transforming to an available type
if real_to_real:
if a.dtype not in _real_to_real_dtypes:
a = numpy.asarray(a, dtype=_default_dtype)
else:
if a.dtype.char not in _rc_dtype_pairs:
dtype = _default_dtype
if a.dtype == numpy.dtype('float16') and '32' in pyfftw._supported_types:
# convert half-precision to single precision, if available
dtype = numpy.dtype('float32')
# warn when losing precision but not when using a higher precision
if dtype.itemsize < a.dtype.itemsize:
warnings.warn("Narrowing conversion from %s to %s precision" % (a.dtype, dtype))
if not real or inverse:
# It's going to be complex
dtype = numpy.dtype(_rc_dtype_pairs[dtype.char])
# finally convert the input array
a = numpy.asarray(a, dtype=dtype)
elif not (real and not inverse) and not a_is_complex:
# We need to make it a complex dtype
a = numpy.asarray(a, dtype=_rc_dtype_pairs[a.dtype.char])
elif (real and not inverse) and a_is_complex:
# It should be real
a = numpy.asarray(a, dtype=_rc_dtype_pairs[a.dtype.char])
# Make the output dtype correct
if not real: # 'real' implies c2r or r2c; hence 'not real' means r2r or c2c.
output_dtype = a.dtype
else:
output_dtype = _rc_dtype_pairs[a.dtype.char]
if not avoid_copy:
a_copy = a.copy()
output_array = pyfftw.empty_aligned(output_shape, output_dtype)
flags = [planner_effort]
if not auto_align_input:
flags.append('FFTW_UNALIGNED')
if overwrite_input:
flags.append('FFTW_DESTROY_INPUT')
    if a.shape != input_shape:
if avoid_copy:
raise ValueError('Cannot avoid copy: '
'The transform shape is not the same as the array size. '
'(from avoid_copy flag)')
# This means we need to use an _FFTWWrapper object
# and so need to create slicers.
update_input_array_slicer, FFTW_array_slicer = (
_setup_input_slicers(a.shape, input_shape))
# Also, the input array will be a different shape to the shape of
# `a`, so we need to create a new array.
input_array = pyfftw.empty_aligned(input_shape, a.dtype)
FFTW_object = _FFTWWrapper(input_array, output_array, axes, direction,
flags, threads, input_array_slicer=update_input_array_slicer,
FFTW_array_slicer=FFTW_array_slicer,
normalise_idft=normalise_idft, ortho=ortho)
# We copy the data back into the internal FFTW object array
internal_array = FFTW_object.input_array
internal_array[:] = 0
internal_array[FFTW_array_slicer] = (
a_copy[update_input_array_slicer])
else:
# Otherwise we can use `a` as-is
input_array = a
if auto_contiguous:
# We only need to create a new array if it's not already
# contiguous
if not (a.flags['C_CONTIGUOUS'] or a.flags['F_CONTIGUOUS']):
if avoid_copy:
raise ValueError('Cannot avoid copy: '
'The input array is not contiguous and '
'auto_contiguous is set. (from avoid_copy flag)')
input_array = pyfftw.empty_aligned(a.shape, a.dtype)
if (auto_align_input and not pyfftw.is_byte_aligned(input_array)):
if avoid_copy:
raise ValueError('Cannot avoid copy: '
'The input array is not aligned and '
'auto_align is set. (from avoid_copy flag)')
input_array = pyfftw.byte_align(input_array)
FFTW_object = pyfftw.FFTW(input_array, output_array, axes, direction,
flags, threads, normalise_idft=normalise_idft, ortho=ortho)
if not avoid_copy:
# Copy the data back into the (likely) destroyed array
FFTW_object.input_array[:] = a_copy
return FFTW_object
class _FFTWWrapper(pyfftw.FFTW):
''' A class that wraps :class:`pyfftw.FFTW`, providing a slicer on the input
stage during calls to :meth:`~pyfftw.builders._utils._FFTWWrapper.__call__`.
'''
def __init__(self, input_array, output_array, axes=[-1],
direction='FFTW_FORWARD', flags=['FFTW_MEASURE'],
threads=1, input_array_slicer=None, FFTW_array_slicer=None,
normalise_idft=True, ortho=False):
'''The arguments are as per :class:`pyfftw.FFTW`, but with the addition
of 2 keyword arguments: ``input_array_slicer`` and
``FFTW_array_slicer``.
These arguments represent 2 slicers: ``input_array_slicer`` slices
the input array that is passed in during a call to instances of this
class, and ``FFTW_array_slicer`` slices the internal array.
The arrays that are returned from both of these slicing operations
should be the same size. The data is then copied from the sliced
input array into the sliced internal array.
'''
self._input_array_slicer = input_array_slicer
self._FFTW_array_slicer = FFTW_array_slicer
self._normalise_idft = normalise_idft
self._ortho = ortho
if 'FFTW_DESTROY_INPUT' in flags:
self._input_destroyed = True
else:
self._input_destroyed = False
pyfftw.FFTW.__init__(self, input_array, output_array,
axes, direction, flags, threads)
def __call__(self, input_array=None, output_array=None,
normalise_idft=None, ortho=None):
'''Wrap :meth:`pyfftw.FFTW.__call__` by firstly slicing the
passed-in input array and then copying it into a sliced version
of the internal array. These slicers are set at instantiation.
When input array is not ``None``, this method always results in
a copy. Consequently, the alignment and dtype are maintained in
the internal array.
``output_array`` and ``normalise_idft`` are passed through to
:meth:`pyfftw.FFTW.__call__` untouched.
'''
if input_array is not None:
# Do the update here (which is a copy, so it's alignment
# safe etc).
internal_input_array = self.input_array
input_array = numpy.asanyarray(input_array)
if self._input_destroyed:
internal_input_array[:] = 0
sliced_internal = internal_input_array[self._FFTW_array_slicer]
sliced_input = input_array[self._input_array_slicer]
if sliced_internal.shape != sliced_input.shape:
raise ValueError('Invalid input shape: '
'The new input array should be the same shape '
'as the input array used to instantiate the '
'object.')
sliced_internal[:] = sliced_input
if normalise_idft is None:
normalise_idft = self._normalise_idft
if ortho is None:
ortho = self._ortho
output = super(_FFTWWrapper, self).__call__(input_array=None,
output_array=output_array, normalise_idft=normalise_idft,
ortho=ortho)
return output
def _setup_input_slicers(a_shape, input_shape):
''' This function returns two slicers that are to be used to
copy the data from the input array to the FFTW object internal
array, which can then be passed to _FFTWWrapper:
``(update_input_array_slicer, FFTW_array_slicer)``
On calls to :class:`~pyfftw.builders._utils._FFTWWrapper` objects,
the input array is copied in as:
``FFTW_array[FFTW_array_slicer] = input_array[update_input_array_slicer]``
'''
# default the slicers to include everything
update_input_array_slicer = (
[slice(None)]*len(a_shape))
FFTW_array_slicer = [slice(None)]*len(a_shape)
# iterate over each dimension and modify the slicer and FFTW dimension
for axis in range(len(a_shape)):
if a_shape[axis] > input_shape[axis]:
update_input_array_slicer[axis] = (
slice(0, input_shape[axis]))
elif a_shape[axis] < input_shape[axis]:
FFTW_array_slicer[axis] = (
slice(0, a_shape[axis]))
update_input_array_slicer[axis] = (
slice(0, a_shape[axis]))
else:
# If neither of these, we use the whole dimension.
update_input_array_slicer[axis] = (
slice(0, a_shape[axis]))
return tuple(update_input_array_slicer), tuple(FFTW_array_slicer)
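# Illustrative sketch, not part of the original module: a minimal demonstration
# of how the two slicers returned above are meant to be used together. The
# helper name below is hypothetical; it only exercises code defined in this
# module plus numpy.
def _example_setup_input_slicers_usage():
    import numpy
    # The caller's array has 6 columns but the transform only needs 4, so the
    # update slicer crops the input while the FFTW slicer keeps everything.
    update_slicer, fftw_slicer = _setup_input_slicers((3, 6), (3, 4))
    a = numpy.arange(18).reshape(3, 6)
    internal = numpy.zeros((3, 4), dtype=a.dtype)
    # This is exactly the copy pattern described in the docstring above.
    internal[fftw_slicer] = a[update_slicer]
    return internal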
def _compute_array_shapes(a, s, axes, inverse, real):
'''Given a passed in array ``a``, and the rest of the arguments
(that have been fleshed out with
:func:`~pyfftw.builders._utils._cook_nd_args`), compute
the shape the input and output arrays need to be in order
to satisfy all the requirements for the transform. The input
shape *may* be different to the shape of a.
returns:
``(input_shape, output_shape)``
'''
# Start with the shape of a
orig_domain_shape = list(a.shape)
fft_domain_shape = list(a.shape)
try:
for n, axis in enumerate(axes):
orig_domain_shape[axis] = s[n]
fft_domain_shape[axis] = s[n]
if real:
fft_domain_shape[axes[-1]] = s[-1]//2 + 1
except IndexError:
raise IndexError('Invalid axes: '
'At least one of the passed axes is invalid.')
if inverse:
input_shape = fft_domain_shape
output_shape = orig_domain_shape
else:
input_shape = orig_domain_shape
output_shape = fft_domain_shape
return tuple(input_shape), tuple(output_shape)
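# Illustrative sketch, not part of the original module: for a real-valued
# forward transform the last transformed axis of the output is halved to
# n//2 + 1, so the two shapes returned above differ. The helper name is
# hypothetical.
def _example_compute_array_shapes_usage():
    import numpy
    a = numpy.zeros((4, 8))
    input_shape, output_shape = _compute_array_shapes(
        a, s=(4, 8), axes=(0, 1), inverse=False, real=True)
    # input_shape == (4, 8); output_shape == (4, 5) because 8//2 + 1 == 5.
    return input_shape, output_shape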
def _precook_1d_args(a, n, axis):
'''Turn ``*(n, axis)`` into ``(s, axes)``
'''
if n is not None:
s = [int(n)]
else:
s = None
# Force an error with an invalid axis
a.shape[axis]
return s, (axis,)
def _cook_nd_args(a, s=None, axes=None, invreal=False):
'''Similar to :func:`numpy.fft.fftpack._cook_nd_args`.
'''
if axes is None:
if s is None:
len_s = len(a.shape)
else:
len_s = len(s)
axes = list(range(-len_s, 0))
if s is None:
s = list(numpy.take(a.shape, axes))
if invreal:
s[-1] = (a.shape[axes[-1]] - 1) * 2
if len(s) != len(axes):
raise ValueError('Shape error: '
'Shape and axes have different lengths.')
if len(s) > len(a.shape):
raise ValueError('Shape error: '
'The length of s or axes cannot exceed the dimensionality '
'of the input array, a.')
return tuple(s), tuple(axes)
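# Illustrative sketch, not part of the original module: how _cook_nd_args
# fills in default values and recovers the full length of the last axis for
# an inverse real transform. The helper name is hypothetical.
def _example_cook_nd_args_usage():
    import numpy
    a = numpy.zeros((4, 5), dtype='complex128')
    # No s or axes supplied: transform every axis over its full length.
    assert _cook_nd_args(a) == ((4, 5), (-2, -1))
    # Inverse real transform: the last-axis length comes from the
    # half-spectrum length, (5 - 1) * 2 == 8.
    assert _cook_nd_args(a, invreal=True) == ((4, 8), (-2, -1))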
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
import io
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes, sync_mempools
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
"-mempoolreplacement=1",
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
if True:
raise SkipTest("Litecoin doesn't support RBF.")
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 outputs of 0.001 btc (100,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='p2sh-segwit'))
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # (32-byte p2sh-p2wpkh output size + 148 p2pkh spend estimate) * 10k(discard_rate) / 1000 = 1800
# P2SH outputs are slightly "over-discarding" due to the IsDust calculation assuming it will
# be spent as a P2PKH.
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000 - 1800})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
def test_settxfee(rbf_node, dest_address):
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_maxtxfee_fails(test, rbf_node, dest_address):
test.restart_node(1, ['-maxtxfee=0.00003'] + test.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Specified or calculated fee 0.0000332 is too high (cannot be higher than maxTxFee 0.00003)", rbf_node.bumpfee, rbfid)
test.restart_node(1, test.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after it is confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time, version=0x20000000)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
|
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes snapshots api."""
from oslo_log import log as logging
from oslo_utils import strutils
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _, _LI
from cinder import utils
from cinder import volume
LOG = logging.getLogger(__name__)
def _translate_snapshot_detail_view(snapshot):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(snapshot)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot['id']
d['created_at'] = snapshot['created_at']
d['display_name'] = snapshot['display_name']
d['display_description'] = snapshot['display_description']
d['volume_id'] = snapshot['volume_id']
d['status'] = snapshot['status']
d['size'] = snapshot['volume_size']
if snapshot.get('metadata') and isinstance(snapshot.get('metadata'),
dict):
d['metadata'] = snapshot['metadata']
else:
d['metadata'] = {}
return d
class SnapshotsController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
def __init__(self, ext_mgr=None):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(SnapshotsController, self).__init__()
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
req.cache_db_snapshot(snapshot)
return {'snapshot': _translate_snapshot_detail_view(snapshot)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['cinder.context']
LOG.info(_LI("Delete snapshot with id: %s"), id)
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
return webob.Response(status_int=202)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['cinder.context']
        # pop out limit and offset, they are not search_opts
search_opts = req.GET.copy()
search_opts.pop('limit', None)
search_opts.pop('offset', None)
        # filter out invalid options
allowed_search_options = ('status', 'volume_id', 'display_name')
utils.remove_invalid_filter_options(context, search_opts,
allowed_search_options)
snapshots = self.volume_api.get_all_snapshots(context,
search_opts=search_opts)
limited_list = common.limited(snapshots.objects, req)
req.cache_db_snapshots(limited_list)
res = [entity_maker(snapshot) for snapshot in limited_list]
return {'snapshots': res}
def create(self, req, body):
"""Creates a new snapshot."""
kwargs = {}
context = req.environ['cinder.context']
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
kwargs['metadata'] = snapshot.get('metadata', None)
try:
volume_id = snapshot['volume_id']
except KeyError:
msg = _("'volume_id' must be specified")
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
msg = _LI("Create snapshot from volume %s")
LOG.info(msg, volume_id)
if not utils.is_valid_boolstr(force):
msg = _("Invalid value '%s' for force. ") % force
raise exception.InvalidParameterValue(err=msg)
if strutils.bool_from_string(force):
new_snapshot = self.volume_api.create_snapshot_force(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'),
**kwargs)
else:
new_snapshot = self.volume_api.create_snapshot(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'),
**kwargs)
req.cache_db_snapshot(new_snapshot)
retval = _translate_snapshot_detail_view(new_snapshot)
return {'snapshot': retval}
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['cinder.context']
if not body:
raise exc.HTTPUnprocessableEntity()
if 'snapshot' not in body:
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
update_dict = {}
valid_update_keys = (
'display_name',
'display_description',
)
for key in valid_update_keys:
if key in snapshot:
update_dict[key] = snapshot[key]
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.update_snapshot(context, snapshot, update_dict)
snapshot.update(update_dict)
req.cache_db_snapshot(snapshot)
return {'snapshot': _translate_snapshot_detail_view(snapshot)}
def create_resource(ext_mgr):
return wsgi.Resource(SnapshotsController(ext_mgr))
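# Illustrative sketch, not part of the original module: the minimal request
# body accepted by SnapshotsController.create(). Only 'volume_id' is required;
# the other keys are optional and every value shown here is a hypothetical
# example.
EXAMPLE_SNAPSHOT_CREATE_BODY = {
    'snapshot': {
        'volume_id': 'some-volume-uuid',
        'force': False,
        'display_name': 'daily-backup',
        'display_description': 'example snapshot body',
        'metadata': {'purpose': 'demo'},
    }
}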
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Update Dashboard
Programmer: Michael Fryar, Research Fellow, EPoD
Date created: January 5, 2017
Purpose: Establish SSH tunnel to edX Analytics API, download learner
data, and write data to Google Sheets via Sheets API.
First time: Must run get_credentials.py first
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
import subprocess # For spawning ssh tunnel
import time # For calculating run time
# Third-party imports
import httplib2 # "A comprehensive HTTP client library"
import requests # "HTTP for Humans"
from apiclient import discovery # For accessing Google Sheets API
# User-written imports
import secrets # Token for edX Analytics API authentication
# For getting OAuth2 credentials to interact with Google Sheets API
from get_credentials import get_credentials
# Start timer
START_TIME = time.time()
# Get token for edX Analytics API authentication
HKS_SECRET_TOKEN = secrets.HKS_SECRET_TOKEN
def ssh():
"""SSH tunnel to EPoDX API"""
# Change to directory containing configuration files.
home_dir = os.path.expanduser('~')
epodx_dir = os.path.join(home_dir, 'epodx')
os.chdir(epodx_dir)
    # Establish SSH tunnel in background that auto-closes.
# -f "fork into background"
# -F "use configuration file"
    # -o ExitOnForwardFailure=yes "wait until connection and port
# forwardings are set up before placing in background"
# sleep 10 "give Python script 10 seconds to start using tunnel and
# close tunnel after python script stops using it"
# Ref 1: https://www.g-loaded.eu/2006/11/24/auto-closing-ssh-tunnels/
# Ref 2: https://gist.github.com/scy/6781836
config = "-F ./ssh-config epodx-analytics-api"
option = "-o ExitOnForwardFailure=yes"
command = "ssh -f {} {} sleep 10".format(config, option)
subprocess.run(command, shell=True)
def write_to_g_sheet(course, partner, data_selection='both'):
"""Downloads learner data from EPoDx and writes to Google Sheets.
edX stores identifiable information about learners separately from
problem response data, which is identifiable by user_id only. This
function downloads learner data and problem response data via the
edX Analytics API and then writes this data to a Google Sheet via
the Sheets API.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
partner (str): Separate dashboards are required for each partner
            because the static nature of the dashboards means that a single
dashboard cannot be used by two different partners simultaneously.
Known values are:
HKS - Write to master sheet reserved for HKS trainings
LBSNAA - Write to master sheet reserved for LBSNAA trainings
NSPP1 - Write to 1st master sheet reserved for NSPP trainings
NSPP2 - Write to 2nd master sheet reserved for NSPP trainings
data_selection (str): Specifies whether to download and write only
learner profiles, only problem responses or both. Known values are:
both - Download and write both learner profiles & problem responses
problems - Only download problem responses
profiles - Only download learner profiles
"""
if course == "DTA":
course_id = "course-v1:epodx+BCURE-{}+2018_v1".format(course)
else:
course_id = "course-v1:epodx+BCURE-{}+2016_v1".format(course)
spreadsheetId = secrets.PARTNER_SHEET_KEYS["{}_{}".format(course, partner)]
if data_selection == "both":
message_to_print = ("Downloading and writing {} learner profiles and "
"problem responses.".format(course)
)
print(message_to_print)
if data_selection in ("both", "profiles"):
# Define parameters for extracting learner profile data.
learner_profile_report_url = "http://localhost:18100/api/v0/learners/"
headers = {
"Authorization": "Token {}".format(HKS_SECRET_TOKEN),
"Accept": "text/csv",
}
# The list of fields you've requested.
# Leave this parameter off to see the full list of fields.
fields = ','.join(["user_id", "username", "name", "email", "language",
"location", "year_of_birth", "gender",
"level_of_education", "mailing_address", "goals",
"enrollment_mode", "segments", "cohort", "city",
"country", "enrollment_date", "last_updated"])
params = {
"course_id": course_id,
"fields": fields,
}
# Download learner data.
with requests.Session() as s:
download = s.get(
learner_profile_report_url, headers=headers, params=params)
# Decode learner data.
decoded_content = download.content.decode('ascii', 'ignore')
# Extract data from CSV into list.
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
learner_profiles = list(cr)
# TODO: Explore deleting all but specified cohort. Be sure to plan.
elif data_selection == "problems":
message_to_print = ("Downloading and writing {} problem responses "
"only.".format(course)
)
print(message_to_print)
if data_selection in ("both", "problems"):
# Define parameters for extracting problem response data.
problem_api_url = ("http://localhost:18100/api/v0/courses/"
"{}/reports/problem_response".format(course_id))
headers = {"Authorization": "Token {}".format(HKS_SECRET_TOKEN)}
problem_data = requests.get(problem_api_url, headers=headers).json()
problem_download_url = problem_data['download_url']
# Download the CSV from download_url.
with requests.Session() as s:
download = s.get(problem_download_url)
# Decode problem response data.
decoded_content = download.content.decode('ascii', 'ignore')
# Extract data from CSV into list.
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
problem_responses = list(cr)
# TODO: Explore deleting all responses older than 31 days
elif data_selection == "profiles":
message_to_print = ("Downloading and writing {} learner profiles "
"only.".format(course)
)
print(message_to_print)
# This section builds on Google quickstart template.
# https://developers.google.com/sheets/api/quickstart/python
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
if data_selection in ("both", "profiles"):
learners_range = 'student_profile_info'
if data_selection in ("both", "problems"):
problem_range = 'problem_responses'
if data_selection == "both":
data = [
{
'range': learners_range,
'values': learner_profiles
},
{
'range': problem_range,
'values': problem_responses
}
]
elif data_selection == "profiles":
data = [
{
'range': learners_range,
'values': learner_profiles
}
]
elif data_selection == "problems":
data = [
{
'range': problem_range,
'values': problem_responses
}
]
body = {'valueInputOption': 'RAW', 'data': data}
result = service.spreadsheets().values().batchUpdate(
spreadsheetId=spreadsheetId, body=body).execute()
def tunnel_and_write_to_g_sheet(dashboard):
"""Establish SSH tunnel, download data, and write to Google Sheet"""
ssh()
course = dashboard[0]
partner = dashboard[1]
if "profiles" in dashboard:
data_selection = "profiles"
elif "problems" in dashboard:
data_selection = "problems"
else:
data_selection = "both"
write_to_g_sheet(course, partner, data_selection)
print("Upload {} to {} {} master sheet complete".format(
data_selection, course, partner))
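# Illustrative sketch, not part of the original script: dashboard specs are
# plain lists of [course, partner] with an optional third element restricting
# what is downloaded. The course and partner codes below come from the
# write_to_g_sheet docstring; the combinations themselves are hypothetical.
EXAMPLE_DASHBOARDS = [
    ["IMP", "LBSNAA"],             # both learner profiles and problem responses
    ["AGG", "HKS", "profiles"],    # learner profiles only
    ["CBA", "NSPP1", "problems"],  # problem responses only
]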
if __name__ == '__main__':
dashboards = [
["IMP", "LBSNAA"],
]
for dashboard in dashboards:
tunnel_and_write_to_g_sheet(dashboard)
TOTAL_TIME = round((time.time() - START_TIME), 2)
print("Total run time: {} seconds".format(TOTAL_TIME))
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Script which checks Java API compatibility between two revisions of the
# Java client.
#
# Originally sourced from Apache Kudu, which was based on the
# compatibility checker from the Apache HBase project, but ported to
# Python for better readability.
# The script can be invoked as follows:
# $ ./checkcompatibility.py ${SOURCE_GIT_REVISION} ${GIT_BRANCH_OR_TAG}
# or with some options:
# $ ./dev-support/checkcompatibility.py \
# --annotation org.apache.yetus.audience.InterfaceAudience.Public \
# --annotation org.apache.yetus.audience.InterfaceAudience.LimitedPrivate \
# --include-file "hbase-*" \
# --known_problems_path ~/known_problems.json \
# rel/1.0.0 branch-1.2
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import urllib2
from collections import namedtuple
try:
import argparse
except ImportError:
logging.error(
"Please install argparse, e.g. via `pip install argparse`.")
sys.exit(2)
# Various relative paths
REPO_DIR = os.getcwd()
def check_output(*popenargs, **kwargs):
""" Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2 """
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def get_repo_dir():
""" Return the path to the top of the repo. """
dirname, _ = os.path.split(os.path.abspath(__file__))
dirname = os.path.dirname(dirname)
logging.debug("Repo dir is %s", dirname)
return dirname
def get_scratch_dir():
""" Return the path to the scratch dir that we build within. """
scratch_dir = os.path.join(get_repo_dir(), "target", "compat-check")
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
return scratch_dir
def get_java_acc_dir():
""" Return the path where we check out the Java API Compliance Checker. """
return os.path.join(get_repo_dir(), "target", "java-acc")
def clean_scratch_dir(scratch_dir):
""" Clean up and re-create the scratch directory. """
if os.path.exists(scratch_dir):
logging.info("Removing scratch dir %s ", scratch_dir)
shutil.rmtree(scratch_dir)
logging.info("Creating empty scratch dir %s ", scratch_dir)
os.makedirs(scratch_dir)
def checkout_java_tree(rev, path):
""" Check out the Java source tree for the given revision into
the given path. """
logging.info("Checking out %s in %s", rev, path)
os.makedirs(path)
# Extract java source
subprocess.check_call(["bash", '-o', 'pipefail', "-c",
("git archive --format=tar %s | "
"tar -C '%s' -xf -") % (rev, path)],
cwd=get_repo_dir())
def get_git_hash(revname):
""" Convert 'revname' to its SHA-1 hash. """
try:
return check_output(["git", "rev-parse", revname],
cwd=get_repo_dir()).strip()
except:
revname = "origin/" + revname
return check_output(["git", "rev-parse", revname],
cwd=get_repo_dir()).strip()
def get_repo_name(remote_name="origin"):
""" Get the name of the repo based on the git remote."""
remote = check_output(["git", "config", "--get", "remote.{0}.url".format(remote_name)],
cwd=get_repo_dir()).strip()
remote = remote.split("/")[-1]
return remote[:-4] if remote.endswith(".git") else remote
def build_tree(java_path, verbose):
""" Run the Java build within 'path'. """
logging.info("Building in %s ", java_path)
mvn_cmd = ["mvn", "--batch-mode", "-DskipTests",
"-Dmaven.javadoc.skip=true", "package"]
if not verbose:
mvn_cmd.insert(-1, "--quiet")
subprocess.check_call(mvn_cmd, cwd=java_path)
def checkout_java_acc(force):
""" Check out the Java API Compliance Checker. If 'force' is true, will
re-download even if the directory exists. """
acc_dir = get_java_acc_dir()
if os.path.exists(acc_dir):
logging.info("Java ACC is already downloaded.")
if not force:
return
logging.info("Forcing re-download.")
shutil.rmtree(acc_dir)
logging.info("Downloading Java ACC...")
url = "https://github.com/lvc/japi-compliance-checker/archive/2.4.tar.gz"
scratch_dir = get_scratch_dir()
path = os.path.join(scratch_dir, os.path.basename(url))
jacc = urllib2.urlopen(url)
with open(path, 'wb') as w:
w.write(jacc.read())
subprocess.check_call(["tar", "xzf", path],
cwd=scratch_dir)
shutil.move(os.path.join(scratch_dir, "japi-compliance-checker-2.4"),
os.path.join(acc_dir))
def find_jars(path):
""" Return a list of jars within 'path' to be checked for compatibility. """
all_jars = set(check_output(["find", path, "-name", "*.jar"]).splitlines())
return [j for j in all_jars if (
"-tests" not in j and
"-sources" not in j and
"-with-dependencies" not in j)]
def write_xml_file(path, version, jars):
""" Write the XML manifest file for JACC. """
with open(path, "wt") as f:
f.write("<version>%s</version>\n" % version)
f.write("<archives>")
for j in jars:
f.write("%s\n" % j)
f.write("</archives>")
def ascii_encode_dict(data):
""" Iterate through a dictionary of data and convert all unicode to ascii.
This method was taken from
stackoverflow.com/questions/9590382/forcing-python-json-module-to-work-with-ascii """
ascii_encode = lambda x: x.encode('ascii') if isinstance(x, unicode) else x
return dict(map(ascii_encode, pair) for pair in data.items())
def process_json(path):
""" Process the known problems json file. The program raises an uncaught exception
if it can't find the file or if the json is invalid """
path = os.path.abspath(os.path.expanduser(path))
try:
with open(path) as f:
return json.load(f, object_hook=ascii_encode_dict)
except ValueError as e:
logging.error("File: %s\nInvalid JSON:\n%s", str(path), str(e))
raise
except IOError as io:
logging.error("Provided json file path does not exist %s", str(path))
raise
def compare_results(tool_results, known_issues, compare_warnings):
""" Compare the number of problems found with the allowed number. If
compare_warnings is true then also compare the number of warnings found.
tool_results = results from the JACC tool - a dictionary
known_issues = dictionary of expected issue count
compare_warnings = boolean - if true also compare warnings as well as problems """
logging.info("Results: %s", str(tool_results))
unexpected_issue = namedtuple('unexpected_issue', ['check', 'issue_type',
'known_count', 'observed_count'])
unexpected_issues = [unexpected_issue(check=check, issue_type=issue_type,
known_count=known_count,
observed_count=tool_results[check][issue_type])
for check, known_issue_counts in known_issues.items()
for issue_type, known_count in known_issue_counts.items()
if tool_results[check][issue_type] > known_count]
if not compare_warnings:
unexpected_issues = [tup for tup in unexpected_issues
if tup.issue_type != 'warnings']
for issue in unexpected_issues:
logging.error('Found %s during %s check (known issues: %d, observed issues: %d)',
issue.issue_type, issue.check, issue.known_count, issue.observed_count)
return bool(unexpected_issues)
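# Illustrative sketch, not part of the original script: compare_results flags
# only counts that exceed the known baseline, and ignores warning counts
# unless compare_warnings is set. The helper name is hypothetical.
def _example_compare_results():
    tool_results = {"binary": {"problems": 2, "warnings": 5},
                    "source": {"problems": 0, "warnings": 0}}
    known_issues = {"binary": {"problems": 0, "warnings": 0},
                    "source": {"problems": 0, "warnings": 0}}
    # Returns True (unexpected issues found): 2 binary problems observed
    # against 0 known; the 5 warnings are ignored here.
    return compare_results(tool_results, known_issues, compare_warnings=False)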
def process_java_acc_output(output):
""" Process the output string to find the problems and warnings in both the
binary and source compatibility. This is done in a way that is admittedly
brittle; we are open to better implementations.
We expect a line containing the relevant information to look something like:
"total binary compatibility problems: 123, warnings: 16" """
return_value = {}
output = output.split("\n")
for line in output:
# Line has relevant info
if line.lower().startswith("total"):
values = {}
# Remove "total" keyword
line = line[6:]
            # Separate the two parts: problems and warnings
line_list = line.split(",")
for segment in line_list:
part = segment.split(":")
# Extract key and value
values[part[0][-8:]] = int(part[1])
return_value[line[:6]] = values
return return_value
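# Illustrative sketch, not part of the original script: the parser above turns
# the "total ..." summary lines of the Java ACC output into nested counts.
# The helper name and sample text are hypothetical.
def _example_process_java_acc_output():
    sample = ("total binary compatibility problems: 123, warnings: 16\n"
              "total source compatibility problems: 167, warnings: 1\n")
    # Yields {'binary': {'problems': 123, 'warnings': 16},
    #         'source': {'problems': 167, 'warnings': 1}}.
    return process_java_acc_output(sample)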
def log_java_acc_version():
java_acc_path = os.path.join(
get_java_acc_dir(), "japi-compliance-checker.pl")
args = ["perl", java_acc_path, "-dumpversion"]
logging.info("Java ACC version: " + check_output(args))
def run_java_acc(src_name, src_jars, dst_name, dst_jars, annotations, skip_annotations, name):
""" Run the compliance checker to compare 'src' and 'dst'. """
logging.info("Will check compatibility between original jars:\n\t%s\n"
"and new jars:\n\t%s",
"\n\t".join(src_jars),
"\n\t".join(dst_jars))
java_acc_path = os.path.join(
get_java_acc_dir(), "japi-compliance-checker.pl")
src_xml_path = os.path.join(get_scratch_dir(), "src.xml")
dst_xml_path = os.path.join(get_scratch_dir(), "dst.xml")
write_xml_file(src_xml_path, src_name, src_jars)
write_xml_file(dst_xml_path, dst_name, dst_jars)
out_path = os.path.join(get_scratch_dir(), "report.html")
args = ["perl", java_acc_path,
"-l", name,
"-d1", src_xml_path,
"-d2", dst_xml_path,
"-report-path", out_path]
if annotations is not None:
logging.info("Annotations are: %s", annotations)
annotations_path = os.path.join(get_scratch_dir(), "annotations.txt")
logging.info("Annotations path: %s", annotations_path)
with file(annotations_path, "w") as f:
f.write('\n'.join(annotations))
args.extend(["-annotations-list", annotations_path])
if skip_annotations is not None:
skip_annotations_path = os.path.join(
get_scratch_dir(), "skip_annotations.txt")
with file(skip_annotations_path, "w") as f:
f.write('\n'.join(skip_annotations))
args.extend(["-skip-annotations-list", skip_annotations_path])
try:
output = check_output(args)
except subprocess.CalledProcessError as e:
# The program returns a nonzero error code if issues are found. We
# almost always expect some issues and want to process the results.
output = e.output
acc_processed = process_java_acc_output(output)
return acc_processed
def get_known_problems(json_path, src_rev, dst_rev):
""" The json file should be in the following format: a dictionary with the
keys in the format source_branch/destination_branch and the values
dictionaries with binary and source problems and warnings
Example:
{'branch-1.0.0': {
'rel/1.0.0': {'binary': {'problems': 123, 'warnings': 16},
'source': {'problems': 167, 'warnings': 1}},
'branch-1.2.0': {'binary': {'problems': 0, 'warnings': 0},
'source': {'problems': 0, 'warnings': 0}}
},
'branch-1.2.0': {
'rel/1.2.1': {'binary': {'problems': 13, 'warnings': 1},
'source': {'problems': 23, 'warnings': 0}}
}
} """
# These are the default values for allowed problems and warnings
known_problems = {"binary": {"problems": 0, "warnings": 0},
"source": {"problems": 0, "warnings": 0}}
if src_rev.startswith("origin/"):
src_rev = src_rev[7:]
if dst_rev.startswith("origin/"):
dst_rev = dst_rev[7:]
if json_path is not None:
known_problems = process_json(json_path)
try:
return known_problems[src_rev][dst_rev]
except KeyError:
logging.error(("Known Problems values for %s %s are not in "
"provided json file. If you are trying to run "
"the test with the default values, don't "
"provide the --known_problems_path argument")
% (src_rev, dst_rev))
raise
return known_problems
def filter_jars(jars, include_filters, exclude_filters):
""" Filter the list of JARs based on include and exclude filters. """
filtered = []
# Apply include filters
for j in jars:
basename = os.path.basename(j)
for f in include_filters:
if f.match(basename):
filtered += [j]
break
else:
logging.debug("Ignoring JAR %s", j)
# Apply exclude filters
exclude_filtered = []
for j in filtered:
basename = os.path.basename(j)
for f in exclude_filters:
if f.match(basename):
logging.debug("Ignoring JAR %s", j)
break
else:
exclude_filtered += [j]
return exclude_filtered
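# Illustrative sketch, not part of the original script: include filters are
# applied first, then exclude filters, and both match against the JAR
# basename only. The helper name and file names are hypothetical.
def _example_filter_jars():
    jars = ["/tmp/src/hbase-client-1.2.0.jar",
            "/tmp/src/hbase-client-1.2.0-sources.jar",
            "/tmp/src/unrelated-lib-1.0.jar"]
    include = [re.compile("hbase-.*")]
    exclude = [re.compile(".*-sources.*")]
    # Keeps only /tmp/src/hbase-client-1.2.0.jar.
    return filter_jars(jars, include, exclude)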
def main():
""" Main function. """
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(
description="Run Java API Compliance Checker.")
parser.add_argument("-f", "--force-download",
action="store_true",
help="Download dependencies (i.e. Java JAVA_ACC) "
"even if they are already present")
parser.add_argument("-i", "--include-file",
action="append",
dest="include_files",
help="Regex filter for JAR files to be included. "
"Applied before the exclude filters. "
"Can be specified multiple times.")
parser.add_argument("-e", "--exclude-file",
action="append",
dest="exclude_files",
help="Regex filter for JAR files to be excluded. "
"Applied after the include filters. "
"Can be specified multiple times.")
parser.add_argument("-a", "--annotation",
action="append",
dest="annotations",
help="Fully-qualified Java annotation. "
"Java ACC will only check compatibility of "
"annotated classes. Can be specified multiple times.")
parser.add_argument("--skip-annotation",
action="append",
dest="skip_annotations",
help="Fully-qualified Java annotation. "
"Java ACC will not check compatibility of "
"these annotated classes. Can be specified multiple "
"times.")
parser.add_argument("-p", "--known_problems_path",
default=None, dest="known_problems_path",
help="Path to file with json 'known_problems "
"dictionary.' Path can be relative or absolute. An "
"examples file can be seen in the pydocs for the "
"get_known_problems method.")
parser.add_argument("--skip-clean",
action="store_true",
help="Skip cleaning the scratch directory.")
parser.add_argument("--compare-warnings", dest="compare_warnings",
action="store_true", default=False,
help="Compare warnings as well as problems.")
parser.add_argument("--skip-build",
action="store_true",
help="Skip building the projects.")
parser.add_argument("--verbose",
action="store_true",
help="more output")
parser.add_argument("-r", "--remote", default="origin", dest="remote_name",
help="Name of remote to use. e.g. its repo name will be used as the name "
"we pass to Java ACC for the library.")
parser.add_argument("src_rev", nargs=1, help="Source revision.")
parser.add_argument("dst_rev", nargs="?", default="HEAD",
help="Destination revision. "
"If not specified, will use HEAD.")
args = parser.parse_args()
src_rev, dst_rev = args.src_rev[0], args.dst_rev
logging.info("Source revision: %s", src_rev)
logging.info("Destination revision: %s", dst_rev)
# Configure the expected numbers
known_problems = get_known_problems(
args.known_problems_path, src_rev, dst_rev)
# Construct the JAR regex patterns for filtering.
include_filters = []
if args.include_files is not None:
for f in args.include_files:
logging.info("Applying JAR filename include filter: %s", f)
include_filters += [re.compile(f)]
else:
include_filters = [re.compile(".*")]
exclude_filters = []
if args.exclude_files is not None:
for f in args.exclude_files:
logging.info("Applying JAR filename exclude filter: %s", f)
exclude_filters += [re.compile(f)]
# Construct the annotation list
if args.annotations is not None:
logging.info("Filtering classes using %d annotation(s):",
len(args.annotations))
for a in args.annotations:
logging.info("\t%s", a)
skip_annotations = args.skip_annotations
if skip_annotations is not None:
logging.info("Skipping classes with %d annotation(s):",
len(skip_annotations))
for a in skip_annotations:
logging.info("\t%s", a)
# Download deps.
checkout_java_acc(args.force_download)
log_java_acc_version()
# Set up the build.
scratch_dir = get_scratch_dir()
src_dir = os.path.join(scratch_dir, "src")
dst_dir = os.path.join(scratch_dir, "dst")
if args.skip_clean:
logging.info("Skipping cleaning the scratch directory")
else:
clean_scratch_dir(scratch_dir)
# Check out the src and dst source trees.
checkout_java_tree(get_git_hash(src_rev), src_dir)
checkout_java_tree(get_git_hash(dst_rev), dst_dir)
# Run the build in each.
if args.skip_build:
logging.info("Skipping the build")
else:
build_tree(src_dir, args.verbose)
build_tree(dst_dir, args.verbose)
# Find the JARs.
src_jars = find_jars(src_dir)
dst_jars = find_jars(dst_dir)
# Filter the JARs.
src_jars = filter_jars(src_jars, include_filters, exclude_filters)
dst_jars = filter_jars(dst_jars, include_filters, exclude_filters)
if not src_jars or not dst_jars:
logging.error("No JARs found! Are your filters too strong?")
sys.exit(1)
output = run_java_acc(src_rev, src_jars, dst_rev,
dst_jars, args.annotations, skip_annotations,
get_repo_name(args.remote_name))
sys.exit(compare_results(output, known_problems,
args.compare_warnings))
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dmpayton
# Copyright (c) 2014 dequis
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import math
from .base import Layout
class Matrix(Layout):
"""
This layout divides the screen into a matrix of equally sized cells and
places one window in each cell. The number of columns is configurable and
can also be changed interactively.
"""
defaults = [
("border_focus", "#0000ff", "Border colour for the focused window."),
("border_normal", "#000000", "Border colour for un-focused windows."),
("border_width", 1, "Border width."),
("name", "matrix", "Name of this layout."),
("margin", 0, "Margin of the layout"),
]
def __init__(self, columns=2, **config):
Layout.__init__(self, **config)
self.add_defaults(Matrix.defaults)
self.current_window = None
self.columns = columns
self.clients = []
def info(self):
d = Layout.info(self)
d["rows"] = [
[win.name for win in self.get_row(i)]
for i in range(self.get_num_rows())
]
d["current_window"] = self.current_window
d["clients"] = [x.name for x in self.clients]
return d
def clone(self, group):
c = Layout.clone(self, group)
c.clients = []
return c
def get_current_window(self):
c, r = self.current_window
return self.clients[r * self.columns + c]
def get_num_rows(self):
return int(math.ceil(len(self.clients) / self.columns))
def get_row(self, row):
assert row < self.get_num_rows()
return self.clients[
row * self.columns: row * self.columns + self.columns
]
def get_column(self, column):
assert column < self.columns
return [
self.clients[i]
for i in range(column, len(self.clients), self.columns)
]
def add(self, client):
self.clients.append(client)
def remove(self, client):
if client not in self.clients:
return
self.clients.remove(client)
def focus(self, client):
if client not in self.clients:
return
idx = self.clients.index(client)
self.current_window = (idx % self.columns, idx // self.columns)
def focus_first(self):
if self.clients:
return self.clients[0]
def focus_last(self):
if self.clients:
return self.clients[-1]
def focus_next(self, window):
if not self.clients:
return
idx = self.clients.index(window)
if idx + 1 < len(self.clients):
return self.clients[idx + 1]
def focus_previous(self, window):
if not self.clients:
return
idx = self.clients.index(window)
if idx > 0:
return self.clients[idx - 1]
def configure(self, client, screen):
if client not in self.clients:
return
idx = self.clients.index(client)
column = idx % self.columns
row = idx // self.columns
column_size = int(math.ceil(len(self.clients) / self.columns))
if client.has_focus:
px = self.group.qtile.colorPixel(self.border_focus)
else:
px = self.group.qtile.colorPixel(self.border_normal)
column_width = int(screen.width / float(self.columns))
row_height = int(screen.height / float(column_size))
xoffset = screen.x + column * column_width
yoffset = screen.y + row * row_height
win_width = column_width - 2 * self.border_width
win_height = row_height - 2 * self.border_width
client.place(
xoffset,
yoffset,
win_width,
win_height,
self.border_width,
px,
margin=self.margin,
)
client.unhide()
def cmd_next(self):
client = self.focus_next(self.get_current_window()) or \
self.focus_first()
self.group.focus(client)
def cmd_previous(self):
client = self.focus_previous(self.get_current_window()) or \
self.focus_last()
self.group.focus(client)
    def cmd_left(self):
        """Switch to the previous window on current row"""
column, row = self.current_window
self.current_window = ((column - 1) % len(self.get_row(row)), row)
self.group.focus(self.get_current_window())
def cmd_right(self):
"""Switch to the next window on current row"""
column, row = self.current_window
self.current_window = ((column + 1) % len(self.get_row(row)), row)
self.group.focus(self.get_current_window())
def cmd_down(self):
"""Switch to the next window in current column"""
column, row = self.current_window
self.current_window = (
column,
(row + 1) % len(self.get_column(column))
)
self.group.focus(self.get_current_window())
def cmd_up(self):
"""Switch to the previous window in current column"""
column, row = self.current_window
self.current_window = (
column,
(row - 1) % len(self.get_column(column))
)
self.group.focus(self.get_current_window())
def cmd_delete(self):
"""Decrease number of columns"""
self.columns -= 1
self.group.layoutAll()
def cmd_add(self):
"""Increase number of columns"""
self.columns += 1
self.group.layoutAll()
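# Illustrative sketch, not part of the original module: reproduces the cell
# arithmetic used by focus() and configure() above. With the default two
# columns, clients fill the matrix row by row. The helper name is hypothetical.
def _example_matrix_cell_mapping(columns=2, n_clients=5):
    # The n-th client occupies column n % columns and row n // columns,
    # e.g. for columns=2: 0 -> (0, 0), 1 -> (1, 0), 2 -> (0, 1), ...
    return [(i % columns, i // columns) for i in range(n_clients)]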
|
|
from functools import partial
from django.db import connections, models, router
from django.db.models.deletion import Collector
import bleach
import olympia.core.logger
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.urlresolvers import linkify_bounce_url_callback
from . import utils
log = olympia.core.logger.getLogger('z.translations')
class TranslationManager(ManagerBase):
def remove_for(self, obj, locale):
"""Remove a locale for the given object."""
ids = [getattr(obj, f.attname) for f in obj._meta.translated_fields]
qs = Translation.objects.filter(id__in=filter(None, ids),
locale=locale)
qs.update(localized_string=None, localized_string_clean=None)
class Translation(ModelBase):
"""
Translation model.
Use :class:`translations.fields.TranslatedField` instead of a plain foreign
key to this model.
"""
autoid = PositiveAutoField(primary_key=True)
id = models.PositiveIntegerField()
locale = models.CharField(max_length=10)
localized_string = models.TextField(null=True)
localized_string_clean = models.TextField(null=True)
objects = TranslationManager()
class Meta:
db_table = 'translations'
unique_together = ('id', 'locale')
def __unicode__(self):
return self.localized_string and unicode(self.localized_string) or ''
def __nonzero__(self):
# __nonzero__ is called to evaluate an object in a boolean context. We
# want Translations to be falsy if their string is empty.
return (bool(self.localized_string) and
bool(self.localized_string.strip()))
def __eq__(self, other):
# Django implements an __eq__ that only checks pks. We need to check
# the strings if we're dealing with existing vs. unsaved Translations.
return self.__cmp__(other) == 0
def __cmp__(self, other):
if hasattr(other, 'localized_string'):
return cmp(self.localized_string, other.localized_string)
else:
return cmp(self.localized_string, other)
def clean(self):
if self.localized_string:
self.localized_string = self.localized_string.strip()
def save(self, **kwargs):
self.clean()
return super(Translation, self).save(**kwargs)
def delete(self, using=None):
# FIXME: if the Translation is the one used as default/fallback,
# then deleting it will mean the corresponding field on the related
# model will stay empty even if there are translations in other
# languages!
cls = self.__class__
using = using or router.db_for_write(cls, instance=self)
# Look for all translations for the same string (id=self.id) except the
# current one (autoid=self.autoid).
qs = cls.objects.filter(id=self.id).exclude(autoid=self.autoid)
if qs.using(using).exists():
# If other Translations for the same id exist, we just need to
# delete this one and *only* this one, without letting Django
# collect dependencies (it'd remove the others, which we want to
# keep).
assert self._get_pk_val() is not None
collector = Collector(using=using)
collector.collect([self], collect_related=False)
# In addition, because we have FK pointing to a non-unique column,
# we need to force MySQL to ignore constraints because it's dumb
# and would otherwise complain even if there are remaining rows
            # that match the FK.
with connections[using].constraint_checks_disabled():
collector.delete()
else:
# If no other Translations with that id exist, then we should let
# django behave normally. It should find the related model and set
# the FKs to NULL.
return super(Translation, self).delete(using=using)
delete.alters_data = True
@classmethod
def new(cls, string, locale, id=None):
"""
Jumps through all the right hoops to create a new translation.
If ``id`` is not given a new id will be created using
``translations_seq``. Otherwise, the id will be used to add strings to
an existing translation.
To increment IDs we use a setting on MySQL. This is to support multiple
database masters -- it's just crazy enough to work! See bug 756242.
"""
if id is None:
# Get a sequence key for the new translation.
with connections['default'].cursor() as cursor:
cursor.execute("""
UPDATE translations_seq
SET id=LAST_INSERT_ID(
id + @@global.auto_increment_increment
)
""")
# The sequence table should never be empty. But alas, if it is,
# let's fix it.
if not cursor.rowcount > 0:
cursor.execute("""
INSERT INTO translations_seq (id)
VALUES(LAST_INSERT_ID(
id + @@global.auto_increment_increment
))
""")
cursor.execute('SELECT LAST_INSERT_ID()')
id = cursor.fetchone()[0]
# Update if one exists, otherwise create a new one.
q = {'id': id, 'locale': locale}
try:
trans = cls.objects.get(**q)
trans.localized_string = string
except cls.DoesNotExist:
trans = cls(localized_string=string, **q)
return trans
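# Illustrative sketch (not part of the original module): typical use of
# Translation.new(). Assumes a configured 'default' database containing the
# translations_seq table described above; the strings and locales are
# examples only.
def _example_new_translation():
    trans = Translation.new(u'Hello world', 'en-US')
    trans.save()
    # Re-using the returned id adds another locale to the same string.
    trans_fr = Translation.new(u'Bonjour le monde', 'fr', id=trans.id)
    trans_fr.save()
    return trans, trans_fr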
class PurifiedTranslation(Translation):
"""Run the string through bleach to get a safe version."""
allowed_tags = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
allowed_attributes = {
'a': ['href', 'title', 'rel'],
'abbr': ['title'],
'acronym': ['title'],
}
class Meta:
proxy = True
def __unicode__(self):
if not self.localized_string_clean:
self.clean()
return unicode(self.localized_string_clean)
def __html__(self):
return unicode(self)
def __truncate__(self, length, killwords, end):
return utils.truncate(unicode(self), length, killwords, end)
def clean(self):
from olympia.amo.utils import clean_nl
super(PurifiedTranslation, self).clean()
cleaned = self.clean_localized_string()
self.localized_string_clean = clean_nl(cleaned).strip()
def clean_localized_string(self):
# All links (text and markup) are normalized.
linkify_filter = partial(
bleach.linkifier.LinkifyFilter,
callbacks=[linkify_bounce_url_callback, bleach.callbacks.nofollow])
# Keep only the allowed tags and attributes, escape the rest.
cleaner = bleach.Cleaner(
tags=self.allowed_tags, attributes=self.allowed_attributes,
filters=[linkify_filter])
return cleaner.clean(unicode(self.localized_string))
class LinkifiedTranslation(PurifiedTranslation):
"""Run the string through bleach to get a linkified version."""
allowed_tags = ['a']
class Meta:
proxy = True
class NoLinksNoMarkupTranslation(LinkifiedTranslation):
"""Run the string through bleach, escape markup and strip all the links."""
class Meta:
proxy = True
def clean_localized_string(self):
# First pass: bleach everything, but leave links untouched.
cleaned = super(LinkifiedTranslation, self).clean_localized_string()
# Second pass: call linkify to empty the inner text of all links.
emptied_links = bleach.linkify(
cleaned, callbacks=[lambda attrs, new: {'_text': ''}])
# Third pass: now strip links (only links will be stripped, other
        # forbidden tags are already bleached/escaped).
allowed_tags = self.allowed_tags[:] # Make a copy.
allowed_tags.remove('a')
return bleach.clean(emptied_links, tags=allowed_tags, strip=True)
class TranslationSequence(models.Model):
"""
The translations_seq table, so migrations will create it during testing.
"""
id = models.IntegerField(primary_key=True)
class Meta:
db_table = 'translations_seq'
def delete_translation(obj, fieldname):
field = obj._meta.get_field(fieldname)
trans_id = getattr(obj, field.attname)
obj.update(**{field.name: None})
if trans_id:
Translation.objects.filter(id=trans_id).delete()
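# Illustrative usage (hypothetical `addon` instance whose model declares a
# translated `description` field): delete_translation(addon, 'description')
# clears the FK on the object and deletes every locale's row for that id.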
|
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NFS driver module."""
import ddt
import errno
import os
import mock
from oslo_utils import units
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
from cinder.volume.drivers import remotefs
class DumbVolume(object):
# TODO(eharney): replace this with an autospecced mock class
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class RemoteFsDriverTestCase(test.TestCase):
TEST_FILE_NAME = 'test.txt'
TEST_EXPORT = 'nas-host1:/export'
TEST_MNT_POINT = '/mnt/nas'
def setUp(self):
super(RemoteFsDriverTestCase, self).setUp()
self._driver = remotefs.RemoteFSDriver()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.append_config_values(mock.ANY)
self.configuration.nas_secure_file_permissions = 'false'
self.configuration.nas_secure_file_operations = 'false'
self.configuration.max_over_subscription_ratio = 1.0
self.configuration.reserved_percentage = 5
self._driver = remotefs.RemoteFSDriver(
configuration=self.configuration)
mock_exc = mock.patch.object(self._driver, '_execute')
self._execute = mock_exc.start()
self.addCleanup(mock_exc.stop)
def test_create_sparsed_file(self):
self._driver._create_sparsed_file('/path', 1)
self._execute.assert_called_once_with('truncate', '-s', '1G',
'/path', run_as_root=True)
def test_create_regular_file(self):
self._driver._create_regular_file('/path', 1)
self._execute.assert_called_once_with('dd', 'if=/dev/zero',
'of=/path', 'bs=1M',
'count=1024', run_as_root=True)
def test_create_qcow2_file(self):
file_size = 1
self._driver._create_qcow2_file('/path', file_size)
self._execute.assert_called_once_with('qemu-img', 'create', '-f',
'qcow2', '-o',
'preallocation=metadata',
'/path', '%s' %
str(file_size * units.Gi),
run_as_root=True)
def test_set_rw_permissions_for_all(self):
self._driver._set_rw_permissions_for_all('/path')
self._execute.assert_called_once_with('chmod', 'ugo+rw', '/path',
run_as_root=True)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_with_secure_file_permissions(self, LOG):
self._driver._mounted_shares = [self.TEST_EXPORT]
self.configuration.nas_secure_file_permissions = 'true'
self._driver._set_rw_permissions(self.TEST_FILE_NAME)
self.assertFalse(LOG.warning.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
self.configuration.nas_secure_file_permissions = 'false'
self._driver._set_rw_permissions(self.TEST_FILE_NAME)
self.assertTrue(LOG.warning.called)
warn_msg = "%(path)s is being set with open permissions: %(perm)s"
LOG.warning.assert_called_once_with(
warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
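    # Decision table exercised by the _determine_nas_security_option_setting
    # tests below (illustrative summary, derived from their assertions):
    #   config setting  marker file        new install  ->  result
    #   'auto'          (will be created)  True         ->  'true'
    #   'auto'          exists             False        ->  'true'
    #   'auto'          missing            False        ->  'false'
    #   'true'          (any)              (any)        ->  'true'
    #   'false'         (any)              (any)        ->  'false'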
@mock.patch('os.path.join')
@mock.patch('os.path.isfile', return_value=False)
def test_determine_nas_security_options_when_auto_and_new_install(
self,
mock_isfile,
mock_join):
"""Test the setting of the NAS Security Option
        In this test case, the marker file will be created. No pre-existing
        Cinder volumes are found during bootup.
"""
self._driver._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = True
self._driver._ensure_shares_mounted = mock.Mock()
nas_mount = self._driver._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
mock_join.return_value = file_path
secure_file_permissions = 'auto'
nas_option = self._driver._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'auto'
nas_option = self._driver._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_new_install_exists(
self,
isfile,
join):
"""Test the setting of the NAS Security Option
        In this test case, the marker file already exists and pre-existing
        Cinder volumes are found during bootup.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
join.return_value = file_path
isfile.return_value = True
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_old_install(self,
isfile,
join):
"""Test the setting of the NAS Security Option
In this test case, the marker file does not exist. There are also
pre-existing Cinder volumes.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
join.return_value = file_path
isfile.return_value = False
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
def test_determine_nas_security_options_when_admin_set_true(self):
"""Test the setting of the NAS Security Option
In this test case, the Admin set the flag to 'true'.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
secure_file_permissions = 'true'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'true'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
def test_determine_nas_security_options_when_admin_set_false(self):
"""Test the setting of the NAS Security Option
In this test case, the Admin set the flag to 'false'.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
secure_file_permissions = 'false'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
secure_file_operations = 'false'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
@mock.patch.object(remotefs, 'LOG')
def test_set_nas_security_options(self, LOG):
"""Test setting of NAS Security options.
The RemoteFS driver will force set options to false. The derived
objects will provide an inherited interface to properly set options.
"""
drv = self._driver
is_new_install = False
drv.set_nas_security_options(is_new_install)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warning.called)
def test_secure_file_operations_enabled_true(self):
"""Test nas_secure_file_operations = 'true'
Networked file system based drivers may support secure file
operations. This test verifies the settings when secure.
"""
drv = self._driver
self.configuration.nas_secure_file_operations = 'true'
ret_flag = drv.secure_file_operations_enabled()
self.assertTrue(ret_flag)
def test_secure_file_operations_enabled_false(self):
"""Test nas_secure_file_operations = 'false'
Networked file system based drivers may support secure file
operations. This test verifies the settings when not secure.
"""
drv = self._driver
self.configuration.nas_secure_file_operations = 'false'
ret_flag = drv.secure_file_operations_enabled()
self.assertFalse(ret_flag)
@ddt.ddt
class NfsDriverTestCase(test.TestCase):
"""Test case for NFS driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_NFS_EXPORT2_OPTIONS = '-o intr'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt'
TEST_MNT_POINT_BASE = '/mnt/test'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this'
TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo'
def setUp(self):
super(NfsDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.append_config_values(mock.ANY)
self.configuration.max_over_subscription_ratio = 1.0
self.configuration.reserved_percentage = 5
self.configuration.nfs_shares_config = None
self.configuration.nfs_sparsed_volumes = True
self.configuration.nfs_reserved_percentage = 5.0
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.configuration.nfs_mount_options = None
self.configuration.nfs_mount_attempts = 3
self.configuration.nfs_qcow2_volumes = False
self.configuration.nas_secure_file_permissions = 'false'
self.configuration.nas_secure_file_operations = 'false'
self.configuration.nas_ip = None
self.configuration.nas_share_path = None
self.configuration.nas_mount_options = None
self.configuration.volume_dd_blocksize = '1M'
self._driver = nfs.NfsDriver(configuration=self.configuration)
self._driver.shares = {}
mock_exc = mock.patch.object(self._driver, '_execute')
self._execute = mock_exc.start()
self.addCleanup(mock_exc.stop)
def test_local_path(self):
"""local_path common use case."""
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv = self._driver
volume = DumbVolume()
volume['provider_location'] = self.TEST_NFS_EXPORT1
volume['name'] = 'volume-123'
self.assertEqual(
'/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123',
drv.local_path(volume))
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch.object(image_utils, 'resize_image')
@mock.patch.object(image_utils, 'fetch_to_raw')
def test_copy_image_to_volume(self, mock_fetch, mock_resize, mock_qemu):
"""resize_image common case usage."""
drv = self._driver
TEST_IMG_SOURCE = 'foo.img'
volume = {'size': self.TEST_SIZE_IN_GB, 'name': TEST_IMG_SOURCE}
with mock.patch.object(drv, 'local_path',
return_value=TEST_IMG_SOURCE):
data = mock.Mock()
data.virtual_size = 1 * units.Gi
mock_qemu.return_value = data
drv.copy_image_to_volume(None, volume, None, None)
mock_fetch.assert_called_once_with(
None, None, None, TEST_IMG_SOURCE, mock.ANY, run_as_root=True,
size=self.TEST_SIZE_IN_GB)
mock_resize.assert_called_once_with(TEST_IMG_SOURCE,
self.TEST_SIZE_IN_GB,
run_as_root=True)
def test_get_mount_point_for_share(self):
"""_get_mount_point_for_share should calculate correct value."""
drv = self._driver
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_mount_point_for_share_given_extra_slash_in_state_path(self):
"""_get_mount_point_for_share should calculate correct value."""
# This test gets called with the extra slash
self.configuration.nfs_mount_point_base = (
self.TEST_MNT_POINT_BASE_EXTRA_SLASH)
# The driver gets called with the correct configuration and removes
# the extra slash
drv = nfs.NfsDriver(configuration=self.configuration)
self.assertEqual('/opt/stack/data/cinder/mnt', drv.base)
self.assertEqual(
'/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4',
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_capacity_info(self):
"""_get_capacity_info should calculate correct value."""
drv = self._driver
stat_total_size = 2620544
stat_avail = 2129984
stat_output = '1 %d %d' % (stat_total_size, stat_avail)
du_used = 490560
du_output = '%d /mnt' % du_used
with mock.patch.object(
drv, '_get_mount_point_for_share') as mock_get_mount:
mock_get_mount.return_value = self.TEST_MNT_POINT
self._execute.side_effect = [(stat_output, None),
(du_output, None)]
self.assertEqual((stat_total_size, stat_avail, du_used),
drv._get_capacity_info(self.TEST_NFS_EXPORT1))
mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1)
calls = [mock.call('stat', '-f', '-c', '%S %b %a',
self.TEST_MNT_POINT, run_as_root=True),
mock.call('du', '-sb', '--apparent-size',
'--exclude', '*snapshot*',
self.TEST_MNT_POINT, run_as_root=True)]
self._execute.assert_has_calls(calls)
def test_get_capacity_info_for_share_and_mount_point_with_spaces(self):
"""_get_capacity_info should calculate correct value."""
drv = self._driver
stat_total_size = 2620544
stat_avail = 2129984
stat_output = '1 %d %d' % (stat_total_size, stat_avail)
du_used = 490560
du_output = '%d /mnt' % du_used
with mock.patch.object(
drv, '_get_mount_point_for_share') as mock_get_mount:
mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES
self._execute.side_effect = [(stat_output, None),
(du_output, None)]
self.assertEqual((stat_total_size, stat_avail, du_used),
drv._get_capacity_info(
self.TEST_NFS_EXPORT_SPACES))
mock_get_mount.assert_called_once_with(
self.TEST_NFS_EXPORT_SPACES)
calls = [mock.call('stat', '-f', '-c', '%S %b %a',
self.TEST_MNT_POINT_SPACES, run_as_root=True),
mock.call('du', '-sb', '--apparent-size',
'--exclude', '*snapshot*',
self.TEST_MNT_POINT_SPACES, run_as_root=True)]
self._execute.assert_has_calls(calls)
def test_load_shares_config(self):
drv = self._driver
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
config_data.append('#' + self.TEST_NFS_EXPORT2)
config_data.append('')
config_data.append(self.TEST_NFS_EXPORT2 + ' ' +
self.TEST_NFS_EXPORT2_OPTIONS)
config_data.append('broken:share_format')
mock_read_config.return_value = config_data
drv._load_shares_config(drv.configuration.nfs_shares_config)
mock_read_config.assert_called_once_with(
self.TEST_SHARES_CONFIG_FILE)
self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
self.assertIn(self.TEST_NFS_EXPORT2, drv.shares)
self.assertEqual(2, len(drv.shares))
self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS,
drv.shares[self.TEST_NFS_EXPORT2])
def test_load_shares_config_nas_opts(self):
drv = self._driver
drv.configuration.nas_ip = self.TEST_NFS_HOST
drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
drv._load_shares_config(drv.configuration.nfs_shares_config)
self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
self.assertEqual(1, len(drv.shares))
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
"""_ensure_shares_mounted should save share if mounted with success."""
drv = self._driver
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure:
mock_read_config.return_value = config_data
                drv._ensure_shares_mounted()
                self.assertEqual([self.TEST_NFS_EXPORT1],
                                 drv._mounted_shares)
                mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1)
@mock.patch.object(remotefs, 'LOG')
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self,
LOG):
"""_ensure_shares_mounted should not save share if failed to mount."""
drv = self._driver
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure:
                mock_read_config.return_value = config_data
                mock_ensure.side_effect = Exception()
                drv._ensure_shares_mounted()
                self.assertEqual(0, len(drv._mounted_shares))
                mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1)
def test_find_share_should_throw_error_if_there_is_no_mounted_share(self):
"""_find_share should throw error if there is no mounted shares."""
drv = self._driver
drv._mounted_shares = []
self.assertRaises(exception.NfsNoSharesMounted, drv._find_share,
self.TEST_SIZE_IN_GB)
def test_find_share(self):
"""_find_share simple use case."""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(5 * units.Gi, 2 * units.Gi, 2 * units.Gi),
(10 * units.Gi, 3 * units.Gi, 1 * units.Gi)]
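            # EXPORT2 is expected: _find_share picks the eligible share with
            # the least allocated space (1 GiB here versus 2 GiB on EXPORT1).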
self.assertEqual(self.TEST_NFS_EXPORT2,
drv._find_share(self.TEST_SIZE_IN_GB))
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertEqual(2, mock_get_capacity_info.call_count)
def test_find_share_should_throw_error_if_there_is_not_enough_space(self):
"""_find_share should throw error if there is no share to host vol."""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(5 * units.Gi, 0, 5 * units.Gi),
(10 * units.Gi, 0, 10 * units.Gi)]
self.assertRaises(exception.NfsNoSuitableShareFound,
drv._find_share, self.TEST_SIZE_IN_GB)
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertEqual(2, mock_get_capacity_info.call_count)
def _simple_volume(self):
volume = DumbVolume()
volume['provider_location'] = '127.0.0.1:/mnt'
volume['name'] = 'volume_name'
volume['size'] = 10
return volume
def test_create_sparsed_volume(self):
drv = self._driver
volume = self._simple_volume()
self.override_config('nfs_sparsed_volumes', True)
with mock.patch.object(
drv, '_create_sparsed_file') as mock_create_sparsed_file:
with mock.patch.object(
drv, '_set_rw_permissions') as mock_set_rw_permissions:
drv._do_create_volume(volume)
mock_create_sparsed_file.assert_called_once_with(mock.ANY,
mock.ANY)
mock_set_rw_permissions.assert_called_once_with(mock.ANY)
def test_create_nonsparsed_volume(self):
drv = self._driver
self.configuration.nfs_sparsed_volumes = False
volume = self._simple_volume()
self.override_config('nfs_sparsed_volumes', False)
with mock.patch.object(
drv, '_create_regular_file') as mock_create_regular_file:
with mock.patch.object(
drv, '_set_rw_permissions') as mock_set_rw_permissions:
drv._do_create_volume(volume)
mock_create_regular_file.assert_called_once_with(mock.ANY,
mock.ANY)
mock_set_rw_permissions.assert_called_once_with(mock.ANY)
@mock.patch.object(nfs, 'LOG')
def test_create_volume_should_ensure_nfs_mounted(self, mock_log):
"""create_volume ensures shares provided in config are mounted."""
drv = self._driver
drv._find_share = mock.Mock()
drv._do_create_volume = mock.Mock()
        with mock.patch.object(
                drv, '_ensure_shares_mounted') as mock_ensure_share:
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
drv.create_volume(volume)
mock_ensure_share.assert_called_once_with()
@mock.patch.object(nfs, 'LOG')
def test_create_volume_should_return_provider_location(self, mock_log):
"""create_volume should return provider_location with found share."""
drv = self._driver
drv._ensure_shares_mounted = mock.Mock()
drv._do_create_volume = mock.Mock()
with mock.patch.object(drv, '_find_share') as mock_find_share:
mock_find_share.return_value = self.TEST_NFS_EXPORT1
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
result = drv.create_volume(volume)
self.assertEqual(self.TEST_NFS_EXPORT1,
result['provider_location'])
mock_find_share.assert_called_once_with(self.TEST_SIZE_IN_GB)
def test_delete_volume(self):
"""delete_volume simple test case."""
drv = self._driver
drv._ensure_share_mounted = mock.Mock()
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = self.TEST_NFS_EXPORT1
with mock.patch.object(drv, 'local_path') as mock_local_path:
mock_local_path.return_value = self.TEST_LOCAL_PATH
drv.delete_volume(volume)
mock_local_path.assert_called_once_with(volume)
self._execute.assert_called_once_with('rm', '-f',
self.TEST_LOCAL_PATH,
run_as_root=True)
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted."""
drv = self._driver
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = self.TEST_NFS_EXPORT1
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure_share:
drv.delete_volume(volume)
mock_ensure_share.assert_called_once_with(self.TEST_NFS_EXPORT1)
def test_delete_should_not_delete_if_provider_location_not_provided(self):
"""delete_volume shouldn't delete if provider_location missed."""
drv = self._driver
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = None
with mock.patch.object(drv, '_ensure_share_mounted'):
drv.delete_volume(volume)
self.assertFalse(self._execute.called)
def test_get_volume_stats(self):
"""get_volume_stats must fill the correct values."""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_ensure_shares_mounted') as mock_ensure_share:
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(10 * units.Gi, 2 * units.Gi, 2 * units.Gi),
(20 * units.Gi, 3 * units.Gi, 3 * units.Gi)]
drv._ensure_shares_mounted()
drv.get_volume_stats()
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertTrue(mock_ensure_share.called)
self.assertEqual(30.0, drv._stats['total_capacity_gb'])
self.assertEqual(5.0, drv._stats['free_capacity_gb'])
self.assertEqual(5, drv._stats['reserved_percentage'])
self.assertTrue(drv._stats['sparse_copy_volume'])
def test_get_volume_stats_with_non_zero_reserved_percentage(self):
"""get_volume_stats must fill the correct values."""
self.configuration.reserved_percentage = 10.0
drv = nfs.NfsDriver(configuration=self.configuration)
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_ensure_shares_mounted') as mock_ensure_share:
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(10 * units.Gi, 2 * units.Gi, 2 * units.Gi),
(20 * units.Gi, 3 * units.Gi, 3 * units.Gi)]
drv._ensure_shares_mounted()
drv.get_volume_stats()
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertTrue(mock_ensure_share.called)
self.assertEqual(30.0, drv._stats['total_capacity_gb'])
self.assertEqual(5.0, drv._stats['free_capacity_gb'])
self.assertEqual(10.0, drv._stats['reserved_percentage'])
@ddt.data(True, False)
def test_update_volume_stats(self, thin):
self._driver.configuration.max_over_subscription_ratio = 20.0
self._driver.configuration.reserved_percentage = 5.0
self._driver.configuration.nfs_sparsed_volumes = thin
remotefs_volume_stats = {
'volume_backend_name': 'fake_backend_name',
'vendor_name': 'fake_vendor',
'driver_version': 'fake_version',
'storage_protocol': 'NFS',
'total_capacity_gb': 100.0,
'free_capacity_gb': 20.0,
'reserved_percentage': 5.0,
'QoS_support': False,
}
self.mock_object(remotefs.RemoteFSDriver, '_update_volume_stats')
self._driver._stats = remotefs_volume_stats
mock_get_provisioned_capacity = self.mock_object(
self._driver, '_get_provisioned_capacity',
mock.Mock(return_value=25.0))
self._driver._update_volume_stats()
nfs_added_volume_stats = {
'provisioned_capacity_gb': 25.0 if thin else 80.0,
'max_over_subscription_ratio': 20.0,
'reserved_percentage': 5.0,
'thin_provisioning_support': thin,
'thick_provisioning_support': not thin,
}
expected = remotefs_volume_stats
expected.update(nfs_added_volume_stats)
self.assertEqual(expected, self._driver._stats)
self.assertEqual(thin, mock_get_provisioned_capacity.called)
def _check_is_share_eligible(self, total_size, total_available,
total_allocated, requested_volume_size):
with mock.patch.object(self._driver, '_get_capacity_info')\
as mock_get_capacity_info:
mock_get_capacity_info.return_value = (total_size,
total_available,
total_allocated)
return self._driver._is_share_eligible('fake_share',
requested_volume_size)
def test_is_share_eligible(self):
total_size = 100.0 * units.Gi
total_available = 90.0 * units.Gi
total_allocated = 10.0 * units.Gi
requested_volume_size = 1 # GiB
self.assertTrue(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_share_eligibility_with_reserved_percentage(self):
total_size = 100.0 * units.Gi
total_available = 4.0 * units.Gi
total_allocated = 96.0 * units.Gi
requested_volume_size = 1 # GiB
# Check used > used_ratio statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_above_oversub_ratio(self):
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 90.0 * units.Gi
requested_volume_size = 10 # GiB
# Check apparent_available <= requested_volume_size statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_reserved_space_above_oversub_ratio(self):
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 100.0 * units.Gi
requested_volume_size = 1 # GiB
# Check total_allocated / total_size >= oversub_ratio
# statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_extend_volume(self):
"""Extend a volume by 1."""
drv = self._driver
volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1,
'provider_location': 'nfs_share'}
path = 'path'
newSize = volume['size'] + 1
with mock.patch.object(image_utils, 'resize_image') as resize:
with mock.patch.object(drv, 'local_path', return_value=path):
with mock.patch.object(drv, '_is_share_eligible',
return_value=True):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=True):
drv.extend_volume(volume, newSize)
resize.assert_called_once_with(path, newSize,
run_as_root=True)
def test_extend_volume_failure(self):
"""Error during extend operation."""
drv = self._driver
volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1,
'provider_location': 'nfs_share'}
with mock.patch.object(image_utils, 'resize_image'):
with mock.patch.object(drv, 'local_path', return_value='path'):
with mock.patch.object(drv, '_is_share_eligible',
return_value=True):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=False):
self.assertRaises(exception.ExtendVolumeError,
drv.extend_volume, volume, 2)
def test_extend_volume_insufficient_space(self):
"""Insufficient space on nfs_share during extend operation."""
drv = self._driver
volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1,
'provider_location': 'nfs_share'}
with mock.patch.object(image_utils, 'resize_image'):
with mock.patch.object(drv, 'local_path', return_value='path'):
with mock.patch.object(drv, '_is_share_eligible',
return_value=False):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=False):
self.assertRaises(exception.ExtendVolumeError,
drv.extend_volume, volume, 2)
def test_is_file_size_equal(self):
"""File sizes are equal."""
drv = self._driver
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = size * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
self.assertTrue(drv._is_file_size_equal(path, size))
def test_is_file_size_equal_false(self):
"""File sizes are not equal."""
drv = self._driver
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = (size + 1) * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
self.assertFalse(drv._is_file_size_equal(path, size))
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_true(self, LOG):
"""Test higher level setting of NAS Security options.
The NFS driver overrides the base method with a driver specific
version.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
is_new_install = True
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._determine_nas_security_option_setting = mock.Mock(
return_value='true')
drv.set_nas_security_options(is_new_install)
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
self.assertFalse(LOG.warning.called)
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_false(self, LOG):
"""Test higher level setting of NAS Security options.
The NFS driver overrides the base method with a driver specific
version.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._determine_nas_security_option_setting = mock.Mock(
return_value='false')
drv.set_nas_security_options(is_new_install)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warning.called)
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
"""Ensure proper exception is raised if there are no mounted shares."""
drv = self._driver
drv._ensure_shares_mounted = mock.Mock()
drv._mounted_shares = []
is_new_cinder_install = 'does not matter'
self.assertRaises(exception.NfsNoSharesMounted,
drv.set_nas_security_options,
is_new_cinder_install)
def test_ensure_share_mounted(self):
"""Case where the mount works the first time."""
self.mock_object(self._driver._remotefsclient, 'mount')
drv = self._driver
drv.configuration.nfs_mount_attempts = 3
drv.shares = {self.TEST_NFS_EXPORT1: ''}
drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
        self.assertEqual(1, drv._remotefsclient.mount.call_count)
@mock.patch('time.sleep')
def test_ensure_share_mounted_exception(self, _mock_sleep):
"""Make the configured number of attempts when mounts fail."""
num_attempts = 3
self.mock_object(self._driver._remotefsclient, 'mount',
mock.Mock(side_effect=Exception))
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count)
def test_ensure_share_mounted_at_least_one_attempt(self):
"""Make at least one mount attempt even if configured for less."""
min_num_attempts = 1
num_attempts = 0
self.mock_object(self._driver._remotefsclient, 'mount',
mock.Mock(side_effect=Exception))
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(min_num_attempts,
drv._remotefsclient.mount.call_count)
class NfsDriverDoSetupTestCase(test.TestCase):
def setUp(self):
super(NfsDriverDoSetupTestCase, self).setUp()
self.context = mock.Mock()
self.create_configuration()
def create_configuration(self):
config = conf.Configuration(None)
config.append_config_values(nfs.nfs_opts)
self.configuration = config
def test_setup_should_throw_error_if_shares_config_not_configured(self):
"""do_setup should throw error if shares config is not configured."""
self.override_config('nfs_shares_config', None)
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
with self.assertRaisesRegex(exception.NfsException,
".*no NFS config file configured.*"):
drv.do_setup(self.context)
self.assertEqual(0, mock_os_path_exists.call_count)
def test_setup_should_throw_error_if_shares_file_does_not_exist(self):
"""do_setup should throw error if shares file does not exist."""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = False
with self.assertRaisesRegex(exception.NfsException,
"NFS config file.*doesn't exist"):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed."""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = True
mock_execute = self.mock_object(drv, '_execute')
mock_execute.side_effect = OSError(
errno.ENOENT, 'No such file or directory.')
with self.assertRaisesRegex(exception.NfsException,
'mount.nfs is not installed'):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
mock_execute.assert_has_calls(
[mock.call('mount.nfs',
check_exit_code=False,
run_as_root=True)])
def test_setup_should_throw_exception_if_mount_nfs_command_fails(self):
"""do_setup should throw error if mount.nfs fails with OSError
This test covers the OSError path when mount.nfs is installed.
"""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = True
mock_execute = self.mock_object(drv, '_execute')
mock_execute.side_effect = OSError(
errno.EPERM, 'Operation... BROKEN')
with self.assertRaisesRegex(OSError, '.*Operation... BROKEN'):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
mock_execute.assert_has_calls(
[mock.call('mount.nfs',
check_exit_code=False,
run_as_root=True)])
@mock.patch.object(os, 'rename')
def test_update_migrated_available_volume(self, rename_volume):
self._test_update_migrated_volume('available', rename_volume)
@mock.patch.object(os, 'rename')
def test_update_migrated_available_volume_rename_fail(self, rename_volume):
self._test_update_migrated_volume('available', rename_volume,
rename_exception=True)
@mock.patch.object(os, 'rename')
def test_update_migrated_in_use_volume(self, rename_volume):
self._test_update_migrated_volume('in-use', rename_volume)
def _test_update_migrated_volume(self, volume_status, rename_volume,
rename_exception=False):
drv = nfs.NfsDriver(configuration=self.configuration)
fake_volume_id = 'vol1'
fake_new_volume_id = 'vol2'
fake_provider_source = 'fake_provider_source'
fake_provider = 'fake_provider'
base_dir = '/dir_base/'
volume_name_template = 'volume-%s'
original_volume_name = volume_name_template % fake_volume_id
current_name = volume_name_template % fake_new_volume_id
original_volume_path = base_dir + original_volume_name
current_path = base_dir + current_name
fake_volume = {'size': 1, 'id': fake_volume_id,
'provider_location': fake_provider_source,
'_name_id': None}
fake_new_volume = {'size': 1, 'id': fake_new_volume_id,
'provider_location': fake_provider,
'_name_id': None}
with mock.patch.object(drv, 'local_path') as local_path:
local_path.return_value = base_dir + current_name
if volume_status == 'in-use':
update = drv.update_migrated_volume(self.context,
fake_volume,
fake_new_volume,
volume_status)
self.assertEqual({'_name_id': fake_new_volume_id,
'provider_location': fake_provider}, update)
elif rename_exception:
rename_volume.side_effect = OSError
update = drv.update_migrated_volume(self.context,
fake_volume,
fake_new_volume,
volume_status)
rename_volume.assert_called_once_with(current_path,
original_volume_path)
self.assertEqual({'_name_id': fake_new_volume_id,
'provider_location': fake_provider}, update)
else:
update = drv.update_migrated_volume(self.context,
fake_volume,
fake_new_volume,
volume_status)
rename_volume.assert_called_once_with(current_path,
original_volume_path)
self.assertEqual({'_name_id': None,
'provider_location': fake_provider}, update)
def test_retype_is_there(self):
"Ensure that driver.retype() is there."""
drv = nfs.NfsDriver(configuration=self.configuration)
v1 = DumbVolume()
ret = drv.retype(self.context,
v1,
mock.sentinel.new_type,
mock.sentinel.diff,
mock.sentinel.host)
self.assertEqual((False, None), ret)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from oslo.config import cfg
from cinder.brick.remotefs import remotefs
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
from cinder import units
from cinder import utils
from cinder.volume import driver
VERSION = '1.1.0'
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('nfs_shares_config',
default='/etc/cinder/nfs_shares',
help='File with the list of available nfs shares'),
cfg.BoolOpt('nfs_sparsed_volumes',
default=True,
                help=('Create volumes as sparsed files which take no space. '
                      'If set to False, volume is created as a regular file. '
                      'In such case volume creation takes a lot of time.')),
cfg.FloatOpt('nfs_used_ratio',
default=0.95,
help=('Percent of ACTUAL usage of the underlying volume '
'before no new volumes can be allocated to the volume '
'destination.')),
cfg.FloatOpt('nfs_oversub_ratio',
default=1.0,
help=('This will compare the allocated to available space on '
'the volume destination. If the ratio exceeds this '
'number, the destination will no longer be valid.')),
cfg.StrOpt('nfs_mount_point_base',
default='$state_path/mnt',
help=('Base dir containing mount points for nfs shares.')),
cfg.StrOpt('nfs_mount_options',
default=None,
               help=('Mount options passed to the nfs client. See the nfs '
                     'man page for details.')),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class RemoteFsDriver(driver.VolumeDriver):
"""Common base for drivers that work like NFS."""
VERSION = "0.0.0"
def __init__(self, *args, **kwargs):
super(RemoteFsDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume['provider_location'],
'name': volume['name']}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data
}
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
        LOG.info(_('cast to %s') % volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
self._create_sparsed_file(volume_path, volume_size)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions_for_all(volume_path)
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and tries to mount them
locally.
"""
self._mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
LOG.warning(_('Exception during mounting %s') % (exc,))
LOG.debug('Available shares %s' % str(self._mounted_shares))
def create_cloned_volume(self, volume, src_vref):
raise NotImplementedError()
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warn(_('Volume %s does not have provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
mounted_path = self.local_path(volume)
self._execute('rm', '-f', mounted_path, run_as_root=True)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume):
"""Exports the volume. Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _create_sparsed_file(self, path, size):
"""Creates file with 0 disk usage."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=True)
def _create_regular_file(self, path, size):
"""Creates regular file of given size. Takes a lot of time for large
files.
"""
block_size_mb = 1
block_count = size * units.GiB / (block_size_mb * units.MiB)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=True)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.GiB),
run_as_root=True)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path, run_as_root=True)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume
:param volume: volume reference
"""
nfs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume['name'])
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume))
# NOTE (leseb): Set the virtual size of the image
# the raw conversion overwrote the destination file
# (which had the correct size)
# with the fetched glance image size,
# thus the initial 'size' parameter is not honored
# this sets the size to the one asked in the first place by the user
# and then verify the final virtual size
image_utils.resize_image(self.local_path(volume), volume['size'])
data = image_utils.qemu_img_info(self.local_path(volume))
virt_size = data.virtual_size / units.GiB
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file):
self.shares = {}
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip().decode('unicode_escape')
share_opts = share_info[1].strip() if len(share_info) > 1 else None
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.GiB)
data['free_capacity_gb'] = global_free / float(units.GiB)
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
def _do_mount(self, cmd, ensure, share):
"""Finalize mount command.
:param cmd: command to do the actual mount
:param ensure: boolean to allow remounting a share with a warning
:param share: description of the share for error reporting
"""
try:
self._execute(*cmd, run_as_root=True)
except putils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.stderr:
LOG.warn(_("%s is already mounted"), share)
else:
raise
def _get_capacity_info(self, nfs_share):
raise NotImplementedError()
def _find_share(self, volume_size_in_gib):
raise NotImplementedError()
def _ensure_share_mounted(self, nfs_share):
raise NotImplementedError()
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
raise NotImplementedError()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
raise NotImplementedError()
class NfsDriver(RemoteFsDriver):
"""NFS based cinder driver. Creates file on NFS share for using it
as block device on hypervisor.
"""
driver_volume_type = 'nfs'
driver_prefix = 'nfs'
volume_backend_name = 'Generic_NFS'
VERSION = VERSION
def __init__(self, execute=putils.execute, *args, **kwargs):
self._remotefsclient = None
super(NfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
root_helper = utils.get_root_helper()
base = getattr(self.configuration,
'nfs_mount_point_base',
CONF.nfs_mount_point_base)
opts = getattr(self.configuration,
'nfs_mount_options',
CONF.nfs_mount_options)
self._remotefsclient = remotefs.RemoteFsClient(
'nfs', root_helper, execute=execute,
nfs_mount_point_base=base,
nfs_mount_options=opts)
def set_execute(self, execute):
super(NfsDriver, self).set_execute(execute)
if self._remotefsclient:
self._remotefsclient.set_execute(execute)
def do_setup(self, context):
"""Any initialization the volume driver does while starting"""
super(NfsDriver, self).do_setup(context)
config = self.configuration.nfs_shares_config
if not config:
msg = (_("There's no NFS config file configured (%s)") %
'nfs_shares_config')
LOG.warn(msg)
raise exception.NfsException(msg)
if not os.path.exists(config):
msg = (_("NFS config file at %(config)s doesn't exist") %
{'config': config})
LOG.warn(msg)
raise exception.NfsException(msg)
if not self.configuration.nfs_oversub_ratio > 0:
msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
"%s") % self.configuration.nfs_oversub_ratio
LOG.error(msg)
raise exception.NfsException(msg)
        if not (0 < self.configuration.nfs_used_ratio <= 1):
msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
"and <= 1.0: %s") % self.configuration.nfs_used_ratio
LOG.error(msg)
raise exception.NfsException(msg)
self.shares = {} # address : options
# Check if mount.nfs is installed
try:
self._execute('mount.nfs', check_exit_code=False, run_as_root=True)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise exception.NfsException('mount.nfs is not installed')
else:
raise exc
def _ensure_share_mounted(self, nfs_share):
mnt_flags = []
if self.shares.get(nfs_share) is not None:
mnt_flags = self.shares[nfs_share].split()
self._remotefsclient.mount(nfs_share, mnt_flags)
def _find_share(self, volume_size_in_gib):
"""Choose NFS share among available ones for given volume size.
        If more than one share meets the criteria, the share with the least
        "allocated" space will be selected.
        :param volume_size_in_gib: int size in GiB
"""
if not self._mounted_shares:
raise exception.NfsNoSharesMounted()
target_share = None
target_share_reserved = 0
for nfs_share in self._mounted_shares:
if not self._is_share_eligible(nfs_share, volume_size_in_gib):
continue
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
if target_share is not None:
if target_share_reserved > total_allocated:
target_share = nfs_share
target_share_reserved = total_allocated
else:
target_share = nfs_share
target_share_reserved = total_allocated
if target_share is None:
raise exception.NfsNoSuitableShareFound(
volume_size=volume_size_in_gib)
LOG.debug(_('Selected %s as target nfs share.'), target_share)
return target_share
def _is_share_eligible(self, nfs_share, volume_size_in_gib):
"""Verifies NFS share is eligible to host volume with given size.
        First validation step: the ratio of actual used space (used_space /
        total_space) must not exceed 'nfs_used_ratio'. Second validation
        step: the apparent available space (total_size * nfs_oversub_ratio
        minus the apparent space already allocated, which differs from the
        actual space used when sparse files are in play) must be larger than
        the requested volume size, and the allocated-to-total ratio must stay
        below 'nfs_oversub_ratio'.
        :param nfs_share: nfs share
        :param volume_size_in_gib: int size in GiB
"""
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
requested_volume_size = volume_size_in_gib * units.GiB
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
apparent_size = max(0, total_size * oversub_ratio)
apparent_available = max(0, apparent_size - total_allocated)
used = (total_size - total_available) / total_size
if used > used_ratio:
# NOTE(morganfainberg): We check the used_ratio first since
# with oversubscription it is possible to not have the actual
# available space but be within our oversubscription limit
# therefore allowing this share to still be selected as a valid
# target.
LOG.debug(_('%s is above nfs_used_ratio'), nfs_share)
return False
if apparent_available <= requested_volume_size:
LOG.debug(_('%s is above nfs_oversub_ratio'), nfs_share)
return False
if total_allocated / total_size >= oversub_ratio:
LOG.debug(_('%s reserved space is above nfs_oversub_ratio'),
nfs_share)
return False
return True
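    # Worked example (illustrative numbers, with the default nfs_used_ratio
    # of 0.95 and nfs_oversub_ratio of 1.0): for total_size=100 GiB,
    # total_available=30 GiB and total_allocated=80 GiB, a 10 GiB request is
    # eligible because used = (100 - 30) / 100 = 0.70 <= 0.95, apparent
    # available = 100 * 1.0 - 80 = 20 GiB > 10 GiB, and 80 / 100 = 0.8 < 1.0.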
def _get_mount_point_for_share(self, nfs_share):
"""Needed by parent class."""
return self._remotefsclient.get_mount_point(nfs_share)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
mount_point = self._get_mount_point_for_share(nfs_share)
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
run_as_root=True)
block_size, blocks_total, blocks_avail = map(float, df.split())
total_available = block_size * blocks_avail
total_size = block_size * blocks_total
du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude',
'*snapshot*', mount_point, run_as_root=True)
total_allocated = float(du.split()[0])
return total_size, total_available, total_allocated
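    # Illustrative parse (made-up numbers): stat output "4096 2620544 2129984"
    # gives block_size=4096, so total_size ~= 10 GiB and total_available
    # ~= 8.1 GiB; du output "490560 /mnt/share" gives total_allocated of
    # 490560 bytes (~479 KiB).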
|
|
import sys
import os
home = os.path.expanduser("~")
sys.path.append(os.path.join(home, 'gnumpy'))
import gnumpy as gp
import numpy as np
#import scipy
import scipy.optimize
#import deepnet
class NeuralNet(object):
'''
    Implementation of a Multi-Layer Perceptron trained by backprop. This class
    accepts pre-trained networks for use in a deep neural network. Pre-trained
    nets should consist of a list of objects, where each object has W and
    hbias variables containing numpy arrays, n_hidden containing the number of
    hidden units, and hidtype containing a string with the activation type,
    e.g. "sigmoid".
'''
def __init__(self, network=None, layer_sizes=None, layer_types=None, stream=sys.stdout):
layers = []
        if network is not None:
# copy the weights from the given network onto the GPU
for rbm in network:
l = Layer(rbm.W, rbm.hbias, rbm.n_hidden, rbm.hidtype)
layers.append(l)
else:
# if no pre-trained network is given, initialize random weights
assert layer_sizes is not None
assert layer_types is not None
assert len(layer_sizes) == len(layer_types)
# randomize the network weights according to the Bottou proposition
# this is borrowed from the ffnet project:
# http://ffnet.sourceforge.net/_modules/ffnet.html#ffnet.randomweights
n = 0
for i in range(len(layer_sizes)-1):
n += layer_sizes[i]*layer_sizes[i+1]
n += layer_sizes[i+1]
bound = 2.38 / np.sqrt(n)
for i in range(len(layer_sizes)-1):
W = np.zeros((layer_sizes[i+1]*layer_sizes[i],))
for j in range(W.size):
W[j] = np.random.uniform(-bound, bound)
W = W.reshape((layer_sizes[i+1], layer_sizes[i]))
hbias = np.zeros((layer_sizes[i+1],))
for j in range(hbias.size):
hbias[j] = np.random.uniform(-bound, bound)
hbias = hbias.reshape((layer_sizes[i+1],1))
l = Layer(W, hbias, layer_sizes[i+1], layer_types[i+1])
layers.append(l)
self.network = layers
self.stream = stream
def run_through_network(self, data, net=None):
'''
Gets the output of the top layer of the network given input data on the
bottom.
args:
array data: the input data
obj net: the network to use, default is self.network
returns:
array hid: the activation of the top layer
'''
if net is None:
net = self.network
hid = data
for layer in net:
vis = gp.garray(hid)
hid = self.get_activation(layer, vis)
gp.free_reuse_cache()
return hid
def get_activation(self, layer, data):
'''
Gets the activation of a single layer given input data
args:
obj layer: the layer object
array data: the input data
returns:
array hid: the output of the layer
'''
if not hasattr(layer, 'n_hidden'):
layer = layer[0]
hid = np.zeros((data.shape[0], layer.n_hidden))
        # materialise the range as a list so the final boundary can be appended
        breaks = list(range(0, hid.shape[0], 128))
        breaks.append(hid.shape[0])
for i in range(len(breaks)-1):
s = breaks[i]
e = breaks[i+1]
act = gp.dot(data[s:e], layer.W.T) + layer.hbias.T
if layer.hidtype == 'sigmoid':
hid[s:e] = (act.logistic()).as_numpy_array()
else:
hid[s:e] = act.as_numpy_array()
return hid
def train(self, network, data, targets, validX=None, validT=None, max_iter=100,
validErrFunc='classification', targetCost='linSquaredErr', initialfit=5,
cg_iter=20):
'''
Trains the network using backprop
args:
list[obj] network: the network
array data: the training data
array targets: the training labels
array validX: the validation data (optional)
array validT: the validation labels (optional)
int max_iter: the maximum number of backprop iterations
string validErrFunc: determines which kind of network to train,
i.e. classification or reconstruction
string targetCost: determines which cost function to use, i.e.
linSquaredErr, crossEntropy, or softMax
linSquaredErr works only for gaussian output units
softmax works only for exp output units (not implemented)
int initialfit: if n>0, top layer only will be trained for n iterations
int cg_iter: the max number of iterations for conjugate gradient
optimization, default=20
'''
        # initialize parameters
self.validErrFunc = validErrFunc
self.targetCost = targetCost
self.n, self.m = data.shape
self.cg_iter = cg_iter
numunits = 0
for i in range(len(self.network)):
numunits = numunits + self.network[i].W.shape[1] + \
self.network[i].hbias.shape[0]
self.numunits = numunits
self.batch_size = 1024
self.weights = np.ones((self.n,1))
# For estimating test error
tindex = np.arange(self.n)
np.random.shuffle(tindex)
tinds = tindex[:(np.min([self.batch_size, self.n]))]
# Perform gradient descent
self.stream.write("Starting {} iterations of backprop.".format(max_iter))
if (initialfit>0):
# This gets the activation of next to last layer to train top layer
transformedX = self.run_through_network(data, network[:-1])
for i in range(max_iter):
trainerr = self.getError(network, data[tinds,:], targets[tinds,:],
self.weights[tinds])
if validX is not None:
validerr = self.getError(network, validX, validT,
np.ones((validX.shape[0],)))
self.stream.write("Iteration %3d: TrainErr = %4.3f, ValidErr = %4.3f" % \
(i+1, trainerr, validerr))
else:
self.stream.write("Iteration %3d: TrainErr = %4.3f" %(i+1, trainerr))
# Train the top layer only for initialfit iters
if (i < initialfit):
toplayer = self.doBackprop(transformedX, targets, [network[-1]])
network[-1] = toplayer[0]
else:
network = self.doBackprop(data, targets, network)
# Print the final training error
trainerr = self.getError(network, data[tinds,:], targets[tinds,:],
self.weights[tinds])
if validX is not None:
validerr = self.getError(network, validX, validT,
np.ones((validX.shape[0],)))
self.stream.write("Final : TrainErr = %4.3f, ValidErr = %4.3f" % \
(trainerr, validerr))
else:
self.stream.write("Final : TrainErr = %4.3f" %(trainerr))
return network
def getError(self, network, X, T, weights):
'''
Calculates the error for either classification or reconstruction during
backprop
args:
list[obj] network: the network to use
X: the input data
T: the input targets
weights: weights used for backprop
This function is designed to be called by the train() method
'''
err = 0
result = self.run_through_network(X, network)
if self.validErrFunc == 'classification':
for i in range(X.shape[0]):
ind = np.argmax(result[i,:])
targ = np.argmax(T[i,:])
if ind != targ:
err = err + weights[i]
else:
for i in range(X.shape[0]):
err = err + np.sqrt(np.sum(np.square(result[i,:]-T[i,:])))*weights[i]
validerr = err / np.sum(weights)
return validerr
def doBackprop(self, data, targets, network):
'''
Executes 1 iteration of backprop
args:
array data: the training data
array targets: the training targets
list[obj] network: the network
This function is designed to be called by the train() method
'''
no_layers = len(network)
index = np.arange(self.n)
np.random.shuffle(index)
nbatches = len(range(0,self.n, self.batch_size))
count = 0
for batch in range(0, self.n, self.batch_size):
if batch + 2*self.batch_size > self.n:
batchend = self.n
else:
batchend = batch + self.batch_size
# Select current batch
tmpX = data[index[batch:batchend],:]
tmpT = targets[index[batch:batchend],:]
tmpW = self.weights[index[batch:batchend],:]
# flatten out the weights and store them in v
v = []
for i in range(no_layers):
w = network[i].W.as_numpy_array()
b = network[i].hbias.as_numpy_array()
v.extend((w.reshape((w.shape[0]*w.shape[1],))).tolist())
v.extend((b.reshape((b.shape[0]*b.shape[1],))).tolist())
v = np.asarray(v)
            # Conjugate gradient minimization
result = scipy.optimize.minimize(self.backprop_gradient, v,
args=(network, tmpX, tmpT, tmpW),
method='CG', jac=True, options={'maxiter': self.cg_iter})
self.stream.write("batch %d of %d" %(count+1, nbatches))
count += 1
v = result.x
# unflatten v and put new weights back
            ind = 0
for i in range(no_layers):
h,w = network[i].W.shape
network[i].W = gp.garray((v[ind:(ind+h*w)]).reshape((h,w)))
ind += h*w
b = len(network[i].hbias)
network[i].hbias = gp.garray((v[ind:(ind+b)]).reshape((b,1)))
ind += b
# debugging help
#print "=================="
#print "W 1", network[0].W.shape
#print network[0].W
#print "bias 1", network[0].hbias.shape
#print network[0].hbias
#print "W 2", network[1].W.shape
#print network[1].W
#print "bias 2", network[1].hbias.shape
#print network[1].hbias
#print "=================="
return network
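    # Layout of the flattened parameter vector `v` packed in doBackprop() and
    # unpacked again in backprop_gradient(): weights and biases are
    # concatenated per layer, in network order,
    #   v = [W0.ravel(), b0.ravel(), W1.ravel(), b1.ravel(), ...]
    # where each W has shape (n_hidden, n_visible) and each bias has shape
    # (n_hidden, 1). Both methods rely on this ordering.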
def backprop_gradient(self, v, network, X, targets, weights):
'''
Calculates the value of the cost function and the gradient for CG
optimization.
args:
array v: the 1d vector of weights
list[obj] network: the network
array X: training data
array targets: the training targets
array weights: the backprop weights
returns:
array cost: the value of the cost function
array grad: the value of the gradient
This function is called by scipy's minimize function during optimization
'''
if len(v.shape) == 1:
v = v.reshape((v.shape[0],1))
# initialize variables
n = X.shape[0]
numHiddenLayers = len(network)
# put the v weights back into the network
        ind = 0
for i in range(numHiddenLayers):
h,w = network[i].W.shape
network[i].W = gp.garray((v[ind:(ind+h*w)]).reshape((h,w)))
ind += h*w
b = network[i].hbias.shape[0]
network[i].hbias = gp.garray(v[ind:(ind+b)]).reshape((b,1))
ind += b
# Run data through the network, keeping activations of each layer
acts = [X] # a list of numpy arrays
hid = X
for layer in network:
vis = gp.garray(hid)
hid = self.get_activation(layer, vis)
acts.append(hid)
gp.free_reuse_cache()
# store the gradients
dW = []
db = []
# Compute the value of the cost function
if self.targetCost == 'crossEntropy':
# see www.stanford.edu/group/pdplab/pdphandbook/handbookch6.html
cost = (-1.0/n) * np.sum(np.sum(targets * np.log(acts[-1]) + \
(1.0 - targets) * np.log(1.0 - acts[-1]), axis=1) * weights.T)
Ix = (acts[-1] - targets) / n
else: #self.targetCost == 'linSquaredErr':
cost = 0.5 * np.sum(np.sum(np.square(acts[-1] - targets), axis=1) * \
weights.T)
Ix = (acts[-1] - targets)
Ix *= np.tile(weights, (1, Ix.shape[1])).reshape((Ix.shape[0],Ix.shape[1]))
Ix = gp.garray(Ix)
# Compute the gradients
for i in range(numHiddenLayers-1,-1,-1):
# augment activations with ones
acts[i] = gp.garray(acts[i])
acts[i] = gp.concatenate((acts[i], gp.ones((n,1))), axis=1)
# compute delta in next layer
delta = gp.dot(acts[i].T, Ix)
# split delta into weights and bias parts
dW.append(delta[:-1,:].T)
db.append(delta[-1,:].T)
# backpropagate the error
if i > 0:
if network[i-1].hidtype == 'sigmoid':
Ix = gp.dot(Ix,gp.concatenate((network[i].W,network[i].hbias),
axis=1)) * acts[i] * (1.0 - acts[i])
elif network[i-1].hidtype == 'gaussian':
Ix = gp.dot(Ix,gp.concatenate((network[i].W,network[i].hbias),
axis=1))
Ix = Ix[:,:-1]
gp.free_reuse_cache()
dW.reverse()
db.reverse()
# Convert gradient information
grad = np.zeros_like(v)
ind = 0
for i in range(numHiddenLayers):
grad[ind:(ind+dW[i].size)] = \
(dW[i].reshape((dW[i].shape[0]*dW[i].shape[1],1))).as_numpy_array()
ind += dW[i].size
grad[ind:(ind+db[i].size),0] = db[i].as_numpy_array()
ind += db[i].size
grad = grad.reshape((grad.shape[0],))
return cost, grad
class Layer(object):
'''
A hidden layer object
args:
array W: the weight array
array hbias: the bias weights
int n_hidden: the number of hidden units
string hidtype: the activation function "sigmoid" or "gaussian"
'''
def __init__(self, W, hbias, n_hidden, hidtype):
self.W = gp.garray(W)
# convert 1d arrays to 2d
if len(hbias.shape) == 1:
hbias = hbias.reshape((hbias.shape[0],1))
self.hbias = gp.garray(hbias)
self.n_hidden = n_hidden
self.hidtype = hidtype
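def demo_pretrained_input():
    '''Illustrative sketch (not part of the original module) showing the
    pre-trained network format accepted by NeuralNet(): a list of objects,
    each exposing W, hbias, n_hidden and hidtype attributes. Sizes and
    values below are made up.
    '''
    class _PretrainedRBM(object):
        def __init__(self, n_visible, n_hidden):
            # random weights of shape (n_hidden, n_visible), zero biases
            self.W = np.random.uniform(-0.1, 0.1, (n_hidden, n_visible))
            self.hbias = np.zeros((n_hidden, 1))
            self.n_hidden = n_hidden
            self.hidtype = 'sigmoid'
    # two stacked layers: 4 visible units -> 3 hidden -> 2 hidden
    pretrained = [_PretrainedRBM(4, 3), _PretrainedRBM(3, 2)]
    return NeuralNet(network=pretrained)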
def demo_xor(stream=sys.stdout):
'''Demonstration of backprop with classic XOR example
'''
#stream = es.EmittingStream()
data = np.array([[0.,0.],[0.,1.],[1.,0.],[1.,1.]])
targets = np.array([[0.],[1.],[1.],[0.]])
nn = NeuralNet(layer_sizes=[2,2,1], layer_types=['sigmoid','sigmoid','sigmoid'], stream=stream)
stream.write("initial parameters")
stream.write("==================")
stream.write("W 1 {}".format(nn.network[0].W.shape))
stream.write("{}".format(nn.network[0].W))
stream.write("bias 1 {}".format(nn.network[0].hbias.shape))
stream.write("{}".format(nn.network[0].hbias))
stream.write("W 2 {}".format(nn.network[1].W.shape))
stream.write("{}".format(nn.network[1].W))
stream.write("bias 2 {}".format(nn.network[1].hbias.shape))
stream.write("{}".format(nn.network[1].hbias))
stream.write("==================")
net = nn.train(nn.network, data, targets, max_iter=10, targetCost='crossEntropy',
initialfit=0, cg_iter=100)
stream.write("network test:")
output = nn.run_through_network(data, net)
    stream.write("{}".format(output))
if __name__ == "__main__":
demo_xor()
|
|
from __future__ import print_function, division, absolute_import
from contextlib import contextmanager
from glob import glob
import logging
from multiprocessing import Process, Queue
import os
import shutil
import signal
import socket
from subprocess import Popen, PIPE
import sys
from time import time, sleep
import uuid
import mock
from toolz import merge
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.iostream import StreamClosedError
from .core import connect, read, write, rpc
from .utils import ignoring, log_errors, sync
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp('data').join('file.py')
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope='session')
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp('data').join('distributed_script.py')
lines = ("from distributed import Client", "e = Client('127.0.0.1:8989')",
'print(e)')
local_file.write('\n'.join(lines))
return local_file
@pytest.fixture(scope='session')
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp('data').join('file.py')
local_file.write("a+1")
return local_file
@pytest.yield_fixture
def current_loop():
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
yield loop
if loop._running:
sync(loop, loop.stop)
for i in range(5):
try:
loop.close(all_fds=True)
return
except Exception as e:
f = e
print(f)
IOLoop.clear_instance()
@pytest.yield_fixture
def loop():
loop = IOLoop()
yield loop
if loop._running:
sync(loop, loop.stop)
for i in range(5):
try:
loop.close(all_fds=True)
return
except Exception as e:
f = e
print(f)
@pytest.yield_fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def mock_ipython():
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
get_ip = lambda : ip
with mock.patch('IPython.get_ipython', get_ip), \
mock.patch('distributed._ipython_utils.get_ipython', get_ip):
yield ip
def inc(x):
return x + 1
def dec(x):
return x - 1
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise Exception('hello!')
def double(x):
return x * 2
def slowinc(x, delay=0.02):
from time import sleep
sleep(delay)
return x + 1
def randominc(x, scale=1):
from time import sleep
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
from time import sleep
sleep(delay)
return x + y
def run_scheduler(q, scheduler_port=0, **kwargs):
from distributed import Scheduler
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
scheduler = Scheduler(loop=loop, validate=True, **kwargs)
done = scheduler.start(scheduler_port)
q.put(scheduler.port)
try:
loop.start()
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_port, **kwargs):
from distributed import Worker
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
with log_errors():
IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
worker = Worker('127.0.0.1', scheduler_port, ip='127.0.0.1',
loop=loop, **kwargs)
loop.run_sync(lambda: worker._start(0))
q.put(worker.port)
try:
loop.start()
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_port, **kwargs):
from distributed import Nanny
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
with log_errors():
IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
worker = Nanny('127.0.0.1', scheduler_port, ip='127.0.0.1',
loop=loop, **kwargs)
loop.run_sync(lambda: worker._start(0))
q.put(worker.port)
try:
loop.start()
finally:
loop.run_sync(worker._close)
loop.close(all_fds=True)
@contextmanager
def cluster(nworkers=2, nanny=False, worker_kwargs={}):
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
scheduler_q = Queue()
scheduler = Process(target=run_scheduler, args=(scheduler_q,))
scheduler.daemon = True
scheduler.start()
sport = scheduler_q.get()
workers = []
for i in range(nworkers):
q = Queue()
fn = '_test_worker-%s' % uuid.uuid1()
proc = Process(target=_run_worker, args=(q, sport),
kwargs=merge({'ncores': 1, 'local_dir': fn},
worker_kwargs))
workers.append({'proc': proc, 'queue': q, 'dir': fn})
for worker in workers:
worker['proc'].start()
for worker in workers:
worker['port'] = worker['queue'].get()
loop = IOLoop()
s = rpc(ip='127.0.0.1', port=sport)
start = time()
try:
while True:
ncores = loop.run_sync(s.ncores)
if len(ncores) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
yield {'proc': scheduler, 'port': sport}, workers
finally:
logger.debug("Closing out test cluster")
with ignoring(socket.error, TimeoutError, StreamClosedError):
loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
scheduler.terminate()
scheduler.join(timeout=2)
for port in [w['port'] for w in workers]:
with ignoring(socket.error, TimeoutError, StreamClosedError):
loop.run_sync(lambda: disconnect('127.0.0.1', port),
timeout=0.5)
for proc in [w['proc'] for w in workers]:
with ignoring(Exception):
proc.terminate()
proc.join(timeout=2)
for q in [w['queue'] for w in workers]:
q.close()
for fn in glob('_test_worker-*'):
shutil.rmtree(fn)
loop.close(all_fds=True)
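# Typical (synchronous) use of the cluster() context manager above; the test
# body here is only a sketch:
#
#     def test_something(loop):
#         with cluster(nworkers=2) as (s, workers):
#             # s['port'] is the scheduler port and each workers[i]['port']
#             # is a live worker port on 127.0.0.1; point a Client or rpc
#             # object at them here.
#             pass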
@gen.coroutine
def disconnect(ip, port):
stream = yield connect(ip, port)
try:
yield write(stream, {'op': 'terminate', 'close': True})
response = yield read(stream)
finally:
stream.close()
import pytest
try:
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run")
except (AttributeError, ValueError):
def slow(*args):
pass
from tornado import gen
from tornado.ioloop import IOLoop
def gen_test(timeout=10):
""" Coroutine test
@gen_test(timeout=5)
def test_foo():
yield ... # use tornado coroutines
"""
def _(func):
def test_func():
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
cor = gen.coroutine(func)
try:
loop.run_sync(cor, timeout=timeout)
finally:
loop.stop()
loop.close(all_fds=True)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
from .client import Client
@gen.coroutine
def start_cluster(ncores, loop, Worker=Worker, scheduler_kwargs={}):
s = Scheduler(ip='127.0.0.1', loop=loop, validate=True, **scheduler_kwargs)
done = s.start(0)
workers = [Worker(s.ip, s.port, ncores=v, ip=k, name=i, loop=loop)
for i, (k, v) in enumerate(ncores)]
for w in workers:
w.rpc = workers[0].rpc
yield [w._start() for w in workers]
start = time()
while len(s.ncores) < len(ncores):
yield gen.sleep(0.01)
if time() - start > 5:
raise Exception("Cluster creation timeout")
raise gen.Return((s, workers))
@gen.coroutine
def end_cluster(s, workers):
logger.debug("Closing out test cluster")
for w in workers:
with ignoring(TimeoutError, StreamClosedError, OSError):
yield w._close(report=False)
if w.local_dir and os.path.exists(w.local_dir):
shutil.rmtree(w.local_dir)
s.stop()
def gen_cluster(ncores=[('127.0.0.1', 1), ('127.0.0.1', 2)], timeout=10,
Worker=Worker, client=False, scheduler_kwargs={}):
    """ Coroutine test with small cluster
    @gen_cluster()
    def test_foo(scheduler, worker1, worker2):
        yield ...  # use tornado coroutines
    See also:
        start
        end
    """
    from distributed import Client
def _(func):
cor = gen.coroutine(func)
def test_func():
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
s, workers = loop.run_sync(lambda: start_cluster(ncores, loop,
Worker=Worker, scheduler_kwargs=scheduler_kwargs))
args = [s] + workers
if client:
e = Client((s.ip, s.port), loop=loop, start=False)
loop.run_sync(e._start)
args = [e] + args
try:
loop.run_sync(lambda: cor(*args), timeout=timeout)
finally:
if client:
loop.run_sync(e._shutdown)
loop.run_sync(lambda: end_cluster(s, workers))
loop.stop()
loop.close(all_fds=True)
return test_func
return _
@contextmanager
def make_hdfs():
from hdfs3 import HDFileSystem
hdfs = HDFileSystem(host='localhost', port=8020)
if hdfs.exists('/tmp/test'):
hdfs.rm('/tmp/test')
hdfs.mkdir('/tmp/test')
try:
yield hdfs
finally:
if hdfs.exists('/tmp/test'):
hdfs.rm('/tmp/test')
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
@contextmanager
def popen(*args, **kwargs):
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
proc = Popen(*args, **kwargs)
try:
yield proc
except Exception:
line = '\n\nPrint from stderr\n=================\n'
while line:
print(line)
line = proc.stderr.readline()
line = '\n\nPrint from stdout\n=================\n'
while line:
print(line)
line = proc.stdout.readline()
raise
finally:
os.kill(proc.pid, signal.SIGINT)
if sys.version_info[0] == 3:
proc.wait(10)
else:
proc.wait()
with ignoring(OSError):
proc.terminate()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2020_08_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
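# Hypothetical usage sketch (client construction and resource names are
# assumptions, not part of this generated module). These operations are
# normally reached through a NetworkManagementClient as
# `client.inbound_nat_rules`, and the long-running operations above are
# driven through the returned AsyncLROPoller, e.g.
#
#     poller = await client.inbound_nat_rules.begin_delete(
#         resource_group_name="my-rg",
#         load_balancer_name="my-lb",
#         inbound_nat_rule_name="my-rule",
#     )
#     await poller.result()  # wait for the deletion to complete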
|
|
"""
PatientFinders are used to find OpenMRS patients that correspond to
CommCare cases if none of the patient identifiers listed in
OpenmrsCaseConfig.match_on_ids have successfully matched a patient.
See `README.md`__ for more context.
"""
import logging
from collections import namedtuple
from functools import partial
from pprint import pformat
from dimagi.ext.couchdbkit import (
DecimalProperty,
DictProperty,
DocumentSchema,
ListProperty,
)
from corehq.motech.finders import (
MATCH_FUNCTIONS,
PropertyWeight,
)
from corehq.motech.openmrs.const import OPENMRS_DATA_TYPE_BOOLEAN
from corehq.motech.value_source import (
deserialize,
recurse_subclasses,
)
logger = logging.getLogger(__name__)
constant_false = {
"value": 'False',
# We are fetching from a case property or a form question value, and
# we want `get_value()` to return False (bool). `get_value()`
# serialises case properties and form question values as external
# data types. OPENMRS_DATA_TYPE_BOOLEAN is useful because it is a
# bool, not a string, so `constant_false.get_value()` will return
# False (not 'False')
"external_data_type": OPENMRS_DATA_TYPE_BOOLEAN,
}
class PatientFinder(DocumentSchema):
"""
The ``PatientFinder`` base class was developed as a way to
handle situations where patient cases are created in CommCare
instead of being imported from OpenMRS.
When patients are imported from OpenMRS, they will come with at
least one identifier that MOTECH can use to match the case in
CommCare with the corresponding patient in OpenMRS. But if the case
is registered in CommCare then we may not have an ID, or the ID
could be wrong. We need to search for a corresponding OpenMRS
patient.
Different projects may focus on different kinds of case properties,
so it was felt that a base class would allow some flexibility.
The ``PatientFinder.wrap()`` method allows you to wrap documents of
subclasses.
The ``PatientFinder.find_patients()`` method must be implemented by
subclasses. It returns a list of zero, one, or many patients. If it
returns one patient, the OpenmrsRepeater.find_or_create_patient()
will accept that patient as a true match.
    .. NOTE:: The consequences of a false positive (a Type I error) are
              severe: A real patient will have their valid values
              overwritten by those of someone else. So ``PatientFinder``
              subclasses should be written and configured to skew
              towards false negatives (Type II errors). In other words,
it is much better not to choose a patient than to choose
the wrong patient.
"""
# Whether to create a new patient if no patients are found
create_missing = DictProperty(default=constant_false)
@classmethod
def wrap(cls, data):
if 'create_missing' in data and isinstance(data['create_missing'], bool):
data['create_missing'] = {
'external_data_type': OPENMRS_DATA_TYPE_BOOLEAN,
'value': str(data['create_missing'])
}
if cls is PatientFinder:
subclass = {
sub._doc_type: sub for sub in recurse_subclasses(cls)
}.get(data['doc_type'])
return subclass.wrap(data) if subclass else None
else:
return super(PatientFinder, cls).wrap(data)
def find_patients(self, requests, case, case_config):
"""
Given a case, search OpenMRS for possible matches. Return the
best results. Subclasses must define "best". If just one result
is returned, it will be chosen.
"""
raise NotImplementedError
PatientScore = namedtuple('PatientScore', ['patient', 'score'])
class WeightedPropertyPatientFinder(PatientFinder):
"""
The ``WeightedPropertyPatientFinder`` class finds OpenMRS patients
that match CommCare cases by assigning weights to case properties,
and adding the weights of matching patient properties to calculate a
confidence score.
"""
# Identifiers that are searchable in OpenMRS. e.g.
# [ 'bahmni_id', 'household_id', 'last_name']
searchable_properties = ListProperty()
# The weight assigned to a matching property.
# [
# {"case_property": "bahmni_id", "weight": 0.9},
# {"case_property": "household_id", "weight": 0.9},
# {
# "case_property": "dob",
# "weight": 0.75,
# "match_type": "days_diff",
# // days_diff matches based on days difference from given date
# "match_params": [364]
# },
# {
# "case_property": "first_name",
# "weight": 0.025,
# "match_type": "levenshtein",
# // levenshtein function takes edit_distance / len
# "match_params": [0.2]
# // i.e. 20% is one edit for every 5 characters
# // e.g. "Riyaz" matches "Riaz" but not "Riazz"
# },
# {"case_property": "last_name", "weight": 0.025},
# {"case_property": "municipality", "weight": 0.2},
# ]
property_weights = ListProperty(PropertyWeight)
# The threshold that the sum of weights must pass for a CommCare case to
# be considered a match to an OpenMRS patient
threshold = DecimalProperty(default=1.0)
# If more than one patient passes `threshold`, the margin by which the
# weight of the best match must exceed the weight of the second-best match
# to be considered correct.
confidence_margin = DecimalProperty(default=0.667) # Default: Matches two thirds better than second-best
def __init__(self, *args, **kwargs):
super(WeightedPropertyPatientFinder, self).__init__(*args, **kwargs)
self._property_map = {}
def get_score(self, patient, case):
"""
Return the sum of weighted properties to give an OpenMRS
patient a score of how well they match a CommCare case.
"""
def weights():
for property_weight in self.property_weights:
prop = property_weight['case_property']
jsonpath, value_source_dict = self._property_map[prop]
weight = property_weight['weight']
matches = jsonpath.find(patient)
for match in matches:
patient_value = match.value
case_value = case.get_case_property(prop)
match_type = property_weight['match_type']
match_params = property_weight['match_params']
match_function = partial(MATCH_FUNCTIONS[match_type], *match_params)
is_equivalent = match_function(deserialize(value_source_dict, patient_value), case_value)
yield weight if is_equivalent else 0
return sum(weights())
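    # Illustrative scoring example (weights and values are made up): with
    # property_weights giving bahmni_id a weight of 0.9 and dob a weight of
    # 0.75 (days_diff match), a patient whose bahmni_id matches exactly and
    # whose dob is within the configured window scores 0.9 + 0.75 = 1.65,
    # which clears the default threshold of 1.0; a dob match alone (0.75)
    # would fall short and the patient would not become a candidate.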
def find_patients(self, requests, case, case_config):
"""
Matches cases to patients. Returns a list of patients, each
with a confidence score >= self.threshold
"""
from corehq.motech.openmrs.openmrs_config import get_property_map
from corehq.motech.openmrs.repeater_helpers import search_patients
self._property_map = get_property_map(case_config)
candidates = {} # key on OpenMRS UUID to filter duplicates
for prop in self.searchable_properties:
value = case.get_case_property(prop)
if value:
response_json = search_patients(requests, value)
for patient in response_json['results']:
score = self.get_score(patient, case)
if score >= self.threshold:
candidates[patient['uuid']] = PatientScore(patient, score)
if not candidates:
logger.info(
'Unable to match case "%s" (%s): No candidate patients found.',
case.name, case.get_id,
)
return []
if len(candidates) == 1:
patient = list(candidates.values())[0].patient
logger.info(
'Matched case "%s" (%s) to ONLY patient candidate: \n%s',
case.name, case.get_id, pformat(patient, indent=2),
)
return [patient]
patients_scores = sorted(candidates.values(), key=lambda candidate: candidate.score, reverse=True)
if patients_scores[0].score / patients_scores[1].score > 1 + self.confidence_margin:
# There is more than a `confidence_margin` difference
# (defaults to 66.7%) in score between the best-ranked
# patient and the second-best-ranked patient. Let's go with
# Patient One.
patient = patients_scores[0].patient
logger.info(
'Matched case "%s" (%s) to BEST patient candidate: \n%s',
case.name, case.get_id, pformat(patients_scores, indent=2),
)
return [patient]
# We can't be sure. Just send them all.
logger.info(
'Unable to match case "%s" (%s) to patient candidates: \n%s',
case.name, case.get_id, pformat(patients_scores, indent=2),
)
return [ps.patient for ps in patients_scores]
|
|
# Copyright (c) 2014, Andre Severo Pereira Gomes
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from determinant import *
class electronic_state:
"""
electronic state class:
    in composition, we'll add determinants (= elements of the determinant class)
"""
def __init__ (self) :
self.label = "E"
self.energy = 0.0
self.relative_energy = 0.0
self.index = 0.0
self.fs_sector = "00"
self.composition = [ ]
self.max_index_p = {}
self.min_index_p = {}
self.max_index_h = {}
self.min_index_h = {}
self.symmetries = []
def set_sector (self,sector) :
self.fs_sector = sector
self.max_index_h[sector] = 0
self.min_index_h[sector] = 10000
self.max_index_p[sector] = 0
self.min_index_p[sector] = 10000
def get_sector (self) :
return self.fs_sector
def set_index (self,index) :
self.index = index
def get_index (self) :
return self.index
def set_relative_energy (self,relative_energy):
self.relative_energy = relative_energy
def set_energy (self,energy):
self.energy = energy
def get_relative_energy (self) :
return self.relative_energy
def get_energy (self) :
return self.energy
def set_label (self,label) :
self.label = label
def get_label (self) :
return self.label
def add_symmetry(self,new) :
if new not in self.symmetries :
self.symmetries.append(new)
def add_determinant (self,ca,cb,index_h,spinor_h,index_p,spinor_p,symmetry_h,symmetry_p) :
det = determinant()
det.set_coef_r(ca)
det.set_coef_i(cb)
det.set_from_index(index_h)
det.set_to_index(index_p)
det.set_from_ener(spinor_h)
det.set_to_ener(spinor_p)
det.set_from_symmetry(symmetry_h)
det.set_to_symmetry(symmetry_p)
det.set_weight()
if self.max_index_h[self.fs_sector] < int(index_h) :
self.max_index_h[self.fs_sector] = int(index_h)
if self.min_index_h[self.fs_sector] > int(index_h) :
self.min_index_h[self.fs_sector] = int(index_h)
if self.max_index_p[self.fs_sector] < int(index_p) :
self.max_index_p[self.fs_sector] = int(index_p)
if self.min_index_p[self.fs_sector] > int(index_p) :
self.min_index_p[self.fs_sector] = int(index_p)
self.composition.append(det)
self.add_symmetry(symmetry_h)
self.add_symmetry(symmetry_p)
def print_min_max_indexes(self,sector) :
print "maximum and minimum indexes for sector ",sector
print "h: max ",self.max_index_h[sector]," min ",self.min_index_h[sector]
print "p: max ",self.max_index_p[sector]," min ",self.min_index_p[sector]
def setup_template_dets(self,sector):
# setup templates for unique determinants that span all possible
# index combinations for the model (P=Pm+Pi) spaces
template_dets = []
for sym1 in self.symmetries :
if sector == "10" :
for i_h in range(self.min_index_h[sector],(self.max_index_h[sector])+1) :
new_det = [ i_h, 0, 0, 0, 0, sym1, "" ]
template_dets.append(new_det)
if sector == "01" :
for i_p in range(self.min_index_p[sector],(self.max_index_p[sector])+1) :
new_det = [ 0, i_p, 0, 0, 0, "", sym1 ]
template_dets.append(new_det)
if sector == "11" or sector == "02" or sector == "20" :
for i_h in range(self.min_index_h[sector],(self.max_index_h[sector])+1) :
for sym2 in self.symmetries :
for i_p in range(self.min_index_p[sector],(self.max_index_p[sector])+1) :
new_det = [ i_h, i_p, 0, 0, 0, sym1, sym2 ]
template_dets.append(new_det)
return template_dets
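    # Layout of the determinant lists built above and consumed by
    # get_non_unique_dets / get_unique_dets / print_determinant (by position):
    #   [0] hole index, [1] particle index, [2] accumulated weight,
    #   [3] hole spinor energy, [4] particle spinor energy,
    #   [5] hole symmetry, [6] particle symmetry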
def get_non_unique_dets (self,sector):
non_unique_dets = []
#
        # list the determinants read from the output as-is, without merging
        # entries that share the same indexes (weights are not accumulated here).
#
# todo: verify the boson symmetry of each index, to be sure that we're actually finding the same determinants
#
for d in range(len(self.composition)) :
e_h = self.composition[d].get_from_ener()
e_p = self.composition[d].get_to_ener()
i_h = self.composition[d].get_from_index()
i_p = self.composition[d].get_to_index()
s_h = self.composition[d].get_from_symmetry()
s_p = self.composition[d].get_to_symmetry()
w = self.composition[d].get_weight()
new_det = [ i_h, i_p, w, e_h, e_p, s_h, s_p ]
non_unique_dets.append(new_det)
return non_unique_dets
def get_unique_dets (self,sector,group_by_energy=False) :
unique_dets = self.setup_template_dets(sector)
#
        # fill the templates with the content of the determinants read from the output,
        # accumulating the weights whenever we have the same indexes.
#
# todo: verify the boson symmetry of each index, to be sure that we're actually finding the same determinants
#
for d in range(len(self.composition)) :
e_h = self.composition[d].get_from_ener()
e_p = self.composition[d].get_to_ener()
i_h = self.composition[d].get_from_index()
i_p = self.composition[d].get_to_index()
s_h = self.composition[d].get_from_symmetry()
s_p = self.composition[d].get_to_symmetry()
w = self.composition[d].get_weight()
for p in range(len(unique_dets)) :
ud = unique_dets[p]
if group_by_energy:
if ud[3] == e_h and ud[4] == e_p :
ud[2]+= w
ud[3] = e_h
ud[4] = e_p
else :
if i_h == ud[0] and i_p == ud[1] and ud[5] == s_h and ud[6] == s_p :
ud[2]+= w
ud[3] = e_h
ud[4] = e_p
return unique_dets
def print_determinant(self,det) :
i_h = det[0]
i_p = det[1]
w = det[2]
e_h = det[3]
e_p = det[4]
s_h = det[5]
s_p = det[6]
# add here code to translate the symmetry and index in symmetry to a global identifier
i_global_p = -1
i_global_h = -1
if (i_h == 0) :
print " % 5.1f % 3d (%3d %3s, % 6.4f) " % (w*100,i_global_p,i_p,s_p,e_p)
elif (i_p == 0) :
print " % 5.1f % 3d (%3d %3s, % 6.4f) " % (w*100,i_global_h,i_h,s_h,e_h)
else :
print " % 5.1f % 3d (%3d %3s, % 6.4f); % 3d (%3d %3s, % 6.4f) " % (w*100,i_global_h,i_h,s_h,e_h,i_global_p,i_p,s_p,e_p)
def print_list (self,sector,threshold,max_states,unique=True) :
if self.index > max_states:
return
print "\n electronic state #",self.index," in symmetry ",self.label," energy: ",self.energy
total_w = 0.0
if not unique:
dets = self.get_non_unique_dets(sector)
else:
dets = self.get_unique_dets(sector)
for p in range(len(dets)) :
d = dets[p]
if d[2] >= threshold :
self.print_determinant(d)
total_w += d[2]
print " ----\n % 5.1f\n" % (total_w*100)
def print_table (self,sector,threshold,max_states,range_h=[],range_p=[],unique=True) :
if self.index > max_states:
return
print "\n electronic state #",self.index," in symmetry ",self.label," energy: ",self.energy
total_w = 0.0
dets = self.get_unique_dets(sector)
if range_h == []:
range_h = range(1,(self.max_index_h[sector])+1)
if range_p == []:
range_p = range(1,(self.max_index_p[sector])+1)
if sector == "11" or sector == "02" or sector == "20" :
relative_energy = self.get_relative_energy()
print "\n printing electronic state composition in table format"
print "% 8.0f %6s % 2d |" % (relative_energy*219474.631280634, self.label, self.index),
count = 35
for i_h in range_h :
for i_p in range_p :
for p in range(len(dets)) :
d = dets[p]
                        if i_h == d[0] and i_p == d[1] and d[2] >= threshold :
print " % 3d % 3d |" % (i_h,i_p),
count += 10
print "\n","-"*count
print "% 8.0f %6s % 2d |" % (relative_energy*219474.631280634, self.label, self.index),
for i_h in range_h :
for i_p in range_p :
for p in range(len(dets)) :
d = dets[p]
                        if i_h == d[0] and i_p == d[1] and d[2] >= threshold :
print " % 5.1f |" % (d[2]*100),
print "\n"
def print_list_and_table(self,sector,threshold,max_states,range_h=[],range_p=[],unique=True) :
if self.index > max_states:
return
print "\n electronic state #",self.index," in symmetry ",self.label," energy: ",self.energy
total_w = 0.0
if not unique:
dets = self.get_non_unique_dets(sector)
else:
dets = self.get_unique_dets(sector)
for p in range(len(dets)) :
d = dets[p]
if d[2] >= threshold :
self.print_determinant(d)
total_w += d[2]
print " ----\n % 5.1f\n" % (total_w*100)
#
# additional step, print in a table format
#
if sector == "11" :
relative_energy = self.get_relative_energy()
print "% 8.0f %6s % 2d |" % (relative_energy*219474.631280634, self.label, self.index),
count = 30
for i_h in range(1,(self.max_index_h[sector])+1) :
for i_p in range(1,(self.max_index_p[sector])+1) :
for p in range(len(dets)) :
d = dets[p]
if i_h == d[0] and i_p == d[1] and d[2] >= threshold :
print " % 3d % 3d |" % (i_h,i_p),
count += 10
print "\n","-"*count
print "% 8.0f %6s % 2d |" % (relative_energy*219474.631280634, self.label, self.index),
for i_h in range(1,(self.max_index_h[sector])+1) :
for i_p in range(1,(self.max_index_p[sector])+1) :
for p in range(len(dets)) :
d = dets[p]
if i_h == d[0] and i_p == d[1] and d[2] >= threshold :
print " % 5.1f |" % (d[2]*100),
print "\n"
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
import random
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
connect_nodes,
satoshi_round,
sync_blocks,
sync_mempools,
)
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_2), OP_EQUAL])
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)."""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
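# (1.1892 is roughly 2**0.25, so 1.1892**28 is roughly 2**7 = 128; drawing the
# exponent uniformly from 0..28 spreads the random fee multiplicatively over
# about 1x to 128x fee_increment, matching the exponential bucket spacing.)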
rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(ToHex(tx), True)
unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
unconflist.append({"txid": txid, "vout": 1, "amount": amount})
return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split=False):
"""Generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to produce small inputs so they wouldn't have
a high coin age back when the notion of priority still existed.
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"] / 2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split):
completetx = from_node.signrawtransactionwithwallet(ToHex(tx))["hex"]
else:
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = ToHex(tx)
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_estimates(node, fees_seen, max_invalid):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
for i, e in enumerate(all_smart_estimates): # estimate is for i+1
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
if feerate - delta > last_feerate:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
% (feerate, last_feerate))
last_feerate = feerate
if i == 0:
assert_equal(e["blocks"], 2)
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
"""
We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
["-blockmaxsize=17000", "-maxorphantx=1000"],
["-blockmaxsize=8000", "-maxorphantx=1000"]])
# Use node0 to mine blocks for input splitting
# Node1 mines small blocks, but ones bigger than the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# so a 17k limit leaves room for 110 or so transactions.
# Node2 is a stingy miner that produces blocks which are too small
# (room for only 55 or so transactions).
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee) / tx_kbytes)
sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
# Double txouts to txouts2
while (len(self.txouts) > 0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Double txouts2 to txouts
while (len(self.txouts2) > 0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes; we didn't want to connect them earlier
# so that the estimates would not be affected by the splitting transactions.
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 14)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
|
|
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils._testing import (assert_array_almost_equal,
assert_allclose)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.datasets import make_blobs
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils.validation import _check_psd_eigenvalues
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert kwargs == {} # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert X_fit_transformed.size != 0
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert (X_pred_transformed.shape[1] ==
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert X_pred2.shape == X_pred.shape
def test_kernel_pca_invalid_parameters():
with pytest.raises(ValueError):
KernelPCA(10, fit_inverse_transform=True, kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_deterministic_output():
rng = np.random.RandomState(0)
X = rng.rand(10, 10)
eigen_solver = ('arpack', 'dense')
for solver in eigen_solver:
transformed_X = np.zeros((20, 2))
for i in range(20):
kpca = KernelPCA(n_components=2, eigen_solver=solver,
random_state=rng)
transformed_X[i, :] = kpca.fit_transform(X)[0]
assert_allclose(
transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert (X_pred_transformed.shape[1] ==
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert X_pred2.shape == X_pred.shape
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: the fifth has a near-zero eigenvalue, so
# it can be trimmed due to roundoff error
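# Informal sketch of why this holds: with a linear kernel the (centered) Gram
# matrix X X^T has the same non-zero eigenvalues as the covariance matrix
# X^T X used by PCA, so both methods project onto the same subspace and the
# component scores can only differ in sign, hence the np.abs() comparison.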
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert shape == (2, c)
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert Xt.shape == (3, 0)
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert Xt.shape == (3, 2)
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert Xt.shape == (3, 0)
def test_leave_zero_eig():
"""This test checks that fit().transform() returns the same result as
fit_transform() in case of non-removed zero eigenvalue.
Non-regression test for issue #12141 (PR #12143)"""
X_fit = np.array([[1, 1], [0, 0]])
# Assert that even with all np warnings on, there is no div by zero warning
with pytest.warns(None) as record:
with np.errstate(all='warn'):
k = KernelPCA(n_components=2, remove_zero_eig=False,
eigen_solver="dense")
# Fit, then transform
A = k.fit(X_fit).transform(X_fit)
# Do both at once
B = k.fit_transform(X_fit)
# Compare
assert_array_almost_equal(np.abs(A), np.abs(B))
for w in record:
# There might be warnings about the kernel being badly conditioned,
# but there should not be warnings about division by zero.
# (Numpy division by zero warning can have many message variants, but
# at least we know that it is a RuntimeWarning, so let's check only this)
assert not issubclass(w.category, RuntimeWarning)
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
with pytest.raises(ValueError):
kpca.fit(X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca),
("Perceptron", Perceptron(max_iter=5))])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert grid_search.best_score_ == 1
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca),
("Perceptron", Perceptron(max_iter=5))])
param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert grid_search.best_score_ == 1
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron(max_iter=5).fit(X, y).score(X, y)
assert train_score < 0.8
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y)
assert train_score == 1.0
def test_kernel_conditioning():
""" Test that ``_check_psd_eigenvalues`` is correctly called
Non-regression test for issue #12140 (PR #12145)"""
# create a pathological X leading to small non-zero eigenvalue
X = [[5, 1],
[5+1e-8, 1e-8],
[5+1e-8, 0]]
kpca = KernelPCA(kernel="linear", n_components=2,
fit_inverse_transform=True)
kpca.fit(X)
# check that the small non-zero eigenvalue was correctly set to zero
assert kpca.lambdas_.min() == 0
assert np.all(kpca.lambdas_ == _check_psd_eigenvalues(kpca.lambdas_))
@pytest.mark.parametrize("kernel",
["linear", "poly", "rbf", "sigmoid", "cosine"])
def test_kernel_pca_inverse_transform(kernel):
X, *_ = make_blobs(n_samples=100, n_features=4, centers=[[1, 1, 1, 1]],
random_state=0)
kp = KernelPCA(n_components=2, kernel=kernel, fit_inverse_transform=True)
X_trans = kp.fit_transform(X)
X_inv = kp.inverse_transform(X_trans)
assert_allclose(X, X_inv)
def test_32_64_decomposition_shape():
""" Test that the decomposition is similar for 32 and 64 bits data """
# see https://github.com/scikit-learn/scikit-learn/issues/18146
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
cluster_std=0.1
)
X = StandardScaler().fit_transform(X)
X -= X.min()
# Compare the shapes (corresponds to the number of non-zero eigenvalues)
kpca = KernelPCA()
assert (kpca.fit_transform(X).shape ==
kpca.fit_transform(X.astype(np.float32)).shape)
# TODO: Remove in 0.26
def test_kernel_pca_pairwise_is_deprecated():
kp = KernelPCA(kernel='precomputed')
msg = r"Attribute _pairwise was deprecated in version 0\.24"
with pytest.warns(FutureWarning, match=msg):
kp._pairwise
|
|
# -*- coding: utf8 -*-
"""Contains the tools to produce a wavelength solution
This module gets the extracted data to produce a wavelength solution, linearize
the spectrum and write the solution to the image's header following the FITS
standard.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
from astropy.stats import sigma_clip
from ccdproc import CCDData
from matplotlib.backends.backend_pdf import PdfPages
from ..wcs.wcs import WCS
from ..core import (add_linear_wavelength_solution,
bin_reference_data,
cross_correlation,
evaluate_wavelength_solution,
get_lines_in_lamp,
linearize_spectrum,
write_fits)
from ..core import (ReferenceData, NoMatchFound)
log = logging.getLogger(__name__)
class WavelengthCalibration(object):
"""Wavelength Calibration Class
The WavelengthCalibration class is instantiated for each of the science
images, which are treated as a "science object". In this first release it
can find a wavelength solution for a given comparison lamp using an
interactive GUI based on Matplotlib. Although it works very well, there is a
plan to create an independent Qt-based GUI for the next release, in order to
work better on different screen sizes and to cover other topics such as
showing warnings, messages and help.
This class takes a 1D spectrum with no wavelength calibration and returns FITS
files with wavelength solutions, using the FITS standard for linear
solutions. Goodman spectra are slightly non-linear, therefore they are
linearized and smoothed before being returned to the user.
"""
def __init__(self):
"""Wavelength Calibration Class Initialization
A WavelengthCalibration class is instantiated for each science target
being processed, i.e. every science image.
Notes:
This class violates some conventions as far as length and number of
attributes are concerned. Solving this is part of the priority plans
for the next release.
Args:
args (Namespace): Runtime arguments.
"""
self.poly_order = 3
self.wcs = WCS()
self.wsolution = None
self.wcal_lamp_file = None
self.sci_target_file = None
self.n_points = None
self.n_rejections = None
self.rms_error = None
self.cross_corr_tolerance = 5
self.reference_data_dir = None
self.reference_data = None
self.calibration_lamp = ''
self.wcal_lamp_file = ''
# Instrument configuration and spectral characteristics
self.serial_binning = None
self.parallel_binning = None
def __call__(self,
ccd,
comp_list,
save_data_to,
reference_data,
object_number=None,
corr_tolerance=15,
output_prefix='w',
plot_results=False,
save_plots=False,
plots=False,
json_output=False):
"""Call method for the WavelengthSolution Class
It takes extracted data and produces a wavelength-calibrated 1D FITS file.
The call method takes care of the order and logic needed to call the
different methods. A wavelength solution can be recycled for the next
science object. In that case, the wavelength solution is passed as an
argument and then there is no need to calculate it again. The recycling
part has to be implemented in the caller function.
Args:
ccd (CCDData): a :class:`~astropy.nddata.CCDData` instance
comp_list (list): Comparison lamps for the science target that will
be processed here. Every element of this list is an instance of
:class:`~astropy.nddata.CCDData`.
save_data_to (str): Path to save processed data.
object_number (int): In case of multiple detections in a single
image this number will be added as a suffix before `.fits` in
order to allow for multiple 1D files. Default value is None.
corr_tolerance (int): `cross_corr_tolerance` stands for cross
correlation tolerance, in other words, how far the cross
correlation can be from the global cross correlation. It usually
increases with the frequency of the grating.
output_prefix (str): Prefix to add to files.
plot_results (bool): Present a plot showing the wavelength
calibrated data.
save_plots (bool): Save any plot shown. They are saved under
`<path>/<save_data_to>/plots/` where `<path>/<save_data_to>` is
the full path to the folder that `save_data_to` points to.
plots (bool): Show plots during operation.
Returns:
wavelength_solution (object): The mathematical model of the
wavelength solution. If it fails to create it will return a
None element.
"""
assert isinstance(ccd, CCDData)
assert isinstance(comp_list, list)
json_payload = {'wavelength_solution': [],
'warning': '',
'error': ''}
if os.path.isdir(reference_data):
if self.reference_data_dir != reference_data:
self.reference_data_dir = reference_data
self.reference_data = ReferenceData(
reference_dir=self.reference_data_dir)
self.cross_corr_tolerance = corr_tolerance
self.sci_target_file = ccd.header['GSP_FNAM']
self.i_fig = None
log.info(f"Starting Wavelength calibration of Science Target: {ccd.header['OBJECT']} "
f"File: {self.sci_target_file}.")
if len(comp_list) == 0:
log.warning("No comparison lamps were provided for file {}"
"".format(self.sci_target_file))
log.error("Ending processing of {}".format(self.sci_target_file))
if json_output:
json_payload['error'] = 'Unable to process without reference lamps'
return json_payload
else:
return
else:
log.debug(f"Science file {self.sci_target_file} has {len(comp_list)} comparison lamps.")
wavelength_solutions = []
reference_lamp_names = []
for self.lamp in comp_list:
self.calibration_lamp = self.lamp.header['GSP_FNAM']
self.raw_pixel_axis = range(self.lamp.shape[0])
self.lamp_name = self.lamp.header['OBJECT']
log.info(f"Using Comparison lamp {self.lamp_name} {self.calibration_lamp}")
self.lines_center = get_lines_in_lamp(
ccd=self.lamp, plots=plots)
try:
self._automatic_wavelength_solution(
save_data_to=save_data_to,
corr_tolerance=self.cross_corr_tolerance)
except NoMatchFound as message:
raise NoMatchFound(message)
if self.wsolution is not None:
ccd.header.set('GSP_WRMS', value=self.rms_error)
ccd.header.set('GSP_WPOI', value=self.n_points)
ccd.header.set('GSP_WREJ', value=self.n_rejections)
linear_x_axis, self.lamp.data = linearize_spectrum(
self.lamp.data,
wavelength_solution=self.wsolution)
self.lamp = self.wcs.write_gsp_wcs(ccd=self.lamp,
model=self.wsolution)
self.lamp = add_linear_wavelength_solution(
ccd=self.lamp,
x_axis=linear_x_axis,
reference_lamp=self.calibration_lamp)
self.wcal_lamp_file = self._save_wavelength_calibrated(
ccd=self.lamp,
original_filename=self.calibration_lamp,
save_data_to=save_data_to,
output_prefix=output_prefix,
index=object_number,
lamp=True)
wavelength_solutions.append(self.wsolution)
reference_lamp_names.append(self.wcal_lamp_file)
else:
log.error(f"It was not possible to get a wavelength solution from lamp {self.lamp_name} "
f"{self.calibration_lamp}.")
continue
if len(wavelength_solutions) > 1:
log.warning(f"Multiple ({len(wavelength_solutions)}) wavelength solutions found.")
warning_message = str("The current version of the pipeline "
"does not combine multiple solutions; "
"instead it saves a separate version of "
"the science file for each wavelength "
"solution calculated.")
log.warning(warning_message)
all_solution_info = []
for i in range(len(wavelength_solutions)):
# TODO (simon): Combine Multiple solutions
self.wsolution = wavelength_solutions[i]
self.wcal_lamp_file = reference_lamp_names[i]
ccd = self.wcs.write_gsp_wcs(ccd=ccd, model=self.wsolution)
saved_file_name = self._save_science_data(
ccd=ccd,
wavelength_solution=self.wsolution,
save_to=save_data_to,
index=i + 1,
plot_results=plot_results,
save_plots=save_plots,
plots=plots)
all_solution_info.append({
'solution_info': {'rms_error': "{:.4f}".format(self.rms_error),
'npoints': "{:d}".format(self.n_points),
'nrjections': "{:d}".format(self.n_rejections)},
'file_name': saved_file_name,
'reference_lamp': self.wcal_lamp_file})
if json_output:
json_payload['warning'] = warning_message
json_payload['wavelength_solution'] = all_solution_info
return json_payload
elif len(wavelength_solutions) == 1:
self.wsolution = wavelength_solutions[0]
self.wcal_lamp_file = reference_lamp_names[0]
ccd = self.wcs.write_gsp_wcs(ccd=ccd, model=self.wsolution)
saved_file_name = self._save_science_data(
ccd=ccd,
wavelength_solution=self.wsolution,
save_to=save_data_to,
plot_results=plot_results,
save_plots=save_plots,
index=object_number,
plots=plots)
if json_output:
json_payload['wavelength_solution'] = [
{'solution_info': {'rms_error': "{:.4f}".format(self.rms_error),
'npoints': "{:d}".format(self.n_points),
'nrjections': "{:d}".format(self.n_rejections)},
'file_name': saved_file_name,
'reference_lamp': self.wcal_lamp_file}]
return json_payload
else:
log.error("Unable to obtain wavelength solution.")
if json_output:
json_payload['error'] = "Unable to obtain wavelength solution."
return json_payload
def _automatic_wavelength_solution(self,
save_data_to,
corr_tolerance=15,
plot_results=False,
save_plots=False,
plots=False):
"""Finds a Wavelength Solution Automatically
This method uses a library of previously wavelength-calibrated
comparison lamps. It will only process them if they are an exact match.
A workflow summary is presented below:
- Identify the exactly matching reference comparison lamp. If it
doesn't exist it will return None. If it does exist the reference
lamp will be loaded and its wavelength solution read.
- Identify lines in the new lamp, the lamp data has been already
loaded at the initialization of the class
- According to the lines detected it will split both spectra into the
same number of pieces of matching sizes and then do a
cross correlation for each of them.
- The line's pixel value is stored
- Using the reference lamp's wavelength solution mathematical model,
the corresponding value in angstrom is calculated using the offset
obtained from the cross correlation, roughly:
angstrom = model(pixel + offset)
- As a first-order filter, one iteration of three-sigma clipping is
applied to the cross-correlation offsets; this is necessary to
eliminate mismatched lines.
- A new wavelength solution is calculated using the points collected
above.
- Using the Angstrom values previously found and the detected lines
plus the newly calculated solution, the differences in angstrom are
calculated, and a new sigma clipping (two sigma, up to three
iterations) is applied to those values, since the distributions are
not necessarily normal.
- Once these values are cleaned of rejected values the final solution
is calculated.
Returns:
None in case it is not possible to find a suitable template lamp or
if it is not possible to calculate the solution.
"""
try:
reference_lamp_ccd = self.reference_data.get_reference_lamp(
header=self.lamp.header)
log.debug('Found reference lamp: '
'{:s}'.format(reference_lamp_ccd.header['GSP_FNAM']))
except NoMatchFound as error:
raise NoMatchFound(error)
except NotImplementedError as error:
raise NotImplementedError(error)
# TODO (simon): Evaluate possibility to read iraf wcs. [#304]
reference_lamp_wav_axis, reference_lamp_ccd.data = \
self.wcs.read_gsp_wcs(ccd=reference_lamp_ccd)
self.serial_binning, self.parallel_binning = [
int(x) for x in self.lamp.header['CCDSUM'].split()]
if self.serial_binning != 1:
reference_lamp_wav_axis, reference_lamp_ccd.data = \
bin_reference_data(wavelength=reference_lamp_wav_axis,
intensity=reference_lamp_ccd.data,
serial_binning=self.serial_binning)
self.wcs.binning = self.serial_binning
'''detect lines in comparison lamp (not reference)'''
lamp_lines_pixel = get_lines_in_lamp(ccd=self.lamp,
plots=plots)
lamp_lines_angst = self.wcs.model(lamp_lines_pixel)
pixel_values = []
angstrom_values = []
correlation_values = []
angstrom_differences = []
log.debug('Length {:d}'.format(len(self.lamp.data)))
log.debug('NLines {:d}'.format(len(lamp_lines_pixel)))
log.debug('Length / NLines {:.3f}'.format(
len(self.lamp.data) / float(len(lamp_lines_pixel))))
slit_size = float(re.sub('["A-Za-z_ ]', '', self.lamp.header['SLIT']))
global_cross_corr = cross_correlation(
reference=reference_lamp_ccd.data,
compared=self.lamp.data,
slit_size=slit_size,
serial_binning=self.serial_binning,
selection_bias='center')
log.debug(f"Found global cross-correlation value of: {global_cross_corr}")
if plots:
plt.title(f"Comparison of New to Reference Lamp\nGlobal Cross Correlation Value: {global_cross_corr}")
plt.plot(reference_lamp_ccd.data, label='Reference Lamp')
plt.plot(self.lamp.data, label='New Lamp')
plt.legend(loc='best')
plt.show()
half_width = np.max(
[int((len(self.lamp.data) / float(len(lamp_lines_pixel)))),
4 * global_cross_corr])
for i in range(len(lamp_lines_pixel)):
line_value_pixel = lamp_lines_pixel[i]
line_value_angst = lamp_lines_angst[i]
xmin = int(max(0, round(line_value_pixel - half_width)))
xmax = int(min(round(line_value_pixel + half_width),
len(self.lamp.data)))
if xmin >= xmax:
continue
# print(xmin, xmax, self.lamp.data.size)
# TODO (simon): Convolve to match wider lines such as those from
# TODO (cont): the slit of 5 arcseconds
ref_sample = reference_lamp_ccd.data[xmin:xmax]
# ref_wavele = reference_lamp_wav_axis[xmin:xmax]
lamp_sample = self.lamp.data[xmin:xmax]
slit_size = float(re.sub('["A-Za-z_ ]', '', self.lamp.header['SLIT']))
correlation_value = cross_correlation(
reference=ref_sample,
compared=lamp_sample,
slit_size=slit_size,
serial_binning=self.serial_binning)
log.debug(f"Cross correlation value {correlation_value} vs Global Reference value: {global_cross_corr}")
if - corr_tolerance < (global_cross_corr - correlation_value) < \
corr_tolerance:
"""record value for reference wavelength"""
# print(global_cross_corr - correlation_value)
angstrom_value_model = self.wcs.model(
line_value_pixel + correlation_value)
# print(correlation_value, angstrom_value_model)
correlation_values.append(correlation_value)
angstrom_differences.append(angstrom_value_model -
line_value_angst)
angstrom_values.append(angstrom_value_model)
# print(angstrom_values)
pixel_values.append(line_value_pixel)
else:
log.debug("Local cross correlation value {:.3f} is too far "
"from {:.3f}".format(correlation_value,
global_cross_corr))
if plots: # pragma: no cover
# print(global_cross_corr, correlation_value)
plt.ion()
plt.title('Samples after cross correlation\n Shift {:.3f}'
''.format(correlation_value))
plt.xlabel('Pixel Axis')
plt.ylabel('Intensity')
plt.plot(ref_sample,
color='k',
label='Reference Sample')
plt.plot([x + correlation_value for x in
range(len(lamp_sample))],
lamp_sample,
label='New Lamp Sample')
plt.legend(loc='best')
plt.draw()
plt.pause(1)
plt.clf()
plt.ioff()
# A first, coarse rejection step, necessary to discard clearly wrong
# correlation results
clipped_values = sigma_clip(correlation_values,
sigma=3,
maxiters=1,
cenfunc=np.ma.median)
# print(clipped_values)
if np.ma.is_masked(clipped_values):
_pixel_values = list(pixel_values)
_angstrom_values = list(angstrom_values)
# print(_angstrom_values)
pixel_values = []
angstrom_values = []
for i in range(len(clipped_values)):
if clipped_values[i] is not np.ma.masked:
pixel_values.append(_pixel_values[i])
# print(_angstrom_values[i][0])
angstrom_values.append(_angstrom_values[i])
# Create a wavelength solution
log.info('Creating Wavelength Solution')
self.wsolution = self.wcs.fit(physical=pixel_values,
wavelength=angstrom_values,
model_name='chebyshev',
degree=self.poly_order)
if self.wsolution is None:
log.error('Failed to find wavelength solution using reference '
'file: {:s}'.format(self.calibration_lamp))
return None
# finding differences in order to improve the wavelength solution
wavelength_differences = [angstrom_values[i] -
self.wsolution(pixel_values[i]) for i in
range(len(pixel_values))]
clipped_differences = sigma_clip(wavelength_differences,
sigma=2,
maxiters=3,
cenfunc=np.ma.median)
if np.ma.is_masked(clipped_differences):
log.debug('Cleaning pixel to angstrom match to improve '
'wavelength solution')
_pixel_values = list(pixel_values)
_angstrom_values = list(angstrom_values)
pixel_values = []
angstrom_values = []
for i in range(len(clipped_differences)):
if clipped_differences[i] is not np.ma.masked:
pixel_values.append(_pixel_values[i])
angstrom_values.append(_angstrom_values[i])
log.info('Re-fitting wavelength solution')
self.wsolution = self.wcs.fit(physical=pixel_values,
wavelength=angstrom_values,
model_name='chebyshev',
degree=self.poly_order)
self.rms_error, self.n_points, self.n_rejections = \
evaluate_wavelength_solution(
clipped_differences=clipped_differences)
if plot_results or plots or \
save_plots: # pragma: no cover
plt.close('all')
plt.switch_backend('Qt5Agg')
# print(self.i_fig)
self.i_fig = None
if self.i_fig is None:
self.i_fig = plt.figure()
self.i_fig.canvas.set_window_title(
'Automatic Wavelength Solution')
self.ax1 = self.i_fig.add_subplot(111)
self.ax1.set_rasterization_zorder(1)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not plots:
plt.ion()
# plt.show()
else:
plt.ioff()
self.ax1.plot([], color='m', label='Pixels')
self.ax1.plot([], color='c', label='Angstrom')
for val in pixel_values:
self.ax1.axvline(self.wsolution(val), color='m', zorder=0)
for val2 in angstrom_values:
self.ax1.axvline(val2, color='c', linestyle='--', zorder=0)
self.ax1.plot(reference_lamp_wav_axis,
reference_lamp_ccd.data,
label='Reference',
color='k',
alpha=1, zorder=0)
self.ax1.plot(self.wsolution(self.raw_pixel_axis),
self.lamp.data,
label='Last Solution',
color='r',
alpha=0.7, zorder=0)
try:
wavmode = self.lamp.header['wavmode']
except KeyError as error:
log.debug(error)
wavmode = ''
self.ax1.set_xlabel('Wavelength (Angstrom)')
self.ax1.set_ylabel('Intensity (ADU)')
self.ax1.set_title('Automatic Wavelength Solution\n'
+ self.lamp.header['OBJECT']
+ ' ' + wavmode + '\n'
+ 'RMS Error: {:.3f}'.format(self.rms_error))
self.ax1.legend(loc='best')
self.i_fig.tight_layout()
if save_plots:
plots_path = os.path.join(save_data_to, 'plots')
if not os.path.isdir(plots_path):
os.makedirs(plots_path)
# saves pdf files of the wavelength solution plot
out_file_name = 'automatic-solution_' + self.lamp.header[
'GSP_FNAM']
out_file_name = re.sub('.fits', '', out_file_name)
file_count = len(glob.glob(
os.path.join(save_data_to,
out_file_name + '*'))) + 1
out_file_name += '_RMS_{:.3f}_{:03d}.pdf'.format(self.rms_error,
file_count)
pdf_pages = PdfPages(
os.path.join(plots_path, out_file_name))
plt.savefig(pdf_pages, format='pdf')
pdf_pages.close()
plot_name = os.path.join(plots_path,
re.sub('pdf', 'png', out_file_name))
plt.savefig(plot_name, rasterized=True, format='png', dpi=300)
plt.ioff()
plt.clf()
if plots or plot_results: # pragma: no cover
manager = plt.get_current_fig_manager()
if plt.get_backend() == u'GTK3Agg':
manager.window.maximize()
elif plt.get_backend() == u'Qt5Agg':
manager.window.showMaximized()
if plots:
plt.show()
elif plot_results:
plt.draw()
plt.pause(1)
plt.ioff()
plt.close()
def _save_science_data(self,
ccd,
wavelength_solution,
save_to,
index=None,
plot_results=False,
save_plots=False,
plots=False):
"""Save wavelength calibrated data
The spectrum is linearized, then the linear solution is recorded in the
ccd's header and finally it calls the method
:func:`~wavelength.WavelengthCalibration._save_wavelength_calibrated`
which performs the actual saving to a file.
Args:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` with a
1D spectrum.
wavelength_solution (object): A :class:`~astropy.modeling.Model`
save_to (str): Path to save location
index (int): If there are more than one target, they are identified
by this index.
plot_results (bool): Whether to show plots or not.
save_plots (bool): Whether to save plots to files.
plots (bool): Show plots during operation.
Returns:
File name of saved file.
"""
ccd = ccd.copy()
linear_x_axis, ccd.data = linearize_spectrum(
data=ccd.data,
wavelength_solution=wavelength_solution)
ccd = add_linear_wavelength_solution(
ccd=ccd,
x_axis=linear_x_axis,
reference_lamp=self.wcal_lamp_file)
save_file_name = self._save_wavelength_calibrated(
ccd=ccd,
original_filename=ccd.header['GSP_FNAM'],
save_data_to=save_to,
index=index)
if plot_results or plots or save_plots: # pragma: no cover
plt.close(1)
if plot_results:
plt.ion()
# plt.show()
elif plots:
plt.ioff()
wavelength_axis = wavelength_solution(range(ccd.data.size))
object_name = ccd.header['OBJECT']
grating = ccd.header['GRATING']
fig_title = 'Wavelength Calibrated Data : ' \
'{:s}\n{:s}'.format(object_name, grating)
fig, ax1 = plt.subplots(1)
fig.canvas.set_window_title(ccd.header['GSP_FNAM'])
# ax1 = fig.add_subplot(111)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax1.set_title(fig_title)
ax1.set_xlabel('Wavelength (Angstrom)')
ax1.set_ylabel('Intensity (ADU)')
ax1.set_xlim((wavelength_axis[0], wavelength_axis[-1]))
# plt.close(1)
ax1.plot(wavelength_axis,
ccd.data,
color='k',
label='Data')
ax1.legend(loc='best')
fig.tight_layout()
if save_plots:
log.info('Saving plots')
plots_dir = os.path.join(save_to,
'plots')
if not os.path.isdir(plots_dir):
os.mkdir(plots_dir)
plot_name = re.sub('.fits',
'.png',
ccd.header['GSP_FNAM'])
plot_path = os.path.join(plots_dir, plot_name)
# print(plot_path)
plt.savefig(plot_path, dpi=300)
log.info('Saved plot as {:s} file '
'DPI=300'.format(plot_name))
if plots or plot_results: # pragma: no cover
manager = plt.get_current_fig_manager()
if plt.get_backend() == u'GTK3Agg':
manager.window.maximize()
elif plt.get_backend() == u'Qt5Agg':
manager.window.showMaximized()
if plots:
plt.show()
elif plot_results:
plt.draw()
plt.pause(2)
plt.ioff()
return save_file_name
def _save_wavelength_calibrated(self,
ccd,
original_filename,
save_data_to,
output_prefix='w',
index=None,
lamp=False):
if index is None:
f_end = '.fits'
else:
f_end = '_ws_{:d}.fits'.format(index)
file_full_path = os.path.join(save_data_to,
output_prefix +
original_filename.replace('.fits', f_end))
if lamp:
log.info('Wavelength-calibrated {:s} file saved to: '
'{:s} for science file {:s}'
''.format(ccd.header['OBSTYPE'],
os.path.basename(file_full_path),
self.sci_target_file))
ccd.header.set('GSP_SCTR',
value=self.sci_target_file,
after='GSP_FLAT')
else:
log.info('Wavelength-calibrated {:s} file saved to: '
'{:s} using reference lamp {:s}'
''.format(ccd.header['OBSTYPE'],
os.path.basename(file_full_path),
self.wcal_lamp_file))
ccd.header.set(
'GSP_LAMP',
value=self.wcal_lamp_file,
comment='Reference lamp used to obtain wavelength solution',
after='GSP_FLAT')
write_fits(ccd=ccd,
full_path=file_full_path,
parent_file=original_filename)
return file_full_path
if __name__ == '__main__': # pragma: no cover
sys.exit('This can not be run on its own.')
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Utility methods and decorators
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Pelix constants
import pelix.constants
# Standard library
import collections
import contextlib
import functools
import logging
import sys
import threading
import traceback
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# Using Python 3
PYTHON_3 = (sys.version_info[0] == 3)
# ------------------------------------------------------------------------------
@contextlib.contextmanager
def use_service(bundle_context, svc_reference):
"""
Utility context to safely use a service in a "with" block.
It looks after the given service and releases its reference when
exiting the context.
:param bundle_context: The calling bundle context
:param svc_reference: The reference of the service to use
:return: The requested service
:raise BundleException: Service not found
:raise TypeError: Invalid service reference
"""
try:
# Give the service
yield bundle_context.get_service(svc_reference)
finally:
try:
# Release it
bundle_context.unget_service(svc_reference)
except pelix.constants.BundleException:
# Service might have already been unregistered
pass
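# Minimal usage sketch (assuming 'context' is a BundleContext and 'reference'
# is a ServiceReference obtained elsewhere; 'do_something' is hypothetical):
#
#     with use_service(context, reference) as svc:
#         svc.do_something()
#
# The service reference is released automatically, even if the block raises.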
# ------------------------------------------------------------------------------
class Deprecated(object):
"""
Prints a warning when using the decorated method
"""
def __init__(self, message=None, logger=None):
"""
Sets the deprecation message, e.g. to indicate which method to call
instead.
If a logger name is given, that logger's 'warning' method will be used to
log the message; otherwise the root logger is used.
:param message: Message to be logged
:param logger: The name of the logger to use, or None for the root logger.
"""
self.__message = message or "Deprecated method"
self.__logger = logger or None
self.__already_logged = False
def __log(self, method_name):
"""
Logs the deprecation message on first call, does nothing after
:param method_name: Name of the deprecated method
"""
if not self.__already_logged:
# Print only if not already done
stack = '\n\t'.join(traceback.format_stack())
logging.getLogger(self.__logger).warning("%s: %s\n%s",
method_name,
self.__message,
stack)
self.__already_logged = True
def __call__(self, method):
"""
Applies the modifications
:param method: The decorated method
:return: The wrapped method
"""
# Prepare the wrapped call
@functools.wraps(method)
def wrapped(*args, **kwargs):
"""
Wrapped deprecated method
"""
self.__log(method.__name__)
return method(*args, **kwargs)
return wrapped
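# Usage sketch (hypothetical method names):
#
#     @Deprecated("Use new_method() instead")
#     def old_method(self):
#         ...
#
# The warning (with a stack trace) is logged only on the first call.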
# ------------------------------------------------------------------------------
class Synchronized(object):
"""
A synchronizer for global methods
"""
def __init__(self, lock=None):
"""
Sets up the decorator. If 'lock' is not a valid lock object (e.g. None),
an RLock() is created for this decorator.
:param lock: The lock to be used for synchronization (can be None)
"""
if not is_lock(lock):
self.__lock = threading.RLock()
else:
self.__lock = lock
def __call__(self, method):
"""
Sets up the decorated method
:param method: The decorated method
:return: The wrapped method
"""
@functools.wraps(method)
def wrapped(*args, **kwargs):
"""
The wrapping method
"""
with self.__lock:
return method(*args, **kwargs)
return wrapped
def SynchronizedClassMethod(*locks_attr_names, **kwargs):
"""
A synchronizer decorator for class methods. An AttributeError can be raised
at runtime if the given lock attribute doesn't exist or if it is None.
The list of lock names is sorted before locking by default (locking in a
consistent order reduces the risk of deadlocks); pass ``sorted=False`` in
``kwargs`` to keep the given order.
:param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be
used for synchronization
:return: The decorator method, surrounded with the lock
"""
# Filter the names (remove empty ones)
locks_attr_names = [lock_name
for lock_name in locks_attr_names
if lock_name]
if not locks_attr_names:
raise ValueError("The lock names list can't be empty")
if 'sorted' not in kwargs or kwargs['sorted']:
# Sort the lock names (unless explicitly disabled with sorted=False)
# (locking always in the same order reduces the risk of deadlock)
locks_attr_names = list(locks_attr_names)
locks_attr_names.sort()
def wrapped(method):
"""
The wrapping method
:param method: The wrapped method
:return: The wrapped method
:raise AttributeError: The given attribute name doesn't exist
"""
@functools.wraps(method)
def synchronized(self, *args, **kwargs):
"""
Calls the wrapped method with a lock
"""
# Raises an AttributeError if needed
locks = [getattr(self, attr_name)
for attr_name in locks_attr_names]
locked = collections.deque()
i = 0
try:
# Lock
for lock in locks:
if lock is None:
# No lock...
raise AttributeError(
"Lock '{0}' can't be None in class {1}"
.format(locks_attr_names[i], type(self).__name__))
# Get the lock
i += 1
lock.acquire()
locked.appendleft(lock)
# Use the method
return method(self, *args, **kwargs)
finally:
# Unlock what has been locked in all cases
for lock in locked:
lock.release()
locked.clear()
del locks[:]
return synchronized
# Return the wrapped method
return wrapped
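# Usage sketch (assuming the instance defines the named lock attribute):
#
#     class Registry(object):
#         def __init__(self):
#             self._lock = threading.RLock()
#
#         @SynchronizedClassMethod('_lock')
#         def register(self, item):
#             ...  # runs with self._lock held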
def is_lock(lock):
"""
Tests if the given lock is an instance of a lock class
"""
if lock is None:
# Don't do useless tests
return False
for attr in ('acquire', 'release', '__enter__', '__exit__'):
if not hasattr(lock, attr):
# Missing something
return False
# Same API as a lock
return True
# ------------------------------------------------------------------------------
def read_only_property(value):
"""
Makes a read-only property that always returns the given value
"""
return property(lambda cls: value)
# ------------------------------------------------------------------------------
def remove_all_occurrences(sequence, item):
"""
Removes all occurrences of item in the given sequence
:param sequence: The items list
:param item: The item to be removed
"""
if sequence is None:
return
while item in sequence:
sequence.remove(item)
# ------------------------------------------------------------------------------
def add_listener(registry, listener):
"""
Adds a listener in the registry, if it is not yet in
:param registry: A registry (a list)
:param listener: The listener to register
:return: True if the listener has been added
"""
if listener is None or listener in registry:
return False
registry.append(listener)
return True
def remove_listener(registry, listener):
"""
Removes a listener from the registry
:param registry: A registry (a list)
:param listener: The listener to remove
:return: True if the listener was in the list
"""
if listener is not None and listener in registry:
registry.remove(listener)
return True
return False
# ------------------------------------------------------------------------------
if PYTHON_3:
# Python 3 interpreter : bytes & str
def is_string(string):
"""
Utility method to test if the given parameter is a string
(Python 2.x, 3.x) or a unicode (Python 2.x) object
:param string: A potential string object
:return: True if the given object is a string object or a Python 2.6
unicode object
"""
# Python 3 only have the str string type
return isinstance(string, str)
def to_bytes(data, encoding="UTF-8"):
"""
Converts the given string to an array of bytes.
Returns the first parameter if it is already an array of bytes.
:param data: A unicode string
:param encoding: The encoding of data
:return: The corresponding array of bytes
"""
if type(data) is bytes:
# Nothing to do
return data
return data.encode(encoding)
def to_str(data, encoding="UTF-8"):
"""
Converts the given parameter to a string.
Returns the first parameter if it is already an instance of ``str``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding string
"""
if type(data) is str:
# Nothing to do
return data
return str(data, encoding)
# Same operation
# pylint: disable=C0103
to_unicode = to_str
else:
# Python 2 interpreter : str & unicode
def is_string(string):
"""
Utility method to test if the given parameter is a string
(Python 2.x, 3.x) or a unicode (Python 2.x) object
:param string: A potential string object
:return: True if the given object is a string object or a Python 2.6
unicode object
"""
# Python 2 also have unicode
# pylint: disable=E0602
return isinstance(string, (str, unicode))
def to_str(data, encoding="UTF-8"):
"""
Converts the given parameter to a string.
Returns the first parameter if it is already an instance of ``str``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding string
"""
if type(data) is str:
# Nothing to do
return data
return data.encode(encoding)
# Same operation
# pylint: disable=C0103
to_bytes = to_str
def to_unicode(data, encoding="UTF-8"):
"""
Converts the given string to a unicode string using ``str.decode()``.
Returns the first parameter if it is already an instance of
``unicode``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding ``unicode`` string
"""
# pylint: disable=E0602
if type(data) is unicode:
# Nothing to do
return data
return data.decode(encoding)
# ------------------------------------------------------------------------------
def to_iterable(value, allow_none=True):
"""
Tries to convert the given value to an iterable, if necessary.
If the given value is a list, a list is returned; if it is a string, a list
containing one string is returned, ...
:param value: Any object
:param allow_none: If True, the method returns None if value is None, else
it returns an empty list
:return: A list containing the given string, or the given value
"""
if value is None:
# None given
if allow_none:
return None
else:
return []
elif isinstance(value, (list, tuple, set, frozenset)):
# Iterable given, return it as-is
return value
# Return a one-value list
return [value]
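# Behaviour examples, derived from the checks above:
#   to_iterable("spam")                 -> ["spam"]
#   to_iterable(["spam"])               -> ["spam"]
#   to_iterable(None)                   -> None
#   to_iterable(None, allow_none=False) -> []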
# ------------------------------------------------------------------------------
class EventData(object):
"""
A threading event with some associated data
"""
def __init__(self):
"""
Sets up the event
"""
self.__event = threading.Event()
self.__data = None
self.__exception = None
@property
def data(self):
"""
Returns the associated value
"""
return self.__data
@property
def exception(self):
"""
Returns the exception used to stop the wait() method
"""
return self.__exception
def clear(self):
"""
Clears the event
"""
self.__event.clear()
self.__data = None
self.__exception = None
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set()
def raise_exception(self, exception):
"""
Raises an exception in wait()
:param exception: An Exception object
"""
self.__data = None
self.__exception = exception
self.__event.set()
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
:return: True if the event has been set, else False
"""
# The 'or' part is for Python 2.6
result = self.__event.wait(timeout) or self.__event.is_set()
# pylint: disable=E0702
# Pylint seems to miss the "is None" check below
if self.__exception is None:
return result
else:
raise self.__exception
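# Usage sketch: a waiting thread calls wait() and then reads .data, while a
# worker thread either delivers a result with set(some_value) or aborts the
# waiter with raise_exception(SomeError()), which makes wait() re-raise it
# ('some_value' and 'SomeError' are placeholders).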
class CountdownEvent(object):
"""
Sets up an Event once the internal integer reaches 0
(kind of the opposite of a semaphore)
"""
def __init__(self, value):
"""
Sets up the counter
:param value: The initial value of the counter, which must be greater
than 0.
:raise ValueError: The value is not greater than 0
"""
if value <= 0:
raise ValueError("Initial value is not greater than 0")
self.__lock = threading.Lock()
self.__value = value
self.__event = threading.Event()
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def step(self):
"""
Decreases the internal counter. Raises an error if the counter goes
below 0
:return: True if this step was the final one, else False
:raise ValueError: The counter has gone below 0
"""
with self.__lock:
self.__value -= 1
if self.__value == 0:
# All done
self.__event.set()
return True
elif self.__value < 0:
# Gone too far
raise ValueError("The counter has gone below 0")
return False
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
:return: True if the event has been set, else False
"""
# The 'or' part is for Python 2.6
return self.__event.wait(timeout) or self.__event.is_set()
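# Usage sketch: CountdownEvent(3) becomes set once step() has been called
# three times; a fourth call would raise ValueError.
#
#     countdown = CountdownEvent(3)
#     for _ in range(3):
#         countdown.step()
#     assert countdown.is_set()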
|
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import rospy
import sys
from socket import error
from tornado.ioloop import IOLoop
from tornado.ioloop import PeriodicCallback
from tornado.web import Application
from rosbridge_server import RosbridgeWebSocket, ClientManager
from rosbridge_library.capabilities.advertise import Advertise
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.subscribe import Subscribe
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
from rosbridge_library.capabilities.call_service import CallService
def shutdown_hook():
IOLoop.instance().stop()
if __name__ == "__main__":
rospy.init_node("rosbridge_websocket")
rospy.on_shutdown(shutdown_hook) # register shutdown hook to stop the server
##################################################
# Parameter handling #
##################################################
retry_startup_delay = rospy.get_param('~retry_startup_delay', 2.0) # seconds
RosbridgeWebSocket.use_compression = rospy.get_param('~use_compression', False)
# get RosbridgeProtocol parameters
RosbridgeWebSocket.fragment_timeout = rospy.get_param('~fragment_timeout',
RosbridgeWebSocket.fragment_timeout)
RosbridgeWebSocket.delay_between_messages = rospy.get_param('~delay_between_messages',
RosbridgeWebSocket.delay_between_messages)
RosbridgeWebSocket.max_message_size = rospy.get_param('~max_message_size',
RosbridgeWebSocket.max_message_size)
RosbridgeWebSocket.unregister_timeout = rospy.get_param('~unregister_timeout',
RosbridgeWebSocket.unregister_timeout)
bson_only_mode = rospy.get_param('~bson_only_mode', False)
if RosbridgeWebSocket.max_message_size == "None":
RosbridgeWebSocket.max_message_size = None
# SSL options
certfile = rospy.get_param('~certfile', None)
keyfile = rospy.get_param('~keyfile', None)
# if authentication should be used
RosbridgeWebSocket.authenticate = rospy.get_param('~authenticate', False)
port = rospy.get_param('~port', 9090)
address = rospy.get_param('~address', "")
RosbridgeWebSocket.client_manager = ClientManager()
# Get the glob strings and parse them as arrays.
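    # Each glob parameter is expected to look like "[/topic_a, /chatter*]":
    # the [1:-1] slice strips the surrounding brackets and the entries are
    # split on commas, so an unset parameter yields an empty list.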
RosbridgeWebSocket.topics_glob = [
element.strip().strip("'")
for element in rospy.get_param('~topics_glob', '')[1:-1].split(',')
if len(element.strip().strip("'")) > 0]
RosbridgeWebSocket.services_glob = [
element.strip().strip("'")
for element in rospy.get_param('~services_glob', '')[1:-1].split(',')
if len(element.strip().strip("'")) > 0]
RosbridgeWebSocket.params_glob = [
element.strip().strip("'")
for element in rospy.get_param('~params_glob', '')[1:-1].split(',')
if len(element.strip().strip("'")) > 0]
if "--port" in sys.argv:
idx = sys.argv.index("--port")+1
if idx < len(sys.argv):
port = int(sys.argv[idx])
else:
print("--port argument provided without a value.")
sys.exit(-1)
if "--address" in sys.argv:
idx = sys.argv.index("--address")+1
if idx < len(sys.argv):
            address = sys.argv[idx]
else:
print("--address argument provided without a value.")
sys.exit(-1)
if "--retry_startup_delay" in sys.argv:
idx = sys.argv.index("--retry_startup_delay") + 1
if idx < len(sys.argv):
retry_startup_delay = int(sys.argv[idx])
else:
print("--retry_startup_delay argument provided without a value.")
sys.exit(-1)
if "--fragment_timeout" in sys.argv:
idx = sys.argv.index("--fragment_timeout") + 1
if idx < len(sys.argv):
RosbridgeWebSocket.fragment_timeout = int(sys.argv[idx])
else:
print("--fragment_timeout argument provided without a value.")
sys.exit(-1)
if "--delay_between_messages" in sys.argv:
idx = sys.argv.index("--delay_between_messages") + 1
if idx < len(sys.argv):
RosbridgeWebSocket.delay_between_messages = float(sys.argv[idx])
else:
print("--delay_between_messages argument provided without a value.")
sys.exit(-1)
if "--max_message_size" in sys.argv:
idx = sys.argv.index("--max_message_size") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.max_message_size = None
else:
RosbridgeWebSocket.max_message_size = int(value)
else:
print("--max_message_size argument provided without a value. (can be None or <Integer>)")
sys.exit(-1)
if "--unregister_timeout" in sys.argv:
idx = sys.argv.index("--unregister_timeout") + 1
if idx < len(sys.argv):
            RosbridgeWebSocket.unregister_timeout = float(sys.argv[idx])
else:
print("--unregister_timeout argument provided without a value.")
sys.exit(-1)
if "--topics_glob" in sys.argv:
idx = sys.argv.index("--topics_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.topics_glob = []
else:
RosbridgeWebSocket.topics_glob = [element.strip().strip("'") for element in value[1:-1].split(',')]
else:
print("--topics_glob argument provided without a value. (can be None or a list)")
sys.exit(-1)
if "--services_glob" in sys.argv:
idx = sys.argv.index("--services_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.services_glob = []
else:
RosbridgeWebSocket.services_glob = [element.strip().strip("'") for element in value[1:-1].split(',')]
else:
print("--services_glob argument provided without a value. (can be None or a list)")
sys.exit(-1)
if "--params_glob" in sys.argv:
idx = sys.argv.index("--params_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.params_glob = []
else:
RosbridgeWebSocket.params_glob = [element.strip().strip("'") for element in value[1:-1].split(',')]
else:
print("--params_glob argument provided without a value. (can be None or a list)")
sys.exit(-1)
    if "--bson_only_mode" in sys.argv:
        bson_only_mode = True
    RosbridgeWebSocket.bson_only_mode = bson_only_mode
# To be able to access the list of topics and services, you must be able to access the rosapi services.
if RosbridgeWebSocket.services_glob:
RosbridgeWebSocket.services_glob.append("/rosapi/*")
Subscribe.topics_glob = RosbridgeWebSocket.topics_glob
Advertise.topics_glob = RosbridgeWebSocket.topics_glob
Publish.topics_glob = RosbridgeWebSocket.topics_glob
AdvertiseService.services_glob = RosbridgeWebSocket.services_glob
UnadvertiseService.services_glob = RosbridgeWebSocket.services_glob
CallService.services_glob = RosbridgeWebSocket.services_glob
##################################################
# Done with parameter handling #
##################################################
application = Application([(r"/", RosbridgeWebSocket), (r"", RosbridgeWebSocket)])
connected = False
while not connected and not rospy.is_shutdown():
try:
if certfile is not None and keyfile is not None:
application.listen(port, address, ssl_options={ "certfile": certfile, "keyfile": keyfile})
else:
application.listen(port, address)
rospy.loginfo("Rosbridge WebSocket server started on port %d", port)
connected = True
except error as e:
rospy.logwarn("Unable to start server: " + str(e) +
" Retrying in " + str(retry_startup_delay) + "s.")
rospy.sleep(retry_startup_delay)
IOLoop.instance().start()
|
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import types
from ..errors import ThriftCompilerError
__all__ = ['Scope']
class Scope(object):
"""Maintains the compilation state across steps.
The scope is not exported to the user directly. It's only used to maintain
state of known types and values during the compilation process and holds a
reference to the final generated module.
"""
__slots__ = (
'const_specs', 'type_specs', 'module', 'service_specs',
'included_scopes', 'path'
)
def __init__(self, name, path=None):
"""Initialize the scope.
:param name:
Name of the generated module.
"""
self.path = path
self.type_specs = {}
self.const_specs = {}
self.service_specs = {}
self.included_scopes = {}
self.module = types.ModuleType(str(name))
def __str__(self):
return "Scope(%r)" % {
'const_specs': self.const_specs,
'type_specs': self.type_specs,
'service_specs': self.service_specs,
'module': self.module,
}
__repr__ = __str__
def __in_path(self):
"""Helper for error messages to say "in $path" if the scope has a
non-none path.
"""
if self.path:
return ' in "%s"' % self.path
else:
return ''
def resolve_const_spec(self, name, lineno):
"""Finds and links the ConstSpec with the given name."""
if name in self.const_specs:
return self.const_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[include_name].resolve_const_spec(
component, lineno
)
raise ThriftCompilerError(
'Unknown constant "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
)
def resolve_type_spec(self, name, lineno):
"""Finds and links the TypeSpec with the given name."""
if name in self.type_specs:
return self.type_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[include_name].resolve_type_spec(
component, lineno
)
raise ThriftCompilerError(
'Unknown type "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
)
def resolve_service_spec(self, name, lineno):
"""Finds and links the ServiceSpec with the given name."""
if name in self.service_specs:
return self.service_specs[name].link(self)
if '.' in name:
            include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[
include_name
].resolve_service_spec(component, lineno)
raise ThriftCompilerError(
'Unknown service "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
)
def add_include(self, name, included_scope, module):
"""Register an imported module into this scope.
Raises ``ThriftCompilerError`` if the name has already been used.
"""
# The compiler already ensures this. If we still get here with a
# conflict, that's a bug.
assert name not in self.included_scopes
self.included_scopes[name] = included_scope
self.add_surface(name, module)
def add_service_spec(self, service_spec):
"""Registers the given ``ServiceSpec`` into the scope.
Raises ``ThriftCompilerError`` if the name has already been used.
"""
assert service_spec is not None
if service_spec.name in self.service_specs:
raise ThriftCompilerError(
'Cannot define service "%s". That name is already taken.'
% service_spec.name
)
self.service_specs[service_spec.name] = service_spec
    def add_const_spec(self, const_spec):
        """Adds a ConstSpec to the compilation scope.
If the ConstSpec's ``save`` attribute is True, the constant will be
added to the module at the top-level.
"""
if const_spec.name in self.const_specs:
raise ThriftCompilerError(
'Cannot define constant "%s". That name is already taken.'
% const_spec.name
)
self.const_specs[const_spec.name] = const_spec
def add_surface(self, name, surface):
"""Adds a top-level attribute with the given name to the module."""
assert surface is not None
if hasattr(self.module, name):
raise ThriftCompilerError(
'Cannot define "%s". The name has already been used.' % name
)
setattr(self.module, name, surface)
def add_type_spec(self, name, spec, lineno):
"""Adds the given type to the scope.
:param str name:
Name of the new type
:param spec:
``TypeSpec`` object containing information on the type, or a
``TypeReference`` if this is meant to be resolved during the
``link`` stage.
:param lineno:
Line number on which this type is defined.
"""
        assert spec is not None
if name in self.type_specs:
raise ThriftCompilerError(
'Cannot define type "%s" at line %d. '
'Another type with that name already exists.'
% (name, lineno)
)
self.type_specs[name] = spec
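# Illustrative sketch, not part of the original module: how compiler code is
# expected to use a Scope. The names used here are placeholders.
def _example_scope_usage():
    scope = Scope('ping_service', path='ping.thrift')
    scope.add_surface('PING_VERSION', 1)
    return scope.module.PING_VERSION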
|
|
"""Renewer tool.
Renewer tool handles autorenewal and autodeployment of renewed certs
within lineages of successor certificates, according to configuration.
.. todo:: Sanity checking consistency, validity, freshness?
.. todo:: Call new installer API to restart servers after deployment
"""
from __future__ import print_function
import argparse
import logging
import os
import sys
import OpenSSL
import zope.component
from letsencrypt import account
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import colored_logging
from letsencrypt import cli
from letsencrypt import client
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt import notify
from letsencrypt import storage
from letsencrypt.display import util as display_util
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
class _AttrDict(dict):
"""Attribute dictionary.
A trick to allow accessing dictionary keys as object attributes.
"""
def __init__(self, *args, **kwargs):
super(_AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
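    # e.g. _AttrDict(rsa_key_size="2048").rsa_key_size returns the same value
    # as _AttrDict(rsa_key_size="2048")["rsa_key_size"].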
def renew(cert, old_version):
"""Perform automated renewal of the referenced cert, if possible.
:param letsencrypt.storage.RenewableCert cert: The certificate
lineage to attempt to renew.
:param int old_version: The version of the certificate lineage
relative to which the renewal should be attempted.
    :returns: A number referring to the newly created version of this cert
lineage, or ``False`` if renewal was not successful.
:rtype: `int` or `bool`
"""
# TODO: handle partial success (some names can be renewed but not
# others)
# TODO: handle obligatory key rotation vs. optional key rotation vs.
# requested key rotation
if "renewalparams" not in cert.configfile:
# TODO: notify user?
return False
renewalparams = cert.configfile["renewalparams"]
if "authenticator" not in renewalparams:
# TODO: notify user?
return False
# Instantiate the appropriate authenticator
plugins = plugins_disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(_AttrDict(renewalparams))
# XXX: this loses type data (for example, the fact that key_size
# was an int, not a str)
config.rsa_key_size = int(config.rsa_key_size)
config.tls_sni_01_port = int(config.tls_sni_01_port)
config.namespace.http01_port = int(config.namespace.http01_port)
zope.component.provideUtility(config)
try:
authenticator = plugins[renewalparams["authenticator"]]
except KeyError:
# TODO: Notify user? (authenticator could not be found)
return False
authenticator = authenticator.init(config)
authenticator.prepare()
acc = account.AccountFileStorage(config).load(
account_id=renewalparams["account"])
le_client = client.Client(config, acc, authenticator, None)
with open(cert.version("cert", old_version)) as f:
sans = crypto_util.get_sans_from_cert(f.read())
new_certr, new_chain, new_key, _ = le_client.obtain_certificate(sans)
if new_chain:
# XXX: Assumes that there was a key change. We need logic
# for figuring out whether there was or not. Probably
# best is to have obtain_certificate return None for
# new_key if the old key is to be used (since save_successor
# already understands this distinction!)
return cert.save_successor(
old_version, OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, new_certr.body),
new_key.pem, crypto_util.dump_pyopenssl_chain(new_chain))
# TODO: Notify results
else:
# TODO: Notify negative results
return False
# TODO: Consider the case where the renewal was partially successful
# (where fewer than all names were renewed)
def _cli_log_handler(args, level, fmt): # pylint: disable=unused-argument
handler = colored_logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt))
return handler
def _paths_parser(parser):
add = parser.add_argument_group("paths").add_argument
add("--config-dir", default=cli.flag_default("config_dir"),
help=cli.config_help("config_dir"))
add("--work-dir", default=cli.flag_default("work_dir"),
help=cli.config_help("work_dir"))
add("--logs-dir", default=cli.flag_default("logs_dir"),
help="Path to a directory where logs are stored.")
return parser
def _create_parser():
parser = argparse.ArgumentParser()
#parser.add_argument("--cron", action="store_true", help="Run as cronjob.")
parser.add_argument(
"-v", "--verbose", dest="verbose_count", action="count",
default=cli.flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
return _paths_parser(parser)
def main(cli_args=sys.argv[1:]):
"""Main function for autorenewer script."""
# TODO: Distinguish automated invocation from manual invocation,
# perhaps by looking at sys.argv[0] and inhibiting automated
# invocations if /etc/letsencrypt/renewal.conf defaults have
# turned it off. (The boolean parameter should probably be
# called renewer_enabled.)
# TODO: When we have a more elaborate renewer command line, we will
# presumably also be able to specify a config file on the
# command line, which, if provided, should take precedence over
    #       the default config files
zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
args = _create_parser().parse_args(cli_args)
uid = os.geteuid()
le_util.make_or_verify_dir(args.logs_dir, 0o700, uid)
cli.setup_logging(args, _cli_log_handler, logfile='renewer.log')
cli_config = configuration.RenewerConfiguration(args)
# Ensure that all of the needed folders have been created before continuing
le_util.make_or_verify_dir(cli_config.work_dir,
constants.CONFIG_DIRS_MODE, uid)
for renewal_file in os.listdir(cli_config.renewal_configs_dir):
print("Processing " + renewal_file)
try:
# TODO: Before trying to initialize the RenewableCert object,
# we could check here whether the combination of the config
# and the rc_config together disables all autorenewal and
# autodeployment applicable to this cert. In that case, we
# can simply continue and don't need to instantiate a
# RenewableCert object for this cert at all, which could
# dramatically improve performance for large deployments
# where autorenewal is widely turned off.
cert = storage.RenewableCert(renewal_file, cli_config)
except errors.CertStorageError:
# This indicates an invalid renewal configuration file, such
# as one missing a required parameter (in the future, perhaps
# also one that is internally inconsistent or is missing a
# required parameter). As a TODO, maybe we should warn the
# user about the existence of an invalid or corrupt renewal
# config rather than simply ignoring it.
continue
if cert.should_autorenew():
# Note: not cert.current_version() because the basis for
# the renewal is the latest version, even if it hasn't been
# deployed yet!
old_version = cert.latest_common_version()
renew(cert, old_version)
notify.notify("Autorenewed a cert!!!", "root", "It worked!")
# TODO: explain what happened
if cert.should_autodeploy():
cert.update_all_links_to(cert.latest_common_version())
# TODO: restart web server (invoke IInstaller.restart() method)
notify.notify("Autodeployed a cert!!!", "root", "It worked!")
# TODO: explain what happened
|
|
"""
Django settings for open_humans project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import logging
import os
import sys
from distutils import util # pylint: disable=no-name-in-module
import dj_database_url
import django_heroku
from env_tools import apply_env
def to_bool(env, default="false"):
"""
Convert a string to a bool.
"""
return bool(util.strtobool(os.getenv(env, default)))
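# For example, to_bool("DEBUG") is False when DEBUG is unset and True for
# values such as "true", "1" or "yes" (anything distutils.util.strtobool
# accepts as truthy).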
# Apply the env in the .env file
apply_env()
# Detect when the tests are being run so we can disable certain features
TESTING = "test" in sys.argv
# ON_HEROKU should be true if we are running on heroku.
ON_HEROKU = to_bool("ON_HEROKU")
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PORT = os.getenv("PORT", 8000)
ENV = os.getenv("ENV", "development")
# ENV = 'staging'
DOMAIN = os.getenv("DOMAIN", "localhost:{}".format(PORT))
DEFAULT_HTTP_PROTOCOL = "http"
if ENV in ["production", "staging"]:
# For email template URLs
DEFAULT_HTTP_PROTOCOL = "https"
SECRET_KEY = os.getenv("SECRET_KEY")
DEBUG = to_bool("DEBUG")
OAUTH2_DEBUG = to_bool("OAUTH2_DEBUG")
# This is the default but we need it here to make migrations work
OAUTH2_PROVIDER_APPLICATION_MODEL = "oauth2_provider.Application"
LOG_EVERYTHING = to_bool("LOG_EVERYTHING")
DISABLE_CACHING = to_bool("DISABLE_CACHING")
ALLOW_TOKEN_REFRESH = to_bool("ALLOW_TOKEN_REFRESH")
# The number of hours after which a direct upload is assumed to be incomplete
# if the uploader hasn't hit the completion endpoint
INCOMPLETE_FILE_EXPIRATION_HOURS = 6
if os.getenv("CI_NAME") == "codeship":
DISABLE_CACHING = True
console_at_info = {"handlers": ["console"], "level": "INFO"}
null = {"handlers": ["null"]}
IGNORE_SPURIOUS_WARNINGS = to_bool("IGNORE_SPURIOUS_WARNINGS")
if LOG_EVERYTHING:
LOGGING = {
"disable_existing_loggers": False,
"version": 1,
"handlers": {"console": {"class": "logging.StreamHandler", "level": "DEBUG"}},
"loggers": {
"": {"handlers": ["console"], "level": "DEBUG", "propagate": False},
"django.db": {
# django also has database level logging
},
},
}
elif not TESTING:
LOGGING = {
"disable_existing_loggers": False,
"version": 1,
"formatters": {
"open-humans": {
"()": "open_humans.formatters.LocalFormat",
"format": "%(levelname)s %(asctime)s %(context)s %(message)s",
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"formatter": "open-humans",
}
},
"loggers": {
"django.request": console_at_info,
# Log our modules at INFO
"common": console_at_info,
"data_import": console_at_info,
"open_humans": console_at_info,
"public_data": console_at_info,
},
}
else:
LOGGING = {
"disable_existing_loggers": True,
"version": 1,
"formatters": {},
"handlers": {"null": {"class": "logging.NullHandler"}},
"loggers": {
"django.request": null,
"common": null,
"data_import": null,
"open_humans": null,
"public_data": null,
},
}
if IGNORE_SPURIOUS_WARNINGS:
LOGGING["handlers"]["null"] = {"class": "logging.NullHandler"}
LOGGING["loggers"]["py.warnings"] = {"handlers": ["null"]}
if OAUTH2_DEBUG:
oauth_log = logging.getLogger("oauthlib")
oauth_log.addHandler(logging.StreamHandler(sys.stdout))
oauth_log.setLevel(logging.DEBUG)
ALLOWED_HOSTS = ["*"]
CELERY_BROKER_URL = os.getenv("REDIS_URL")
CELERY_TASK_SERIALIZER = "json"
MANAGERS = ()
ADMINS = ()
INSTALLED_APPS = (
"open_humans",
# Other local apps
"data_import",
"private_sharing",
"public_data",
"discourse",
# gulp integration
"django_gulp",
# Django built-ins
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.humanize",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# Third-party modules
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.apple",
"allauth.socialaccount.providers.facebook",
"allauth.socialaccount.providers.google",
"bootstrap_pagination",
"captcha",
"corsheaders",
# 'debug_toolbar.apps.DebugToolbarConfig',
"django_extensions",
"django_filters",
"django_forms_bootstrap",
"django_hash_filter",
"oauth2_provider",
"rest_framework",
"s3upload",
"sorl.thumbnail",
"waffle",
)
MIDDLEWARE = (
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
"django.middleware.security.SecurityMiddleware",
"open_humans.middleware.RedirectStealthToProductionMiddleware",
"open_humans.middleware.RedirectStagingToProductionMiddleware",
"django.middleware.cache.UpdateCacheMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
# Must come before AuthenticationMiddleware
"open_humans.middleware.QueryStringAccessTokenToBearerMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"oauth2_provider.middleware.OAuth2TokenMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"open_humans.middleware.AddMemberMiddleware",
"open_humans.middleware.CustomWaffleMiddleware",
"django.middleware.cache.FetchFromCacheMiddleware",
)
template_loaders = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
# Don't cache templates during development
if not DEBUG and not DISABLE_CACHING:
template_loaders = [("django.template.loaders.cached.Loader", template_loaders)]
NOBROWSER = to_bool("NOBROWSER", "false")
if TESTING:
from .testing import InvalidString # pylint: disable=wrong-import-position
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
"debug": DEBUG,
},
}
]
if os.getenv("BULK_EMAIL_TEMPLATE_DIR"):
TEMPLATES[0]["DIRS"].append(os.getenv("BULK_EMAIL_TEMPLATE_DIR"))
ROOT_URLCONF = "open_humans.urls"
WSGI_APPLICATION = "open_humans.wsgi.application"
# Use DATABASE_URL to do database setup, for a local Postgres database it would
# look like: postgres://localhost/database_name
DATABASES = {}
# Only override the default if there's a database URL specified
if os.getenv("CI_NAME") == "codeship":
DATABASES["default"] = {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "test",
"USER": os.getenv("PG_USER"),
"PASSWORD": os.getenv("PG_PASSWORD"),
"HOST": "127.0.0.1",
"PORT": 5434,
}
elif not ON_HEROKU and dj_database_url.config():
DATABASES["default"] = dj_database_url.config()
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = os.path.join(BASE_DIR, "static-files")
STATICFILES_DIRS = (
# Do this one manually since bootstrap wants it in ../fonts/
("fonts", os.path.join(BASE_DIR, "node_modules", "bootstrap", "dist", "fonts")),
("images", os.path.join(BASE_DIR, "static", "images")),
# Local apps
("public-data", os.path.join(BASE_DIR, "public_data", "static")),
("direct-sharing", os.path.join(BASE_DIR, "private_sharing", "static")),
os.path.join(BASE_DIR, "build"),
)
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
LOGIN_URL = "account_login"
LOGIN_REDIRECT_URL = "home"
AUTH_USER_MODEL = "open_humans.User"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {"min_length": 8},
}
]
ACCOUNT_ADAPTER = "common.adapters.MyAccountAdapter"
ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS = True
# currently ignored due to custom User and ModelBackend (see above)
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_CONFIRM_EMAIL_ON_GET = False
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
ACCOUNT_EMAIL_CONFIRMATION_HMAC = True
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
ACCOUNT_PASSWORD_MIN_LENGTH = 8
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_VALIDATORS = "open_humans.models.ohusernamevalidators"
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USERNAME_BLACKLIST = ["admin", "administrator", "moderator", "openhuman"]
SOCIALACCOUNT_ADAPTER = "common.adapters.MySocialAccountAdapter"
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_EMAIL_VERIFICATION = False
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
"google": {"SCOPE": ["profile", "email"], "AUTH_PARAMS": {"access_type": "online"}},
"facebook": {
"METHOD": "oauth2",
"SCOPE": ["email", "public_profile"],
"AUTH_PARAMS": {"auth_type": "https"},
"INIT_PARAMS": {"cookie": True},
"FIELDS": [
"email",
"name",
"first_name",
"last_name",
"verified",
"locale",
"timezone",
],
"EXCHANGE_TOKEN": True,
"LOCALE_FUNC": "path.to.callable",
"VERIFIED_EMAIL": False,
"VERSION": "v2.12",
},
}
DEFAULT_FROM_EMAIL = "Open Humans <[email protected]>"
EMAIL_USE_TLS = True
EMAIL_HOST = "smtp.mailgun.org"
EMAIL_HOST_USER = "[email protected]"
EMAIL_HOST_PASSWORD = os.getenv("MAILGUN_PASSWORD")
EMAIL_PORT = 587
# Fall back to console emails for development without mailgun set.
if DEBUG and not EMAIL_HOST_PASSWORD:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# TODO: Collect these programmatically
OAUTH2_PROVIDER = {
"SCOPES": {
"read": "Read Access",
"write": "Write Access",
"american-gut": "American Gut",
"go-viral": "GoViral",
"pgp": "Harvard Personal Genome Project",
"wildlife": "Wildlife of Our Homes",
"open-humans": "Open Humans",
},
"AUTHORIZATION_CODE_EXPIRE_SECONDS": 60 * 30,
"REQUEST_APPROVAL_PROMPT": "auto",
"ALLOWED_REDIRECT_URI_SCHEMES": [
"http",
"https",
# Redirect URIs that are using iOS or Android app-registered schema
"openhumanshk",
"resilienceproject",
]
+ [
x
for x in os.getenv("OAUTH2_PROVIDER_ALLOWED_REDIRECT_URI_SCHEMES", "").split(
","
)
if x
],
}
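# OAUTH2_PROVIDER_ALLOWED_REDIRECT_URI_SCHEMES is read as a comma-separated
# environment variable (e.g. "myapp,otherapp") and appended to the scheme
# whitelist above.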
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"oauth2_provider.contrib.rest_framework.OAuth2Authentication",
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 100,
"TEST_REQUEST_DEFAULT_FORMAT": "json",
}
# Settings for django-waffle.
WAFFLE_FLAG_MODEL = "open_humans.FeatureFlag"
# ModelBackend before allauth + our User -> iexact email/username login
AUTHENTICATION_BACKENDS = (
"oauth2_provider.backends.OAuth2Backend",
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
GO_VIRAL_MANAGEMENT_TOKEN = os.getenv("GO_VIRAL_MANAGEMENT_TOKEN")
DATA_PROCESSING_URL = os.getenv("DATA_PROCESSING_URL")
DEFAULT_FILE_STORAGE = "open_humans.storage.PrivateStorage"
# COLORSPACE and PRESERVE_FORMAT to avoid transparent PNG turning black, see
# https://stackoverflow.com/questions/26762180/sorl-thumbnail-generates-black-square-instead-of-image
THUMBNAIL_STORAGE = "open_humans.storage.PublicStorage"
THUMBNAIL_FORCE_OVERWRITE = True
THUMBNAIL_COLORSPACE = None
THUMBNAIL_PRESERVE_FORMAT = True
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = os.getenv("AWS_S3_STORAGE_BUCKET_NAME")
AWS_DEFAULT_ACL = None # This will become default in django-storages 2.0
LOG_BUCKET = os.getenv("LOG_BUCKET")
# Allow Cross-Origin requests (for our API integrations)
CORS_ORIGIN_ALLOW_ALL = True
# Custom CSRF Failure page
CSRF_FAILURE_VIEW = "open_humans.views.csrf_error"
# ...but only for the API URLs
CORS_URLS_REGEX = r"^/api/.*$"
SITE_ID = 1
# This way of setting the memcache options is advised by MemCachier here:
# https://devcenter.heroku.com/articles/memcachier#django
if ENV in ["production", "staging"]:
memcache_servers = os.getenv("MEMCACHIER_SERVERS", None)
memcache_username = os.getenv("MEMCACHIER_USERNAME", None)
memcache_password = os.getenv("MEMCACHIER_PASSWORD", None)
if memcache_servers and memcache_username and memcache_password:
CACHES = {
"default": {
# Use django-bmemcached
"BACKEND": "django_bmemcached.memcached.BMemcached",
# TIMEOUT is default expiration for keys; None disables expiration.
"TIMEOUT": None,
"LOCATION": memcache_servers,
"OPTIONS": {
"username": memcache_username,
"password": memcache_password,
}
}
}
if DISABLE_CACHING:
CACHES = {"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}}
CACHE_MIDDLEWARE_SECONDS = 30 * 60
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
TEST_RUNNER = "open_humans.OpenHumansDiscoverRunner"
# For redirecting staging URLs with production client IDs to production; this
# helps us transition new integrations from staging to production
PRODUCTION_CLIENT_IDS = os.getenv("PRODUCTION_CLIENT_IDS", "").split(" ")
PRODUCTION_URL = os.getenv("PRODUCTION_URL")
MAILCHIMP_API_KEY = os.getenv("MAILCHIMP_API_KEY")
MAILCHIMP_NEWSLETTER_LIST = os.getenv("MAILCHIMP_NEWSLETTER_LIST")
NOCAPTCHA = True
RECAPTCHA_PUBLIC_KEY = os.getenv("RECAPTCHA_PUBLIC_KEY", "")
RECAPTCHA_PRIVATE_KEY = os.getenv("RECAPTCHA_PRIVATE_KEY", "")
OHLOG_PROJECT_ID = os.getenv("OHLOG_PROJECT_ID", None)
ZAPIER_WEBHOOK_URL = os.getenv("ZAPIER_WEBHOOK_URL")
DISCOURSE_BASE_URL = os.getenv("DISCOURSE_BASE_URL", "https://forums.openhumans.org")
DISCOURSE_SSO_SECRET = os.getenv("DISCOURSE_SSO_SECRET")
MAX_UNAPPROVED_MEMBERS = int(os.getenv("MAX_UNAPPROVED_MEMBERS", "20"))
# Highlighted projects
PROJ_FEATURED = os.getenv("PROJ_FEATURED", None)
# The key used to communicate between this site and data-processing
PRE_SHARED_KEY = os.getenv("PRE_SHARED_KEY")
# Import settings from local_settings.py; these override the above
try:
# pylint: disable=wildcard-import,wrong-import-position
from local_settings import * # NOQA
except ImportError:
pass
if ON_HEROKU:
INSTALLED_APPS = INSTALLED_APPS + ("raven.contrib.django.raven_compat",)
RAVEN_CONFIG = {
"dsn": os.getenv("SENTRY_DSN"),
"processors": (
"common.processors.SanitizeEnvProcessor",
"raven.processors.SanitizePasswordsProcessor",
),
}
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
django_heroku.settings(locals())
|
|
import atexit
from ctypes import *
from ctypes.util import find_library
import sys
import os
import threading
# Determine the library path:
libredisLibPath = os.environ.get('LIBREDIS_SO_PATH')
if libredisLibPath is None:
libredisLibPath = find_library('redis')
if libredisLibPath is None:
raise ImportError('No libredis library available')
libredis = cdll.LoadLibrary(libredisLibPath)
# Create ctypes Connection struct:
class Struct_Connection(Structure):
_fields_ = [('addr', c_char * 255),
('serv', c_char * 20),
('addrinfo', c_void_p),
('sockfd', c_int),
('state', c_int),
('current_batch', c_void_p),
('current_executor', c_void_p),
('parser', c_void_p)]
# Set libredis c-library function parameters and return types (needed to make this work on 64bit):
libredis.Module_new.restype = c_void_p
libredis.Module_init.argtypes = [c_void_p]
libredis.Module_free.argtypes = [c_void_p]
libredis.Executor_new.restype = c_void_p
libredis.Executor_add.argtypes = [c_void_p, c_void_p, c_void_p]
libredis.Executor_execute.restype = c_int
libredis.Executor_execute.argtypes = [c_void_p, c_int]
libredis.Executor_free.argtypes = [c_void_p]
libredis.Connection_new.restype = POINTER(Struct_Connection)
libredis.Connection_new.argtypes = [c_char_p]
libredis.Connection_free.argtypes = [POINTER(Struct_Connection)]
libredis.Batch_new.restype = c_void_p
libredis.Batch_write.argtypes = [c_void_p, c_char_p, c_ulong, c_int]
#libredis.Batch_write_buffer.restype = c_void_p
#libredis.Batch_write_buffer.argtypes = [c_void_p]
libredis.Batch_free.argtypes = [c_void_p]
libredis.Batch_next_reply.argtypes = [c_void_p, c_void_p, POINTER(c_char_p), POINTER(c_ulong)]
#libredis.Buffer_dump.argtypes = [c_void_p, c_ulong]
libredis.Ketama_new.restype = c_void_p
libredis.Ketama_add_server.restype = c_int
libredis.Ketama_add_server.argtypes = [c_void_p, c_char_p, c_int, c_ulong]
libredis.Ketama_create_continuum.argtypes = [c_void_p]
#libredis.Ketama_print_continuum.argtypes = [c_void_p]
libredis.Ketama_get_server_ordinal.restype = c_int
libredis.Ketama_get_server_ordinal.argtypes = [c_void_p, c_char_p, c_ulong]
libredis.Ketama_get_server_address.restype = c_char_p
libredis.Ketama_get_server_address.argtypes = [c_void_p, c_int]
libredis.Ketama_free.argtypes = [c_void_p]
g_module = libredis.Module_new()
libredis.Module_init(g_module)
def g_Module_free():
libredis.Module_free(g_module)
atexit.register(g_Module_free)
DEFAULT_TIMEOUT_MS = 3000
class RedisError(Exception):
pass
class RedisConnectionError(Exception):
pass
class Executor(object):
def __init__(self):
self._executor = libredis.Executor_new()
def add(self, connection, batch):
libredis.Executor_add(self._executor, connection._connection, batch._batch)
def execute(self, timeout_ms = DEFAULT_TIMEOUT_MS):
libredis.Executor_execute(self._executor, timeout_ms)
def free(self):
libredis.Executor_free(self._executor)
self._executor = None
def __del__(self):
if self._executor is not None:
self.free()
class Connection(object):
# Connection states:
CS_CLOSED = 0
CS_CONNECTING = 1
CS_CONNECTED = 2
CS_ABORTED = 3
def __init__(self, addr):
self.addr = addr
self._connect()
def _connect(self):
self._connection = libredis.Connection_new(self.addr)
if not self._connection:
raise RedisConnectionError('Unable to connect')
def get(self, key, timeout_ms = DEFAULT_TIMEOUT_MS):
batch = Batch()
batch.write("GET %s\r\n" % key, 1)
return self._execute_simple(batch, timeout_ms)
def _execute_simple(self, batch, timeout_ms):
if not self._connection:
self._connect()
executor = Executor()
executor.add(self, batch)
executor.execute(timeout_ms)
try:
reply = Reply.from_next(batch).value
except RedisError as ex:
if self._getState() in (Connection.CS_CLOSED, Connection.CS_ABORTED):
self.free()
raise RedisConnectionError(ex.args[0])
else:
raise ex
else:
return reply
def _getState(self):
if self._connection:
return self._connection[0].state
else:
return Connection.CS_CLOSED
def free(self):
libredis.Connection_free(self._connection)
self._connection = None
def __del__(self):
if self._connection is not None:
self.free()
class ConnectionManager(object):
def __init__(self):
self._connectionsByThread = {}
def get_connection(self, addr):
thread_id = threading.current_thread().ident
if not thread_id in self._connectionsByThread:
self._connectionsByThread[thread_id] = {}
if not addr in self._connectionsByThread[thread_id]:
self._connectionsByThread[thread_id][addr] = Connection(addr)
return self._connectionsByThread[thread_id][addr]
class Reply(object):
RT_ERROR = -1
RT_NONE = 0
RT_OK = 1
RT_BULK_NIL = 2
RT_BULK = 3
RT_MULTIBULK_NIL = 4
RT_MULTIBULK = 5
RT_INTEGER = 6
def __init__(self, type, value):
self.type = type
self.value = value
def is_multibulk(self):
return self.type == self.RT_MULTIBULK
@classmethod
def from_next(cls, batch, raise_exception_on_error = True):
data = c_char_p()
rt = c_int()
datalen = c_ulong()
        libredis.Batch_next_reply(batch._batch, byref(rt), byref(data), byref(datalen))
type = rt.value
#print repr(type)
if type in [cls.RT_OK, cls.RT_ERROR, cls.RT_BULK]:
value = string_at(data, datalen.value)
if type == cls.RT_ERROR and raise_exception_on_error:
raise RedisError(value)
elif type in [cls.RT_MULTIBULK]:
value = datalen.value
elif type in [cls.RT_BULK_NIL]:
value = None
elif type in [cls.RT_INTEGER]:
value = int(string_at(data, datalen.value))
else:
assert False
return Reply(type, value)
class Buffer(object):
def __init__(self, buffer):
self._buffer = buffer
#def dump(self, limit = 64):
# libredis.Buffer_dump(self._buffer, limit)
class Batch(object):
def __init__(self, cmd = '', nr_commands = 0):
self._batch = libredis.Batch_new()
if cmd or nr_commands:
self.write(cmd, nr_commands)
@classmethod
def constructUnifiedRequest(cls, argList):
req = '*%d\r\n' % (len(argList))
for arg in argList:
argStr = str(arg)
req += '$%d\r\n%s\r\n' % (len(argStr), argStr)
return req
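    # For example, constructUnifiedRequest(('GET', 'foo')) produces the Redis
    # unified-protocol request '*2\r\n$3\r\nGET\r\n$3\r\nfoo\r\n'.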
def write(self, cmd = '', nr_commands = 0):
libredis.Batch_write(self._batch, cmd, len(cmd), nr_commands)
return self
def get(self, key):
req = Batch.constructUnifiedRequest(('GET', key))
return self.write(req, 1)
def set(self, key, value, expire = None):
req = ''
if expire:
            req = Batch.constructUnifiedRequest(('SETEX', key, expire, value))
else:
req = Batch.constructUnifiedRequest(('SET', key, value))
return self.write(req, 1)
def next_reply(self):
return Reply.from_next(self)
# -- Disabled for now.
#@property
#def write_buffer(self):
# return Buffer(libredis.Batch_write_buffer(self._batch))
def free(self):
libredis.Batch_free(self._batch)
self._batch = None
def __del__(self):
if self._batch is not None:
self.free()
class Ketama(object):
def __init__(self):
self._ketama = libredis.Ketama_new()
def add_server(self, addr, weight):
libredis.Ketama_add_server(self._ketama, addr[0], addr[1], weight)
def create_continuum(self):
libredis.Ketama_create_continuum(self._ketama)
#def print_continuum(self):
# libredis.Ketama_print_continuum(self._ketama)
def get_server_ordinal(self, key):
return libredis.Ketama_get_server_ordinal(self._ketama, key, len(key))
def get_server_address(self, ordinal):
return libredis.Ketama_get_server_address(self._ketama, ordinal)
def free(self):
libredis.Ketama_free(self._ketama)
self._ketama = None
def __del__(self):
if self._ketama is not None:
self.free()
class Redis(object):
def __init__(self, server_hash, connection_manager):
self.server_hash = server_hash
self.connection_manager = connection_manager
self.retryCountOnConnectionError = 1
def _execute_simple(self, requests, server_key, timeout_ms = DEFAULT_TIMEOUT_MS):
retryCount = int(self.retryCountOnConnectionError)
server_addr = self.server_hash.get_server_address(self.server_hash.get_server_ordinal(server_key))
connection = self.connection_manager.get_connection(server_addr)
while True:
batch = Batch()
for req in requests:
batch.write(req, 1)
try:
return connection._execute_simple(batch, timeout_ms)
except RedisConnectionError as ex:
retryCount -= 1
if retryCount < 0:
raise ex
def setex(self, key, value, expire):
return self.set(key, value, expire)
def set(self, key, value, expire = None, server_key = None, timeout_ms = DEFAULT_TIMEOUT_MS):
if server_key is None: server_key = key
if expire:
req = Batch.constructUnifiedRequest(('SETEX', key, expire, value))
else:
req = Batch.constructUnifiedRequest(('SET', key, value))
return self._execute_simple((req,), server_key, timeout_ms)
def get(self, key, server_key = None, timeout_ms = DEFAULT_TIMEOUT_MS):
if server_key is None: server_key = key
req = Batch.constructUnifiedRequest(('GET', key))
return self._execute_simple((req,), server_key, timeout_ms)
def exists(self, key, server_key = None, timeout_ms = DEFAULT_TIMEOUT_MS):
if server_key is None: server_key = key
req = Batch.constructUnifiedRequest(('EXISTS', key))
return self._execute_simple((req,), server_key, timeout_ms)
def delete(self, key, server_key = None, timeout_ms = DEFAULT_TIMEOUT_MS):
if server_key is None: server_key = key
req = Batch.constructUnifiedRequest(('DEL', key))
return self._execute_simple((req,), server_key, timeout_ms)
def publish(self, channel, message, server_key = None, timeout_ms = DEFAULT_TIMEOUT_MS):
if server_key is None: server_key = channel
req = Batch.constructUnifiedRequest(('PUBLISH', channel, message))
return self._execute_simple((req,), server_key, timeout_ms)
def mget(self, *keys, **kwargs):
timeout_ms = kwargs.get('timeout_ms', DEFAULT_TIMEOUT_MS)
batchKeyLists = {}
#add all keys to batches
for key in keys:
server_ip = self.server_hash.get_server_address(self.server_hash.get_server_ordinal(key))
batchKeyList = batchKeyLists.get(server_ip, None)
if batchKeyList is None: #new batch
batchKeyList = []
batchKeyLists[server_ip] = batchKeyList
batchKeyList.append(key)
#finalize batches, and start executing
executor = Executor()
batchesWithKeys = []
for server_ip, batchKeyList in batchKeyLists.items():
batch = Batch()
batch.write(Batch.constructUnifiedRequest(['MGET'] + batchKeyList), 1)
connection = self.connection_manager.get_connection(server_ip)
executor.add(connection, batch)
batchesWithKeys.append((batch, batchKeyList))
#handle events until all complete
executor.execute(timeout_ms)
#build up results
results = {}
for (batch, keys) in batchesWithKeys:
#only expect 1 (multibulk) reply per batch
reply = batch.next_reply()
assert reply.is_multibulk()
for key in keys:
child = batch.next_reply()
value = child.value
results[key] = value
return results
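# Illustrative sketch, not part of the original module: wiring Ketama and
# ConnectionManager into a Redis client. The server address and weight are
# placeholders.
def _example_redis_usage():
    ketama = Ketama()
    ketama.add_server(('127.0.0.1', 6379), 100)
    ketama.create_continuum()
    client = Redis(ketama, ConnectionManager())
    client.set('greeting', 'hello', expire=60)
    return client.get('greeting')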
|
|
#! /usr/bin/env python
"""
@file coi-services/ion/idk/data_set_agent/unit_test.py
@author Bill French
@brief Base classes for data set agent tests.
"""
import os
import time
import gevent
import shutil
import hashlib
from mi.core.log import get_logger ; log = get_logger()
import unittest
import re
from pprint import PrettyPrinter
from mi.core.unit_test import MiIntTestCase
from mi.core.unit_test import ParticleTestMixin
from ooi.reflection import EggCache
from mi.idk.util import remove_all_files
from mi.idk.unit_test import InstrumentDriverTestConfig
from mi.idk.exceptions import TestNotInitialized
from mi.idk.exceptions import IDKConfigMissing
from mi.idk.exceptions import IDKException
from mi.idk.exceptions import SampleTimeout
from mi.core.exceptions import ConfigurationException
from mi.core.exceptions import InstrumentParameterException
from mi.dataset.dataset_driver import DriverParameter
from mi.idk.unit_test import AgentCapabilityType
from mi.idk.result_set import ResultSet
from mi.idk.dataset.metadata import Metadata
from mi.idk.instrument_agent_client import InstrumentAgentClient
from mi.idk.instrument_agent_client import InstrumentAgentDataSubscribers
from mi.idk.instrument_agent_client import InstrumentAgentEventSubscribers
from mi.dataset.dataset_driver import DataSourceConfigKey
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.core.instrument.instrument_driver import DriverEvent
from interface.objects import ResourceAgentConnectionLostErrorEvent
from pyon.core.exception import Conflict
from pyon.core.exception import ResourceError, BadRequest, ServerError
from pyon.agent.agent import ResourceAgentState
from pyon.agent.agent import ResourceAgentEvent
from interface.objects import AgentCommandResult
from interface.objects import AgentCommand
from interface.objects import AgentCapability
from interface.objects import CapabilityType
class DataSetTestConfig(InstrumentDriverTestConfig):
"""
Singleton driver test config object.
"""
agent_module = 'mi.idk.instrument_agent'
agent_class = 'DatasetAgent'
container_deploy_file = 'deploy/r2qual.yml'
publisher_deploy_file = 'deploy/r2pub.yml'
def initialize(self, *args, **kwargs):
super(DataSetTestConfig, self).initialize(*args, **kwargs)
log.debug("Dataset Agent Test Config:")
for property, value in vars(self).iteritems():
log.debug("key: %s, value: %s", property, value)
log.debug("Dataset Agent Test Initialized")
def initialize_ingester_test(self, directory, runtime):
self.ingestion_directory = directory
self.ingestion_runtime = runtime
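# Illustrative sketch, not part of the original module: a concrete test class
# is expected to point the singleton config at its driver before tests run,
# roughly along these lines (the exact keyword names are defined by
# InstrumentDriverTestConfig.initialize and may differ):
#     DataSetTestCase.initialize(
#         driver_module='mi.dataset.driver.example.driver',
#         driver_class='ExampleDataSetDriver',
#         agent_resource_id='123xyz',
#         driver_startup_config={})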
class DataSetTestCase(MiIntTestCase):
"""
Base class for instrument driver tests
"""
# configuration singleton
test_config = DataSetTestConfig()
@classmethod
def initialize(cls, *args, **kwargs):
"""
Initialize the test_configuration singleton
"""
cls.test_config.initialize(*args,**kwargs)
def setUp(self):
"""
@brief Setup test cases.
"""
log.debug("*********************************************************************")
log.debug("Starting Dataset Test %s", self._testMethodName)
log.debug("*********************************************************************")
log.debug("ID: %s", self.id())
log.debug("DataSetTestCase setUp")
# Test to ensure we have initialized our test config
if not self.test_config.initialized:
            raise TestNotInitialized(msg="Tests not initialized. Missing DataSetTestCase.initialize(...)?")
log.debug("Driver Config: %s", self._driver_config())
self._metadata = None
self.clear_sample_data()
def _driver_config(self):
"""
Build the driver configuration and return it
"""
config = {
'dvr_mod' : self.test_config.driver_module,
'dvr_cls' : self.test_config.driver_class,
'startup_config' : self.test_config.driver_startup_config
}
return config
def _agent_config(self):
"""
Build the agent configuration and return it
"""
config = {
'driver_config': self._driver_config(),
'stream_config' : self.data_subscribers.stream_config,
'agent': {'resource_id': self.test_config.agent_resource_id}
}
return config
def _get_metadata(self):
"""
Get a metadata object for the test. We will use the module name to try and determine the
driver path.
"""
if self._metadata is None:
log.debug("Metadata self name: %s", self.__class__)
regex = re.compile(r'.*mi.dataset\.driver\.(.*)\.test\.')
match = regex.match(str(self.__class__))
if match:
driver_path = match.group(1)
d = driver_path.replace(".", "/")
log.debug("Driver path: %s", d)
self._metadata = Metadata(d)
else:
self.fail(IDKException("Unable to determine driver path"))
return self._metadata
def _get_source_data_file(self, filename):
"""
Search for a sample data file, first check the driver resource directory
        then just use the filename as a path. If the file doesn't exist,
raise an exception
@param filename name or path of the file to search for
@return full path to the found data file
@raise IDKException if the file isn't found
"""
resource_dir = self._get_metadata().resource_dir()
source_path = os.path.join(resource_dir, filename)
log.debug("Search for resource file (%s) in %s", filename, resource_dir)
if os.path.isfile(source_path):
log.debug("Found %s in resource directory", filename)
return source_path
log.debug("Search for resource file (%s) in current directory", filename)
if os.path.isfile(filename):
log.debug("Found %s in the current directory", filename)
return filename
        raise IDKException("Data file %s does not exist" % filename)
def create_data_dir(self):
"""
Verify the test data directory is created and exists. Return the path to
the directory.
@return: path to data directory
        @raise: IDKConfigMissing if the harvester config is missing
        @raise: IDKException if data_dir exists but is not a directory
"""
startup_config = self._driver_config().get('startup_config')
if not startup_config:
raise IDKConfigMissing("Driver config missing 'startup_config'")
harvester_config = startup_config.get(DataSourceConfigKey.HARVESTER)
if not harvester_config:
raise IDKConfigMissing("Startup config missing 'harvester' config")
if DataSetDriverConfigKeys.DIRECTORY in harvester_config:
# there is just one harvester config
data_dir = harvester_config.get(DataSetDriverConfigKeys.DIRECTORY)
if not data_dir:
raise IDKConfigMissing("Harvester config missing 'directory'")
if not os.path.exists(data_dir):
log.debug("Creating data dir: %s", data_dir)
os.makedirs(data_dir)
elif not os.path.isdir(data_dir):
raise IDKException("'data_dir' is not a directory")
else:
# return an array of dirs if there are multiple harvester configs
data_dir = []
for key in harvester_config:
this_dir = harvester_config[key].get(DataSetDriverConfigKeys.DIRECTORY)
if not this_dir:
raise IDKConfigMissing("Harvester config missing 'directory'")
if not os.path.exists(this_dir):
log.debug("Creating data dir: %s", this_dir)
os.makedirs(this_dir)
elif not os.path.isdir(this_dir):
raise IDKException("'data_dir' is not a directory")
data_dir.append(this_dir)
return data_dir
def get_data_storage_dir(self):
"""
Verify the test data directory is created and exists. Return the path to
the directory.
@return: path to data directory
@raise: IDKConfigMissing no harvester config
@raise: IDKException if data_dir exists, but not a directory
"""
startup_config = self._driver_config().get('startup_config')
if not startup_config:
raise IDKConfigMissing("Driver config missing 'startup_config'")
harvester_config = startup_config.get(DataSourceConfigKey.HARVESTER)
if not harvester_config:
raise IDKConfigMissing("Startup config missing 'harvester' config")
log.debug("Harvester config: %s", harvester_config)
data_dir = harvester_config.get("storage_directory")
log.debug("Data dir: %s", data_dir)
return data_dir
def remove_sample_dir(self):
"""
Remove the sample dir and all files
"""
data_dir = self.create_data_dir()
self.clear_sample_data()
if isinstance(data_dir, list):
for d_dir in data_dir:
# confirm this path exists in case we configure two parsers to look at the same dir
if os.path.exists(d_dir):
os.rmdir(d_dir)
else:
os.rmdir(data_dir)
def clear_sample_data(self):
"""
Remove all files from the sample data directory
"""
data_dir = self.create_data_dir()
stored_data_dir = self.get_data_storage_dir()
log.debug("Clean all data from %s", data_dir)
if isinstance(data_dir, list):
for d_dir in data_dir:
remove_all_files(d_dir)
else:
remove_all_files(data_dir)
# storage dir might not be in use
log.debug("Clean all data from %s", stored_data_dir)
if stored_data_dir:
if isinstance(stored_data_dir, list):
for d_dir in stored_data_dir:
if os.path.isdir(d_dir):
remove_all_files(d_dir)
elif os.path.isdir(stored_data_dir):
remove_all_files(stored_data_dir)
def create_sample_data(self, filename, dest_filename=None, mode=0644, create=True, copy_metadata=True):
"""
Search for a data file in the driver resource directory and if the file
is not found there then search using the filename directly. Then copy
the file to the test data directory.
If a dest_filename is supplied it will be renamed in the destination
directory.
@param: filename - filename or path to a data file to copy
@param: dest_filename - name of the file when copied. default to filename
@param: file mode
@param: create an empty file in the destination if the source is not found
@param: copy_metadata - True to copy file metadata false to not copy metadata
@return: path to file created
"""
data_dir = self.create_data_dir()
source_path = None
try:
source_path = self._get_source_data_file(filename)
except IDKException as e:
if not create:
self.fail(e)
log.debug("DIR: %s", data_dir)
if dest_filename is None and source_path is not None:
dest_path = os.path.join(data_dir, os.path.basename(source_path))
elif dest_filename is None and source_path is None:
dest_path = os.path.join(data_dir, filename)
else:
dest_path = os.path.join(data_dir, dest_filename)
log.debug("Creating data file src: %s, dest: %s", source_path, dest_path)
        if source_path is None:
file = open(dest_path, 'w')
file.close()
else:
if copy_metadata:
# copy the file and its metadata
# this leaves the file modification time the same as the original file
shutil.copy2(source_path, dest_path)
else:
                # copy just the data
# this changes the file modification time to the time of the copy
shutil.copy(source_path, dest_path)
os.chmod(dest_path, mode)
return dest_path
def create_sample_data_set_dir(self, filename, dest_dir, dest_filename=None,
mode=0644, create=True, copy_metadata=True):
"""
Search for a data file in the driver resource directory and if the file
is not found there then search using the filename directly. Then copy
the file to the test data directory.
If a dest_filename is supplied it will be renamed in the destination
directory.
@param: filename - filename or path to a data file to copy
@param: dest_filename - name of the file when copied. default to filename
@param: file mode
@param: create an empty file in the destination if the source is not found
@param: copy_metadata - True to copy file metadata false to not copy metadata
@return: path to file created
"""
if not os.path.exists(dest_dir):
log.debug("Creating data dir: %s", dest_dir)
os.makedirs(dest_dir)
elif not os.path.isdir(dest_dir):
raise IDKException("'dest_dir' is not a directory")
source_path = None
try:
source_path = self._get_source_data_file(filename)
except IDKException as e:
if not create:
self.fail(e)
log.debug("DIR: %s", dest_dir)
if dest_filename is None and source_path is not None:
dest_path = os.path.join(dest_dir, os.path.basename(source_path))
elif dest_filename is None and source_path is None:
dest_path = os.path.join(dest_dir, filename)
else:
dest_path = os.path.join(dest_dir, dest_filename)
log.debug("Creating data file src: %s, dest: %s", source_path, dest_path)
        if source_path is None:
            open(dest_path, 'w').close()
else:
if copy_metadata:
# copy the file and its metadata
# this leaves the file modification time the same as the original file
shutil.copy2(source_path, dest_path)
else:
                # copy just the data
# this changes the file modification time to the time of the copy
shutil.copy(source_path, dest_path)
os.chmod(dest_path, mode)
return dest_path
def get_file_state(self, path, ingested = False, position = None):
"""
Create a state object for a file. If a position is passed then add a parser state as well.
"""
mod_time = os.path.getmtime(path)
file_size = os.path.getsize(path)
with open(path) as filehandle:
md5_checksum = hashlib.md5(filehandle.read()).hexdigest()
parser_state = {}
if position:
parser_state = {'position': position}
return {
'ingested': ingested,
'file_mod_date': mod_time,
'file_checksum': md5_checksum,
'file_size': file_size,
'parser_state': parser_state
}
class DataSetUnitTestCase(DataSetTestCase):
"""
Base class for dataset driver unit tests
"""
def setUp(self):
super(DataSetUnitTestCase, self).setUp()
class DataSetIntegrationTestCase(DataSetTestCase):
"""
    Base class for dataset driver integration tests
"""
def state_callback(self, state):
log.debug("State callback: %s", state)
self.state_callback_result.append(state)
def data_callback(self, data):
log.debug("Data callback: %s", data)
if not isinstance(data, list):
data = [data]
for d in data:
self.data_callback_result.append(d)
def event_callback(self, **kwargs):
log.debug("Event callback: %s", kwargs, exc_info=True)
self.event_callback_result.append(kwargs)
def exception_callback(self, ex):
log.debug("Exception callback: %s", ex, exc_info=True)
self.exception_callback_result.append(ex)
def setUp(self):
super(DataSetIntegrationTestCase, self).setUp()
self.state_callback_result = []
self.data_callback_result = []
self.event_callback_result = []
self.exception_callback_result = []
self.memento = {}
self.driver = self._get_driver_object()
self.addCleanup(self._stop_driver)
def _get_driver_object(self, **kwargs):
config = kwargs.get('config', self._driver_config()['startup_config'])
memento = kwargs.get('memento', self.memento)
data_callback = kwargs.get('data_callback', self.data_callback)
state_callback = kwargs.get('state_callback', self.state_callback)
event_callback = kwargs.get('event_callback', self.event_callback)
exception_callback = kwargs.get('exception_callback', self.exception_callback)
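        # Dynamically import the configured driver module and look up the driver
        # class by name, then construct it with the callbacks defined above.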
module_object = __import__(self.test_config.driver_module, fromlist=[self.test_config.driver_class])
class_object = getattr(module_object, self.test_config.driver_class)
driver = class_object(config, memento, data_callback, state_callback, event_callback, exception_callback)
return driver
def _stop_driver(self):
if self.driver:
self.driver.shutdown()
def clear_async_data(self):
self.state_callback_result = []
self.data_callback_result = []
self.event_callback_result = []
self.exception_callback_result = []
def assert_exception(self, exception_class, timeout=35):
"""
Wait for an exception in the exception callback queue
"""
to = gevent.Timeout(timeout)
to.start()
done = False
try:
while not done:
for exp in self.exception_callback_result:
if isinstance(exp, exception_class):
log.info("Expected exception detected: %s", exp)
done = True
if not done:
log.debug("No exception detected yet, sleep for a bit")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to detect exception %s", exception_class)
self.fail("Exception detection failed.")
finally:
to.cancel()
def assert_event(self, event_class_str, timeout=35):
"""
Wait for an event in the event callback queue
"""
to = gevent.Timeout(timeout)
to.start()
done = False
try:
while not done:
for exp in self.event_callback_result:
if 'event_type' in exp and exp.get('event_type') == event_class_str:
log.info("Expected event detected: %s", exp)
done = True
if not done:
log.debug("No event detected yet, sleep for a bit")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to detect event %s", event_class_str)
self.fail("Event detection failed.")
finally:
to.cancel()
def assert_data(self, particle_class=None, result_set_file=None, count=1, timeout=10):
"""
Wait for a data particle in the data callback queue
@param particle_class, class of the expected data particles
@param result_set_file, filename containing definition of the resulting dataset
@param count, how many records to wait for
@param timeout, how long to wait for the records.
"""
particles = self.get_samples(particle_class, count, timeout)
if len(particles) == count:
# Verify the data against the result data set definition
if result_set_file:
rs_file = self._get_source_data_file(result_set_file)
rs = ResultSet(rs_file)
self.assertTrue(rs.verify(particles), msg="Failed data validation, check the logs.")
else:
log.error("%d particles were requested but only %d were found within the timeout of %d seconds",
count, len(particles), timeout)
self.fail("%d particles were requested but only %d were found within the timeout of %d seconds" %
(count, len(particles), timeout))
def assert_file_ingested(self, filename, data_source_key=None):
"""
        Assert that a particular file was ingested (usable by Single Directory drivers, not the Single File driver).
        If the ingested flag is not set in the driver state for this file, fail the test.
        @param filename - name of the file to check for the ingested flag
        @param data_source_key - optional key into the driver state for drivers with multiple data sources
"""
log.debug("last state callback result %s", self.state_callback_result[-1])
if data_source_key is None:
last_state = self.state_callback_result[-1]
else:
last_state = self.state_callback_result[-1][data_source_key]
        if filename not in last_state or not last_state[filename]['ingested']:
self.fail("File %s was not ingested" % filename)
def assert_file_not_ingested(self, filename, data_source_key=None):
"""
        Assert that a particular file was not ingested (usable by Single Directory drivers, not the Single File driver).
        If the ingested flag is set in the driver state for this file, fail the test.
        @param filename - name of the file to check for the ingested flag
        @param data_source_key - optional key into the driver state for drivers with multiple data sources
"""
log.debug("last state callback result %s", self.state_callback_result[-1])
if data_source_key is None:
last_state = self.state_callback_result[-1]
else:
last_state = self.state_callback_result[-1][data_source_key]
if filename in last_state and last_state[filename]['ingested']:
self.fail("File %s was ingested when we expected it not to be" % filename)
def get_samples(self, particle_class=None, count=1, timeout=10):
"""
pop samples of the specified class from the data callback result queue
@param particle_class None, a single particle class or a tuple of classes
@param count the number of particles to return
@param timeout how many seconds to wait for the specified number of particles
"""
to = gevent.Timeout(timeout)
to.start()
result = []
found = 0
done = False
try:
while(not done):
check_idx = 0
                # popping items while iterating would skip entries, so walk the
                # data callback results by index, starting with the lowest
while len(self.data_callback_result) > 0 and found < count and \
check_idx < len(self.data_callback_result):
data = self.data_callback_result[check_idx]
if particle_class is None or isinstance(data, particle_class):
found += 1
result.append(self.data_callback_result.pop(check_idx))
log.trace("Found sample index %d, #%d", check_idx, found)
else:
# skip past a particle that doesn't match our particle class
check_idx += 1
if found >= count:
log.debug("All done. %d >= %d", found, count)
done = True
break
# in case we have lots of callback results to check lets sleep
gevent.sleep(0)
# data_callback_result may get updated while counting particles, check again
if not done and self.data_callback_result == []:
log.debug("No particle detected yet, sleep for a bit")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to detect particle %s, expected %d particles, found %d", particle_class, count, found)
result = []
finally:
to.cancel()
log.debug("Samples found: %d, %s", len(result), result)
return result
###
# Common integration tests
###
def test_harvester_config_exception(self):
"""
        Start the driver with a bad configuration. Should raise
an exception.
"""
with self.assertRaises(ConfigurationException):
self._get_driver_object(config={})
def test_harvester_new_file_exception(self):
"""
Test an exception raised after the driver is started during
the file read. Should call the exception callback.
"""
self.clear_sample_data()
harvester_config = self._driver_config()['startup_config'][DataSourceConfigKey.HARVESTER]
if DataSetDriverConfigKeys.PATTERN in harvester_config:
pattern = harvester_config[DataSetDriverConfigKeys.PATTERN]
filename = pattern.replace("*", "foo")
self.assertIsNotNone(pattern)
# create the file so that it is unreadable
self.create_sample_data(filename, create=True, mode=000)
# Start sampling and watch for an exception
self.driver.start_sampling()
self.assert_exception(IOError)
else:
            # Start sampling and watch for exceptions
self.driver.start_sampling()
# there are multiple harvester configs, test each one
for key in harvester_config:
pattern = harvester_config[key][DataSetDriverConfigKeys.PATTERN]
filename = pattern.replace("*", "foo")
file_dir = harvester_config[key][DataSetDriverConfigKeys.DIRECTORY]
self.assertIsNotNone(pattern)
self.assertIsNotNone(file_dir)
# create the file so that it is unreadable
self.create_sample_data_set_dir(filename, file_dir, create=True, mode=000)
self.assert_exception(IOError)
# clear out exceptions so we know we get a new one next key
self.clear_async_data()
def test_parameters(self):
"""
Verify that we can get, set, and report all driver parameters.
"""
expected_params = [DriverParameter.BATCHED_PARTICLE_COUNT,
DriverParameter.PUBLISHER_POLLING_INTERVAL,
DriverParameter.RECORDS_PER_SECOND]
(res_cmds, res_params) = self.driver.get_resource_capabilities()
# Ensure capabilities are as expected
self.assertEqual(len(res_cmds), 1)
self.assertEqual(len(res_params), len(expected_params))
self.assertEqual(sorted(res_params), sorted(expected_params))
# Verify default values are as expected.
params = self.driver.get_resource(DriverParameter.ALL)
log.debug("Get Resources Result: %s", params)
self.assertEqual(params[DriverParameter.BATCHED_PARTICLE_COUNT], 1)
self.assertEqual(params[DriverParameter.PUBLISHER_POLLING_INTERVAL], 1)
self.assertEqual(params[DriverParameter.RECORDS_PER_SECOND], 60)
# Try set resource individually
self.driver.set_resource({DriverParameter.BATCHED_PARTICLE_COUNT: 2})
self.driver.set_resource({DriverParameter.PUBLISHER_POLLING_INTERVAL: 2})
self.driver.set_resource({DriverParameter.RECORDS_PER_SECOND: 59})
params = self.driver.get_resource(DriverParameter.ALL)
log.debug("Get Resources Result: %s", params)
self.assertEqual(params[DriverParameter.BATCHED_PARTICLE_COUNT], 2)
self.assertEqual(params[DriverParameter.PUBLISHER_POLLING_INTERVAL], 2)
self.assertEqual(params[DriverParameter.RECORDS_PER_SECOND], 59)
# Try set resource in bulk
self.driver.set_resource(
{DriverParameter.BATCHED_PARTICLE_COUNT: 1,
DriverParameter.PUBLISHER_POLLING_INTERVAL: .1,
DriverParameter.RECORDS_PER_SECOND: 60})
params = self.driver.get_resource(DriverParameter.ALL)
log.debug("Get Resources Result: %s", params)
self.assertEqual(params[DriverParameter.BATCHED_PARTICLE_COUNT], 1)
self.assertEqual(params[DriverParameter.PUBLISHER_POLLING_INTERVAL], .1)
self.assertEqual(params[DriverParameter.RECORDS_PER_SECOND], 60)
# Set with some bad values
with self.assertRaises(InstrumentParameterException):
self.driver.set_resource({DriverParameter.BATCHED_PARTICLE_COUNT: 'a'})
with self.assertRaises(InstrumentParameterException):
self.driver.set_resource({DriverParameter.BATCHED_PARTICLE_COUNT: -1})
with self.assertRaises(InstrumentParameterException):
self.driver.set_resource({DriverParameter.BATCHED_PARTICLE_COUNT: 0})
# Try to configure with the driver startup config
driver_config = self._driver_config()['startup_config']
cfg = {
DataSourceConfigKey.RESOURCE_ID: 'xxxxxxxx',
DataSourceConfigKey.HARVESTER: driver_config.get(DataSourceConfigKey.HARVESTER),
DataSourceConfigKey.PARSER: driver_config.get(DataSourceConfigKey.PARSER),
DataSourceConfigKey.DRIVER: {
DriverParameter.PUBLISHER_POLLING_INTERVAL: .2,
DriverParameter.RECORDS_PER_SECOND: 3,
DriverParameter.BATCHED_PARTICLE_COUNT: 3
}
}
self.driver = self._get_driver_object(config=cfg)
params = self.driver.get_resource(DriverParameter.ALL)
log.debug("Get Resources Result: %s", params)
self.assertEqual(params[DriverParameter.BATCHED_PARTICLE_COUNT], 3)
self.assertEqual(params[DriverParameter.PUBLISHER_POLLING_INTERVAL], .2)
self.assertEqual(params[DriverParameter.RECORDS_PER_SECOND], 3)
# Finally verify we get a KeyError when sending in bad config keys
cfg[DataSourceConfigKey.DRIVER] = {
DriverParameter.PUBLISHER_POLLING_INTERVAL: .2,
DriverParameter.RECORDS_PER_SECOND: 3,
DriverParameter.BATCHED_PARTICLE_COUNT: 3,
'something_extra': 1
}
with self.assertRaises(KeyError):
self._get_driver_object(config=cfg)
def test_schema(self):
"""
Test the driver schema
"""
config_json = self.driver.get_config_metadata()
log.debug("config: %s", PrettyPrinter().pformat(config_json))
###
# Driver
###
driver = config_json.get('driver')
self.assertEqual(driver, {})
###
# Commands
###
cmds = config_json.get('commands')
self.assertIsNotNone(cmds)
self.assertIsNotNone(cmds.get(DriverEvent.START_AUTOSAMPLE))
self.assertIsNotNone(cmds.get(DriverEvent.STOP_AUTOSAMPLE))
###
# Parameters
###
params = config_json.get('parameters')
self.assertIsNotNone(params)
self.assertIsNotNone(params.get(DriverParameter.RECORDS_PER_SECOND))
self.assertIsNotNone(params.get(DriverParameter.PUBLISHER_POLLING_INTERVAL))
self.assertIsNotNone(params.get(DriverParameter.BATCHED_PARTICLE_COUNT))
class DataSetAgentTestCase(DataSetTestCase):
"""
    Base class for dataset driver agent tests
"""
def setUp(self):
"""
Startup the container and start the agent.
"""
super(DataSetAgentTestCase, self).setUp()
self.instrument_agent_manager = InstrumentAgentClient()
self.instrument_agent_manager.start_container(deploy_file=self.test_config.container_deploy_file)
self.container = self.instrument_agent_manager.container
log.debug("Packet Config: %s", self.test_config.agent_packet_config)
self.data_subscribers = InstrumentAgentDataSubscribers(
packet_config=self.test_config.agent_packet_config,
)
self.event_subscribers = InstrumentAgentEventSubscribers(instrument_agent_resource_id=self.test_config.agent_resource_id)
self.init_dataset_agent_client()
self.event_subscribers.events_received = []
self.data_subscribers.start_data_subscribers()
log.debug("********* setUp complete. Begin Testing *********")
self.addCleanup(self._end_test)
def _end_test(self):
"""
Cleanup after the test completes or fails
"""
log.debug("Starting test cleanup")
#self.assert_reset()
self.event_subscribers.stop()
self.data_subscribers.stop_data_subscribers()
self.instrument_agent_manager.stop_container()
log.debug("Test complete and all cleaned up.")
def init_dataset_agent_client(self, bootmode=None):
self.set_dsa_client(self.get_dataset_agent_client(bootmode))
log.debug("DSA Client. Result: %s", self.dataset_agent_client)
def get_dataset_agent_client(self, bootmode=None, config=None, resource_id=None, agent_name=None):
log.info("Start Dataset Agent Client")
if config is None:
config = self._agent_config()
if resource_id is None:
resource_id = self.test_config.agent_resource_id
if agent_name is None:
agent_name = self.test_config.agent_name
# Start instrument agent client.
result = self.instrument_agent_manager.start_client(
name=agent_name,
module=self.test_config.agent_module,
cls=self.test_config.agent_class,
config=config,
resource_id=resource_id,
deploy_file=self.test_config.container_deploy_file,
bootmode=bootmode
)
log.debug("DSA Initialized. Result: %s", result)
return self.instrument_agent_manager.instrument_agent_client
def set_dsa_client(self, client):
self.dataset_agent_client = client
def get_dsa_client(self):
return self.dataset_agent_client
def stop_dataset_agent_client(self):
log.debug("Stopping dataset agent. ff")
self.instrument_agent_manager.stop_client()
def get_samples(self, stream_name, sample_count = 1, timeout = 10):
"""
listen on a stream until 'sample_count' samples are read and return
a list of all samples read. If the required number of samples aren't
read then throw an exception.
Note that this method does not clear the sample queue for the stream.
This should be done explicitly by the caller. However, samples that
are consumed by this method are removed.
@raise SampleTimeout - if the required number of samples aren't read
"""
result = []
start_time = time.time()
i = 1
log.debug("Fetch %d sample(s) from stream '%s'" % (sample_count, stream_name))
while(len(result) < sample_count):
if(self.data_subscribers.samples_received.has_key(stream_name) and
len(self.data_subscribers.samples_received.get(stream_name))):
log.trace("get_samples() received sample #%d!", i)
result.append(self.data_subscribers.samples_received[stream_name].pop(0))
log.debug('Popping received sample')
i += 1
# Check for timeout
if(start_time + timeout < time.time()):
raise SampleTimeout("DataSetQualificationTestCase.get_samples")
if(not self.data_subscribers.samples_received.has_key(stream_name) or
len(self.data_subscribers.samples_received.get(stream_name)) == 0):
log.debug("No samples in queue, sleep for a bit")
gevent.sleep(.2)
log.debug("get_samples() complete. returning %d records", sample_count)
return result
def assert_sample_queue_size(self, stream_name, size):
"""
Verify that a queue has size samples in it.
"""
if(not self.data_subscribers.samples_received.has_key(stream_name) and size == 0):
return
        self.assertTrue(self.data_subscribers.samples_received.has_key(stream_name), msg="Sample queue does not exist")
self.assertEqual(len(self.data_subscribers.samples_received.get(stream_name)), size)
def assert_data_values(self, particles, dataset_definition_file):
"""
Verify particles match the particles defined in the definition file
"""
rs_file = self._get_source_data_file(dataset_definition_file)
rs = ResultSet(rs_file)
self.assertTrue(rs.verify(particles))
def assert_initialize(self, final_state = ResourceAgentState.STREAMING):
'''
Walk through DSA states to get to streaming mode from uninitialized
'''
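        # INITIALIZE moves the agent from UNINITIALIZED to INACTIVE, GO_ACTIVE to
        # IDLE, and RUN to COMMAND; START_AUTOSAMPLE then takes it to STREAMING.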
log.debug("Initialize DataSet agent, %s", self.dataset_agent_client)
cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.INACTIVE)
log.info("Sent INITIALIZE; DSA state = %s", state)
log.debug("DataSet agent go active")
cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent GO_ACTIVE; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.IDLE)
log.debug("DataSet agent run")
cmd = AgentCommand(command=ResourceAgentEvent.RUN)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent RUN; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.COMMAND)
if final_state == ResourceAgentState.STREAMING:
self.assert_start_sampling()
def assert_stop_sampling(self):
'''
        Transition to command. Must be called from streaming.
'''
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.STREAMING)
log.debug("DataSet agent stop sampling")
cmd = AgentCommand(command=DriverEvent.STOP_AUTOSAMPLE)
retval = self.dataset_agent_client.execute_resource(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent STOP SAMPLING; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.COMMAND)
def assert_start_sampling(self):
'''
        Transition to sampling. Must be called from command.
'''
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
log.debug("DataSet agent start sampling")
cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
retval = self.dataset_agent_client.execute_resource(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent START SAMPLING; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.STREAMING)
def assert_reset(self):
'''
Put the instrument back in uninitialized
'''
agent_state = self.dataset_agent_client.get_agent_state()
log.debug("Resetting agent: current state: %s", agent_state)
if agent_state != ResourceAgentState.UNINITIALIZED:
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
log.debug("Resetting agent: final state: %s", state)
def assert_agent_state(self, target_state):
"""
Verify the current agent state
@param target_state: What we expect the agent state to be
"""
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, target_state)
def assert_agent_command(self, command, args=None, timeout=None, client=None):
"""
Verify an agent command
@param command: driver command to execute
@param args: kwargs to pass to the agent command object
"""
if client is None:
client = self.dataset_agent_client
cmd = AgentCommand(command=command, kwargs=args)
retval = client.execute_agent(cmd, timeout=timeout)
def assert_resource_command(self, command, args=None, timeout=None):
"""
Verify a resource command
@param command: driver command to execute
@param args: kwargs to pass to the agent command object
"""
cmd = AgentCommand(command=command, kwargs=args)
retval = self.dataset_agent_client.execute_resource(cmd)
def assert_state_change(self, target_agent_state, timeout=10):
"""
Verify the agent and resource states change as expected within the timeout
Fail if the state doesn't change to the expected state.
@param target_agent_state: State we expect the agent to be in
@param timeout: how long to wait for the driver to change states
"""
to = gevent.Timeout(timeout)
to.start()
done = False
agent_state = None
try:
while(not done):
agent_state = self.dataset_agent_client.get_agent_state()
log.error("Current agent state: %s", agent_state)
if(agent_state == target_agent_state):
log.debug("Current state match: %s", agent_state)
done = True
if not done:
log.debug("state mismatch, waiting for state to transition.")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to transition agent state to %s, current state: %s", target_agent_state, agent_state)
self.fail("Failed to transition state.")
finally:
to.cancel()
def assert_event_received(self, event_object_type, timeout=10):
"""
        Verify that an event of a specific type has been received
@param event_object_type: Event object we are looking for
@param timeout: how long to wait
"""
to = gevent.Timeout(timeout)
to.start()
done = False
try:
while(not done):
for event in self.event_subscribers.events_received:
log.debug("Event: %s", event)
                    if isinstance(event, event_object_type):
                        done = True
                        break
if not done:
log.debug("target event not detected, sleep a bit to let events happen")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to find event in queue: %s", event_object_type)
log.error("Current event queue: %s", self.event_subscribers.events_received)
self.fail("%s event not detected")
finally:
to.cancel()
log.info("Expected event detected: %s", event)
class DataSetQualificationTestCase(DataSetAgentTestCase):
"""
    Base class for dataset driver qualification tests
"""
def test_initialize(self):
"""
Test that we can start the container and initialize the dataset agent.
"""
self.assert_initialize()
self.assert_stop_sampling()
self.assert_reset()
def test_resource_parameters(self):
"""
        Verify we can get the resource parameter list and get/set parameters.
"""
def sort_capabilities(caps_list):
'''
sort a return value into capability buckets.
@retval agt_cmds, agt_pars, res_cmds, res_iface, res_pars
'''
agt_cmds = []
agt_pars = []
res_cmds = []
res_iface = []
res_pars = []
if len(caps_list)>0 and isinstance(caps_list[0], AgentCapability):
agt_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_CMD]
agt_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_PAR]
res_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_CMD]
#res_iface = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_IFACE]
res_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_PAR]
elif len(caps_list)>0 and isinstance(caps_list[0], dict):
agt_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_CMD]
agt_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_PAR]
res_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_CMD]
#res_iface = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_IFACE]
res_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_PAR]
agt_cmds.sort()
agt_pars.sort()
res_cmds.sort()
res_iface.sort()
res_pars.sort()
return agt_cmds, agt_pars, res_cmds, res_iface, res_pars
log.debug("Initialize the agent")
expected_params = [DriverParameter.BATCHED_PARTICLE_COUNT,
DriverParameter.PUBLISHER_POLLING_INTERVAL,
DriverParameter.RECORDS_PER_SECOND]
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
log.debug("Call get capabilities")
retval = self.dataset_agent_client.get_capabilities()
log.debug("Capabilities: %s", retval)
agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_capabilities(retval)
self.assertEqual(sorted(res_pars), sorted(expected_params))
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 20})
reply = self.dataset_agent_client.get_resource(DriverParameter.ALL)
log.debug("Get Resource Result: %s", reply)
def test_capabilities(self):
"""
Verify capabilities throughout the agent lifecycle
"""
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.UNINITIALIZED),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: None,
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: None,
}
###
# DSA State INACTIVE
###
log.debug("Initialize DataSet agent")
self.assert_agent_command(ResourceAgentEvent.INITIALIZE)
self.assert_state_change(ResourceAgentState.INACTIVE)
self.assert_capabilities(capabilities)
###
# DSA State IDLE
###
log.debug("DataSet agent go active")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.IDLE)
self.assert_agent_command(ResourceAgentEvent.GO_ACTIVE)
self.assert_state_change(ResourceAgentState.IDLE)
self.assert_capabilities(capabilities)
###
# DSA State COMMAND
###
log.debug("DataSet agent run")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.COMMAND)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [DriverEvent.START_AUTOSAMPLE]
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = self._common_resource_parameters()
self.assert_agent_command(ResourceAgentEvent.RUN)
self.assert_state_change(ResourceAgentState.COMMAND)
self.assert_capabilities(capabilities)
###
# DSA State STREAMING
###
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [DriverEvent.STOP_AUTOSAMPLE]
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = self._common_resource_parameters()
self.assert_start_sampling()
self.assert_capabilities(capabilities)
###
# DSA State COMMAND Revisited
###
log.debug("DataSet agent run")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.COMMAND)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [DriverEvent.START_AUTOSAMPLE]
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = self._common_resource_parameters()
self.assert_stop_sampling()
self.assert_capabilities(capabilities)
###
# DSA State INACTIVE
###
log.debug("DataSet agent run")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.INACTIVE)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = None
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = None
self.assert_agent_command(ResourceAgentEvent.GO_INACTIVE)
self.assert_state_change(ResourceAgentState.INACTIVE)
self.assert_capabilities(capabilities)
###
# DSA State LOST_CONNECTION
###
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.LOST_CONNECTION)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = None
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = None
self.assert_agent_command(ResourceAgentEvent.RESET)
self.assert_state_change(ResourceAgentState.UNINITIALIZED)
self.remove_sample_dir()
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.assert_resource_command(DriverEvent.START_AUTOSAMPLE)
        self.assert_state_change(ResourceAgentState.LOST_CONNECTION, 90)
        self.assert_capabilities(capabilities)
def assert_capabilities(self, capabilities):
'''
        Verify that all capabilities are available for a given state
@todo: Currently resource interface not implemented because it requires
a submodule update and some of the submodules are in release
states. So for now, no resource interfaces
@param: dictionary of all the different capability types that are
supposed to be there. i.e.
{
agent_command = ['DO_MY_COMMAND'],
agent_parameter = ['foo'],
resource_command = None,
resource_interface = None,
resource_parameter = None,
}
'''
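        # Normalize any missing capability lists to empty lists so the sorted
        # expected values can be compared directly against what the agent reports.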
def sort_capabilities(caps_list):
'''
sort a return value into capability buckets.
@retval agt_cmds, agt_pars, res_cmds, res_iface, res_pars
'''
agt_cmds = []
agt_pars = []
res_cmds = []
res_iface = []
res_pars = []
if len(caps_list)>0 and isinstance(caps_list[0], AgentCapability):
agt_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_CMD]
agt_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_PAR]
res_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_CMD]
#res_iface = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_IFACE]
res_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_PAR]
elif len(caps_list)>0 and isinstance(caps_list[0], dict):
agt_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_CMD]
agt_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_PAR]
res_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_CMD]
#res_iface = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_IFACE]
res_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_PAR]
agt_cmds.sort()
agt_pars.sort()
res_cmds.sort()
res_iface.sort()
res_pars.sort()
return agt_cmds, agt_pars, res_cmds, res_iface, res_pars
if(not capabilities.get(AgentCapabilityType.AGENT_COMMAND)):
capabilities[AgentCapabilityType.AGENT_COMMAND] = []
if(not capabilities.get(AgentCapabilityType.AGENT_PARAMETER)):
capabilities[AgentCapabilityType.AGENT_PARAMETER] = []
if(not capabilities.get(AgentCapabilityType.RESOURCE_COMMAND)):
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
if(not capabilities.get(AgentCapabilityType.RESOURCE_INTERFACE)):
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
if(not capabilities.get(AgentCapabilityType.RESOURCE_PARAMETER)):
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
expected_agent_cmd = capabilities.get(AgentCapabilityType.AGENT_COMMAND)
expected_agent_cmd.sort()
expected_agent_param = self._common_agent_parameters()
expected_agent_param.sort()
expected_res_cmd = capabilities.get(AgentCapabilityType.RESOURCE_COMMAND)
expected_res_cmd.sort()
expected_res_param = capabilities.get(AgentCapabilityType.RESOURCE_PARAMETER)
expected_res_param.sort()
expected_res_int = capabilities.get(AgentCapabilityType.RESOURCE_INTERFACE)
expected_res_int.sort()
# go get the active capabilities
retval = self.dataset_agent_client.get_capabilities()
agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_capabilities(retval)
log.debug("Get capabilities retval: %s", retval)
log.debug("Agent Commands: %s ", str(agt_cmds))
log.debug("Compared to: %s", expected_agent_cmd)
log.debug("Agent Parameters: %s ", str(agt_pars))
log.debug("Compared to: %s", expected_agent_param)
log.debug("Resource Commands: %s ", str(res_cmds))
log.debug("Compared to: %s", expected_res_cmd)
log.debug("Resource Interface: %s ", str(res_iface))
log.debug("Compared to: %s", expected_res_int)
log.debug("Resource Parameter: %s ", str(res_pars))
log.debug("Compared to: %s", expected_res_param)
# Compare to what we are supposed to have
self.assertEqual(expected_agent_cmd, agt_cmds)
self.assertEqual(expected_agent_param, agt_pars)
self.assertEqual(expected_res_cmd, res_cmds)
self.assertEqual(expected_res_int, res_iface)
self.assertEqual(expected_res_param, res_pars)
def _common_resource_parameters(self):
'''
list of common resource parameters
@return: list of resource parameters
'''
return [DriverParameter.BATCHED_PARTICLE_COUNT,
DriverParameter.PUBLISHER_POLLING_INTERVAL,
DriverParameter.RECORDS_PER_SECOND]
def _common_agent_parameters(self):
'''
list of common agent parameters
@return: list of agent parameters
'''
return ['aggstatus', 'alerts', 'driver_name', 'driver_pid', 'example', 'pubrate', 'streams']
def _common_agent_commands(self, agent_state):
'''
        list of common agent commands for an agent state
        @return: list of agent commands
@raise: KeyError for undefined agent state
'''
capabilities = {
ResourceAgentState.UNINITIALIZED: [
ResourceAgentEvent.GO_ACTIVE,
ResourceAgentEvent.RESET,
],
ResourceAgentState.IDLE: [
ResourceAgentEvent.GO_INACTIVE,
ResourceAgentEvent.RESET,
ResourceAgentEvent.RUN,
],
ResourceAgentState.COMMAND: [
ResourceAgentEvent.CLEAR,
ResourceAgentEvent.RESET,
ResourceAgentEvent.GO_INACTIVE,
ResourceAgentEvent.PAUSE
],
ResourceAgentState.STREAMING: [
ResourceAgentEvent.RESET,
ResourceAgentEvent.GO_INACTIVE
],
ResourceAgentState.LOST_CONNECTION: [
ResourceAgentEvent.RESET,
ResourceAgentEvent.GO_INACTIVE
],
ResourceAgentState.INACTIVE: [
ResourceAgentEvent.RESET,
ResourceAgentEvent.GO_ACTIVE
]
}
return capabilities[agent_state]
def test_missing_directory(self):
"""
        Test starting the driver when the data directory doesn't exist. This
should prevent the driver from going into streaming mode. When the
directory is created then we should be able to transition into streaming.
"""
self.remove_sample_dir()
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.event_subscribers.clear_events()
self.assert_resource_command(DriverEvent.START_AUTOSAMPLE)
self.assert_state_change(ResourceAgentState.LOST_CONNECTION, 90)
self.assert_event_received(ResourceAgentConnectionLostErrorEvent, 10)
self.create_data_dir()
# Should automatically retry connect and transition to streaming
self.assert_state_change(ResourceAgentState.STREAMING, 90)
def test_harvester_new_file_exception(self):
"""
        Test an exception raised after the driver is started during
        the file read. The exception callback should be called.
"""
harvester_config = self._driver_config()['startup_config'][DataSourceConfigKey.HARVESTER]
log.debug('Harvester config %s', harvester_config)
if DataSetDriverConfigKeys.PATTERN in harvester_config:
pattern = harvester_config[DataSetDriverConfigKeys.PATTERN]
filename = pattern.replace("*", "foo")
self.assert_new_file_exception(filename)
else:
for key in harvester_config:
pattern = harvester_config[key][DataSetDriverConfigKeys.PATTERN]
file_dir = harvester_config[key][DataSetDriverConfigKeys.DIRECTORY]
filename = pattern.replace("*", "foo")
self.assert_new_file_exception(filename, file_dir)
# stop sampling so we can start again
self.assert_stop_sampling()
# stop and restart the agent so we can test the next key new file exception
self.stop_dataset_agent_client()
self.init_dataset_agent_client()
def assert_new_file_exception(self, filename, directory=None):
"""
        Assert that an unreadable file raises an exception and causes a lost connection,
        and that making the file readable again restores the connection
@param filename filename of the file to create
@param directory optional directory to create file in, default to None
"""
self.clear_sample_data()
if directory is None:
self.create_sample_data(filename, mode=000)
else:
self.create_sample_data_set_dir(filename, directory, mode=000)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.event_subscribers.clear_events()
self.assert_resource_command(DriverEvent.START_AUTOSAMPLE)
self.assert_state_change(ResourceAgentState.LOST_CONNECTION, 90)
self.assert_event_received(ResourceAgentConnectionLostErrorEvent, 10)
self.clear_sample_data()
if directory is None:
self.create_sample_data(filename)
else:
self.create_sample_data_set_dir(filename, directory)
# Should automatically retry connect and transition to streaming
self.assert_state_change(ResourceAgentState.STREAMING, 90)
def test_autosample_recover(self):
"""
Verify that if we stop the agent without stopping sampling first then
the next agent startup will restore to streaming.
"""
# First verify the happy path. We start sampling, stop then reset
# On reinit state should still be command mode
self.assert_initialize()
self.assert_stop_sampling()
log.debug("stop data set agent")
self.stop_dataset_agent_client()
log.debug("restart data set agent")
self.init_dataset_agent_client(bootmode='restart')
self.assert_state_change(ResourceAgentState.COMMAND, 10)
# Now start sampling and then just reset the instrument agent.
# When we reinitialize go_active should put us in streaming mode.
self.assert_start_sampling()
self.stop_dataset_agent_client()
self.init_dataset_agent_client(bootmode='restart')
self.assert_state_change(ResourceAgentState.STREAMING, 10)
class DataSetIngestionTestCase(DataSetAgentTestCase):
"""
    Base class for dataset driver ingestion tests
"""
def test_ingestion(self):
"""
Test that will start a dataset agent and put it into streaming
mode. Then run continually and ingest files until we exceed our
specified runtime. No runtime means run perpetually.
"""
directory = DataSetTestConfig().ingestion_directory
runtime = DataSetTestConfig().ingestion_runtime
sleeptime = 600
to = None
if runtime:
sleeptime = int(runtime)
to = gevent.Timeout(sleeptime)
to.start()
try:
# Now start the agent up and just hang out.
self.assert_initialize()
while True:
log.debug("In our event sleep loop. just resting for a bit.")
gevent.sleep(sleeptime)
except gevent.Timeout:
log.info("Finished ingestion test as runtime has been exceeded")
finally:
if runtime:
to.cancel()
|
|
# Copyright (C) 2011 Nick Johnson <nickbjohnson4224 at gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# piece states / player colors
EMPTY = 0
BLACK = 1
WHITE = -1
class IllegalMoveError(Exception):
def __init__(self, pos, player, reason):
self.pos = pos
self.reason = reason
self.player = player
def __str__(self):
if self.player == WHITE:
player_name = "white"
elif self.player == BLACK:
player_name = "black"
elif self.player == EMPTY:
player_name = "empty"
else:
player_name = "invalid"
return "(%d %d %s) : %s" % (self.pos[0], self.pos[1], player_name, self.reason)
# represents a piece on the board
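# Connected pieces form groups tracked with a union-find structure: each piece
# points at a parent through 'group', get_group() compresses paths, and
# merge_group() unions by rank while pooling the groups' liberty counts.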
class Piece:
def __init__(self, color = EMPTY):
self.color = color
self.group = self
self.libs = 0
self.rank = 0
def get_libs(self):
return self.get_group().libs
def set_libs(self, libs = 0):
self.get_group().libs = libs
def add_libs(self, libs = 0):
self.get_group().libs += libs
def set_group(self, group = None):
if not group: group = self
self.group = group
def get_group(self):
if self.group != self:
self.group = self.group.get_group()
return self.group
else:
return self
def merge_group(self, group):
group1 = self.get_group()
group2 = group.get_group()
if group1 != group2:
            libs = group1.libs + group2.libs
if group1.rank < group2.rank:
group1.group = group2
group2.libs += group1.libs
elif group2.rank < group1.rank:
group2.group = group1
group1.libs += group2.libs
else:
group1.group = group2
group2.libs += group1.libs
group2.rank += 1
# represents a Go board
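# Positions are 1-indexed (x, y) tuples. 'last' and 'llast' record the two most
# recent moves, and 'ko' tracks the stone involved in the most recent capture so
# the ko rule can be enforced in check().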
class Board:
def __init__(self, xdim = 19, ydim = 19):
self.board = []
self.xdim = xdim
self.ydim = ydim
self.last = None
self.llast = None
self.player = BLACK
self.ko = None
for i in range(0, xdim):
self.board += [[]]
for j in range(0, ydim):
self.board[i] += [ Piece() ]
def __copy__(self):
new = Board(self.xdim, self.ydim)
for x in range(1, self.xdim + 1):
for y in range(1, self.ydim + 1):
new.place_unchecked((x, y), self.get((x, y)).color)
new.last = self.last
new.llast = self.llast
new.player = self.player
new.ko = self.ko
return new
def get(self, pos):
return self.board[pos[0] - 1][pos[1] - 1]
def validate_pos(self, pos):
if not pos:
return None
if pos[0] < 1 or pos[1] < 1 or pos[0] > self.xdim or pos[1] > self.ydim:
return None
return pos
_adj_table = [ [1, 0], [0, 1], [-1, 0], [0, -1] ]
    def get_adj_pos(self, pos, direction):
        if not pos:
            return None
        # positions are (x, y) tuples; offset by the direction vector
        dx, dy = self._adj_table[direction]
        return self.validate_pos((pos[0] + dx, pos[1] + dy))
def get_adj_list(self, pos):
adj = []
if pos[0] < self.xdim:
adj += [ (pos[0] + 1, pos[1]) ]
if pos[1] < self.ydim:
adj += [ (pos[0], pos[1] + 1) ]
if pos[0] > 1:
adj += [ (pos[0] - 1, pos[1]) ]
if pos[1] > 1:
adj += [ (pos[0], pos[1] - 1) ]
return adj
def capture(self, pos):
if not self.get(pos):
return
color = self.get(pos).color
self.get(pos).color = EMPTY
self.get(pos).group = self.get(pos)
self.get(pos).libs = 0
self.get(pos).rank = 0
for i in self.get_adj_list(pos):
color1 = self.get(i).color
if color1 == -color:
self.get(i).add_libs(1)
elif color1 == color:
self.capture(i)
def place_unchecked(self, pos, player):
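        # Decrement the liberties of every adjacent group, capture enemy groups
        # left without liberties, count this stone's own liberties, then merge it
        # into any adjacent friendly groups.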
if not pos or not self.get(pos) or not player:
return
self.ko = None
adj = []
# get adjacent groups
adj = self.get_adj_list(pos)
# reduce liberties of all adjacent groups
for i in adj:
self.get(i).add_libs(-1)
libs = 0
for i in adj:
color = self.get(i).color
# capture all adjacent enemy groups with no liberties
if color == -player:
if self.get(i).get_libs() <= 0:
self.capture(i)
self.ko = pos
libs += 1
# count liberties of added piece
elif color == EMPTY:
libs += 1
self.get(pos).libs = libs
self.get(pos).color = player
self.get(pos).group = self.get(pos)
self.get(pos).rank = 0
# merge with adjacent allied groups
for i in adj:
if self.get(i).color == player:
self.get(pos).merge_group(self.get(i))
self.ko = None
self.llast = self.last
self.last = pos
def check(self, pos, player):
if not pos: return
if not self.validate_pos(pos):
raise IllegalMoveError(pos, player, "position not on board")
# make sure space is open
if self.get(pos).color != EMPTY:
raise IllegalMoveError(pos, player, "position not empty")
# make sure there are no ko captures
if self.ko and self.get(self.ko).get_libs() == 1:
for i in self.get_adj_list(pos):
if i == self.ko:
raise IllegalMoveError(pos, player, "ko capture")
# make sure there is no suicide
for i in self.get_adj_list(pos):
if self.get(i).color == EMPTY:
return
libs_taken = 0
for j in self.get_adj_list(pos):
if self.get(j).group == self.get(i).group:
libs_taken += 1
if self.get(i).color == player:
if libs_taken < self.get(i).get_libs():
return
elif self.get(i).color == -player:
if libs_taken == self.get(i).get_libs():
return
raise IllegalMoveError(pos, player, "suicide move")
def check_fast(self, pos, player):
if not pos: return True
if not self.validate_pos(pos):
return False
# make sure space is open
if self.get(pos).color != EMPTY:
return False
# make sure there are no ko captures
if self.ko and self.get(self.ko).get_libs() == 1:
for i in self.get_adj_list(pos):
if i == self.ko: return False
# make sure there is no suicide
for i in self.get_adj_list(pos):
if self.get(i).color == EMPTY:
return True
libs_taken = 0
for j in self.get_adj_list(pos):
if self.get(j).group == self.get(i).group:
libs_taken += 1
if self.get(i).color == player:
if libs_taken < self.get(i).get_libs():
return True
elif self.get(i).color == -player:
if libs_taken == self.get(i).get_libs():
return True
return False
def place(self, pos, player = None):
if not player: player = self.player
if not player in ( BLACK, WHITE ):
raise IllegalMoveError(pos, player, "invalid player")
self.check(pos, player)
self.place_unchecked(pos, player)
self.player = -player
def score(self):
b = 0
w = 0
for x in range(1, self.xdim + 1):
for y in range(1, self.ydim + 1):
pos = (x, y)
if self.get(pos).color == WHITE:
w += 1
elif self.get(pos).color == BLACK:
b += 1
else:
for j in self.get_adj_list(pos):
if self.get(j).color == BLACK:
b += 1
elif self.get(j).color == WHITE:
w += 1
return [ b - w, b, w ]
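# Minimal usage sketch (black and white alternate automatically via place()):
#
#   board = Board(9, 9)
#   board.place((3, 3))      # black
#   board.place((7, 7))      # white
#   margin, black, white = board.score()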
|
|
#
# Copyright (c) 2005 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.util.strtools import globdistance
from smart.cache import Provides
from smart import *
import fnmatch
import re
def _stripeol(pattern):
if pattern.endswith("$"):
pattern = pattern[:-1]
elif pattern.endswith('\Z(?ms)'):
pattern = pattern[:-7]
return pattern
class Searcher(object):
"""
    The search mechanism in smart is accessed mainly by giving a
Searcher instance to the cache.search() method.
Internally, the searching may go through different paths depending
on the kind of information being looked for. More specifically:
- nameversion is matched in Package.search(), so that backends
may implement their own details in the searching mechanism.
- provides is matched in Provides.search(), for the same reason.
- requires, recommends, upgrades, and conflicts don't have special
searching methods. Instead, their usual match() method is given
an instance of the Provides type.
- group, path, url, and other information which is found by
PackageInfo, is searched by the Loader.search() method and
the overloaded methods in Loader's subclasses. This ensures
    that Loaders are able to speed up the searching process, since
many times it's necessary to access huge sequential files for
looking up information.
"""
def __init__(self):
self._results = {}
self.nameversion = []
self.provides = []
self.requires = []
self.recommends = []
self.upgrades = []
self.conflicts = []
self.path = []
self.url = []
self.group = []
self.summary = []
self.description = []
self.ignorecase = True
def reset(self):
self._results.clear()
del self.nameversion[:]
del self.provides[:]
del self.requires[:]
del self.recommends[:]
del self.upgrades[:]
del self.conflicts[:]
del self.path[:]
del self.url[:]
del self.group[:]
del self.summary[:]
del self.description[:]
def addResult(self, obj, ratio=1.0):
results = self._results
if obj not in results or ratio > results[obj]:
results[obj] = ratio
def getResult(self, obj, default=None):
return self._results.get(obj, default)
def getResults(self):
results = self._results
lst = [(results[obj], obj) for obj in results]
lst.sort()
lst.reverse()
return lst
def getBestResults(self):
results = self._results
lst = [(results[obj], obj) for obj in results]
if lst:
lst.sort()
lst.reverse()
best = lst[0][0]
lst = [x for x in lst if x[0] == best]
return lst
def searchCache(self, cache):
for loader in cache.getLoaders():
loader.search(self)
def searchPackage(self, pkg):
pkg.search(self)
def addAuto(self, s, cutoff=1.0):
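        # Dispatch on an optional "<kind>:" prefix; bare strings fall through to
        # name-version matching, a leading "/" is treated as a path, and a string
        # containing ":/" is treated as a URL.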
if not s: return
if s.startswith("provides:"):
self.addProvides(s[9:], cutoff)
elif s.startswith("requires:"):
self.addRequires(s[9:])
elif s.startswith("recommends:"):
self.addRecommends(s[11:])
elif s.startswith("upgrades:"):
self.addUpgrades(s[9:])
elif s.startswith("conflicts:"):
self.addConflicts(s[10:])
elif s.startswith("url:"):
self.addURL(s[4:], cutoff)
elif s.startswith("path:"):
self.addPath(s[5:], cutoff)
elif s.startswith("group:"):
self.addGroup(s[6:])
elif s.startswith("summary:"):
self.addSummary(s[8:])
elif s.startswith("descr:"):
self.addDescription(s[6:])
elif s.startswith("description:"):
self.addDescription(s[12:])
elif s.startswith("name:"):
self.addNameVersion(s[5:], cutoff)
elif s[0] == "/":
self.addPath(s, cutoff)
elif ":/" in s:
self.addURL(s, cutoff)
else:
self.addNameVersion(s, cutoff)
def hasAutoMeaning(self, s):
return s and (
s.startswith("provides:") or
s.startswith("requires:") or
s.startswith("recommends:") or
s.startswith("upgrades:") or
s.startswith("conflicts:") or
s.startswith("url:") or
s.startswith("path:") or
s.startswith("group:") or
s.startswith("summary:") or
s.startswith("descr:") or
s.startswith("description:") or
s.startswith("name:") or
s[0] == "/" or ":/" in s
)
def addNameVersion(self, s, cutoff=1.0):
self.nameversion.append((s, cutoff))
def addProvides(self, s, cutoff=1.0):
self.provides.append((s.replace("=", "-"), cutoff))
def _buildProvides(self, s):
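        # "name=version" becomes Provides(name, version); a bare name becomes
        # Provides(name, None); anything else is rejected.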
tokens = s.split("=")
if len(tokens) == 2:
prv = Provides(*tokens)
elif len(tokens) == 1:
prv = Provides(tokens[0], None)
else:
raise Error, _("Invalid string")
return prv
def addRequires(self, s):
self.requires.append(self._buildProvides(s))
def addRecommends(self, s):
self.recommends.append(self._buildProvides(s))
def addUpgrades(self, s):
self.upgrades.append(self._buildProvides(s))
def addConflicts(self, s):
self.conflicts.append(self._buildProvides(s))
def needsPackageInfo(self):
return bool(self.group or self.path or self.url or
self.summary or self.description)
def addPath(self, s, cutoff=1.0):
self.path.append((s, cutoff))
def addURL(self, s, cutoff=1.0):
self.url.append((s, cutoff))
def addGroup(self, s):
s = _stripeol(fnmatch.translate(s)).replace("\ ", " ")
p = re.compile("\s+".join(s.split()), self.ignorecase and re.I or 0)
self.group.append(p)
def addSummary(self, s):
s = _stripeol(fnmatch.translate(s)).replace("\ ", " ")
p = re.compile("\s+".join(s.split()), self.ignorecase and re.I or 0)
self.summary.append(p)
def addDescription(self, s):
s = _stripeol(fnmatch.translate(s)).replace("\ ", " ")
p = re.compile("\s+".join(s.split()), self.ignorecase and re.I or 0)
self.description.append(p)
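# Minimal usage sketch (assumes 'cache' is an already-loaded smart cache object):
#
#   searcher = Searcher()
#   searcher.addAuto("name:python*")
#   searcher.searchCache(cache)
#   for ratio, obj in searcher.getResults():
#       print ratio, obj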
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FieldsOperations:
"""FieldsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_farmer_id(
self,
farmer_id: str,
farm_ids: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
names: Optional[List[str]] = None,
property_filters: Optional[List[str]] = None,
statuses: Optional[List[str]] = None,
min_created_date_time: Optional[datetime.datetime] = None,
max_created_date_time: Optional[datetime.datetime] = None,
min_last_modified_date_time: Optional[datetime.datetime] = None,
max_last_modified_date_time: Optional[datetime.datetime] = None,
max_page_size: Optional[int] = 50,
skip_token: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.FieldListResponse"]:
"""Returns a paginated list of field resources under a particular farmer.
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
:param farm_ids: Farm Ids of the resource.
:type farm_ids: list[str]
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FieldListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.agrifood.farming.models.FieldListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FieldListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_farmer_id.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if farm_ids is not None:
query_parameters['farmIds'] = [self._serialize.query("farm_ids", q, 'str') if q is not None else '' for q in farm_ids]
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FieldListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_farmer_id.metadata = {'url': '/farmers/{farmerId}/fields'} # type: ignore
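# Usage sketch (illustrative, not generated code): consuming the AsyncItemPaged
# returned by list_by_farmer_id with "async for". The endpoint, credential and
# farmer id are placeholders, and the FarmBeatsClient / client.fields names are
# assumed from the public azure-agrifood-farming async client.
#
#     from azure.agrifood.farming.aio import FarmBeatsClient
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def print_fields():
#         client = FarmBeatsClient(endpoint="https://<resource>.farmbeats.azure.net",
#                                  credential=DefaultAzureCredential())
#         async with client:
#             async for field in client.fields.list_by_farmer_id(
#                     farmer_id="farmer-1", max_page_size=50):
#                 print(field.id, field.name)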
def list(
self,
farm_ids: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
names: Optional[List[str]] = None,
property_filters: Optional[List[str]] = None,
statuses: Optional[List[str]] = None,
min_created_date_time: Optional[datetime.datetime] = None,
max_created_date_time: Optional[datetime.datetime] = None,
min_last_modified_date_time: Optional[datetime.datetime] = None,
max_last_modified_date_time: Optional[datetime.datetime] = None,
max_page_size: Optional[int] = 50,
skip_token: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.FieldListResponse"]:
"""Returns a paginated list of field resources across all farmers.
:param farm_ids: Farm Ids of the resource.
:type farm_ids: list[str]
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FieldListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.agrifood.farming.models.FieldListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FieldListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if farm_ids is not None:
query_parameters['farmIds'] = [self._serialize.query("farm_ids", q, 'str') if q is not None else '' for q in farm_ids]
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FieldListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/fields'} # type: ignore
async def get(
self,
farmer_id: str,
field_id: str,
**kwargs: Any
) -> "_models.Field":
"""Gets a specified field resource under a particular farmer.
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
:param field_id: ID of the field.
:type field_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Field, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Field
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Field"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'fieldId': self._serialize.url("field_id", field_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Field', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/farmers/{farmerId}/fields/{fieldId}'} # type: ignore
async def create_or_update(
self,
farmer_id: str,
field_id: str,
field: Optional["_models.Field"] = None,
**kwargs: Any
) -> "_models.Field":
"""Creates or Updates a field resource under a particular farmer.
:param farmer_id: ID of the associated farmer resource.
:type farmer_id: str
:param field_id: ID of the field resource.
:type field_id: str
:param field: Field resource payload to create or update.
:type field: ~azure.agrifood.farming.models.Field
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Field, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Field
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Field"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
content_type = kwargs.pop("content_type", "application/merge-patch+json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'fieldId': self._serialize.url("field_id", field_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if field is not None:
body_content = self._serialize.body(field, 'Field')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('Field', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Field', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/farmers/{farmerId}/fields/{fieldId}'} # type: ignore
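# Usage sketch (illustrative, not generated code): creating or updating a field via
# the merge-patch call above. The Field keyword arguments shown (name, farm_id,
# properties) are assumptions about the model and may differ from its actual
# definition.
#
#     from azure.agrifood.farming.models import Field
#
#     async def upsert_field(client):
#         field = Field(name="field-a", farm_id="farm-1", properties={"soil": "clay"})
#         created = await client.fields.create_or_update(
#             farmer_id="farmer-1", field_id="field-a", field=field)
#         print(created.id)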
async def delete(
self,
farmer_id: str,
field_id: str,
**kwargs: Any
) -> None:
"""Deletes a specified field resource under a particular farmer.
:param farmer_id: ID of the farmer.
:type farmer_id: str
:param field_id: ID of the field.
:type field_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'fieldId': self._serialize.url("field_id", field_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/farmers/{farmerId}/fields/{fieldId}'} # type: ignore
async def get_cascade_delete_job_details(
self,
job_id: str,
**kwargs: Any
) -> "_models.CascadeDeleteJob":
"""Get a cascade delete job for specified field.
:param job_id: ID of the job.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CascadeDeleteJob, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.CascadeDeleteJob
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CascadeDeleteJob"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get_cascade_delete_job_details.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CascadeDeleteJob', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cascade_delete_job_details.metadata = {'url': '/fields/cascade-delete/{jobId}'} # type: ignore
async def _create_cascade_delete_job_initial(
self,
job_id: str,
farmer_id: str,
field_id: str,
**kwargs: Any
) -> "_models.CascadeDeleteJob":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CascadeDeleteJob"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self._create_cascade_delete_job_initial.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['farmerId'] = self._serialize.query("farmer_id", farmer_id, 'str')
query_parameters['fieldId'] = self._serialize.query("field_id", field_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CascadeDeleteJob', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_cascade_delete_job_initial.metadata = {'url': '/fields/cascade-delete/{jobId}'} # type: ignore
async def begin_create_cascade_delete_job(
self,
job_id: str,
farmer_id: str,
field_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.CascadeDeleteJob"]:
"""Create a cascade delete job for specified field.
:param job_id: Job ID supplied by end user.
:type job_id: str
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
:param field_id: ID of the field to be deleted.
:type field_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CascadeDeleteJob or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.agrifood.farming.models.CascadeDeleteJob]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CascadeDeleteJob"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_cascade_delete_job_initial(
job_id=job_id,
farmer_id=farmer_id,
field_id=field_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CascadeDeleteJob', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
if polling is True: polling_method = AsyncLROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_cascade_delete_job.metadata = {'url': '/fields/cascade-delete/{jobId}'} # type: ignore
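# Usage sketch (illustrative, not generated code): driving the long-running cascade
# delete. begin_create_cascade_delete_job is itself a coroutine, so it is awaited
# once to start the job and obtain an AsyncLROPoller; poller.result() is then
# awaited until the CascadeDeleteJob reaches a terminal state. Ids are placeholders.
#
#     async def cascade_delete(client):
#         poller = await client.fields.begin_create_cascade_delete_job(
#             job_id="job-1", farmer_id="farmer-1", field_id="field-a")
#         job = await poller.result()
#         print(job.status)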
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "memote-"
cfg.versionfile_source = "src/memote/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
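# Worked example (illustrative): pieces = {"closest-tag": "1.2.0", "distance": 3,
# "short": "abc1234", "dirty": True} renders as "1.2.0+3.gabc1234.dirty"; a clean
# build exactly on the tag (distance 0, not dirty) renders as just "1.2.0".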
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
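# Worked example (illustrative): the same pieces as above ("1.2.0", distance 3,
# dirty) render here as "1.2.0.post3.dev0+gabc1234"; with no tag at all the
# fallback is "0.post3.dev0+gabc1234".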
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import matplotlib.pylab as plt
import MatrixOperations as MO
class BaseMyPC(object):
def setup(self, pc):
pass
def reset(self, pc):
pass
def apply(self, pc, x, y):
raise NotImplementedError
def applyT(self, pc, x, y):
self.apply(pc, x, y)
def applyS(self, pc, x, y):
self.apply(pc, x, y)
def applySL(self, pc, x, y):
self.applyS(pc, x, y)
def applySR(self, pc, x, y):
self.applyS(pc, x, y)
def applyRich(self, pc, x, y, w, tols):
self.apply(pc, x, y)
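# Usage sketch (assumption; follows the standard petsc4py "python" preconditioner
# pattern rather than code taken from this module): a BaseMyPC subclass is meant
# to be attached to a KSP as a shell preconditioner context, roughly:
#
#     ksp = PETSc.KSP().create()
#     ksp.setOperators(A)              # A: the assembled block system
#     pc = ksp.getPC()
#     pc.setType(PETSc.PC.Type.PYTHON)
#     pc.setPythonContext(InnerOuterMAGNETICinverse(W, kspF, kspA, kspQ, Fp,
#                                                   kspScalar, kspCGScalar,
#                                                   kspVector, G, P, A,
#                                                   Hiptmairtol, F))
#     ksp.setFromOptions()
#     ksp.solve(b, x)
#
# PETSc then calls setUp() once and apply() on every outer Krylov iteration.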
class InnerOuterWITHOUT2inverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
#MX = self.AA+self.F
MX = self.F # MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
kspMX.setOperators(MX,MX)
OptDB = PETSc.Options()
#OptDB["pc_factor_mat_ordering_type"] = "rcm"
#OptDB["pc_factor_mat_solver_package"] = "mumps"
kspMX.setFromOptions()
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class InnerOuterMAGNETICinverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
FF = self.F
# MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setOperators(FF,FF)
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class InnerOuterMAGNETICapprox(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
FF = self.F
# MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setOperators(FF,FF)
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
#self.kspMX.solve(bb,xb)
xb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, bb, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
|
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'sos_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except IndexError:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
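# Example (illustrative): split_format_specifiers(['1', 's', '2']) returns
# ({'1', '2'}, ['s']) -- the numeric Qt placeholders %1/%2 may be reordered by a
# translation, while the strprintf-style %s must keep its position.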
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
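# Example (illustrative): remove_invalid_characters(b'foo\x08bar\r\n') == b'foobar\r\n';
# the regex strips ASCII control characters (including tab) but keeps \n and \r,
# so line structure survives.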
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
|
import logging
import json
import copy
import operator
import os
import pprint
import shlex
import datetime
import glob
class NxConfig():
""" Simple configuration loader """
cfg = {}
def __init__(self, fname):
try:
self.cfg = (json.loads(open(fname).read()))
except (IOError, ValueError):
logging.critical("Unable to open/parse configuration file.")
raise ValueError
class NxRating():
""" A class that is used to check success criterias of rule.
attempts jit querying + caching """
def __init__(self, cfg, es, tr):
self.tr = tr
self.cfg = cfg
self.es = es
self.esq = {
'global' : None,
'template' : None,
'rule' : None}
self.stats = {
'global' : {},
'template' : {},
'rule' : {}
}
self.global_warnings = cfg["global_warning_rules"]
self.global_success = cfg["global_success_rules"]
self.global_deny = cfg["global_deny_rules"]
def drop(self):
""" clears all existing stats """
self.stats['template'] = {}
self.stats['global'] = {}
self.stats['rule'] = {}
def refresh_scope(self, scope, esq):
""" drops all datas for a named scope """
if scope not in self.esq.keys():
print "Unknown scope ?!"+scope
self.esq[scope] = esq
self.stats[scope] = {}
def query_ratio(self, scope, scope_small, score, force_refresh):
""" wrapper to calculate ratio between two vals, rounded float """
#print "ratio :"+str(self.get(scope_small, score))+" / "+str( self.get(scope, score))
ratio = round( (float(self.get(scope_small, score)) / self.get(scope, score)) * 100.0, 2)
return ratio
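# Example (illustrative): if the narrower scope matched 25 unique values and the
# wider scope 200, query_ratio returns round((25.0 / 200) * 100.0, 2) == 12.5,
# i.e. the narrower scope accounts for 12.5% of the wider one.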
def get(self, scope, score, scope_small=None, force_refresh=False):
""" fetch a value from self.stats or query ES """
#print "#GET:"+scope+"_?"+str(scope_small)+"?_"+score+" = ?"
if scope not in self.stats.keys():
#print "unknown scope :"+scope
return None
if scope_small is not None:
return self.query_ratio(scope, scope_small, score, force_refresh)
elif score in self.stats[scope].keys() and force_refresh is False:
return self.stats[scope][score]
else:
if score != 'total':
self.stats[scope][score] = self.tr.fetch_uniques(self.esq[scope], score)['total']
else:
res = self.tr.search(self.esq[scope])
self.stats[scope][score] = res['hits']['total']
return self.stats[scope][score]
def check_rule_score(self, tpl):
""" wrapper to check_score, TOFIX ? """
return self.check_score(tpl_success=tpl.get('_success', None),
tpl_warnings=tpl.get('_warnings', None),
tpl_deny=tpl.get('_deny', None))
def check_score(self, tpl_success=None, tpl_warnings=None, tpl_deny=None):
# pprint.pprint(self.stats)
debug = False
success = []
warning = []
deny = False
failed_tests = {"success" : [], "warnings" : []}
glb_success = self.global_success
glb_warnings = self.global_warnings
glb_deny = self.global_deny
for sdeny in [tpl_deny, glb_deny]:
if sdeny is None:
continue
for k in sdeny.keys():
res = self.check_rule(k, sdeny[k])
if res['check'] is True:
# print "WE SHOULD DENY THAT"
deny = True
break
for scheck in [glb_success, tpl_success]:
if scheck is None:
continue
for k in scheck.keys():
res = self.check_rule(k, scheck[k])
if res['check'] is True:
if debug is True:
print "[SUCCESS] OK, on "+k+" vs "+str(res['curr'])+", check :"+str(scheck[k][0])+" - "+str(scheck[k][1])
success.append({'key' : k, 'criteria' : scheck[k], 'curr' : res['curr']})
else:
if debug is True:
print "[SUCCESS] KO, on "+k+" vs "+str(res['curr'])+", check :"+str(scheck[k][0])+" - "+str(scheck[k][1])
failed_tests["success"].append({'key' : k, 'criteria' : scheck[k], 'curr' : res['curr']})
for fcheck in [glb_warnings, tpl_warnings]:
if fcheck is None:
continue
for k in fcheck.keys():
res = self.check_rule(k, fcheck[k])
if res['check'] is True:
if debug is True:
print "[WARNINGS] TRIGGERED, on "+k+" vs "+str(res['curr'])+", check :"+str(fcheck[k][0])+" - "+str(fcheck[k][1])
warning.append({'key' : k, 'criteria' : fcheck[k], 'curr' : res['curr']})
else:
if debug is True:
print "[WARNINGS] NOT TRIGGERED, on "+k+" vs "+str(res['curr'])+", check :"+str(fcheck[k][0])+" - "+str(fcheck[k][1])
failed_tests["warnings"].append({'key' : k, 'criteria' : fcheck[k], 'curr' : res['curr']})
x = { 'success' : success,
'warnings' : warning,
'failed_tests' : failed_tests,
'deny' : deny}
return x
def check_rule(self, label, check_rule):
""" check met/failed success/warning criterias
of a given template vs a set of results """
check = check_rule[0]
beat = check_rule[1]
if label.find("var_name") != -1:
label = label.replace("var_name", "var-name")
items = label.split('_')
for x in range(len(items)):
items[x] = items[x].replace("var-name", "var_name")
if len(items) == 2:
scope = items[0]
score = items[1]
x = self.get(scope, score)
# print "scope:"+str(scope)+" score:"+str(score)
return {'curr' : x, 'check' : check( int(self.get(scope, score)), int(beat))}
elif len(items) == 4:
scope = items[0]
scope_small = items[1]
score = items[2]
x = self.get(scope, score, scope_small=scope_small)
#Xpprint.pprint()
return {'curr' : x, 'check' : check(int(self.get(scope, score, scope_small=scope_small)), int(beat))}
else:
print "cannot understand rule ("+label+"):",
pprint.pprint(check_rule)
return { 'curr' : 0, 'check' : False }
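    # Illustrative note (not from the original code): check_rule() labels encode
    # their target as "<scope>_<score>" (e.g. "rule_ip" -> unique IPs matched by
    # the rule) or "<scope>_<scope_small>_<score>_ratio" (e.g.
    # "global_rule_ip_ratio" -> the rule's unique-IP count as a percentage of
    # the global one); the var_name/var-name dance above only protects the
    # underscore inside "var_name" from the split.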
class NxTranslate():
""" Transform Whitelists, template into
ElasticSearch queries, and vice-versa, conventions :
esq : elasticsearch query
tpl : template
cr : core rule
wl : whitelist """
def __init__(self, es, cfg):
self.es = es
self.debug = True
self.cfg = cfg.cfg
self.cfg["global_warning_rules"] = self.normalize_checks(self.cfg["global_warning_rules"])
self.cfg["global_success_rules"] = self.normalize_checks(self.cfg["global_success_rules"])
self.cfg["global_deny_rules"] = self.normalize_checks(self.cfg["global_deny_rules"])
self.core_msg = {}
# by default, es queries will return 1000 results max
self.es_max_size = self.cfg.get("elastic").get("max_size", 1000)
print "# size :"+str(self.es_max_size)
# purely for output coloring
self.red = '{0}'
self.grn = '{0}'
self.blu = '{0}'
if self.cfg["output"]["colors"] == "true":
self.red = "\033[91m{0}\033[0m"
self.grn = "\033[92m{0}\033[0m"
self.blu = "\033[94m{0}\033[0m"
# Attempt to parse provided core rules file
self.load_cr_file(self.cfg["naxsi"]["rules_path"])
def full_auto(self):
""" Loads all tpl within template_path
        If a template's hits, peers or url(s) ratio is > 15%,
attempts to generate whitelists.
Only displays the wl that did not raise warnings, ranked by success"""
# gather total IPs, total URIs, total hit count
scoring = NxRating(self.cfg, self.es, self)
strict = True
if self.cfg.get("naxsi").get("strict", "") == "false":
strict = False
scoring.refresh_scope("global", self.cfg["global_filters"])
if scoring.get("global", "ip") <= 0:
print "No hits for this filter."
return
for sdir in self.cfg["naxsi"]["template_path"]:
for root, dirs, files in os.walk(sdir):
for file in files:
if file.endswith(".tpl"):
print "# "+self.grn.format(" template :")+root+"/"+file+" "
template = self.load_tpl_file(root+"/"+file)
scoring.refresh_scope('template', self.tpl2esq(template))
print "Nb of hits :"+str(scoring.get('template', 'total'))
if scoring.get('template', 'total') > 0:
print self.grn.format("# template matched, generating all rules.")
whitelists = self.gen_wl(template, rule={})
#x add here
print str(len(whitelists))+" whitelists ..."
for genrule in whitelists:
scoring.refresh_scope('rule', genrule['rule'])
results = scoring.check_rule_score(template)
#XX1
if ( len(results['success']) > len(results['warnings']) and results["deny"] == False) or self.cfg["naxsi"]["strict"] == "false":
#print "?deny "+str(results['deny'])
self.fancy_display(genrule, results, template)
print self.grn.format(self.tpl2wl(genrule['rule']).encode('utf-8', errors='replace'), template)
def fancy_display(self, full_wl, scores, template=None):
if template is not None and '_msg' in template.keys():
print "#msg: "+template['_msg']
rid = full_wl['rule'].get('id', "0")
print "#Rule ("+rid+") "+self.core_msg.get(rid, 'Unknown ..')
if self.cfg["output"]["verbosity"] >= 4:
print "#total hits "+str(full_wl['total_hits'])
for x in [ "content", "peers", "uri", "var_name" ]:
if not x in full_wl.keys():
continue
for y in full_wl[x]:
print "#"+x+" : "+unicode(y).encode("utf-8", errors='replace')
# pprint.pprint(scores)
for x in scores['success']:
print "# success : "+self.grn.format(str(x['key'])+" is "+str(x['curr']))
for x in scores['warnings']:
print "# warnings : "+self.grn.format(str(x['key'])+" is "+str(x['curr']))
pass
def expand_tpl_path(self, template):
""" attempts to convert stuff to valid tpl paths.
if it starts with / or . it will consider it's a relative/absolute path,
else, that it's a regex on tpl names. """
clean_tpls = []
tpl_files = []
if template.startswith('/') or template.startswith('.'):
tpl_files.extend(glob.glob(template))
else:
for sdir in self.cfg['naxsi']['template_path']:
tpl_files.extend(glob.glob(sdir +"/"+template))
for x in tpl_files:
if x.endswith(".tpl") and x not in clean_tpls:
clean_tpls.append(x)
return clean_tpls
def load_tpl_file(self, tpl):
""" open, json.loads a tpl file,
cleanup data, return dict. """
try:
x = open(tpl)
except:
logging.error("Unable to open tpl file.")
return None
tpl_s = ""
for l in x.readlines():
if l.startswith('#'):
continue
else:
tpl_s += l
try:
template = json.loads(tpl_s)
except:
logging.error("Unable to load json from '"+tpl_s+"'")
return None
if '_success' in template.keys():
template['_success'] = self.normalize_checks(template['_success'])
if '_warnings' in template.keys():
template['_warnings'] = self.normalize_checks(template['_warnings'])
if '_deny' in template.keys():
template['_deny'] = self.normalize_checks(template['_deny'])
#return self.tpl_append_gfilter(template)
return template
def load_wl_file(self, wlf):
""" Loads a file of whitelists,
convert them to ES queries,
and returns them as a list """
esql = []
try:
wlfd = open(wlf, "r")
except:
logging.error("Unable to open whitelist file.")
return None
for wl in wlfd:
[res, esq] = self.wl2esq(wl)
if res is True:
esql.append(esq)
if len(esql) > 0:
return esql
return None
def load_cr_file(self, cr_file):
""" parses naxsi's core rule file, to
decorate output with "msg:" field content """
core_msg = {}
core_msg['0'] = "id:0 is wildcard (all rules) whitelist."
try:
fd = open(cr_file, 'r')
for i in fd:
if i.startswith('MainRule') or i.startswith('#@MainRule'):
pos = i.find('id:')
pos_msg = i.find('msg:')
                    # the id value runs from 'id:' up to the next ';'
                    rule_id = i[pos + 3:i.find(';', pos + 3)]
                    self.core_msg[rule_id] = i[pos_msg + 4:][:i[pos_msg + 4:].find('"')]
fd.close()
except:
logging.error("Unable to open rules file")
def tpl2esq(self, ob, full=True):
''' receives template or a rule, returns a valid
ElasticSearch query '''
qr = {
"query" : { "bool" : { "must" : [ ]} },
"size" : self.es_max_size
}
# A hack in case we were inadvertently given an esq
if 'query' in ob.keys():
return ob
for k in ob.keys():
if k.startswith("_"):
continue
# if key starts with '?' :
# use content for search, but use content from exceptions to generate WL
if k[0] == '?':
k = k[1:]
qr['query']['bool']['must'].append({"regexp" : { k : ob['?'+k] }})
# wildcard
elif ob[k] == '?':
pass
else:
qr['query']['bool']['must'].append({"match" : { k : ob[k]}})
qr = self.append_gfilter(qr)
return qr
def append_gfilter(self, esq):
""" append global filters parameters
to and existing elasticsearch query """
for x in self.cfg["global_filters"]:
if x.startswith('?'):
x = x[1:]
if {"regexp" : { x : self.cfg["global_filters"]['?'+x] }} not in esq['query']['bool']['must']:
esq['query']['bool']['must'].append({"regexp" : { x : self.cfg["global_filters"]['?'+x] }})
else:
if {"match" : { x : self.cfg["global_filters"][x] }} not in esq['query']['bool']['must']:
esq['query']['bool']['must'].append({"match" : { x : self.cfg["global_filters"][x] }})
return esq
def tpl_append_gfilter(self, tpl):
for x in self.cfg["global_filters"]:
tpl[x] = self.cfg["global_filters"][x]
return tpl
def wl2esq(self, raw_line):
""" parses a fulltext naxsi whitelist,
and outputs the matching es query (ie. for tagging),
returns [True|False, error_string|ESquery] """
esq = {
"query" : { "bool" : { "must" : [ ]} },
"size" : self.es_max_size
}
wl_id = ""
mz_str = ""
# do some pre-check to ensure it's a valid line
if raw_line.startswith("#"):
return [False, "commented out"]
if raw_line.find("BasicRule") == -1:
return [False, "not a BasicRule"]
# split line
strings = shlex.split(raw_line)
# more checks
if len(strings) < 3:
return [False, "empty/incomplete line"]
if strings[0].startswith('#'):
return [False, "commented line"]
if strings[0] != "BasicRule":
return [False, "not a BasicRule, keyword '"+strings[0]+"'"]
if strings[len(strings) - 1].endswith(';'):
strings[len(strings) - 1] = strings[len(strings) - 1][:-1]
for x in strings:
if x.startswith("wl:"):
wl_id = x[3:]
# if ID contains "," replace them with OR for ES query
wl_id = wl_id.replace(",", " OR ")
# if ID != 0 add it, otherwise, it's a wildcard!
if wl_id != "0":
# if IDs are negative, we must exclude all IDs except
# those ones.
if wl_id.find("-") != -1:
wl_id = wl_id.replace("-", "")
#print "Negative query."
if not 'must_not' in esq['query']['bool'].keys():
esq['query']['bool']['must_not'] = []
esq['query']['bool']['must_not'].append({"match" : { "id" : wl_id}})
else:
esq['query']['bool']['must'].append({"match" : { "id" : wl_id}})
if x.startswith("mz:"):
mz_str = x[3:]
[res, filters] = self.parse_mz(mz_str, esq)
if res is False:
return [False, "matchzone parsing failed."]
esq = self.append_gfilter(esq)
return [True, filters]
def parse_mz(self, mz_str, esq):
""" parses a match zone from BasicRule, and updates
es query accordingly. Removes ^/$ chars from regexp """
forbidden_rx_chars = "^$"
kw = mz_str.split("|")
tpl = esq['query']['bool']['must']
uri = ""
zone = ""
var_name = ""
t_name = False
# |NAME flag
if "NAME" in kw:
t_name = True
kw.remove("NAME")
for k in kw:
# named var
if k.startswith('$'):
k = k[1:]
try:
[zone, var_name] = k.split(':')
except:
return [False, "Incoherent zone : "+k]
# *_VAR:<string>
if zone.endswith("_VAR"):
zone = zone[:-4]
if t_name is True:
zone += "|NAME"
tpl.append({"match" : { "zone" : zone}})
tpl.append({"match" : { "var_name" : var_name}})
# *_VAR_X:<regexp>
elif zone.endswith("_VAR_X"):
zone = zone[:-6]
if t_name is True:
zone += "|NAME"
tpl.append({"match" : { "zone" : zone}})
#.translate(string.maketrans(chars, newchars))
tpl.append({"regexp" : { "var_name" : var_name.translate(None, forbidden_rx_chars)}})
# URL_X:<regexp>
elif zone == "URL_X":
zone = zone[:-2]
tpl.append({"regexp" : { "uri" : var_name.translate(None, forbidden_rx_chars)}})
# URL:<string>
elif zone == "URL":
tpl.append({"match" : { "uri" : var_name }})
else:
print "huh, what's that ? "+zone
# |<ZONE>
else:
if k not in ["HEADERS", "BODY", "URL", "ARGS", "FILE_EXT"]:
return [False, "Unknown zone : '"+k+"'"]
zone = k
if t_name is True:
zone += "|NAME"
tpl.append({"match" : {"zone" : zone}})
# print "RULE :"
# pprint.pprint(esq)
return [True, esq]
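    # Illustrative example (not from the original code): a whitelist such as
    #   BasicRule wl:1000 "mz:$ARGS_VAR:foo";
    # is turned by wl2esq()/parse_mz() above into an ES query whose bool.must
    # list contains {"match": {"id": "1000"}}, {"match": {"zone": "ARGS"}} and
    # {"match": {"var_name": "foo"}}.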
def tpl2wl(self, rule, template=None):
""" transforms a rule/esq
to a valid BasicRule. """
tname = False
zone = ""
if template is not None and '_statics' in template.keys():
for x in template['_statics'].keys():
rule[x] = template['_statics'][x]
wl = "BasicRule "
wl += " wl:"+str(rule.get('id', 0)).replace("OR", ",").replace("|", ",").replace(" ", "")
wl += ' "mz:'
if rule.get('uri', None) is not None:
wl += "$URL:"+rule['uri']
wl += "|"
# whitelist targets name
if rule.get('zone', '').endswith("|NAME"):
tname = True
zone = rule['zone'][:-5]
else:
zone = rule['zone']
if rule.get('var_name', '') not in ['', '?'] and zone != "FILE_EXT":
wl += "$"+zone+"_VAR:"+rule['var_name']
else:
wl += zone
if tname is True:
wl += "|NAME"
wl += '";'
return wl
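    # Illustrative example (not from the original code): tpl2wl() is the inverse
    # direction; a rule like {'id': '1000', 'zone': 'ARGS', 'var_name': 'foo',
    # 'uri': '/index.php'} renders as:
    #   BasicRule  wl:1000 "mz:$URL:/index.php|$ARGS_VAR:foo";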
def fetch_top(self, template, field, limit=10):
""" fetch top items for a given field,
        clears the field if it exists in gfilters """
x = None
if field in template.keys():
x = template[field]
del template[field]
esq = self.tpl2esq(template)
if x is not None:
template[field] = x
esq['facets'] = { "facet_results" : {"terms": { "field": field, "size" : self.es_max_size} }}
res = self.search(esq)
total = res['facets']['facet_results']['total']
count = 0
for x in res['facets']['facet_results']['terms']:
print "# "+self.grn.format(x['term'])+" "+str(round( (float(x['count']) / total) * 100.0, 2))+" % (total:"+str(x['count'])+"/"+str(total)+")"
count += 1
if count > limit:
break
def fetch_uniques(self, rule, key):
""" shortcut function to gather unique
values and their associated match count """
uniques = []
esq = self.tpl2esq(rule)
esq['facets'] = { "facet_results" : {"terms": { "field": key, "size" : 50000} }}
res = self.search(esq)
for x in res['facets']['facet_results']['terms']:
if x['term'] not in uniques:
uniques.append(x['term'])
return { 'list' : uniques, 'total' : len(uniques) }
def index(self, body, eid):
return self.es.index(index=self.cfg["elastic"]["index"], doc_type=self.cfg["elastic"]["doctype"], body=body, id=eid)
def search(self, esq, stats=False):
""" search wrapper with debug """
debug = False
if debug is True:
print "#SEARCH:PARAMS:index="+self.cfg["elastic"]["index"]+", doc_type="+self.cfg["elastic"]["doctype"]+", body=",
print "#SEARCH:QUERY:",
pprint.pprint (esq)
if len(esq["query"]["bool"]["must"]) == 0:
del esq["query"]
x = self.es.search(index=self.cfg["elastic"]["index"], doc_type=self.cfg["elastic"]["doctype"], body=esq)
if debug is True:
print "#RESULT:",
pprint.pprint(x)
return x
def normalize_checks(self, tpl):
""" replace check signs (<, >, <=, >=) by
operator.X in a dict-form tpl """
replace = {
'>' : operator.gt,
'<' : operator.lt,
'>=' : operator.ge,
'<=' : operator.le
}
for tpl_key in tpl.keys():
for token in replace.keys():
if tpl[tpl_key][0] == token:
tpl[tpl_key][0] = replace[token]
return tpl
def tag_events(self, esq, msg, tag=False):
""" tag events with msg + tstamp if they match esq """
count = 0
total_events = 0
esq["size"] = "0"
print "TAG RULE :",
pprint.pprint(esq)
x = self.search(esq)
total_events = int(str(x["hits"]["total"]))
print str(self.grn.format(total_events)) + " items to be tagged ..."
size = int(x['hits']['total'])
if size > 100:
size = size / 10
while count < total_events:
esq["size"] = size
esq["from"] = 0
res = self.search(esq)
# Iterate through matched evts to tag them.
for item in res['hits']['hits']:
eid = item['_id']
body = item['_source']
cm = item['_source']['comments']
body['comments'] += ","+msg+":"+str(datetime.datetime.now())
body['whitelisted'] = "true"
if tag is True:
# print "Tagging id: "+eid
self.index(body, eid)
else:
print eid+",",
count += 1
print "Tagged {0} events out of {1}".format(count, total_events)
if total_events - count < size:
size = total_events - count
print ""
return count
def gen_wl(self, tpl, rule={}):
""" recursive whitelist generation function,
        returns a list of all possible whitelists. """
retlist = []
for tpl_key in tpl.keys():
if tpl_key in rule.keys():
continue
if tpl_key[0] in ['_', '?']:
continue
if tpl[tpl_key] == '?':
continue
rule[tpl_key] = tpl[tpl_key]
for tpl_key in tpl.keys():
if tpl_key.startswith('_'):
continue
elif tpl_key.startswith('?'):
if tpl_key[1:] in rule.keys():
continue
unique_vals = self.fetch_uniques(rule, tpl_key[1:])['list']
for uval in unique_vals:
rule[tpl_key[1:]] = uval
retlist += self.gen_wl(tpl, copy.copy(rule))
return retlist
elif tpl[tpl_key] == '?':
if tpl_key in rule.keys():
continue
unique_vals = self.fetch_uniques(rule, tpl_key)['list']
for uval in unique_vals:
rule[tpl_key] = uval
retlist += self.gen_wl(tpl, copy.copy(rule))
return retlist
elif tpl_key not in rule.keys():
rule[tpl_key] = tpl[tpl_key]
retlist += self.gen_wl(tpl, copy.copy(rule))
return retlist
esq = self.tpl2esq(rule)
res = self.search(esq)
if res['hits']['total'] > 0:
clist = []
peers = []
uri = []
var_name = []
for x in res['hits']['hits']:
if len(x.get("_source").get("ip", "")) > 0 and x.get("_source").get("ip", "") not in peers:
peers.append(x["_source"]["ip"])
if len(x.get("_source").get("uri", "")) > 0 and x.get("_source").get("uri", "") not in uri:
uri.append(x["_source"]["uri"])
if len(x.get("_source").get("var_name", "")) > 0 and x.get("_source").get("var_name", "") not in var_name:
var_name.append(x["_source"]["var_name"])
if len(x.get("_source").get("content", "")) > 0 and x.get("_source").get("content", "") not in clist:
clist.append(x["_source"]["content"])
if len(clist) >= 5:
break
retlist.append({'rule' : rule, 'content' : clist[:5], 'total_hits' : res['hits']['total'], 'peers' : peers[:5], 'uri' : uri[:5],
'var_name' : var_name[:5]})
return retlist
return []
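# Illustrative sketch (not part of the original nxtool code): normalize_checks()
# above turns textual comparators into callables, e.g. ["<=", 10] becomes
# [operator.le, 10], which check_rule() then applies to live counts. Uncalled
# example of that round trip, assuming `translator` is an NxTranslate instance:
def _example_normalize_and_check(translator):
    checks = translator.normalize_checks({"rule_ip": [">=", 10]})
    op, threshold = checks["rule_ip"]
    return op(42, int(threshold))  # True, since 42 >= 10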
|
|
"""
Mean log loss from 5-fold CV: 0.480065955962
"""
import copy
import itertools
import numpy as np
import lasagne
import math
import os
import theano
import theano.tensor as T
import time
from lasagne.layers import DenseLayer, DropoutLayer, InputLayer, get_all_params
from lasagne.nonlinearities import rectify, softmax
from lasagne.objectives import categorical_crossentropy, Objective
from lasagne.updates import adagrad
from sklearn import feature_extraction
from sklearn.base import BaseEstimator
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.utils import check_random_state
from otto_utils import consts, utils
MODEL_NAME = 'model_09_nn_adagrad'
MODE = 'cv' # cv|submission|holdout|tune
class NeuralNetwork(BaseEstimator):
def __init__(self, n_hidden=20, max_epochs=150, batch_size=200,
lr=0.01, epsilon=0.9, dropout=0.5, valid_ratio=0.0,
use_valid=False, verbose=0, random_state=None):
self.n_hidden = n_hidden
self.max_epochs = max_epochs
self.batch_size = batch_size
self.lr = lr
self.epsilon = epsilon
self.dropout = dropout
self.valid_ratio = valid_ratio
self.use_valid = use_valid
self.verbose = verbose
self.random_state = random_state
# State
self.score_ = None
self.classes_ = None
self.n_classes_ = None
self.model = None
def fit(self, data, targets, sample_weight=None):
self.classes_, indices = np.unique(targets, return_inverse=True)
self.n_classes_ = self.classes_.shape[0]
random_state = check_random_state(self.random_state)
# Shuffle data and eventually split on train and validation sets
if self.valid_ratio > 0:
strat_shuffled_split = StratifiedShuffleSplit(targets, test_size=self.valid_ratio,
n_iter=1, random_state=self.random_state)
train_index, valid_index = [s for s in strat_shuffled_split][0]
X_train, y_train = data[train_index], targets[train_index]
X_valid, y_valid = data[valid_index], targets[valid_index]
else:
X_train, y_train = data, targets
X_valid, y_valid = np.array([]), np.array([])
if self.verbose > 5:
print 'X_train: %s, y_train: %s' % (X_train.shape, y_train.shape)
if self.use_valid:
print 'X_valid: %s, y_valid: %s' % (X_valid.shape, y_valid.shape)
# Prepare theano variables
dataset = dict(
X_train=theano.shared(lasagne.utils.floatX(X_train)),
y_train=T.cast(theano.shared(y_train), 'int32'),
X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
y_valid=T.cast(theano.shared(y_valid), 'int32'),
num_examples_train=X_train.shape[0],
num_examples_valid=X_valid.shape[0],
input_dim=X_train.shape[1],
output_dim=self.n_classes_,
)
if self.verbose > 0:
print "Building model and compiling functions..."
output_layer = self.build_model(dataset['input_dim'])
iter_funcs = self.create_iter_functions(dataset, output_layer)
if self.verbose > 0:
print "Starting training..."
now = time.time()
results = []
try:
for epoch in self.train(iter_funcs, dataset, output_layer):
if self.verbose > 1:
print "Epoch {} of {} took {:.3f}s".format(
epoch['number'], self.max_epochs, time.time() - now)
now = time.time()
results.append([epoch['number'], epoch['train_loss'], epoch['valid_loss']])
if self.verbose > 1:
print " training loss:\t\t{:.6f}".format(epoch['train_loss'])
print " validation loss:\t\t{:.6f}".format(epoch['valid_loss'])
print " validation accuracy:\t\t{:.2f} %%".format(
epoch['valid_accuracy'] * 100)
if epoch['number'] >= self.max_epochs:
break
if self.verbose > 0:
print 'Minimum validation error: %f (epoch %d)' % \
(epoch['best_val_error'], epoch['best_val_iter'])
except KeyboardInterrupt:
pass
return self
def predict(self, data):
preds, _ = self.make_predictions(data)
return preds
def predict_proba(self, data):
_, proba = self.make_predictions(data)
return proba
def score(self):
return self.score_
# Private methods
def build_model(self, input_dim):
l_in = InputLayer(shape=(self.batch_size, input_dim))
l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
return l_out
def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
y_batch = T.ivector('y')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
objective = Objective(output_layer, loss_function=categorical_crossentropy)
loss_train = objective.get_loss(X_batch, target=y_batch)
loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)
all_params = get_all_params(output_layer)
updates = adagrad(loss_train, all_params, self.lr, self.epsilon)
iter_train = theano.function(
[batch_index], loss_train,
updates=updates,
givens={
X_batch: dataset['X_train'][batch_slice],
y_batch: dataset['y_train'][batch_slice],
},
on_unused_input='ignore',
)
iter_valid = None
if self.use_valid:
iter_valid = theano.function(
[batch_index], [loss_eval, accuracy, proba],
givens={
X_batch: dataset['X_valid'][batch_slice],
y_batch: dataset['y_valid'][batch_slice],
},
)
return dict(train=iter_train, valid=iter_valid)
def create_test_function(self, dataset, output_layer, X_tensor_type=T.matrix):
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
batch_slice = slice(batch_index * self.batch_size, (batch_index + 1) * self.batch_size)
pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
proba = output_layer.get_output(X_batch, deterministic=True)
iter_test = theano.function(
[batch_index], [pred, proba],
givens={
X_batch: dataset['X_test'][batch_slice],
},
)
return dict(test=iter_test)
def train(self, iter_funcs, dataset, output_layer):
num_batches_train = dataset['num_examples_train'] // self.batch_size
num_batches_valid = int(math.ceil(dataset['num_examples_valid'] / float(self.batch_size)))
best_val_err = 100
best_val_iter = -1
for epoch in itertools.count(1):
batch_train_losses = []
for b in range(num_batches_train):
batch_train_loss = iter_funcs['train'](b)
batch_train_losses.append(batch_train_loss)
avg_train_loss = np.mean(batch_train_losses)
batch_valid_losses = []
batch_valid_accuracies = []
batch_valid_probas = []
if self.use_valid:
for b in range(num_batches_valid):
batch_valid_loss, batch_valid_accuracy, batch_valid_proba = iter_funcs['valid'](b)
batch_valid_losses.append(batch_valid_loss)
batch_valid_accuracies.append(batch_valid_accuracy)
batch_valid_probas.append(batch_valid_proba)
avg_valid_loss = np.mean(batch_valid_losses)
avg_valid_accuracy = np.mean(batch_valid_accuracies)
if (best_val_err > avg_valid_loss and self.use_valid) or\
(epoch == self.max_epochs and not self.use_valid):
best_val_err = avg_valid_loss
best_val_iter = epoch
# Save model
self.score_ = best_val_err
self.model = copy.deepcopy(output_layer)
yield {
'number': epoch,
'train_loss': avg_train_loss,
'valid_loss': avg_valid_loss,
'valid_accuracy': avg_valid_accuracy,
'best_val_error': best_val_err,
'best_val_iter': best_val_iter,
}
def make_predictions(self, data):
dataset = dict(
X_test=theano.shared(lasagne.utils.floatX(data)),
num_examples_test=data.shape[0],
input_dim=data.shape[1],
output_dim=self.n_classes_,
)
iter_funcs = self.create_test_function(dataset, self.model)
num_batches_test = int(math.ceil(dataset['num_examples_test'] / float(self.batch_size)))
test_preds, test_probas = np.array([]), None
for b in range(num_batches_test):
batch_test_pred, batch_test_proba = iter_funcs['test'](b)
test_preds = np.append(test_preds, batch_test_pred)
test_probas = np.append(test_probas, batch_test_proba, axis=0) if test_probas is not None else batch_test_proba
return test_preds, test_probas
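# Illustrative sketch (not part of the original model): create_iter_functions()
# above indexes the shared datasets through Theano `givens` with
# slice(batch_index * batch_size, (batch_index + 1) * batch_size); the uncalled
# helper below shows the same batching arithmetic on a plain array.
def _example_batch_slice(data, batch_index, batch_size):
    batch_slice = slice(batch_index * batch_size, (batch_index + 1) * batch_size)
    return data[batch_slice]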
if __name__ == '__main__':
train, labels, test, _, _ = utils.load_data()
# Preprocess data - transform counts to TFIDF features
tfidf = feature_extraction.text.TfidfTransformer(smooth_idf=False)
train = np.append(train, tfidf.fit_transform(train).toarray(), axis=1)
test = np.append(test, tfidf.transform(test).toarray(), axis=1)
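    # Note (illustrative, not in the original): the raw count features are kept
    # and the TF-IDF transform of the same counts is appended column-wise,
    # doubling the input dimension fed to the network.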
clf = NeuralNetwork(512, 110, 128, 0.004438538808932511, 1.6674644616533133e-14, 0.2137591043893735,
.02, True, 10, random_state=23)
if MODE == 'cv':
scores, predictions = utils.make_blender_cv(clf, train, labels, calibrate=False)
print 'CV:', scores, 'Mean log loss:', np.mean(scores)
utils.write_blender_data(consts.BLEND_PATH, MODEL_NAME + '.csv', predictions)
elif MODE == 'submission':
clf.fit(train, labels)
predictions = clf.predict_proba(test)
utils.save_submission(consts.DATA_SAMPLE_SUBMISSION_PATH,
os.path.join(consts.ENSEMBLE_PATH, MODEL_NAME + '.csv'),
predictions)
elif MODE == 'holdout':
score = utils.hold_out_evaluation(clf, train, labels, calibrate=False)
print 'Log loss:', score
else:
print 'Unknown mode'
|
|
# Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import pprint
from typing import Iterable
from typing import List
from botocore.exceptions import ClientError
from cloudaux.aws.iam import delete_role_policy
from cloudaux.aws.iam import get_role_inline_policies
from cloudaux.aws.iam import put_role_policy
from tabulate import tabulate
import repokid.hooks
from repokid.datasource.access_advisor import AccessAdvisorDatasource
from repokid.datasource.iam import IAMDatasource
from repokid.exceptions import RoleStoreError
from repokid.role import Role
from repokid.role import RoleList
from repokid.types import RepokidConfig
from repokid.types import RepokidHooks
from repokid.utils.dynamo import find_role_in_cache
from repokid.utils.dynamo import role_arns_for_all_accounts
from repokid.utils.permissions import get_services_in_permissions
LOGGER = logging.getLogger("repokid")
def _repo_role(
account_number: str,
role_name: str,
config: RepokidConfig,
hooks: RepokidHooks,
commit: bool = False,
scheduled: bool = False,
) -> List[str]:
"""
Calculate what repoing can be done for a role and then actually do it if commit is set
    1) Check that a role exists, it isn't being disqualified by a filter, and that it has fresh AA data
2) Get the role's current permissions, repoable permissions, and the new policy if it will change
3) Make the changes if commit is set
Args:
account_number (string)
role_name (string)
commit (bool)
Returns:
        errors (list): if any
"""
role_id = find_role_in_cache(role_name, account_number)
# only load partial data that we need to determine if we should keep going
role = Role(role_id=role_id, config=config)
role.fetch()
return role.repo(hooks, commit=commit, scheduled=scheduled)
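# Illustrative sketch (not part of repokid): _repo_role is normally driven by a
# CLI wrapper; a hypothetical dry run followed by a commit could look like this
# (the account number and role name below are placeholders).
def _example_repo_role_dry_run(config: RepokidConfig, hooks: RepokidHooks) -> List[str]:
    errors = _repo_role("123456789012", "example-role", config, hooks, commit=False)
    if not errors:
        errors = _repo_role("123456789012", "example-role", config, hooks, commit=True)
    return errors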
def _rollback_role(
account_number: str,
role_name: str,
config: RepokidConfig,
hooks: RepokidHooks,
selection: int = -1,
commit: bool = False,
) -> List[str]:
"""
Display the historical policy versions for a role as a numbered list. Restore to a specific version if selected.
Indicate changes that will be made and then actually make them if commit is selected.
Args:
account_number (string)
role_name (string)
selection (int): which policy version in the list to rollback to
commit (bool): actually make the change
Returns:
errors (list): if any
"""
errors = []
role_id = find_role_in_cache(role_name, account_number)
if not role_id:
message = "Could not find role with name {} in account {}".format(
role_name, account_number
)
errors.append(message)
LOGGER.warning(message)
return errors
else:
role = Role(role_id=role_id)
role.fetch()
# no option selected, display a table of options
if selection < 0:
headers = ["Number", "Source", "Discovered", "Permissions", "Services"]
rows = []
for index, policies_version in enumerate(role.policies):
policy_permissions, _ = repokid.utils.permissions.get_permissions_in_policy(
policies_version["Policy"]
)
rows.append(
[
index,
policies_version["Source"],
policies_version["Discovered"],
len(policy_permissions),
get_services_in_permissions(policy_permissions),
]
)
print(tabulate(rows, headers=headers))
return errors
conn = config["connection_iam"]
conn["account_number"] = account_number
current_policies = get_role_inline_policies(role.dict(by_alias=True), **conn)
pp = pprint.PrettyPrinter()
print("Will restore the following policies:")
pp.pprint(role.policies[int(selection)]["Policy"])
print("Current policies:")
pp.pprint(current_policies)
current_permissions, _ = role.get_permissions_for_policy_version()
selected_permissions, _ = role.get_permissions_for_policy_version(
selection=selection
)
restored_permissions = selected_permissions - current_permissions
print("\nResore will return these permissions:")
print("\n".join([perm for perm in sorted(restored_permissions)]))
if not commit:
return errors
# if we're restoring from a version with fewer policies than we have now, we need to remove them to
# complete the restore. To do so we'll store all the policy names we currently have and remove them
# from the list as we update. Any policy names left need to be manually removed
    policies_to_remove = list(current_policies.keys())
for policy_name, policy in role.policies[int(selection)]["Policy"].items():
try:
LOGGER.info(
f"Pushing cached policy: {policy_name} (role: {role.role_name} account {account_number})"
)
put_role_policy(
RoleName=role.role_name,
PolicyName=policy_name,
PolicyDocument=json.dumps(policy, indent=2, sort_keys=True),
**conn,
)
except ClientError:
message = f"Unable to push policy {policy_name}. (role: {role.role_name} account {account_number})"
LOGGER.error(message, exc_info=True)
errors.append(message)
else:
# remove the policy name if it's in the list
try:
policies_to_remove.remove(policy_name)
except Exception: # nosec
pass
if policies_to_remove:
for policy_name in policies_to_remove:
try:
LOGGER.info(
f"Deleting policy {policy_name} for rollback (role: {role.role_name} account {account_number})"
)
delete_role_policy(
RoleName=role.role_name, PolicyName=policy_name, **conn
)
except ClientError:
message = f"Unable to delete policy {policy_name}. (role: {role.role_name} account {account_number})"
LOGGER.error(message, exc_info=True)
errors.append(message)
try:
role.store()
except RoleStoreError:
message = (
f"failed to store role data for {role.role_name} in account {role.account}"
)
errors.append(message)
LOGGER.exception(message, exc_info=True)
if not errors:
LOGGER.info(
f"Successfully restored selected version {selection} of role policies (role: {role.role_name} "
f"account: {account_number}"
)
return errors
def _repo_all_roles(
account_number: str,
config: RepokidConfig,
hooks: RepokidHooks,
commit: bool = False,
scheduled: bool = True,
limit: int = -1,
) -> None:
"""
Repo all scheduled or eligible roles in an account. Collect any errors and display them at the end.
Args:
account_number (string)
        config (RepokidConfig)
        hooks (RepokidHooks)
commit (bool): actually make the changes
scheduled (bool): if True only repo the scheduled roles, if False repo all the (eligible) roles
limit (int): limit number of roles to be repoed per run (< 0 is unlimited)
Returns:
None
"""
access_advisor_datasource = AccessAdvisorDatasource()
access_advisor_datasource.seed(account_number)
iam_datasource = IAMDatasource()
role_arns = iam_datasource.seed(account_number)
errors = []
roles = RoleList.from_arns(role_arns, config=config)
roles = roles.get_active()
if scheduled:
roles = roles.get_scheduled()
if not roles:
LOGGER.info(f"No roles to repo in account {account_number}")
return
LOGGER.info(
"Repoing these {}roles from account {}:\n\t{}".format(
"scheduled " if scheduled else "",
account_number,
", ".join([role.role_name for role in roles]),
)
)
repokid.hooks.call_hooks(
hooks, "BEFORE_REPO_ROLES", {"account_number": account_number, "roles": roles}
)
count = 0
repoed = RoleList([])
for role in roles:
if limit >= 0 and count == limit:
break
role_errors = role.repo(hooks, commit=commit, scheduled=scheduled)
if role_errors:
errors.extend(role_errors)
repoed.append(role)
count += 1
if errors:
LOGGER.error(f"Error(s) during repo in account: {account_number}: {errors}")
LOGGER.info(f"Successfully repoed {count} roles in account {account_number}")
repokid.hooks.call_hooks(
hooks,
"AFTER_REPO_ROLES",
{"account_number": account_number, "roles": repoed, "errors": errors},
)
def _repo_stats(output_file: str, account_number: str = "") -> None:
"""
Create a csv file with stats about roles, total permissions, and applicable filters over time
Args:
output_file (string): the name of the csv file to write
account_number (string): if specified only display roles from selected account, otherwise display all
Returns:
None
"""
role_ids: Iterable[str]
if account_number:
access_advisor_datasource = AccessAdvisorDatasource()
access_advisor_datasource.seed(account_number)
iam_datasource = IAMDatasource()
role_arns = iam_datasource.seed(account_number)
else:
role_arns = role_arns_for_all_accounts()
headers = [
"RoleId",
"Role Name",
"Account",
"Active",
"Date",
"Source",
"Permissions Count",
"Repoable Permissions Count",
"Disqualified By",
]
rows = []
roles = RoleList.from_arns(
role_arns, fields=["RoleId", "RoleName", "Account", "Active", "Stats"]
)
for role in roles:
for stats_entry in role.stats:
rows.append(
[
role.role_id,
role.role_name,
role.account,
role.active,
stats_entry["Date"],
stats_entry["Source"],
stats_entry["PermissionsCount"],
stats_entry.get("RepoablePermissionsCount", 0),
stats_entry.get("DisqualifiedBy", []),
]
)
try:
with open(output_file, "w") as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(headers)
for row in rows:
csv_writer.writerow(row)
except IOError as e:
LOGGER.error(
"Unable to write file {}: {}".format(output_file, e), exc_info=True
)
else:
LOGGER.info("Successfully wrote stats to {}".format(output_file))
|
|
import binascii
import datetime
import io
import os
import hashlib
from PIL import Image
from shutil import copyfile
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.db.models import signals
from avatar.conf import settings
from avatar.utils import get_username, force_bytes, invalidate_cache
try:
from django.utils.timezone import now
except ImportError:
now = datetime.datetime.now
# Issue 182: six no longer included with Django 3.0
try:
from django.utils import six
except ImportError:
import six
avatar_storage = get_storage_class(settings.AVATAR_STORAGE)()
def avatar_path_handler(instance=None, filename=None, size=None, ext=None):
tmppath = [settings.AVATAR_STORAGE_DIR]
if settings.AVATAR_HASH_USERDIRNAMES:
tmp = hashlib.md5(force_bytes(get_username(instance.user))).hexdigest()
tmppath.extend(tmp[0:2])
if settings.AVATAR_EXPOSE_USERNAMES:
tmppath.append(get_username(instance.user))
else:
tmppath.append(force_text(instance.user.pk))
if not filename:
# Filename already stored in database
filename = instance.avatar.name
if ext and settings.AVATAR_HASH_FILENAMES:
# An extension was provided, probably because the thumbnail
# is in a different format than the file. Use it. Because it's
# only enabled if AVATAR_HASH_FILENAMES is true, we can trust
# it won't conflict with another filename
(root, oldext) = os.path.splitext(filename)
filename = root + "." + ext
else:
# File doesn't exist yet
if settings.AVATAR_HASH_FILENAMES:
(root, ext) = os.path.splitext(filename)
if settings.AVATAR_RANDOMIZE_HASHES:
filename = binascii.hexlify(os.urandom(16)).decode('ascii')
else:
filename = hashlib.md5(force_bytes(filename)).hexdigest()
filename = filename + ext
if size:
tmppath.extend(['resized', str(size)])
tmppath.append(os.path.basename(filename))
return os.path.join(*tmppath)
avatar_file_path = import_string(settings.AVATAR_PATH_HANDLER)
def find_extension(format):
format = format.lower()
if format == 'jpeg':
format = 'jpg'
return format
class AvatarField(models.ImageField):
def __init__(self, *args, **kwargs):
super(AvatarField, self).__init__(*args, **kwargs)
self.max_length = 1024
self.upload_to = avatar_file_path
self.storage = avatar_storage
self.blank = True
def deconstruct(self):
name, path, args, kwargs = super(models.ImageField, self).deconstruct()
return name, path, (), {}
class Avatar(models.Model):
user = models.ForeignKey(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
verbose_name=_("user"), on_delete=models.CASCADE,
)
primary = models.BooleanField(
verbose_name=_("primary"),
default=False,
)
avatar = AvatarField(
verbose_name=_("avatar")
)
date_uploaded = models.DateTimeField(
verbose_name=_("uploaded at"),
default=now,
)
class Meta:
app_label = 'avatar'
verbose_name = _('avatar')
verbose_name_plural = _('avatars')
def __unicode__(self):
return _(six.u('Avatar for %s')) % self.user
def save(self, *args, **kwargs):
avatars = Avatar.objects.filter(user=self.user)
if self.pk:
avatars = avatars.exclude(pk=self.pk)
if settings.AVATAR_MAX_AVATARS_PER_USER > 1:
if self.primary:
avatars = avatars.filter(primary=True)
avatars.update(primary=False)
else:
avatars.delete()
super(Avatar, self).save(*args, **kwargs)
def thumbnail_exists(self, size):
return self.avatar.storage.exists(self.avatar_name(size))
def transpose_image(self, image):
"""
Transpose based on EXIF information.
Borrowed from django-imagekit:
imagekit.processors.Transpose
"""
EXIF_ORIENTATION_STEPS = {
1: [],
2: ['FLIP_LEFT_RIGHT'],
3: ['ROTATE_180'],
4: ['FLIP_TOP_BOTTOM'],
5: ['ROTATE_270', 'FLIP_LEFT_RIGHT'],
6: ['ROTATE_270'],
7: ['ROTATE_90', 'FLIP_LEFT_RIGHT'],
8: ['ROTATE_90'],
}
try:
orientation = image._getexif()[0x0112]
ops = EXIF_ORIENTATION_STEPS[orientation]
except Exception:
ops = []
for method in ops:
image = image.transpose(getattr(Image, method))
return image
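        # Example (illustrative): an image that needs a 90° clockwise rotation
        # to display upright usually carries EXIF orientation 6, which the table
        # above maps to PIL's ROTATE_270 (i.e. 270° counter-clockwise).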
def create_thumbnail(self, size, quality=None):
# invalidate the cache of the thumbnail with the given size first
invalidate_cache(self.user, size)
try:
orig = self.avatar.storage.open(self.avatar.name, 'rb')
image = Image.open(orig)
image = self.transpose_image(image)
except IOError:
import traceback
traceback.print_exc()
# TODO: What should we do here? Render a "sorry, didn't work" img?
return
quality = quality or settings.AVATAR_THUMB_QUALITY
(w, h) = image.size
if w != size or h != size:
if w > h:
                diff = (w - h) // 2
image = image.crop((diff, 0, w - diff, h))
else:
                diff = (h - w) // 2
image = image.crop((0, diff, w, h - diff))
if image.mode != "RGB":
image = image.convert("RGB")
image = image.resize((size, size), settings.AVATAR_RESIZE_METHOD)
thumb = io.BytesIO()
image.save(thumb, settings.AVATAR_THUMB_FORMAT, quality=quality)
thumb_file = ContentFile(thumb.getvalue())
else:
if hasattr(orig, 'read'):
orig = orig.read()
thumb_file = ContentFile(orig)
thumb = self.avatar.storage.save(self.avatar_name(size), thumb_file)
def avatar_url(self, size):
try:
_base_path = os.path.split(self.avatar_name(size))[0]
_upload_path = os.path.split(
self.avatar.storage.url(self.avatar_name(size)))[1]
_upload_path = os.path.join(_base_path, _upload_path)
if not self.avatar.storage.exists(_upload_path):
copyfile(self.avatar.storage.path(self.avatar_name(size)),
self.avatar.storage.path(_upload_path))
return self.avatar.storage.url(self.avatar_name(size))
except BaseException:
from django.contrib.staticfiles.templatetags import staticfiles
return staticfiles.static(settings.MISSING_THUMBNAIL)
def get_absolute_url(self):
return self.avatar_url(settings.AVATAR_DEFAULT_SIZE)
def avatar_name(self, size):
ext = find_extension(settings.AVATAR_THUMB_FORMAT)
return avatar_file_path(
instance=self,
size=size,
ext=ext
)
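# Illustrative sketch (not part of django-avatar): create_thumbnail() above
# center-crops the longer dimension to a square before resizing; the standalone
# helper below shows just that cropping arithmetic with Pillow.
def _example_center_square_crop(image):
    w, h = image.size
    if w > h:
        diff = (w - h) // 2
        return image.crop((diff, 0, w - diff, h))
    diff = (h - w) // 2
    return image.crop((0, diff, w, h - diff))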
def invalidate_avatar_cache(sender, instance, **kwargs):
invalidate_cache(instance.user)
def create_default_thumbnails(sender, instance, created=False, **kwargs):
invalidate_avatar_cache(sender, instance)
if created:
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
instance.create_thumbnail(size)
def remove_avatar_images(instance=None, **kwargs):
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
if instance.thumbnail_exists(size):
instance.avatar.storage.delete(instance.avatar_name(size))
if instance.avatar.storage.exists(instance.avatar.name):
instance.avatar.storage.delete(instance.avatar.name)
signals.post_save.connect(create_default_thumbnails, sender=Avatar)
signals.post_delete.connect(invalidate_avatar_cache, sender=Avatar)
if settings.AVATAR_CLEANUP_DELETED:
signals.post_delete.connect(remove_avatar_images, sender=Avatar)
|
|
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import os
import tempfile
import subprocess
import copy
import re
import datetime
# internal modules:
from yotta.lib.fsutils import mkDirP, rmRf
from yotta.lib.detect import systemDefaultTarget
from . import cli
Test_Complex = {
'module.json': '''{
"name": "test-testdep-a",
"version": "0.0.2",
"description": "Module to test test-dependencies",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {
"test-testdep-b": "*",
"test-testdep-c": "*",
"test-testdep-d": "*"
},
"testDependencies": {
"test-testdep-e": "*"
}
}
''',
'source/a.c': '''
#include "a/a.h"
#include "b/b.h"
#include "c/c.h"
#include "d/d.h"
int a(){
return 1 + b() + c() + d(); // 35
}
''',
'a/a.h':'''
#ifndef __A_H__
#define __A_H__
int a();
#endif
''',
'test/check.c': '''
#include <stdio.h>
#include "a/a.h"
#include "b/b.h"
#include "c/c.h"
#include "d/d.h"
#include "e/e.h"
int main(){
int result = a() + b() + c() + d() + e();
printf("%d\\n", result);
return !(result == 86);
}
'''
}
Test_Trivial_Lib = {
'module.json':'''{
"name": "test-trivial-lib",
"version": "0.0.2",
"description": "Module to test trivial lib compilation",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {
}
}''',
'test-trivial-lib/lib.h': '''
int foo();
''',
'source/lib.c':'''
#include "test-trivial-lib/lib.h"
int foo(){
return 7;
}
'''
}
Test_Trivial_Exe = {
'module.json':'''{
"name": "test-trivial-exe",
"version": "0.0.2",
"description": "Module to test trivial exe compilation",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {
},
"bin":"./source"
}''',
'source/lib.c':'''
int main(){
return 0;
}
'''
}
Test_Build_Info = copy.copy(Test_Trivial_Exe)
Test_Build_Info['source/lib.c'] = '''
#include "stdio.h"
#include YOTTA_BUILD_INFO_HEADER
#define STRINGIFY(s) STRINGIFY_INDIRECT(s)
#define STRINGIFY_INDIRECT(s) #s
int main(){
printf("vcs ID: %s\\n", STRINGIFY(YOTTA_BUILD_VCS_ID));
printf("vcs clean: %d\\n", YOTTA_BUILD_VCS_CLEAN);
printf("build UUID: %s\\n", STRINGIFY(YOTTA_BUILD_UUID));
printf(
"build timestamp: %.4d-%.2d-%.2d-%.2d-%.2d-%.2d\\n",
YOTTA_BUILD_YEAR,
YOTTA_BUILD_MONTH,
YOTTA_BUILD_DAY,
YOTTA_BUILD_HOUR,
YOTTA_BUILD_MINUTE,
YOTTA_BUILD_SECOND
);
return 0;
}
'''
Test_Tests = {
'module.json':'''{
"name": "test-tests",
"version": "0.0.0",
"description": "Test yotta's compilation of tests.",
"keywords": [],
"author": "James Crosby <[email protected]>",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {},
"targetDependencies": {}
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
'test-tests/foo.h':'int foo();',
'test/a/bar.c':'#include "test-tests/foo.h"\nint main(){ foo(); return 0; }',
'test/b/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/b/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/c/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/c/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/d/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/d/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/e/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/e/b/a/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/f/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/f/a/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/g/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }'
}
def isWindows():
# can't run tests that hit github without an authn token
return os.name == 'nt'
class TestCLIBuild(unittest.TestCase):
def writeTestFiles(self, files, add_space_in_path=False):
test_dir = tempfile.mkdtemp()
if add_space_in_path:
test_dir = test_dir + ' spaces in path'
for path, contents in files.items():
path_dir, file_name = os.path.split(path)
path_dir = os.path.join(test_dir, path_dir)
mkDirP(path_dir)
with open(os.path.join(path_dir, file_name), 'w') as f:
f.write(contents)
return test_dir
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildTrivialLib(self):
test_dir = self.writeTestFiles(Test_Trivial_Lib)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildTrivialExe(self):
test_dir = self.writeTestFiles(Test_Trivial_Exe)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildComplex(self):
test_dir = self.writeTestFiles(Test_Complex)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildComplexSpaceInPath(self):
test_dir = self.writeTestFiles(Test_Complex, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildTests(self):
test_dir = self.writeTestFiles(Test_Tests, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
self.assertIn('test-a', stdout)
self.assertIn('test-c', stdout)
self.assertIn('test-d', stdout)
self.assertIn('test-e', stdout)
self.assertIn('test-f', stdout)
self.assertIn('test-g', stdout)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildInfo(self):
test_dir = self.writeTestFiles(Test_Build_Info, True)
# commit all the test files to git so that the VCS build info gets
# defined:
subprocess.check_call(['git', 'init', '-q'], cwd=test_dir)
subprocess.check_call(['git', 'add', '.'], cwd=test_dir)
subprocess.check_call(['git', 'commit', '-m', 'test build info automated commit', '-q'], cwd=test_dir)
self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
build_time = datetime.datetime.utcnow()
output = subprocess.check_output(['./build/' + systemDefaultTarget().split(',')[0] + '/source/test-trivial-exe'], cwd=test_dir).decode()
self.assertIn('vcs clean: 1', output)
# check build timestamp
self.assertIn('build timestamp: ', output)
build_timestamp_s = re.search('build timestamp: (.*)\n', output)
self.assertTrue(build_timestamp_s)
build_timestamp_s = build_timestamp_s.group(1)
build_time_parsed = datetime.datetime.strptime(build_timestamp_s, '%Y-%m-%d-%H-%M-%S')
build_time_skew = build_time_parsed - build_time
self.assertTrue(abs(build_time_skew.total_seconds()) < 3)
def runCheckCommand(self, args, test_dir):
stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
if statuscode != 0:
print('command failed with status %s' % statuscode)
print(stdout)
print(stderr)
self.assertEqual(statuscode, 0)
return stdout + stderr
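# Illustrative sketch (not part of the yotta test suite): test_buildInfo() above
# pulls the 'build timestamp: YYYY-MM-DD-HH-MM-SS' line out of the program
# output and compares it with the current UTC time; the parsing step on its own:
def _example_parse_build_timestamp(output):
    match = re.search('build timestamp: (.*)\n', output)
    if not match:
        return None
    return datetime.datetime.strptime(match.group(1), '%Y-%m-%d-%H-%M-%S')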
|
|
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test pypowervm.tasks.slot_map."""
import mock
import six
import testtools
from pypowervm import exceptions as pv_e
from pypowervm.tasks import slot_map
from pypowervm.tests.test_utils import pvmhttp
from pypowervm.utils import lpar_builder as lb
from pypowervm.wrappers import iocard as ioc
from pypowervm.wrappers import network as net
from pypowervm.wrappers import storage as stor
from pypowervm.wrappers import virtual_io_server as vios
def loadf(wcls, fname):
return wcls.wrap(pvmhttp.load_pvm_resp(fname).get_response())
# Load data files just once, since the wrappers will be read-only
vio1 = loadf(vios.VIOS, 'fake_vios_ssp_npiv.txt')
vio2 = loadf(vios.VIOS, 'fake_vios_mappings.txt')
cnafeed1 = loadf(net.CNA, 'cna_feed1.txt')
vswitchfeed = loadf(net.VSwitch, 'vswitch_feed.txt')
vnicfeed = loadf(ioc.VNIC, 'vnic_feed.txt')
class SlotMapTestImplLegacy(slot_map.SlotMapStore):
"""Legacy subclass overriding load/save/delete directly."""
def __init__(self, inst_key, load=True, load_ret=None):
self._load_ret = load_ret
super(SlotMapTestImplLegacy, self).__init__(inst_key, load=load)
def load(self):
return self._load_ret
def save(self):
pass
def delete(self):
pass
class SlotMapTestImpl(slot_map.SlotMapStore):
"""New-style subclass overriding _load/_save/_delete."""
def __init__(self, inst_key, load=True, load_ret=None):
self._load_ret = load_ret
super(SlotMapTestImpl, self).__init__(inst_key, load=load)
def _load(self, key):
return self._load_ret
def _save(self, key, blob):
pass
def _delete(self, key):
pass
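# Illustrative sketch (not from pypowervm): a SlotMapStore subclass that keeps
# serialized blobs in an in-memory dict, using the same _load/_save/_delete
# hooks that SlotMapTestImpl above stubs out.
class DictBackedSlotMapStore(slot_map.SlotMapStore):
    _blobs = {}

    def _load(self, key):
        return self._blobs.get(key)

    def _save(self, key, blob):
        self._blobs[key] = blob

    def _delete(self, key):
        self._blobs.pop(key, None)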
class TestSlotMapStoreLegacy(testtools.TestCase):
"""Test slot_map.SlotMapStore with a legacy impl."""
def __init__(self, *args, **kwargs):
"""Initialize with a legacy SlotMapStore implementation."""
super(TestSlotMapStoreLegacy, self).__init__(*args, **kwargs)
self.smt_impl = SlotMapTestImplLegacy
def test_ioclass_consts(self):
"""Make sure the IOCLASS constants are disparate."""
constl = [key for key in dir(slot_map.IOCLASS) if not
key.startswith('_')]
self.assertEqual(len(constl), len(set(constl)))
def test_init_calls_load(self):
"""Ensure SlotMapStore.__init__ calls load or not based on the parm."""
with mock.patch.object(self.smt_impl, 'load') as mock_load:
mock_load.return_value = None
loads = self.smt_impl('foo')
mock_load.assert_called_once_with()
self.assertEqual('foo', loads.inst_key)
mock_load.reset_mock()
doesnt_load = self.smt_impl('bar', load=False)
self.assertEqual('bar', doesnt_load.inst_key)
mock_load.assert_not_called()
@mock.patch('pickle.loads')
def test_init_deserialize(self, mock_unpickle):
"""Ensure __init__ deserializes or not based on what's loaded."""
# By default, load returns None, so nothing to unpickle
doesnt_unpickle = self.smt_impl('foo')
mock_unpickle.assert_not_called()
self.assertEqual({}, doesnt_unpickle.topology)
unpickles = self.smt_impl('foo', load_ret='abc123')
mock_unpickle.assert_called_once_with('abc123')
self.assertEqual(mock_unpickle.return_value, unpickles.topology)
@mock.patch('pickle.dumps')
@mock.patch('pypowervm.tasks.slot_map.SlotMapStore.topology',
new_callable=mock.PropertyMock)
def test_serialized(self, mock_topo, mock_pickle):
"""Validate the serialized property."""
mock_pickle.return_value = 'abc123'
smt = self.smt_impl('foo')
self.assertEqual('abc123', smt.serialized)
mock_pickle.assert_called_once_with(mock_topo.return_value, protocol=2)
mock_topo.assert_called_once()
@mock.patch('pypowervm.wrappers.managed_system.System.get')
@mock.patch('pypowervm.wrappers.network.VSwitch.get')
def test_vswitch_id2name(self, mock_vsw_get, mock_sys_get):
"""Ensure _vswitch_id2name caches, and gets the right content."""
mock_vsw_get.return_value = vswitchfeed
mock_sys_get.return_value = ['sys']
smt = self.smt_impl('foo')
# We didn't cache yet
mock_vsw_get.assert_not_called()
mock_sys_get.assert_not_called()
map1 = smt._vswitch_id2name('adap')
# Now we grabbed the REST data
mock_vsw_get.assert_called_once_with('adap', parent='sys')
mock_sys_get.assert_called_once_with('adap')
mock_vsw_get.reset_mock()
mock_sys_get.reset_mock()
map2 = smt._vswitch_id2name('adap2')
# The same data is returned each time
self.assertEqual(map2, map1)
# The second call didn't re-fetch from REST
mock_vsw_get.assert_not_called()
mock_sys_get.assert_not_called()
# Make sure the data is in the right shape
self.assertEqual({0: 'ETHERNET0', 1: 'MGMTSWITCH'}, map1)
@mock.patch('pypowervm.wrappers.managed_system.System.get')
@mock.patch('pypowervm.wrappers.network.VSwitch.get')
@mock.patch('warnings.warn')
def test_register_cna(self, mock_warn, mock_vsw_get, mock_sys_get):
"""Test deprecated register_cna."""
mock_vsw_get.return_value = vswitchfeed
mock_sys_get.return_value = ['sys']
smt = self.smt_impl('foo')
for cna in cnafeed1:
smt.register_cna(cna)
self.assertEqual({3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}},
smt.topology)
# The vswitch_map is cached in the slot_map, so these only get
# called once
self.assertEqual(mock_vsw_get.call_count, 1)
self.assertEqual(mock_sys_get.call_count, 1)
self.assertEqual(mock_warn.call_count, 3)
@mock.patch('warnings.warn')
def test_drop_cna(self, mock_warn):
"""Test deprecated drop_cna."""
smt = self.smt_impl('foo')
smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}
# Drop the first CNA and verify it was removed
smt.drop_cna(cnafeed1[0])
self.assertEqual({4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}},
smt.topology)
# Drop all remaining CNAs, including a redundant drop on index 0
for cna in cnafeed1:
smt.drop_cna(cna)
self.assertEqual({}, smt.topology)
self.assertEqual(mock_warn.call_count, 4)
@mock.patch('pypowervm.wrappers.managed_system.System.get')
@mock.patch('pypowervm.wrappers.network.VSwitch.get')
def test_register_vnet(self, mock_vsw_get, mock_sys_get):
"""Test register_vnet."""
mock_vsw_get.return_value = vswitchfeed
mock_sys_get.return_value = ['sys']
smt = self.smt_impl('foo')
for vnic in vnicfeed:
smt.register_vnet(vnic)
for cna in cnafeed1:
smt.register_vnet(cna)
self.assertEqual({3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}},
7: {'VNIC': {'AE7A25E59A07': None}},
8: {'VNIC': {'AE7A25E59A08': None}}},
smt.topology)
# The vswitch_map is cached in the slot_map, so these only get
# called once
self.assertEqual(mock_vsw_get.call_count, 1)
self.assertEqual(mock_sys_get.call_count, 1)
def test_register_vnet_exception(self):
"""Test register_vnet raises exception without CNA or VNIC."""
smt = self.smt_impl('foo')
self.assertRaises(pv_e.InvalidVirtualNetworkDeviceType,
smt.register_vnet, None)
def test_drop_vnet(self):
"""Test drop_vnet."""
smt = self.smt_impl('foo')
smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}},
7: {'VNIC': {'AE7A25E59A07': None}},
8: {'VNIC': {'AE7A25E59A08': None}}}
        # Drop the first CNA and VNIC and verify they were removed
smt.drop_vnet(cnafeed1[0])
smt.drop_vnet(vnicfeed[0])
self.assertEqual({4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}},
8: {'VNIC': {'AE7A25E59A08': None}}},
smt.topology)
# Drop all remaining VNICs
for vnic in vnicfeed:
smt.drop_vnet(vnic)
self.assertEqual({4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}},
smt.topology)
# Drop all remaining CNAs
for cna in cnafeed1:
smt.drop_vnet(cna)
self.assertEqual({}, smt.topology)
def test_drop_vnet_exception(self):
"""Test drop_vnet raises exception without CNA or VNIC."""
smt = self.smt_impl('foo')
self.assertRaises(pv_e.InvalidVirtualNetworkDeviceType,
smt.drop_vnet, None)
def test_register_vfc_mapping(self):
"""Test register_vfc_mapping."""
smt = self.smt_impl('foo')
i = 1
for vio in (vio1, vio2):
for vfcmap in vio.vfc_mappings:
smt.register_vfc_mapping(vfcmap, 'fab%d' % i)
i += 1
self.assertEqual({3: {'VFC': {'fab1': None, 'fab10': None,
'fab11': None, 'fab12': None,
'fab13': None, 'fab14': None,
'fab15': None, 'fab16': None,
'fab17': None, 'fab18': None,
'fab19': None, 'fab20': None,
'fab21': None, 'fab22': None,
'fab23': None, 'fab24': None,
'fab25': None, 'fab26': None,
'fab28': None, 'fab29': None,
'fab3': None, 'fab30': None,
'fab31': None, 'fab32': None,
'fab33': None, 'fab4': None,
'fab5': None, 'fab6': None,
'fab7': None, 'fab8': None,
'fab9': None}},
6: {'VFC': {'fab2': None}},
8: {'VFC': {'fab27': None}}}, smt.topology)
def test_drop_vfc_mapping(self):
"""Test drop_vfc_mapping."""
# Init data to test with
mock_server_adapter = mock.Mock(lpar_slot_num=3)
vfcmap = mock.Mock(server_adapter=mock_server_adapter)
smt = self.smt_impl('foo')
smt._slot_topo = {3: {'VFC': {'fab1': None, 'fab10': None,
'fab7': None, 'fab8': None,
'fab9': None}},
6: {'VFC': {'fab2': None}},
8: {'VFC': {'fab27': None}}}
# Drop a single slot entry and verify it is removed
smt.drop_vfc_mapping(vfcmap, 'fab1')
self.assertEqual({3: {'VFC': {'fab10': None,
'fab7': None, 'fab8': None,
'fab9': None}},
6: {'VFC': {'fab2': None}},
8: {'VFC': {'fab27': None}}},
smt.topology)
# Drop remaining LPAR 3 slot entries and verify they are removed
for i in range(7, 11):
smt.drop_vfc_mapping(vfcmap, 'fab%s' % str(i))
self.assertEqual({6: {'VFC': {'fab2': None}},
8: {'VFC': {'fab27': None}}},
smt.topology)
def test_register_vscsi_mappings(self):
"""Test register_vscsi_mappings."""
smt = self.smt_impl('foo')
for vio in (vio1, vio2):
for vscsimap in vio.scsi_mappings:
smt.register_vscsi_mapping(vscsimap)
self.assertEqual(
{2: {'LU': {'274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771'
'd6a32accde003': '0x8500000000000000',
'274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec'
'14327771522b0': '0x8300000000000000',
'274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247'
'56e9713a93f90': '0x8400000000000000',
'274d7bb790666211e3bc1a00006cae8b01c96f590914bccbc8b7b'
'88c37165c0485': '0x8200000000000000'},
'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA'
'wNTJBOQ==': '0x8600000000000000'},
'VDisk': {'0300004c7a00007a00000001466c54110f.16':
'0x8100000000000000'},
'VOptMedia': {
'0evopt_19bbb46ad15747d79fe08f8464466144':
'vopt_19bbb46ad15747d79fe08f8464466144',
'0evopt_2c7aa01349714368a3d040bb0d613a67':
'vopt_2c7aa01349714368a3d040bb0d613a67',
'0evopt_2e51e8b4b9f04b159700e654b2436a01':
'vopt_2e51e8b4b9f04b159700e654b2436a01',
'0evopt_84d7bfcf44964f398e60254776b94d41':
'vopt_84d7bfcf44964f398e60254776b94d41',
'0evopt_de86c46e07004993b412c948bd5047c2':
'vopt_de86c46e07004993b412c948bd5047c2'}},
3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1':
'0x8700000000000000'}},
65535: {'PV': {'01M0lCTUZsYXNoU3lzdGVtLTk4NDA2MDA1MDc2ODA5OEIxMEI'
'4MDgwMDAwMDAwNTAwMDAzMA==': '0x81000000000'
'00000'}}},
smt.topology)
def test_drop_vscsi_mappings(self):
"""Test drop_vscsi_mappings."""
# Init objects to test with
bstor = mock.Mock(stor.LU,
udid='274d7bb790666211e3bc1a00006cae8b01c96f59091'
'4bccbc8b7b88c37165c0485')
mock_server_adapter = mock.Mock(lpar_slot_num=2)
vscsimap = mock.Mock(backing_storage=bstor,
server_adapter=mock_server_adapter)
smt = self.smt_impl('foo')
smt._slot_topo = {
2: {'LU': {'274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771'
'd6a32accde003': None,
'274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec'
'14327771522b0': None,
'274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247'
'56e9713a93f90': None,
'274d7bb790666211e3bc1a00006cae8b01c96f590914bccbc8b7b'
'88c37165c0485': None},
'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA'
'wNTJBOQ==': None},
'VDisk': {'0300004c7a00007a00000001466c54110f.16':
'0x8100000000000000'},
'VOptMedia': {
'0evopt_19bbb46ad15747d79fe08f8464466144':
'vopt_19bbb46ad15747d79fe08f8464466144',
'0evopt_2c7aa01349714368a3d040bb0d613a67':
'vopt_2c7aa01349714368a3d040bb0d613a67',
'0evopt_2e51e8b4b9f04b159700e654b2436a01':
'vopt_2e51e8b4b9f04b159700e654b2436a01',
'0evopt_84d7bfcf44964f398e60254776b94d41':
'vopt_84d7bfcf44964f398e60254776b94d41',
'0evopt_de86c46e07004993b412c948bd5047c2':
'vopt_de86c46e07004993b412c948bd5047c2'}},
3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1':
'0x8700000000000000'}}
}
# Remove a single LU entry and verify it was removed
smt.drop_vscsi_mapping(vscsimap)
self.assertEqual(
{2: {'LU': {'274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771'
'd6a32accde003': None,
'274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec'
'14327771522b0': None,
'274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247'
'56e9713a93f90': None},
'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA'
'wNTJBOQ==': None},
'VDisk': {'0300004c7a00007a00000001466c54110f.16':
'0x8100000000000000'},
'VOptMedia': {
'0evopt_19bbb46ad15747d79fe08f8464466144':
'vopt_19bbb46ad15747d79fe08f8464466144',
'0evopt_2c7aa01349714368a3d040bb0d613a67':
'vopt_2c7aa01349714368a3d040bb0d613a67',
'0evopt_2e51e8b4b9f04b159700e654b2436a01':
'vopt_2e51e8b4b9f04b159700e654b2436a01',
'0evopt_84d7bfcf44964f398e60254776b94d41':
'vopt_84d7bfcf44964f398e60254776b94d41',
'0evopt_de86c46e07004993b412c948bd5047c2':
'vopt_de86c46e07004993b412c948bd5047c2'}},
3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1':
'0x8700000000000000'}}},
smt.topology)
# Remove all other LPAR 2 LU entries and verify they are removed
udids = ['274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771'
'd6a32accde003',
'274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec'
'14327771522b0',
'274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247'
'56e9713a93f90']
for udid in udids:
bstor.udid = udid
smt.drop_vscsi_mapping(vscsimap)
self.assertEqual(
{2: {'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA'
'wNTJBOQ==': None},
'VDisk': {'0300004c7a00007a00000001466c54110f.16':
'0x8100000000000000'},
'VOptMedia': {
'0evopt_19bbb46ad15747d79fe08f8464466144':
'vopt_19bbb46ad15747d79fe08f8464466144',
'0evopt_2c7aa01349714368a3d040bb0d613a67':
'vopt_2c7aa01349714368a3d040bb0d613a67',
'0evopt_2e51e8b4b9f04b159700e654b2436a01':
'vopt_2e51e8b4b9f04b159700e654b2436a01',
'0evopt_84d7bfcf44964f398e60254776b94d41':
'vopt_84d7bfcf44964f398e60254776b94d41',
'0evopt_de86c46e07004993b412c948bd5047c2':
'vopt_de86c46e07004993b412c948bd5047c2'}},
3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1':
'0x8700000000000000'}}},
smt.topology)
@mock.patch('pypowervm.wrappers.managed_system.System.get')
@mock.patch('pypowervm.wrappers.network.VSwitch.get')
def test_serialize_unserialize(self, mock_vsw_get, mock_sys_get):
"""Ensure that saving/loading doesn't corrupt the data."""
mock_vsw_get.return_value = vswitchfeed
mock_sys_get.return_value = ['sys']
# Set up a nice, big, complicated source slot map
smt1 = self.smt_impl('foo')
for cna in cnafeed1:
smt1.register_vnet(cna)
for vnic in vnicfeed:
smt1.register_vnet(vnic)
i = 1
for vio in (vio1, vio2):
for vscsimap in vio.scsi_mappings:
smt1.register_vscsi_mapping(vscsimap)
for vfcmap in vio.vfc_mappings:
smt1.register_vfc_mapping(vfcmap, 'fab%d' % i)
i += 1
# Serialize, and make a new slot map that loads that serialized data
smt2 = self.smt_impl('bar', load_ret=smt1.serialized)
# Ensure their topologies are identical
self.assertEqual(smt1.topology, smt2.topology)
def test_max_vslots(self):
"""Test setting/getting the max_vslots."""
smt = self.smt_impl('foo')
# Starts off unset
self.assertIsNone(smt.max_vslots)
# Can assign initially
smt.register_max_vslots(123)
self.assertEqual(123, smt.max_vslots)
# Can overwrite
smt.register_max_vslots(234)
self.assertEqual(234, smt.max_vslots)
# Can throw other stuff in there
i = 1
for vio in (vio1, vio2):
for vfcmap in vio.vfc_mappings:
smt.register_vfc_mapping(vfcmap, 'fab%d' % i)
i += 1
# max_vslots still set
self.assertEqual(234, smt.max_vslots)
# Topology not polluted by max_vslots
self.assertEqual({3: {'VFC': {'fab1': None, 'fab10': None,
'fab11': None, 'fab12': None,
'fab13': None, 'fab14': None,
'fab15': None, 'fab16': None,
'fab17': None, 'fab18': None,
'fab19': None, 'fab20': None,
'fab21': None, 'fab22': None,
'fab23': None, 'fab24': None,
'fab25': None, 'fab26': None,
'fab28': None, 'fab29': None,
'fab3': None, 'fab30': None,
'fab31': None, 'fab32': None,
'fab33': None, 'fab4': None,
'fab5': None, 'fab6': None,
'fab7': None, 'fab8': None,
'fab9': None}},
6: {'VFC': {'fab2': None}},
8: {'VFC': {'fab27': None}}}, smt.topology)
class TestSlotMapStore(TestSlotMapStoreLegacy):
"""Test slot_map.SlotMapStore with a new-style impl."""
def __init__(self, *args, **kwargs):
"""Initialize with a new-style SlotMapStore implementation."""
super(TestSlotMapStore, self).__init__(*args, **kwargs)
self.smt_impl = SlotMapTestImpl
self.load_meth_nm = '_load'
def test_init_calls_load(self):
"""Ensure SlotMapStore.__init__ calls load or not based on the parm.
This overrides the legacy test of the same name to ensure that _load
gets invoked properly.
"""
with mock.patch.object(self.smt_impl, '_load') as mock_load:
mock_load.return_value = None
loads = self.smt_impl('foo')
mock_load.assert_called_once_with('foo')
self.assertEqual('foo', loads.inst_key)
mock_load.reset_mock()
doesnt_load = self.smt_impl('bar', load=False)
self.assertEqual('bar', doesnt_load.inst_key)
mock_load.assert_not_called()
@mock.patch('pypowervm.tasks.slot_map.SlotMapStore.serialized',
new_callable=mock.PropertyMock)
def test_save_when_needed(self, mock_ser):
"""Overridden _save call invoked only when needed."""
with mock.patch.object(self.smt_impl, '_save') as mock_save:
smt = self.smt_impl('foo')
smt.save()
# Nothing changed yet
mock_save.assert_not_called()
smt.register_vfc_mapping(vio1.vfc_mappings[0], 'fabric')
# Not yet...
mock_save.assert_not_called()
smt.save()
# Now it's been called.
mock_save.assert_called_once_with('foo', mock_ser.return_value)
mock_save.reset_mock()
# Saving again has no effect
smt.save()
mock_save.assert_not_called()
# Verify it works on drop too
smt.drop_vfc_mapping(vio1.vfc_mappings[0], 'fabric')
mock_save.assert_not_called()
smt.save()
# Now it's been called.
mock_save.assert_called_once_with('foo', mock_ser.return_value)
mock_save.reset_mock()
# Saving again has no effect
smt.save()
mock_save.assert_not_called()
def test_delete(self):
"""Overridden _delete is called properly when delete is invoked."""
with mock.patch.object(self.smt_impl, '_delete') as mock_delete:
smt = self.smt_impl('foo')
smt.delete()
mock_delete.assert_called_once_with('foo')
class TestRebuildSlotMapLegacy(testtools.TestCase):
"""Test for RebuildSlotMap class with legacy SlotMapStore subclass.
Tests BuildSlotMap class's get methods as well.
"""
def __init__(self, *args, **kwargs):
"""Initialize with a particular SlotMapStore implementation."""
super(TestRebuildSlotMapLegacy, self).__init__(*args, **kwargs)
self.smt_impl = SlotMapTestImplLegacy
def setUp(self):
super(TestRebuildSlotMapLegacy, self).setUp()
self.vio1 = mock.Mock(uuid='vios1')
self.vio2 = mock.Mock(uuid='vios2')
def test_get_mgmt_vea_slot(self):
smt = self.smt_impl('foo')
# Make sure it returns the next slot available
smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'VFC': {'fab1': None}}}
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None,
['fab1'])
self.assertEqual((None, 7), rsm.get_mgmt_vea_slot())
# Second call should return the same slot, as there is only one mgmt
# vif per VM
self.assertEqual((None, 7), rsm.get_mgmt_vea_slot())
# Make sure it returns the existing MGMT switch
smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, [])
self.assertEqual(('3AEAC528A7E3', 6), rsm.get_mgmt_vea_slot())
# Make sure it returns None if there is no real data
smt._slot_topo = {}
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, [])
self.assertEqual((None, None), rsm.get_mgmt_vea_slot())
def test_vea_build_out(self):
"""Test _vea_build_out."""
# Create a slot topology that will be converted to a rebuild map
smt = self.smt_impl('foo')
smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}
# Run the actual test
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {})
# Verify rebuild map was created successfully
self.assertEqual(
{'CNA': {'2A2E57A4DE9C': 4, '5E372CFD9E6D': 3},
'MGMTCNA': {'mac': '3AEAC528A7E3', 'slot': 6}},
rsm._build_map)
# Verify the VEA slot can be read by MAC address
self.assertEqual(3, rsm.get_vea_slot('5E372CFD9E6D'))
self.assertEqual(4, rsm.get_vea_slot('2A2E57A4DE9C'))
self.assertEqual(None, rsm.get_vea_slot('3AEAC528A7E3'))
self.assertEqual(('3AEAC528A7E3', 6), rsm.get_mgmt_vea_slot())
def test_vnic_build_out(self):
"""Test _vnic_build_out."""
smt = self.smt_impl('foo')
smt._slot_topo = {5: {'VNIC': {'72AB8C392CD6': None}},
6: {'VNIC': {'111111111111': None}},
7: {'VNIC': {'45F16A97BC7E': None}}}
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {})
self.assertEqual(
{'VNIC': {'72AB8C392CD6': 5,
'111111111111': 6,
'45F16A97BC7E': 7}},
rsm._build_map)
self.assertEqual(5, rsm.get_vnet_slot('72AB8C392CD6'))
self.assertEqual(6, rsm.get_vnet_slot('111111111111'))
self.assertEqual(7, rsm.get_vnet_slot('45F16A97BC7E'))
def test_max_vslots(self):
"""Ensure max_vslots returns the set value, or 10 + highest slot."""
# With max_vslots unset and nothing in the topology...
smt = self.smt_impl('foo')
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {})
# ...max_vslots defaults to 64
self.assertEqual(lb.DEF_MAX_SLOT, rsm.get_max_vslots())
# When unset, and the highest registered slot is small...
smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {})
# ...max_vslots still defaults to 64
self.assertEqual(lb.DEF_MAX_SLOT, rsm.get_max_vslots())
# When unset, and the highest registered slot is big...
smt._slot_topo = {62: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}},
4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}},
6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {})
# ...max_vslots derives to 10 + highest
self.assertEqual(72, rsm.get_max_vslots())
# With max_vslots set, even if it's lower than 64...
smt.register_max_vslots(23)
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {})
# ...max_vslots returns the exact value
self.assertEqual(23, rsm.get_max_vslots())
def test_rebuild_fails_w_vopt(self):
"""Test RebuildSlotMap fails when a Vopt exists in topology."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_W_VOPT
self.assertRaises(
pv_e.InvalidHostForRebuildInvalidIOType,
slot_map.RebuildSlotMap, smt,
[self.vio1, self.vio2], VOL_TO_VIO1, {})
def test_rebuild_w_vdisk(self):
"""Test RebuildSlotMap deterministic."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_W_VDISK
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2],
VOL_TO_VIO1, {})
# Deterministic. vios1 gets slot 1
for udid in rsm._build_map['VDisk']['vios1']:
slot, lua = rsm.get_vscsi_slot(self.vio1, udid)
self.assertEqual(1, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_W_VDISK[slot][slot_map.IOCLASS.VDISK][udid],
lua)
# Deterministic. vios2 gets slot 2
for udid in rsm._build_map['VDisk']['vios2']:
slot, lua = rsm.get_vscsi_slot(self.vio2, udid)
self.assertEqual(2, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_W_VDISK[slot][slot_map.IOCLASS.VDISK][udid],
lua)
# The build map won't actually have these as keys but
# the get should return None nicely.
slot, lua = rsm.get_vscsi_slot(self.vio1, 'vd_udid3')
self.assertIsNone(slot)
def test_lu_vscsi_build_out_1(self):
"""Test RebuildSlotMap deterministic."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_LU_1
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2],
VOL_TO_VIO1, {})
# Deterministic. vios1 gets slot 1
for udid in rsm._build_map['LU']['vios1']:
slot, lua = rsm.get_vscsi_slot(self.vio1, udid)
self.assertEqual(1, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_LU_1[slot][slot_map.IOCLASS.LU][udid], lua)
# Deterministic. vios2 gets slot 2
for udid in rsm._build_map['LU']['vios2']:
slot, lua = rsm.get_vscsi_slot(self.vio2, udid)
self.assertEqual(2, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_LU_1[slot][slot_map.IOCLASS.LU][udid], lua)
# The build map won't actually have these as keys but
# the get should return None nicely.
slot, lua = rsm.get_vscsi_slot(self.vio1, 'lu_udid4')
self.assertIsNone(slot)
slot, lua = rsm.get_vscsi_slot(self.vio2, 'lu_udid2')
self.assertIsNone(slot)
def test_pv_vscsi_build_out_1(self):
"""Test RebuildSlotMap deterministic."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_PV_1
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2],
VOL_TO_VIO1, {})
# Deterministic. vios1 gets slot 1
for udid in rsm._build_map['PV']['vios1']:
self.assertEqual(
1, rsm.get_pv_vscsi_slot(self.vio1, udid))
slot, lua = rsm.get_vscsi_slot(self.vio1, udid)
self.assertEqual(1, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_PV_1[slot][slot_map.IOCLASS.PV][udid], lua)
# Deterministic. vios2 gets slot 2
for udid in rsm._build_map['PV']['vios2']:
self.assertEqual(
2, rsm.get_pv_vscsi_slot(self.vio2, udid))
slot, lua = rsm.get_vscsi_slot(self.vio2, udid)
self.assertEqual(2, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_PV_1[slot][slot_map.IOCLASS.PV][udid], lua)
# The build map won't actually have these as keys but
# the get should return None nicely.
self.assertIsNone(
rsm.get_pv_vscsi_slot(self.vio1, 'pv_udid4'))
self.assertIsNone(
rsm.get_pv_vscsi_slot(self.vio2, 'pv_udid2'))
def test_mix_vscsi_build_out_1(self):
"""Test RebuildSlotMap deterministic."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_MIX_1
rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2],
VOL_TO_VIO1, {})
# Deterministic. vios1 gets slot 1
for udid in rsm._build_map['PV']['vios1']:
slot, lua = rsm.get_vscsi_slot(self.vio1, udid)
self.assertEqual(1, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_MIX_1[slot][slot_map.IOCLASS.PV][udid], lua)
for udid in rsm._build_map['LU']['vios1']:
slot, lua = rsm.get_vscsi_slot(self.vio1, udid)
self.assertEqual(1, slot)
# Make sure we got the right LUA for this UDID
self.assertEqual(SCSI_MIX_1[slot][slot_map.IOCLASS.LU][udid], lua)
# The build map won't actually have these as keys but
# the get should return None nicely.
slot, lua = rsm.get_vscsi_slot(self.vio2, 'lu_udid2')
self.assertIsNone(slot)
slot, lua = rsm.get_vscsi_slot(self.vio2, 'pv_udid2')
self.assertIsNone(slot)
def test_vscsi_build_out_arbitrary_dest_vioses(self):
"""Test RebuildSlotMap with multiple candidate dest VIOSes."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_ARB_MAP
rsm = slot_map.RebuildSlotMap(
smt, [self.vio1, self.vio2], VTV_2V_ARB, {})
        # Since this isn't deterministic, we want to make sure each UDID
        # got its slot assigned to one VIOS and not the other.
expected_map = {'lu_udid1': 47, 'pv_udid2': 9, 'lu_udid3': 23,
'pv_udid4': 56}
for udid, eslot in six.iteritems(expected_map):
aslot1, lua1 = rsm.get_vscsi_slot(self.vio1, udid)
aslot2, lua2 = rsm.get_vscsi_slot(self.vio2, udid)
if aslot1 is None:
self.assertEqual(eslot, aslot2)
if SCSI_ARB_MAP[eslot].get(slot_map.IOCLASS.LU):
self.assertEqual(
SCSI_ARB_MAP[eslot][slot_map.IOCLASS.LU][udid], lua2)
else:
self.assertEqual(
SCSI_ARB_MAP[eslot][slot_map.IOCLASS.PV][udid], lua2)
else:
self.assertEqual(eslot, aslot1)
self.assertIsNone(aslot2)
if SCSI_ARB_MAP[eslot].get(slot_map.IOCLASS.LU):
self.assertEqual(
SCSI_ARB_MAP[eslot][slot_map.IOCLASS.LU][udid], lua1)
else:
self.assertEqual(
SCSI_ARB_MAP[eslot][slot_map.IOCLASS.PV][udid], lua1)
def test_vscsi_build_out_full_coverage(self):
"""Test rebuild with 2 slots per udid and 2 candidate VIOSes."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_PV_2S_2V_MAP
rsm = slot_map.RebuildSlotMap(
smt, [self.vio1, self.vio2], VTV_2V_ARB, {})
expected_map = {'lu_udid1': [5, 23], 'pv_udid2': [6, 24],
'lu_udid3': [7, 25], 'pv_udid4': [8, 26]}
# We know what slots the UDIDs should get but not what VIOSes they'll
# belong to. So we'll assert that one VIOS gets 1 slot and the other
# VIOS gets the other for each UDID.
for udid, (eslot1, eslot2) in six.iteritems(expected_map):
if rsm.get_pv_vscsi_slot(self.vio1, udid) != eslot1:
self.assertEqual(
eslot1, rsm.get_pv_vscsi_slot(self.vio2, udid))
self.assertEqual(
eslot2, rsm.get_pv_vscsi_slot(self.vio1, udid))
else:
# We already know vio1 got the first slot
self.assertEqual(
eslot2, rsm.get_pv_vscsi_slot(self.vio2, udid))
aslot1, lua1 = rsm.get_vscsi_slot(self.vio1, udid)
aslot2, lua2 = rsm.get_vscsi_slot(self.vio2, udid)
if eslot1 == aslot1:
self.assertEqual(eslot2, aslot2)
self.assertEqual(
SCSI_PV_2S_2V_MAP[eslot1][slot_map.IOCLASS.PV][udid], lua1)
self.assertEqual(
SCSI_PV_2S_2V_MAP[eslot2][slot_map.IOCLASS.PV][udid], lua2)
else:
self.assertEqual(eslot1, aslot2)
self.assertEqual(eslot2, aslot1)
self.assertEqual(
SCSI_PV_2S_2V_MAP[eslot1][slot_map.IOCLASS.PV][udid], lua2)
self.assertEqual(
SCSI_PV_2S_2V_MAP[eslot2][slot_map.IOCLASS.PV][udid], lua1)
def test_pv_udid_not_found_on_dest(self):
"""Test RebuildSlotMap fails when UDID not found on dest."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_PV_3
self.assertRaises(
pv_e.InvalidHostForRebuildNoVIOSForUDID, slot_map.RebuildSlotMap,
smt, [self.vio1, self.vio2], BAD_VOL_TO_VIO_FOR_PV_3, {})
def test_more_pv_udids_than_dest_vioses_fails(self):
"""Test RebuildSlotMap fails when there's not enough VIOSes."""
smt = self.smt_impl('foo')
smt._slot_topo = SCSI_PV_1
self.assertRaises(
pv_e.InvalidHostForRebuildNotEnoughVIOS, slot_map.RebuildSlotMap,
smt, [self.vio1, self.vio2], VOL_TO_VIO_1_VIOS_PV1, {})
def test_npiv_build_out(self):
"""Test _npiv_build_out."""
# Create a topology that will be converted to a rebuild map
smt = self.smt_impl('foo')
vios1 = mock.Mock()
vios1.get_pfc_wwpns = mock.Mock(return_value=['wwpn1'])
vios2 = mock.Mock()
vios2.get_pfc_wwpns = mock.Mock(return_value=['wwpn2'])
smt._slot_topo = {
3: {'VFC': {'fab1': None}}, 4: {'VFC': {'fab7': None}},
5: {'VFC': {'fab10': None}}, 6: {'VFC': {'fab8': None}},
7: {'VFC': {'fab9': None}}, 8: {'VFC': {'fab9': None}},
9: {'VFC': {'fab1': None}}, 10: {'VFC': {'fab9': None}},
11: {'VFC': {'fab1': None}}, 12: {'VFC': {'fab7': None}},
113: {'VFC': {'fab7': None}}, 114: {'VFC': {'fab7': None}}}
# Run the actual test and verify an exception is raised
self.assertRaises(
pv_e.InvalidHostForRebuildFabricsNotFound, slot_map.RebuildSlotMap,
smt, [vios1, vios2], None, ['fab1'])
# Run the actual test
fabrics = ['fab1', 'fab2', 'fab7', 'fab8', 'fab9', 'fab10', 'fab27']
rsm = slot_map.RebuildSlotMap(smt, [vios1, vios2], None, fabrics)
# Verify rebuild map was created successfully
self.assertEqual({'VFC': {'fab1': [3, 9, 11], 'fab10': [5], 'fab2': [],
'fab27': [], 'fab7': [4, 12, 113, 114],
'fab8': [6], 'fab9': [7, 8, 10]}},
rsm._build_map)
# Verify the getters return the slots correctly
self.assertEqual([3, 9, 11], rsm.get_vfc_slots('fab1', 3))
self.assertEqual([4, 12, 113, 114], rsm.get_vfc_slots('fab7', 4))
self.assertEqual([6], rsm.get_vfc_slots('fab8', 1))
self.assertEqual([7, 8, 10], rsm.get_vfc_slots('fab9', 3))
self.assertEqual([5], rsm.get_vfc_slots('fab10', 1))
self.assertEqual([], rsm.get_vfc_slots('fab2', 0))
self.assertEqual([], rsm.get_vfc_slots('fab27', 0))
# Check None paths
self.assertEqual([], rsm.get_vfc_slots('badfab', 0))
self.assertEqual([None], rsm.get_vfc_slots('badfab', 1))
self.assertEqual([None, None], rsm.get_vfc_slots('badfab', 2))
# Check error path.
self.assertRaises(pv_e.InvalidHostForRebuildSlotMismatch,
rsm.get_vfc_slots, 'fab1', 2)
class TestRebuildSlotMap(TestRebuildSlotMapLegacy):
"""Test for RebuildSlotMap class with new-style SlotMapStore subclass.
Tests BuildSlotMap class's get methods as well.
"""
def __init__(self, *args, **kwargs):
"""Initialize with a particular SlotMapStore implementation."""
super(TestRebuildSlotMap, self).__init__(*args, **kwargs)
self.smt_impl = SlotMapTestImpl
SCSI_W_VOPT = {
1: {
slot_map.IOCLASS.VOPT: {
slot_map.IOCLASS.VOPT: 'vopt_name'
},
slot_map.IOCLASS.PV: {
'pv_udid1': 'pv_lua_1',
'pv_udid2': 'pv_lua_2'
}
}
}
SCSI_W_VDISK = {
1: {
slot_map.IOCLASS.VDISK: {
'vd_udid1': 'vd_lua_1',
'vd_udid2': 'vd_lua_2'
},
slot_map.IOCLASS.PV: {
'pv_udid1': 'pv_lua_1',
'pv_udid2': 'pv_lua_2'
}
},
2: {
slot_map.IOCLASS.VDISK: {
'vd_udid1': 'vd_lua_1',
'vd_udid2': 'vd_lua_2'
}
}
}
SCSI_LU_1 = {
1: {
slot_map.IOCLASS.LU: {
'lu_udid1': 'lu_lua_1',
'lu_udid2': 'lu_lua_2',
'lu_udid3': 'lu_lua_3'
}
},
2: {
slot_map.IOCLASS.LU: {
'lu_udid1': 'lu_lua_1',
'lu_udid3': 'lu_lua_3',
'lu_udid4': 'lu_lua_4'
}
}
}
SCSI_PV_1 = {
1: {
slot_map.IOCLASS.PV: {
'pv_udid1': 'pv_lua_1',
'pv_udid2': 'pv_lua_2',
'pv_udid3': 'pv_lua_3'
}
},
2: {
slot_map.IOCLASS.PV: {
'pv_udid1': 'pv_lua_1',
'pv_udid3': 'pv_lua_3',
'pv_udid4': 'pv_lua_4'
}
}
}
SCSI_MIX_1 = {
1: {
slot_map.IOCLASS.LU: {
'lu_udid1': 'lu_lua_1',
'lu_udid2': 'lu_lua_2'
},
slot_map.IOCLASS.PV: {
'pv_udid1': 'pv_lua_1',
'pv_udid2': 'pv_lua_2'
}
}
}
SCSI_ARB_MAP = {
47: {
slot_map.IOCLASS.LU: {
'lu_udid1': 'lu_lua_1'
}
},
9: {
slot_map.IOCLASS.PV: {
'pv_udid2': 'pv_lua_2'
}
},
23: {
slot_map.IOCLASS.LU: {
'lu_udid3': 'lu_lua_3'
}
},
56: {
slot_map.IOCLASS.PV: {
'pv_udid4': 'pv_lua_4'
}
}
}
SCSI_PV_2S_2V_MAP = {
5: {
slot_map.IOCLASS.PV: {
'lu_udid1': 'pv_lua_1'
}
},
6: {
slot_map.IOCLASS.PV: {
'pv_udid2': 'pv_lua_2'
}
},
7: {
slot_map.IOCLASS.PV: {
'lu_udid3': 'pv_lua_3'
}
},
8: {
slot_map.IOCLASS.PV: {
'pv_udid4': 'pv_lua_4'
}
},
23: {
slot_map.IOCLASS.PV: {
'lu_udid1': 'pv_lua_1'
}
},
24: {
slot_map.IOCLASS.PV: {
'pv_udid2': 'pv_lua_2'
}
},
25: {
slot_map.IOCLASS.PV: {
'lu_udid3': 'pv_lua_3'
}
},
26: {
slot_map.IOCLASS.PV: {
'pv_udid4': 'pv_lua_4'
}
}
}
SCSI_PV_3 = {
23: {
slot_map.IOCLASS.PV: {
'pv_udid1': 'pv_lua_1'
}
},
12: {
slot_map.IOCLASS.PV: {
'pv_udid2': 'pv_lua_2'
}
},
4: {
slot_map.IOCLASS.PV: {
'pv_udid3': 'pv_lua_3'
}
}
}
BAD_VOL_TO_VIO_FOR_PV_3 = {
'pv_udid1': [
'vios1',
'vios2'
],
'pv_udid2': [
'vios1',
'vios2'
]
}
VOL_TO_VIO1 = {
'lu_udid1': [
'vios1',
'vios2'
],
'lu_udid2': [
'vios1'
],
'lu_udid3': [
'vios1',
'vios2'
],
'lu_udid4': [
'vios2'
],
'pv_udid1': [
'vios1',
'vios2'
],
'pv_udid2': [
'vios1'
],
'pv_udid3': [
'vios1',
'vios2'
],
'pv_udid4': [
'vios2'
],
'vd_udid1': [
'vios1',
'vios2'
],
'vd_udid2': [
'vios1',
'vios2'
]
}
VOL_TO_VIO2 = {
'pv_udid1': [
'vios1',
'vios2'
],
'pv_udid2': [
'vios1'
],
'pv_udid3': [
'vios1',
'vios2'
],
'pv_udid4': [
'vios2'
]
}
VOL_TO_VIO_1_VIOS_PV1 = {
'pv_udid1': [
'vios1'
],
'pv_udid2': [
'vios1'
],
'pv_udid3': [
'vios1'
],
'pv_udid4': [
'vios1'
]
}
VTV_2V_ARB = {
'lu_udid1': [
'vios1',
'vios2'
],
'pv_udid2': [
'vios1',
'vios2'
],
'lu_udid3': [
'vios1',
'vios2'
],
'pv_udid4': [
'vios1',
'vios2'
]
}
|
|
"""Copyright 2010 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, ServerConnectArgs, ServerDisconnectArgs
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetLibrary import PhidgetLibrary
import sys
class Manager:
"""This class represents a Phidget Manager.
    The Phidget Manager is a way to keep track of attached Phidgets;
    it will send Attach and Detach events as Phidgets are added to and removed from the system.
The Phidget manager deals in base Phidget objects.
These objects are not actually connected to opened Phidgets but can be used
to get serial number, name, version, etc.
"""
def __init__(self):
"""The Constructor Method for the Manager Class
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If the Phidget Manager handle cannot be created.
"""
self.handle = c_void_p()
self.__attach = None
self.__detach = None
self.__error = None
self.__serverConnect = None
self.__serverDisconnect = None
self.__onAttach = None
self.__onDetach = None
self.__onError = None
self.__onServerConnect = None
self.__onServerDisconnect = None
if sys.platform == 'win32':
self.__ATTACHHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p)
self.__DETACHHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p)
self.__ERRORHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_char_p)
self.__SERVERATTACHHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p)
self.__SERVERDETACHHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p)
elif sys.platform == 'darwin' or sys.platform == 'linux2':
self.__ATTACHHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p)
self.__DETACHHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p)
self.__ERRORHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_char_p)
self.__SERVERATTACHHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p)
self.__SERVERDETACHHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p)
try:
result = PhidgetLibrary.getDll().CPhidgetManager_create(byref(self.handle))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __del__(self):
try:
result = PhidgetLibrary.getDll().CPhidgetManager_delete(self.handle)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
self.handle = None
def __nativeAttachEvent(self, handle, usrptr):
phid = Phidget()
phid.handle = c_void_p(handle)
if self.__attach != None:
self.__attach(AttachEventArgs(phid))
return 0
def setOnAttachHandler(self, attachHandler):
"""Set the Attach event handler.
        The attach handler is a method that will be called when a Phidget is physically attached to the system,
        and has gone through its initialization, and so is ready to be used.
Parameters:
attachHandler: hook to the attachHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if attachHandler == None:
self.__attach = None
self.__onAttach = None
else:
self.__attach = attachHandler
self.__onAttach = self.__ATTACHHANDLER(self.__nativeAttachEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetManager_set_OnAttach_Handler(self.handle, self.__onAttach, None)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __nativeDetachEvent(self, handle, usrptr):
phid = Phidget()
phid.handle = c_void_p(handle)
if self.__detach != None:
self.__detach(DetachEventArgs(phid))
return 0
def setOnDetachHandler(self, detachHandler):
"""Set the Detach event handler.
        The detach handler is a method that will be called when a Phidget is physically detached from the system, and is no longer available.
Parameters:
detachHandler: hook to the detachHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if detachHandler == None:
self.__detach = None
self.__onDetach = None
else:
self.__detach = detachHandler
self.__onDetach = self.__DETACHHANDLER(self.__nativeDetachEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetManager_set_OnDetach_Handler(self.handle, self.__onDetach, None)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __nativeErrorEvent(self, handle, usrptr, errorCode, errorMessage):
if self.__error != None:
            # ctypes delivers c_int/c_char_p callback arguments as plain Python
            # values, so no .value access is needed here.
            code = errorCode
            message = errorMessage
            self.__error(ErrorEventArgs(self, message, code))
return 0
def setOnErrorHandler(self, errorHandler):
"""Sets the error event handler.
The error handler is a method that will be called when an asynchronous error occurs.
Error events are not currently used, but will be in the future to report any problems that happen out of context from a direct function call.
Parameters:
errorHandler: hook to the errorHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if errorHandler == None:
self.__error = None
self.__onError = None
else:
self.__error = errorHandler
self.__onError = self.__ERRORHANDLER(self.__nativeErrorEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetManager_set_OnError_Handler(self.handle, self.__onError, None)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __nativeServerConnectEvent(self, handle, usrptr):
if self.__serverConnect != None:
self.__serverConnect(ServerConnectArgs(self))
return 0
def setOnServerConnectHandler(self, serverConnectHandler):
"""Sets the ServerConnect event handler
        The serverConnect handler is a method that will be called when a connection to a server is made.
Parameters:
serverConnectHandler: hook to the serverConnectHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if serverConnectHandler == None:
self.__serverConnect = None
self.__onServerConnect = None
else:
self.__serverConnect = serverConnectHandler
self.__onServerConnect = self.__SERVERATTACHHANDLER(self.__nativeServerConnectEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetManager_set_OnServerConnect_Handler(self.handle, self.__onServerConnect, None)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def __nativeServerDisconnectEvent(self, handle, usrptr):
if self.__serverDisconnect != None:
self.__serverDisconnect(ServerConnectArgs(self))
return 0
def setOnServerDisconnectHandler(self, serverDisconnectHandler):
"""Sets the ServerDisconnect event handler.
The serverDisconnect handler is a method that will be called when a connection to a server is terminated.
Parameters:
serverDisconnectHandler: hook to the serverDisconnectHandler callback function.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
if serverDisconnectHandler == None:
self.__serverDisconnect = None
self.__onServerDisconnect = None
else:
self.__serverDisconnect = serverDisconnectHandler
self.__onServerDisconnect = self.__SERVERDETACHHANDLER(self.__nativeServerDisconnectEvent)
try:
result = PhidgetLibrary.getDll().CPhidgetManager_set_OnServerDisconnect_Handler(self.handle, self.__onServerDisconnect, None)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getAttachedDevices(self):
"""Returns a list of Phidgets attached to the host computer.
This list is updated right before the attach and detach events, and so will be up to date within these events.
Returns:
The list of attached phidgets <array of Phidget objects>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
devices = []
count = c_int()
listptr = pointer(c_void_p())
try:
result = PhidgetLibrary.getDll().CPhidgetManager_getAttachedDevices(self.handle, byref(listptr), byref(count))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
for i in range(count.value):
phid = Phidget()
devicePtr = c_void_p(listptr[i])
phid.handle = devicePtr
devices.append(phid)
return devices
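    # Hedged example of inspecting the attached-device list (assumes the manager has
    # already been opened; handle_device is a hypothetical application function):
    #
    #     for phid in manager.getAttachedDevices():
    #         # each entry is a base Phidget object; serial number, name, version, etc.
    #         # can be queried through the Phidget class, per the class docstring above
    #         handle_device(phid)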
def openManager(self):
"""Starts the PhidgetManager.
This method starts the phidget manager running in the base Phidget21 C library.
If attach and detach listeners are to be used, they should be registered before start is called so that no events are missed.
Once start is called, the Phidget Manager will be active until close is called.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException
"""
try:
result = PhidgetLibrary.getDll().CPhidgetManager_open(self.handle)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def openRemote(self, serverID, password=""):
"""Open this Manager remotely using a Server ID, and securely using a Password.
        ServerID can be NULL to get a listing of all Phidgets on all Servers.
This password can be set as a parameter when starting the Phidget Webservice.
The use of a password is optional and calling the function without providing a password will
connect normally.
Parameters:
serverID<string>: ServerID of the Phidget Webservice.
password<string>: The secure password for the Phidget Webservice.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: if the Phidget Webservice cannot be contacted
"""
try:
result = PhidgetLibrary.getDll().CPhidgetManager_openRemote(self.handle, c_char_p(serverID), c_char_p(password))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def openRemoteIP(self, IPAddress, port, password=""):
"""Open this Manager remotely using an IP Address and port, and securely using a password.
This password can be set as a parameter when starting the Phidget Webservice.
The use of a password is optional and calling the function without providing a password will
connect normally.
Parameters:
IPAddress<string>: IP Address or hostname of the Phidget Webservice
port<int>: Port of the Phidget Webservice
password<string>: The secure password for the Phidget Webservice
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: if the Phidget Webservice cannot be contacted
"""
try:
result = PhidgetLibrary.getDll().CPhidgetManager_openRemoteIP(self.handle, c_char_p(IPAddress), c_int(port), c_char_p(password))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
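    # Hedged remote-open sketch (the address, port, server ID and password are placeholders;
    # a running Phidget Webservice is assumed):
    #
    #     manager.openRemoteIP('192.168.1.10', 5001, password='secret')
    #     # or, by server ID:
    #     manager.openRemote('my-server-id', password='secret')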
def closeManager(self):
"""Shuts down the Phidget Manager.
This method should be called to close down the Phidget Manager.
        Events will no longer be received. This method gets called automatically when the class is destroyed, so calling it is not required.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this manager is not opened.
"""
try:
result = PhidgetLibrary.getDll().CPhidgetManager_close(self.handle)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getServerID(self):
"""Returns the Server ID of a Phidget Webservice when this Manager was opened as remote.
        This is an arbitrary server identifier, independent of IP address and Port.
Returns:
The serverID <string>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: If this manager is not opened or opened remotely.
"""
serverID = c_char_p()
try:
result = PhidgetLibrary.getDll().CPhidgetManager_getServerID(self.handle, byref(serverID))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return serverID.value
def getServerAddress(self):
"""Returns the Address of a Phidget Webservice when this Manager was opened as remote.
This may be an IP Address or a hostname.
Returns:
The server address for the webservice <string>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: if the Manager was not opened or opened remotely.
"""
serverAddr = c_char_p()
port = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetManager_getServerAddress(self.handle, byref(serverAddr), byref(port))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return serverAddr.value
def getServerPort(self):
"""Returns the Port of a Phidget Webservice when this Manager was opened as remote.
Returns:
The server port for the webservice.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: if the Manager was not opened or opened remotely.
"""
serverAddr = c_char_p()
port = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetManager_getServerAddress(self.handle, byref(serverAddr), byref(port))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
return port.value
def isAttachedToServer(self):
"""Returns the network attached status for remotely opened Phidgets.
        This method returns True or False, depending on whether a connection to the Phidget WebService is open.
If this is false for a remote Phidget then the connection is not active - either because a connection has not yet been established,
or because the connection was terminated.
Returns:
The attached status <boolean>.
Exceptions:
RuntimeError - If current platform is not supported/phidget c dll cannot be found
PhidgetException: if the Manager was not opened or opened remotely.
"""
serverStatus = c_int()
try:
result = PhidgetLibrary.getDll().CPhidgetManager_getServerStatus(self.handle, byref(serverStatus))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
else:
if serverStatus.value == 1:
return True
else:
return False
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import StringIO
from pytest import raises
from aspen.website import Website
from aspen.http.response import Response
from aspen.exceptions import BadLocation
simple_error_spt = """
[---]
[---] text/plain via stdlib_format
{response.body}
"""
# Tests
# =====
def test_basic():
website = Website()
expected = os.getcwd()
actual = website.www_root
assert actual == expected
def test_normal_response_is_returned(harness):
harness.fs.www.mk(('index.html', "Greetings, program!"))
expected = '\r\n'.join("""\
HTTP/1.1
Content-Type: text/html
Greetings, program!
""".splitlines())
actual = harness.client.GET()._to_http('1.1')
assert actual == expected
def test_fatal_error_response_is_returned(harness):
harness.fs.www.mk(('index.html.spt', "[---]\nraise heck\n[---]\n"))
expected = 500
actual = harness.client.GET(raise_immediately=False).code
assert actual == expected
def test_redirect_has_only_location(harness):
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
website.redirect('http://elsewhere', code=304)
[---]"""))
actual = harness.client.GET(raise_immediately=False)
assert actual.code == 304
headers = actual.headers
assert headers.keys() == ['Location']
def test_nice_error_response_is_returned(harness):
harness.short_circuit = False
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(500)
[---]"""))
assert harness.client.GET(raise_immediately=False).code == 500
def test_nice_error_response_is_returned_for_404(harness):
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404)
[---]"""))
assert harness.client.GET(raise_immediately=False).code == 404
def test_response_body_doesnt_expose_traceback_by_default(harness):
harness.fs.project.mk(('error.spt', simple_error_spt))
harness.fs.www.mk(('index.html.spt', """
[---]
raise Exception("Can I haz traceback ?")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 500
assert "Can I haz traceback ?" not in response.body
def test_response_body_exposes_traceback_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.project.mk(('error.spt', simple_error_spt))
harness.fs.www.mk(('index.html.spt', """
[---]
raise Exception("Can I haz traceback ?")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 500
assert "Can I haz traceback ?" in response.body
def test_default_error_simplate_doesnt_expose_raised_body_by_default(harness):
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404, "Um, yeah.")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 404
assert "Um, yeah." not in response.body
def test_default_error_simplate_exposes_raised_body_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404, "Um, yeah.")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 404
assert "Um, yeah." in response.body
def test_nice_error_response_can_come_from_user_error_spt(harness):
harness.fs.project.mk(('error.spt', '[---]\n[---] text/plain\nTold ya.'))
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(420)
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 420
assert response.body == 'Told ya.'
def test_nice_error_response_can_come_from_user_420_spt(harness):
harness.fs.project.mk(('420.spt', """
[---]
msg = "Enhance your calm." if response.code == 420 else "Ok."
[---] text/plain
%(msg)s"""))
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(420)
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 420
assert response.body == 'Enhance your calm.'
def test_delegate_error_to_simplate_respects_original_accept_header(harness):
harness.fs.project.mk(('error.spt', """[---]
[---] text/fake
Lorem ipsum
[---] text/html
<p>Error</p>
[---] text/plain
Error
"""))
harness.fs.www.mk(('foo.spt',"""
from aspen import Response
[---]
raise Response(404)
[---] text/plain
"""))
response = harness.client.GET('/foo', raise_immediately=False, HTTP_ACCEPT=b'text/fake')
assert response.code == 404
assert 'text/fake' in response.headers['Content-Type']
def test_default_error_spt_handles_text_html(harness):
harness.fs.www.mk(('foo.html.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.html', raise_immediately=False)
assert response.code == 404
assert 'text/html' in response.headers['Content-Type']
def test_default_error_spt_handles_application_json(harness):
harness.fs.www.mk(('foo.json.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.json', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'application/json'
assert response.body == '''\
{ "error_code": 404
, "error_message_short": "Not Found"
, "error_message_long": ""
}
'''
def test_default_error_spt_application_json_includes_msg_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.www.mk(('foo.json.spt',"""
from aspen import Response
[---]
raise Response(404, "Right, sooo...")
[---]
"""))
response = harness.client.GET('/foo.json', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'application/json'
assert response.body == '''\
{ "error_code": 404
, "error_message_short": "Not Found"
, "error_message_long": "Right, sooo..."
}
'''
def test_default_error_spt_falls_through_to_text_plain(harness):
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
assert response.body == "Not found, program!\n\n"
def test_default_error_spt_fall_through_includes_msg_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404, "Try again!")
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
assert response.body == "Not found, program!\nTry again!\n"
def test_custom_error_spt_without_text_plain_results_in_406(harness):
harness.fs.project.mk(('error.spt', """
[---]
[---] text/html
<h1>Oh no!</h1>
"""))
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 406
def test_custom_error_spt_with_text_plain_works(harness):
harness.fs.project.mk(('error.spt', """
[---]
[---] text/plain
Oh no!
"""))
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
assert response.body == "Oh no!\n"
def test_autoindex_response_is_404_by_default(harness):
harness.fs.www.mk(('README', "Greetings, program!"))
assert harness.client.GET(raise_immediately=False).code == 404
def test_autoindex_response_is_returned(harness):
harness.fs.www.mk(('README', "Greetings, program!"))
harness.client.website.list_directories = True
body = harness.client.GET(raise_immediately=False).body
assert 'README' in body
def test_resources_can_import_from_project_root(harness):
harness.fs.project.mk(('foo.py', 'bar = "baz"'))
harness.fs.www.mk(('index.html.spt', "from foo import bar\n[---]\n[---]\nGreetings, %(bar)s!"))
assert harness.client.GET(raise_immediately=False).body == "Greetings, baz!"
def test_non_500_response_exceptions_dont_get_folded_to_500(harness):
harness.fs.www.mk(('index.html.spt', '''
from aspen import Response
[---]
raise Response(400)
[---]
'''))
response = harness.client.GET(raise_immediately=False)
assert response.code == 400
def test_errors_show_tracebacks(harness):
harness.fs.www.mk(('index.html.spt', '''
from aspen import Response
[---]
website.show_tracebacks = 1
raise Response(400,1,2,3,4,5,6,7,8,9)
[---]
'''))
response = harness.client.GET(raise_immediately=False)
assert response.code == 500
assert 'Response(400,1,2,3,4,5,6,7,8,9)' in response.body
class TestMiddleware(object):
"""Simple WSGI middleware for testing."""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
if environ['PATH_INFO'] == '/middleware':
start_response('200 OK', [('Content-Type', 'text/plain')])
return ['TestMiddleware']
return self.app(environ, start_response)
def build_environ(path):
"""Build WSGI environ for testing."""
return {
'REQUEST_METHOD': b'GET',
'PATH_INFO': path,
'QUERY_STRING': b'',
'SERVER_SOFTWARE': b'build_environ/1.0',
'SERVER_PROTOCOL': b'HTTP/1.1',
'wsgi.input': StringIO.StringIO()
}
def test_call_wraps_wsgi_middleware(client):
client.website.algorithm.default_short_circuit = False
client.website.wsgi_app = TestMiddleware(client.website.wsgi_app)
respond = [False, False]
def start_response_should_404(status, headers):
assert status.lower().strip() == '404 not found'
respond[0] = True
client.website(build_environ('/'), start_response_should_404)
assert respond[0]
def start_response_should_200(status, headers):
assert status.lower().strip() == '200 ok'
respond[1] = True
client.website(build_environ('/middleware'), start_response_should_200)
assert respond[1]
# redirect
def test_redirect_redirects(website):
assert raises(Response, website.redirect, '/').value.code == 302
def test_redirect_code_is_settable(website):
assert raises(Response, website.redirect, '/', code=8675309).value.code == 8675309
def test_redirect_permanent_is_301(website):
assert raises(Response, website.redirect, '/', permanent=True).value.code == 301
def test_redirect_without_website_base_url_is_fine(website):
assert raises(Response, website.redirect, '/').value.headers['Location'] == '/'
def test_redirect_honors_website_base_url(website):
website.base_url = 'foo'
assert raises(Response, website.redirect, '/').value.headers['Location'] == 'foo/'
def test_redirect_can_override_base_url_per_call(website):
website.base_url = 'foo'
assert raises(Response, website.redirect, '/', base_url='b').value.headers['Location'] == 'b/'
def test_redirect_declines_to_construct_bad_urls(website):
raised = raises(BadLocation, website.redirect, '../foo', base_url='http://www.example.com')
assert raised.value.body == 'Bad redirect location: http://www.example.com../foo'
def test_redirect_declines_to_construct_more_bad_urls(website):
raised = raises(BadLocation, website.redirect, 'http://www.example.org/foo',
base_url='http://www.example.com')
assert raised.value.body == 'Bad redirect location: '\
'http://www.example.comhttp://www.example.org/foo'
def test_redirect_will_construct_a_good_absolute_url(website):
response = raises(Response, website.redirect, '/foo', base_url='http://www.example.com').value
assert response.headers['Location'] == 'http://www.example.com/foo'
def test_redirect_will_allow_a_relative_path(website):
response = raises(Response, website.redirect, '../foo', base_url='').value
assert response.headers['Location'] == '../foo'
def test_redirect_will_allow_an_absolute_url(website):
response = raises(Response, website.redirect, 'http://www.example.org/foo', base_url='').value
assert response.headers['Location'] == 'http://www.example.org/foo'
def test_redirect_can_use_given_response(website):
response = Response(65, 'Greetings, program!', {'Location': 'A Town'})
response = raises(Response, website.redirect, '/flah', response=response).value
assert response.code == 302 # gets clobbered
assert response.headers['Location'] == '/flah' # gets clobbered
assert response.body == 'Greetings, program!' # not clobbered
# canonicalize_base_url
def test_canonicalize_base_url_canonicalizes_base_url(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://example.com')
response = harness.client.GxT()
assert response.code == 302
assert response.headers['Location'] == 'http://example.com/'
def test_canonicalize_base_url_includes_path_and_qs_for_GET(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://example.com')
response = harness.client.GxT('/foo/bar?baz=buz')
assert response.code == 302
assert response.headers['Location'] == 'http://example.com/foo/bar?baz=buz'
def test_canonicalize_base_url_redirects_to_homepage_for_POST(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://example.com')
response = harness.client.PxST('/foo/bar?baz=buz')
assert response.code == 302
assert response.headers['Location'] == 'http://example.com/'
def test_canonicalize_base_url_allows_good_base_url(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://localhost')
response = harness.client.GET()
assert response.code == 200
assert response.body == 'Greetings, program!'
def test_canonicalize_base_url_is_noop_without_base_url(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website()
response = harness.client.GET()
assert response.code == 200
assert response.body == 'Greetings, program!'
|
|
###############################################################################
# copyright 2012, Marouen Mechtri ([email protected]) #
# Institut Mines-Telecom - TELECOM & Management SudParis #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Implementation of category actions
import sys
import pycompdev
import pypacksrc
srcdirectory=pypacksrc.srcpydir+"/pyaccords/pysrc/"
srcdirectoryc=pypacksrc.srcpydir+"/cocarrier/src/"
sys.path.append(srcdirectory)
sys.path.append(srcdirectoryc)
from intercloudGWClass import *
from actionClass import *
from client import OCCIclient
""" Note:respAction is a python class to describe the occi response with the status and the message
intercloudGW is a python class to interface the accords category :intercloudGW.
-Attributes of this category are members of this class.
-List of attributes:
- name
- node
- account
- price
- state
"""
def intercloudGW_start(intercloudGW):
response=respAction("200","ok")
"""Implement here your function"""
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print(' Receiving action START intercloudGW ')
#---------------------------------------------------intercloudGW---------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle intercloudGW category """
attributePUB= {
'what': 'intercloudGW'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
tempintercloudGW={
'name':intercloudGW.name,
'node':intercloudGW.node,
'account':intercloudGW.account,
'price': intercloudGW.price,
'state':intercloudGW.state,
}
gwOCCI = OCCIclient(publication['host'], publication['port'], 'CO-NETS-PAAS', 'intercloudGW', tempintercloudGW)
uuidintercloudGW=gwOCCI.Get()[0].replace(' ','')
#-------------------------------------------------contract---------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle contract category """
attributePUB= {
'what': 'contract'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
attribute = {
'provider': uuidintercloudGW,
'profile': 'intercloudGW',
'node':intercloudGW.node,
}
client = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', 'contract', attribute)
listinstruction=client.GetLinks(client.GetElement_pathuuid(client.Get()[0])['uuid'])
#print listinstruction
#------------------------------------------------Instruction-------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle instruction category """
attributePUB= {
'what': 'instruction'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
attribute = {}
client = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', 'instruction', attribute)
listNodeToConnect = []
for item in listinstruction:
nodetoConnect = client.GetElement_pathuuid(item)
client.host = nodetoConnect['host']
client.port = nodetoConnect['port']
client.category = nodetoConnect['category']
listNodeToConnect.append(client.GetElement(nodetoConnect['uuid'])['occi.instruction.source'])
listlinkgw = []
for i in range(len(listNodeToConnect)):
for j in range(i+1,len(listNodeToConnect)):
listlinkgw.append( {
'name': intercloudGW.name,
'intercloudGW': intercloudGW.node,
'account': intercloudGW.account,
'gwsrc': '',
'gwdst': '',
'tunnelproto': 'gre',
'addressgresrc': '10.3.3.1',
'addressgredst': '10.3.3.2',
'prefix': '30',
'authenticationkey': 'test_key_1',
'endpointsrc': listNodeToConnect[i],
'endpointdst': listNodeToConnect[j],
'state': '0',
})
#------------------------------------------------linkgw------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle linkgw category """
attributePUB= {
'what': 'linkgw'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
listuuidlinkgw = []
for i in listlinkgw:
conetsOCCI = OCCIclient(publication['host'], publication['port'], 'CO-NETS-PAAS', 'linkgw', i)
uuidlinkgw = conetsOCCI.GetElement_pathuuid(conetsOCCI.Post())['uuid']
listuuidlinkgw.append(uuidlinkgw)
print('Sending Post to linkgw')
for item in listuuidlinkgw:
print('Sending START to linkgw')
conetsOCCI.action(item, 'start')
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
tempintercloudGW={
'name':intercloudGW.name,
'node':intercloudGW.node,
'account':intercloudGW.account,
'price':intercloudGW.price,
'state': intercloudGW.state,
}
attribute = {
'state': '1',
}
conetsOCCI.category = 'intercloudGW'
conetsOCCI.attributes = tempintercloudGW
uuidintercloudGW=conetsOCCI.Get()
conetsOCCI.attributes=attribute
conetsOCCI.Put(conetsOCCI.GetElement_pathuuid(uuidintercloudGW[0])['uuid'])
print('Changing the state of the intercloudGW to 1')
return response
def intercloudGW_stop(intercloudGW):
response=respAction("200","ok")
"""Implement here your function"""
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print(' Receiving action STOP intercloudGW ')
#------------------------------------------------linkgw------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle linkgw category """
attributePUB= {
'what': 'linkgw'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
attribute = {
'intercloudGW': intercloudGW.node,
'state': '1',
}
conetsOCCI = OCCIclient(publication['host'], publication['port'], 'CO-NETS-PAAS', 'linkgw', attribute)
listInstance = conetsOCCI.Get()
for item in listInstance:
linkgws = conetsOCCI.GetElement_pathuuid(item)
conetsOCCI.host = linkgws['host']
conetsOCCI.port = linkgws['port']
conetsOCCI.category = linkgws['category']
linkgws = conetsOCCI.GetElement(linkgws['uuid'])
print('Sending STOP to linkgw')
conetsOCCI.action(linkgws['occi.core.id'], 'stop')
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
tempintercloudGW={
'name':intercloudGW.name,
'node':intercloudGW.node,
'account':intercloudGW.account,
'price':intercloudGW.price,
'state': intercloudGW.state,
}
attribute = {
'state': '0',
}
conetsOCCI.category = 'intercloudGW'
conetsOCCI.attributes = tempintercloudGW
uuidintercloudGW=conetsOCCI.Get()
conetsOCCI.attributes=attribute
conetsOCCI.Put(conetsOCCI.GetElement_pathuuid(uuidintercloudGW[0])['uuid'])
print('Changing the state of the intercloudGW to 0')
return response
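

#----------------------------------------------------------------------------#
# Illustrative sketch (not part of the original module): the repeated
# "Determine the server which handles the <X> category" blocks above all
# follow the same publication-lookup pattern against the CO-PUB broker.
# Assuming only the OCCIclient methods already used above (Get, GetElement,
# GetElement_pathuuid), the pattern could be factored out roughly as follows;
# the helper name is hypothetical.
#----------------------------------------------------------------------------#
def _locate_category_server(category):
    """Return the publication (host/port/uuid dict) of the server handling `category`."""
    client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', {'what': category})
    publication = client.GetElement_pathuuid(client.Get()[0])
    client.host = publication['host']
    client.port = publication['port']
    return client.GetElement_pathuuid(
        client.GetElement(publication['uuid'])['occi.publication.why'])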
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD300 Model Configuration.
References:
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
SSD: Single Shot MultiBox Detector
arXiv:1512.02325
Ported from MLPerf reference implementation:
https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import re
import threading
import tensorflow as tf
import constants
import mlperf
import ssd_constants
from cnn_util import log_fn
from models import model as model_lib
from models import resnet_model
BACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'
class SSD300Model(model_lib.CNNModel):
"""Single Shot Multibox Detection (SSD) model for 300x300 image datasets."""
def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,
learning_rate=1e-3, backbone='resnet34', params=None):
super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,
params=params)
# For COCO dataset, 80 categories + 1 background = 81 labels
self.label_num = label_num
    # Currently only supports ResNet-34 as the backbone model
if backbone != 'resnet34':
raise ValueError('Invalid backbone model %s for SSD.' % backbone)
mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)
# Number of channels and default boxes associated with the following layers:
# ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2
self.out_chan = [256, 512, 512, 256, 256, 256]
mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,
value=self.out_chan)
# Number of default boxes from layers of different scales
# 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
self.num_dboxes = [4, 6, 6, 6, 4, 4]
mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,
value=self.num_dboxes)
# TODO(haoyuzhang): in order to correctly restore in replicated mode, need
# to create a saver for each tower before graph is finalized. Use variable
# manager for better efficiency.
self.backbone_savers = []
# Collected predictions for eval stage. It maps each image id in eval
# dataset to a dict containing the following information:
# source_id: raw ID of image
# raw_shape: raw shape of image
# pred_box: encoded box coordinates of prediction
# pred_scores: scores of classes in prediction
self.predictions = {}
# Global step when predictions are collected.
self.eval_global_step = 0
# Average precision. In asynchronous eval mode, this is the latest AP we
# get so far and may not be the results at current eval step.
self.eval_coco_ap = 0
# Process, queues, and thread for asynchronous evaluation. When enabled,
    # create a separate process (async_eval_process) that continuously pulls
    # intermediate results from the predictions queue (a multiprocessing queue),
    # processes them, and pushes final results into the results queue (another
    # multiprocessing queue). The main thread is responsible for pushing
    # messages into the predictions queue, and starts a separate thread to
    # continuously pull messages from the results queue to update final results.
# Message in predictions queue should be a tuple of two elements:
# (evaluation step, predictions)
# Message in results queue should be a tuple of two elements:
# (evaluation step, final results)
self.async_eval_process = None
self.async_eval_predictions_queue = None
self.async_eval_results_queue = None
self.async_eval_results_getter_thread = None
# The MLPerf reference uses a starting lr of 1e-3 at bs=32.
self.base_lr_batch_size = 32
def skip_final_affine_layer(self):
return True
def add_backbone_model(self, cnn):
# --------------------------------------------------------------------------
# Resnet-34 backbone model -- modified for SSD
# --------------------------------------------------------------------------
# Input 300x300, output 150x150
cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)
cnn.mpool(3, 3, 2, 2, mode='SAME')
resnet34_layers = [3, 4, 6, 3]
version = 'v1'
# ResNet-34 block group 1
# Input 150x150, output 75x75
for i in range(resnet34_layers[0]):
# Last argument forces residual_block to use projection shortcut, even
# though the numbers of input and output channels are equal
resnet_model.residual_block(cnn, 64, 1, version)
# ResNet-34 block group 2
# Input 75x75, output 38x38
for i in range(resnet34_layers[1]):
stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 128, stride, version, i == 0)
# ResNet-34 block group 3
# This block group is modified: first layer uses stride=1 so that the image
# size does not change in group of layers
# Input 38x38, output 38x38
for i in range(resnet34_layers[2]):
# The following line is intentionally commented out to differentiate from
# the original ResNet-34 model
# stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 256, stride, version, i == 0)
# ResNet-34 block group 4: removed final block group
    # The following 3 lines are intentionally commented out to differentiate from
# the original ResNet-34 model
# for i in range(resnet34_layers[3]):
# stride = 2 if i == 0 else 1
# resnet_model.residual_block(cnn, 512, stride, version, i == 0)
def add_inference(self, cnn):
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,
'epsilon': ssd_constants.BATCH_NORM_EPSILON,
'scale': True}
with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):
self.add_backbone_model(cnn)
# --------------------------------------------------------------------------
# SSD additional layers
# --------------------------------------------------------------------------
def add_ssd_layer(cnn, depth, k_size, stride, mode):
return cnn.conv(depth, k_size, k_size, stride, stride,
mode=mode, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
# Activations for feature maps of different layers
self.activations = [cnn.top_layer]
# Conv7_1, Conv7_2
# Input 38x38, output 19x19
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv8_1, Conv8_2
# Input 19x19, output 10x10
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv9_1, Conv9_2
# Input 10x10, output 5x5
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))
# Conv10_1, Conv10_2
# Input 5x5, output 3x3
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
# Conv11_1, Conv11_2
# Input 3x3, output 1x1
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
self.loc = []
self.conf = []
for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):
l = cnn.conv(nd * 4, 3, 3, 1, 1, input_layer=ac,
num_channels_in=oc, activation=None, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
scale = l.get_shape()[-1]
# shape = [batch_size, nd * 4, scale, scale]
l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])
# shape = [batch_size, nd, 4, scale, scale]
l = tf.transpose(l, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, 4]
self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))
# shape = [batch_size, nd * scale * scale, 4]
c = cnn.conv(nd * self.label_num, 3, 3, 1, 1, input_layer=ac,
num_channels_in=oc, activation=None, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
# shape = [batch_size, nd * label_num, scale, scale]
c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])
# shape = [batch_size, nd, label_num, scale, scale]
c = tf.transpose(c, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, label_num]
self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))
# shape = [batch_size, nd * scale * scale, label_num]
# Shape of locs: [batch_size, NUM_SSD_BOXES, 4]
# Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]
locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)
# Pack location and confidence outputs into a single output layer
# Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]
logits = tf.concat([locs, confs], 2)
cnn.top_layer = logits
cnn.top_size = 4 + self.label_num
return cnn.top_layer
def get_learning_rate(self, global_step, batch_size):
rescaled_lr = self.get_scaled_base_learning_rate(batch_size)
# Defined in MLPerf reference model
boundaries = [160000, 200000]
boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]
decays = [1, 0.1, 0.01]
learning_rates = [rescaled_lr * d for d in decays]
lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)
warmup_steps = int(118287 / batch_size * 5)
warmup_lr = (
rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
def get_scaled_base_learning_rate(self, batch_size):
"""Calculates base learning rate for creating lr schedule.
In replicated mode, gradients are summed rather than averaged which, with
the sgd and momentum optimizers, increases the effective learning rate by
lr * num_gpus. Dividing the base lr by num_gpus negates the increase.
Args:
batch_size: Total batch-size.
Returns:
Base learning rate to use to create lr schedule.
"""
base_lr = self.learning_rate
if self.params.variable_update == 'replicated':
base_lr = self.learning_rate / self.params.num_gpus
scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)
return scaled_lr
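  # Worked example (illustrative): with variable_update='parameter_server', a
  # total batch size of 64, and the default base lr of 1e-3 at bs=32, the
  # scaled base lr is 1e-3 * 64 / 32 = 2e-3. The MLPerf boundaries
  # [160000, 200000] are rescaled to [160000 * 32 // 64, 200000 * 32 // 64] =
  # [80000, 100000], giving a piecewise schedule of 2e-3 -> 2e-4 -> 2e-5 after
  # a linear warmup over int(118287 / 64 * 5) = 9241 steps.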
def _collect_backbone_vars(self):
backbone_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)
var_list = {}
# Assume variables in the checkpoint are following the naming convention of
# a model checkpoint trained with TF official model
# TODO(haoyuzhang): the following variable name parsing is hacky and easy
# to break if there is change in naming convention of either benchmarks or
# official models.
for v in backbone_vars:
# conv2d variable example (model <-- checkpoint):
# v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel
if 'conv2d' in v.name:
re_match = re.search(r'conv(\d+)/conv2d/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'conv2d', layer_id, param_name)
var_list[vname_in_ckpt] = v
      # batchnorm variable example:
# v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma
elif 'batchnorm' in v.name:
re_match = re.search(r'batchnorm(\d+)/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'batch_normalization', layer_id, param_name)
var_list[vname_in_ckpt] = v
return var_list
def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):
"""Return variable names according to convention in TF official models."""
vname_in_ckpt = layer_name
if layer_id > 0:
vname_in_ckpt += '_' + str(layer_id)
vname_in_ckpt += '/' + param_name
return vname_in_ckpt
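  # Examples (illustrative): ('conv2d', 24, 'kernel') -> 'conv2d_24/kernel';
  # a layer_id of 0 omits the numeric suffix, e.g. ('conv2d', 0, 'kernel') ->
  # 'conv2d/kernel'.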
def loss_function(self, inputs, build_network_result):
logits = build_network_result.logits
# Unpack model output back to locations and confidence scores of predictions
# Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
# Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
# Shape of num_gt: [batch_size]
_, gt_loc, gt_label, num_gt = inputs
gt_label = tf.cast(gt_label, tf.int32)
box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
class_loss = self._classification_loss(pred_label, gt_label, num_gt)
tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
return class_loss + box_loss
def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):
"""Computes the localization loss.
Computes the localization loss using smooth l1 loss.
Args:
pred_loc: a flatten tensor that includes all predicted locations. The
shape is [batch_size, num_anchors, 4].
gt_loc: a tensor representing box regression targets in
[batch_size, num_anchors, 4].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to a groundtruth
        targets, used as the loss normalizer. The shape is [batch_size].
Returns:
box_loss: a float32 representing total box regression loss.
"""
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(
gt_loc, pred_loc,
reduction=tf.losses.Reduction.NONE
), axis=2)
smooth_l1 = tf.multiply(smooth_l1, float_mask)
box_loss = tf.reduce_sum(smooth_l1, axis=1)
return tf.reduce_mean(box_loss / num_matched_boxes)
def _classification_loss(self, pred_label, gt_label, num_matched_boxes):
"""Computes the classification loss.
Computes the classification loss with hard negative mining.
Args:
pred_label: a flatten tensor that includes all predicted class. The shape
is [batch_size, num_anchors, num_classes].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to a groundtruth
        targets. This is used as the loss normalizer.
Returns:
box_loss: a float32 representing total box regression loss.
"""
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
gt_label, pred_label, reduction=tf.losses.Reduction.NONE)
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
# Hard example mining
neg_masked_cross_entropy = cross_entropy * (1 - float_mask)
relative_position = tf.contrib.framework.argsort(
tf.contrib.framework.argsort(
neg_masked_cross_entropy, direction='DESCENDING'))
num_neg_boxes = tf.minimum(
tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,
ssd_constants.NUM_SSD_BOXES)
top_k_neg_mask = tf.cast(tf.less(
relative_position,
tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))
), tf.float32)
class_loss = tf.reduce_sum(
tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)
return tf.reduce_mean(class_loss / num_matched_boxes)
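  # Note on the hard negative mining above (illustrative): argsort(argsort(x,
  # direction='DESCENDING')) yields each anchor's rank (0 = largest) of its
  # negative-masked loss in descending order. For example, per-anchor losses
  # [0.2, 0.9, 0.5] give a descending argsort of [1, 2, 0], whose argsort is
  # [2, 0, 1], i.e. ranks 2, 0 and 1. Anchors ranked below
  # NEGS_PER_POSITIVE * num_matched_boxes (capped at NUM_SSD_BOXES) are kept
  # as negatives in the loss.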
def add_backbone_saver(self):
# Create saver with mapping from variable names in checkpoint of backbone
# model to variables in SSD model
backbone_var_list = self._collect_backbone_vars()
self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
for saver in self.backbone_savers:
saver.restore(sess, backbone_model_path)
def get_input_data_types(self, subset):
if subset == 'validation':
return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]
return [self.data_type, tf.float32, tf.float32, tf.float32]
def get_input_shapes(self, subset):
"""Return encoded tensor shapes for train and eval data respectively."""
if subset == 'validation':
# Validation data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. source image IDs
# 5. raw image shapes
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],
[self.batch_size],
[self.batch_size, 3],
]
# Training data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. numbers of objects in images
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],
[self.batch_size]
]
def accuracy_function(self, inputs, logits):
"""Returns the ops to measure the mean precision of the model."""
try:
import ssd_dataloader # pylint: disable=g-import-not-at-top
from object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top
from object_detection.core import box_coder # pylint: disable=g-import-not-at-top
from object_detection.core import box_list # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation ; To evaluate using the COCO '
                        'metric, download and install the Python COCO API from '
                        'https://github.com/cocodataset/cocoapi')
# Unpack model output back to locations and confidence scores of predictions
    # pred_locs: relative locations (coordinates) of objects in all SSD boxes
# shape: [batch_size, NUM_SSD_BOXES, 4]
# pred_labels: confidence scores of objects being of all categories
# shape: [batch_size, NUM_SSD_BOXES, label_num]
pred_locs, pred_labels = tf.split(logits, [4, self.label_num], 2)
ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=ssd_constants.BOX_CODER_SCALES)
anchors = box_list.BoxList(
tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))
pred_boxes = box_coder.batch_decode(
encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)
pred_scores = tf.nn.softmax(pred_labels, axis=2)
# TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.
_, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable
return {
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_BOXES): pred_boxes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_SCORES): pred_scores,
# TODO(haoyuzhang): maybe use these values for visualization.
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.SOURCE_ID): source_id,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.RAW_SHAPE): raw_shape
}
def postprocess(self, results):
"""Postprocess results returned from model."""
try:
import coco_metric # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation ; To evaluate using the COCO '
                        'metric, download and install the Python COCO API from '
                        'https://github.com/cocodataset/cocoapi')
pred_boxes = results[ssd_constants.PRED_BOXES]
pred_scores = results[ssd_constants.PRED_SCORES]
# TODO(haoyuzhang): maybe use these values for visualization.
# gt_boxes = results['gt_boxes']
# gt_classes = results['gt_classes']
source_id = results[ssd_constants.SOURCE_ID]
raw_shape = results[ssd_constants.RAW_SHAPE]
    # COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once.
    # Because COCO_NUM_VAL_IMAGES is generally not a multiple of the batch size
    # (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting `num_eval_epochs`
    # to 1 is not enough and will often miss some images. We expect the user to
    # set `num_eval_epochs` to >1, which will leave some unused images from
    # previous steps in `predictions`. Here we check whether we are doing eval
    # at a new global step.
if results['global_step'] > self.eval_global_step:
self.eval_global_step = results['global_step']
self.predictions.clear()
for i, sid in enumerate(source_id):
self.predictions[int(sid)] = {
ssd_constants.PRED_BOXES: pred_boxes[i],
ssd_constants.PRED_SCORES: pred_scores[i],
ssd_constants.SOURCE_ID: source_id[i],
ssd_constants.RAW_SHAPE: raw_shape[i]
}
# COCO metric calculates mAP only after a full epoch of evaluation. Return
    # dummy results for top_N_accuracy to be compatible with benchmark_cnn.py.
if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:
log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(
ssd_constants.COCO_NUM_VAL_IMAGES))
annotation_file = os.path.join(self.params.data_dir,
ssd_constants.ANNOTATION_FILE)
      # The size of predictions before decoding is about 15--30GB, while the
      # size after decoding is 100--200MB. When using async eval mode, decoding
      # takes 20--30 seconds of main-thread time but is necessary to avoid OOM
      # during inter-process communication.
decoded_preds = coco_metric.decode_predictions(self.predictions.values())
self.predictions.clear()
if self.params.collect_eval_results_async:
def _eval_results_getter():
"""Iteratively get eval results from async eval process."""
while True:
step, eval_results = self.async_eval_results_queue.get()
self.eval_coco_ap = eval_results['COCO/AP']
mlperf.logger.log_eval_accuracy(
self.eval_coco_ap, step, self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
if self.reached_target():
# Reached target, clear all pending messages in predictions queue
# and insert poison pill to stop the async eval process.
while not self.async_eval_predictions_queue.empty():
self.async_eval_predictions_queue.get()
self.async_eval_predictions_queue.put('STOP')
break
if not self.async_eval_process:
# Limiting the number of messages in predictions queue to prevent OOM.
# Each message (predictions data) can potentially consume a lot of
# memory, and normally there should only be few messages in the queue.
# If often blocked on this, consider reducing eval frequency.
self.async_eval_predictions_queue = multiprocessing.Queue(2)
self.async_eval_results_queue = multiprocessing.Queue()
          # The reason to use a Process rather than a Thread is that the eval
          # runner is computationally intensive; since Python threads do not
          # truly run in parallel, a runner thread would be significantly
          # delayed (or would delay the main thread).
self.async_eval_process = multiprocessing.Process(
target=coco_metric.async_eval_runner,
args=(self.async_eval_predictions_queue,
self.async_eval_results_queue,
annotation_file))
self.async_eval_process.daemon = True
self.async_eval_process.start()
self.async_eval_results_getter_thread = threading.Thread(
target=_eval_results_getter, args=())
self.async_eval_results_getter_thread.daemon = True
self.async_eval_results_getter_thread.start()
self.async_eval_predictions_queue.put(
(self.eval_global_step, decoded_preds))
return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}
eval_results = coco_metric.compute_map(decoded_preds, annotation_file)
self.eval_coco_ap = eval_results['COCO/AP']
ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
for metric_key, metric_value in eval_results.items():
ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value
mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,
self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
return ret
log_fn('Got {:d} out of {:d} eval examples.'
' Waiting for the remaining to calculate mAP...'.format(
len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))
return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
def get_synthetic_inputs(self, input_name, nclass):
"""Generating synthetic data matching real data shape and type."""
inputs = tf.random_uniform(
self.get_input_shapes('train')[0], dtype=self.data_type)
inputs = tf.contrib.framework.local_variable(inputs, name=input_name)
boxes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)
classes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)
nboxes = tf.random_uniform(
[self.batch_size], minval=1, maxval=10, dtype=tf.float32)
return (inputs, boxes, classes, nboxes)
def reached_target(self):
return (self.params.stop_at_top_1_accuracy and
self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)
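

# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the benchmark code): the asynchronous eval
# plumbing used in SSD300Model.postprocess() reduces to a producer/consumer
# pair of multiprocessing queues with a 'STOP' poison pill. The helper names
# below (_toy_eval_runner, _toy_async_eval_demo) are hypothetical and only
# mirror the (step, data) message format described above.
# ------------------------------------------------------------------------------
def _toy_eval_runner(predictions_queue, results_queue):
  """Consumes (step, predictions) tuples until a 'STOP' poison pill arrives."""
  while True:
    message = predictions_queue.get()
    if message == 'STOP':
      break
    step, _ = message
    # A real runner would compute COCO mAP here; we just report a dummy value.
    results_queue.put((step, {'COCO/AP': 0.0}))


def _toy_async_eval_demo():
  """Feeds two fake prediction batches through the queues and drains results."""
  predictions_queue = multiprocessing.Queue(2)
  results_queue = multiprocessing.Queue()
  runner = multiprocessing.Process(
      target=_toy_eval_runner, args=(predictions_queue, results_queue))
  runner.daemon = True
  runner.start()
  for step in (100, 200):
    predictions_queue.put((step, {'fake': 'predictions'}))
  for _ in range(2):
    step, results = results_queue.get()
    log_fn('eval step %d -> COCO/AP %.3f' % (step, results['COCO/AP']))
  predictions_queue.put('STOP')
  runner.join()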
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
from math import trunc
import itertools
from .utils import amod
from . import gregorian
EPOCH = 584282.5
HAAB_MONTHS = ["Pop", "Wo'", "Zip", "Sotz'", "Sek", "Xul",
"Yaxk'in'", "Mol", "Ch'en", "Yax", "Sak'", "Keh",
"Mak", "K'ank'in", "Muwan'", "Pax", "K'ayab", "Kumk'u", "Wayeb'"]
HAAB_TRANSLATIONS = [
"Mat", "Frog", "Red", "Bat", "Bee", "Dog", "First Sun", "Water", "Cave", "Green",
"White", "Red", "Encloser", "Yellow Sun", "Screech Owl", "Planting Time", "Turtle", "Ripe Corn", "Nameless"]
TZOLKIN_NAMES = ["Imix'", "Ik'", "Ak'b'al", "K'an", "Chikchan",
"Kimi", "Manik'", "Lamat", "Muluk", "Ok",
"Chuwen", "Eb'", "B'en", "Ix", "Men",
"K'ib'", "Kab'an", "Etz'nab'", "Kawak", "Ajaw"]
TZOLKIN_TRANSLATIONS = ['Water', 'Wind', 'Darkness', 'Net', 'Feathered Serpent',
'Death', 'Deer', 'Seed', 'Jade', 'Dog',
'Thread', 'Path', 'Maize', 'Tiger', 'Bird', 'Will',
'Wisdom', 'Obsidian Knife', 'Thunder', 'Sun']
def to_jd(baktun, katun, tun, uinal, kin):
'''Determine Julian day from Mayan long count'''
return EPOCH + (baktun * 144000) + (katun * 7200) + (tun * 360) + (uinal * 20) + kin
def from_jd(jd):
'''Calculate Mayan long count from Julian day'''
d = jd - EPOCH
baktun = trunc(d / 144000)
d = (d % 144000)
katun = trunc(d / 7200)
d = (d % 7200)
tun = trunc(d / 360)
d = (d % 360)
uinal = trunc(d / 20)
kin = int((d % 20))
return (baktun, katun, tun, uinal, kin)
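# Worked example (illustrative): the long count (9, 10, 11, 12, 13) maps to
#     EPOCH + 9*144000 + 10*7200 + 11*360 + 12*20 + 13 = 1956495.5
# and from_jd(1956495.5) peels the mixed-radix digits back off:
#     1372213 // 144000 = 9   (remainder 76213)
#       76213 //   7200 = 10  (remainder 4213)
#        4213 //    360 = 11  (remainder 253)
#         253 //     20 = 12  (remainder 13)
# so to_jd and from_jd are exact inverses of each other.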
def to_gregorian(baktun, katun, tun, uinal, kin):
jd = to_jd(baktun, katun, tun, uinal, kin)
return gregorian.from_jd(jd)
def from_gregorian(year, month, day):
jd = gregorian.to_jd(year, month, day)
return from_jd(jd)
def to_haab(jd):
'''Determine Mayan Haab "month" and day from Julian day'''
# Number of days since the start of the long count
lcount = trunc(jd) + 0.5 - EPOCH
# Long Count begins 348 days after the start of the cycle
day = (lcount + 348) % 365
count = day % 20
month = trunc(day / 20)
return int(count), HAAB_MONTHS[month]
def to_tzolkin(jd):
'''Determine Mayan Tzolkin "month" and day from Julian day'''
lcount = trunc(jd) + 0.5 - EPOCH
day = amod(lcount + 4, 13)
name = amod(lcount + 20, 20)
return int(day), TZOLKIN_NAMES[int(name) - 1]
def lc_to_haab(baktun, katun, tun, uinal, kin):
jd = to_jd(baktun, katun, tun, uinal, kin)
return to_haab(jd)
def lc_to_tzolkin(baktun, katun, tun, uinal, kin):
jd = to_jd(baktun, katun, tun, uinal, kin)
return to_tzolkin(jd)
def lc_to_haab_tzolkin(baktun, katun, tun, uinal, kin):
jd = to_jd(baktun, katun, tun, uinal, kin)
dates = to_tzolkin(jd) + to_haab(jd)
return "{0} {1} {2} {3}".format(*dates)
def translate_haab(h):
return dict(list(zip(HAAB_MONTHS, HAAB_TRANSLATIONS))).get(h)
def translate_tzolkin(tz):
return dict(list(zip(TZOLKIN_NAMES, TZOLKIN_TRANSLATIONS))).get(tz)
def _haab_count(day, month):
'''Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365'''
if day < 0 or day > 19:
raise IndexError("Invalid day number")
try:
i = HAAB_MONTHS.index(month)
except ValueError:
raise ValueError("'{0}' is not a valid Haab' month".format(month))
return min(i * 20, 360) + day
def _tzolkin_from_count(count):
number = amod(count, 13)
name = TZOLKIN_NAMES[count % 20 - 1]
return number, name
def _tzolkin_count(day, name):
if day < 1 or day > 13:
raise IndexError("Invalid day number")
days = set(x + day for x in range(0, 260, 13))
try:
n = 1 + TZOLKIN_NAMES.index(name)
except ValueError:
raise ValueError("'{0}' is not a valid Tzolk'in day name".format(name))
names = set(y + n for y in range(0, 260, 20))
return days.intersection(names).pop()
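# Worked example (illustrative): _tzolkin_count(4, "Ajaw") builds
# days = {4, 17, 30, ...} (numbers congruent to 4 mod 13) and
# names = {20, 40, 60, ...} (multiples of 20, since Ajaw is the 20th name);
# their single common element within the 260-day cycle is 160, and
# _tzolkin_from_count(160) returns (4, "Ajaw") again.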
def tzolkin_generator(number=None, name=None):
    '''For a given tzolkin name/number combination, return a generator
    that cycles through the tzolkin calendar, starting with the input'''
# By default, it will start at the beginning
number = number or 13
name = name or "Ajaw"
if number > 13:
raise ValueError("Invalid day number")
if name not in TZOLKIN_NAMES:
raise ValueError("Invalid day name")
count = _tzolkin_count(number, name)
ranged = itertools.chain(list(range(count, 260)), list(range(1, count)))
for i in ranged:
yield _tzolkin_from_count(i)
def longcount_generator(baktun, katun, tun, uinal, kin):
'''Generate long counts, starting with input'''
j = to_jd(baktun, katun, tun, uinal, kin)
while True:
yield from_jd(j)
j = j + 1
def next_haab(month, jd):
    '''For a given haab month and a Julian day count, find the next start of that month on or after the JDC'''
if jd < EPOCH:
raise IndexError("Input day is before Mayan epoch.")
hday, hmonth = to_haab(jd)
if hmonth == month:
days = 1 - hday
else:
count1 = _haab_count(hday, hmonth)
count2 = _haab_count(1, month)
# Find number of days between haab of given jd and desired haab
days = (count2 - count1) % 365
# add in the number of days and return new jd
return jd + days
def next_tzolkin(tzolkin, jd):
    '''For a given tzolk'in day, and a Julian day count, find the next occurrence of that tzolk'in after the date'''
if jd < EPOCH:
raise IndexError("Input day is before Mayan epoch.")
count1 = _tzolkin_count(*to_tzolkin(jd))
count2 = _tzolkin_count(*tzolkin)
add_days = (count2 - count1) % 260
return jd + add_days
def next_tzolkin_haab(tzolkin, haab, jd):
    '''For a given haab-tzolk'in combination, and a Julian day count, find the next occurrence of the combination after the date'''
# get H & T of input jd, and their place in the 18,980 day cycle
haabcount = _haab_count(*to_haab(jd))
haab_desired_count = _haab_count(*haab)
# How many days between the input day and the desired day?
haab_days = (haab_desired_count - haabcount) % 365
possible_haab = set(h + haab_days for h in range(0, 18980, 365))
tzcount = _tzolkin_count(*to_tzolkin(jd))
tz_desired_count = _tzolkin_count(*tzolkin)
# How many days between the input day and the desired day?
tzolkin_days = (tz_desired_count - tzcount) % 260
possible_tz = set(t + tzolkin_days for t in range(0, 18980, 260))
try:
return possible_tz.intersection(possible_haab).pop() + jd
except KeyError:
raise IndexError("That Haab'-Tzolk'in combination isn't possible")
def month_length(month):
"""Not the actual length of the month, but accounts for the 5 unlucky/nameless days"""
if month == "Wayeb'":
return 5
else:
return 20
def haab_monthcalendar(baktun=None, katun=None, tun=None, uinal=None, kin=None, jdc=None):
    '''For a given long count, return a calendar of the current haab month, divided into tzolkin "weeks"'''
if not jdc:
jdc = to_jd(baktun, katun, tun, uinal, kin)
haab_number, haab_month = to_haab(jdc)
first_j = jdc - haab_number + 1
tzolkin_start_number, tzolkin_start_name = to_tzolkin(first_j)
gen_longcount = longcount_generator(*from_jd(first_j))
gen_tzolkin = tzolkin_generator(tzolkin_start_number, tzolkin_start_name)
# 13 day long tzolkin 'weeks'
lpad = tzolkin_start_number - 1
rpad = 13 - (tzolkin_start_number + 19 % 13)
monlen = month_length(haab_month)
days = [None] * lpad + list(range(1, monlen + 1)) + rpad * [None]
def g(x, generate):
if x is None:
return None
return next(generate)
return [[(k, g(k, gen_tzolkin), g(k, gen_longcount)) for k in days[i:i + 13]] for i in range(0, len(days), 13)]
def haab_monthcalendar_prospective(haabmonth, jdc):
    '''Give the monthcalendar for the next occurrence of the given haab month after jdc'''
return haab_monthcalendar(jdc=next_haab(haabmonth, jdc))
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# astrotess.py - Luke Bouma ([email protected]) - 09/2018
'''
Contains various tools for analyzing TESS light curves.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
##################
## MAIN IMPORTS ##
##################
import pickle
import os.path
import gzip
import sys
import glob
import numpy as np
from astropy.io import fits as pyfits
#######################################
## UTILITY FUNCTIONS FOR FLUXES/MAGS ##
#######################################
def normalized_flux_to_mag(lcdict,
columns=('sap.sap_flux',
'sap.sap_flux_err',
'sap.sap_bkg',
'sap.sap_bkg_err',
'pdc.pdcsap_flux',
'pdc.pdcsap_flux_err')):
'''This converts the normalized fluxes in the TESS lcdicts to TESS mags.
Uses the object's TESS mag stored in lcdict['objectinfo']['tessmag']::
mag - object_tess_mag = -2.5 log (flux/median_flux)
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `read_tess_fitslc` or
`consolidate_tess_fitslc`. This must have normalized fluxes in its
measurement columns (use the `normalize` kwarg for these functions).
columns : sequence of str
The column keys of the normalized flux and background measurements in
the `lcdict` to operate on and convert to magnitudes in TESS band (T).
Returns
-------
lcdict
The returned `lcdict` will contain extra columns corresponding to
magnitudes for each input normalized flux/background column.
'''
tess_mag = lcdict['objectinfo']['tessmag']
for key in columns:
k1, k2 = key.split('.')
if 'err' not in k2:
lcdict[k1][k2.replace('flux','mag')] = (
tess_mag - 2.5*np.log10(lcdict[k1][k2])
)
else:
lcdict[k1][k2.replace('flux','mag')] = (
- 2.5*np.log10(1.0 - lcdict[k1][k2])
)
return lcdict
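# Worked example (illustrative): for an object with tessmag = 10.0, a
# normalized flux of 1.10 maps to 10.0 - 2.5*log10(1.10) ~= 9.897 (brighter
# than the median), while a normalized flux error of 0.01 maps to an
# approximate magnitude error of -2.5*log10(1 - 0.01) ~= 0.0109 mag.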
#########################################################
## LCDICT MAKING FUNCTIONS FOR TESS HLSP LC.FITS FILES ##
#########################################################
# these appear to be similar to Kepler LCs, so we'll copy over stuff from
# astrokep.py
# this is the list of keys to pull out of the top header of the FITS
LCTOPKEYS = [
'DATE-OBS',
'DATE-END',
'PROCVER',
'ORIGIN',
'DATA_REL',
'TIMVERSN',
'OBJECT',
'TICID',
'SECTOR',
'CAMERA',
'CCD',
'PXTABLE',
'RADESYS',
'RA_OBJ',
'DEC_OBJ',
'EQUINOX',
'PMRA',
'PMDEC',
'PMTOTAL',
'TESSMAG',
'TEFF',
'LOGG',
'MH',
'RADIUS',
'TICVER',
'CRMITEN',
'CRBLKSZ',
'CRSPOC',
]
# this is the list of keys to pull out of the light curve header
LCHEADERKEYS = [
'EXPOSURE',
'TIMEREF',
'TASSIGN',
'TIMESYS',
'BJDREFI',
'BJDREFF',
'TELAPSE',
'LIVETIME',
'INT_TIME',
'NUM_FRM',
'TIMEDEL',
'BACKAPP',
'DEADAPP',
'VIGNAPP',
'GAINA',
'GAINB',
'GAINC',
'GAIND',
'READNOIA',
'READNOIB',
'READNOIC',
'READNOID',
'CDPP0_5',
'CDPP1_0',
'CDPP2_0',
'PDCVAR',
'PDCMETHD',
'CROWDSAP',
'FLFRCSAP',
'NSPSDDET',
'NSPSDCOR'
]
# this is the list of keys to pull out of the light curve FITS table
LCDATAKEYS = [
'TIME',
'TIMECORR',
'CADENCENO',
'QUALITY',
'PSF_CENTR1','PSF_CENTR1_ERR','PSF_CENTR2','PSF_CENTR2_ERR',
'MOM_CENTR1','MOM_CENTR1_ERR','MOM_CENTR2','MOM_CENTR2_ERR',
'POS_CORR1','POS_CORR2'
]
# this is the list of columns to use for fluxes, backgrounds, errors
LCSAPKEYS = ['SAP_FLUX','SAP_FLUX_ERR','SAP_BKG','SAP_BKG_ERR']
LCPDCKEYS = ['PDCSAP_FLUX','PDCSAP_FLUX_ERR']
# this is the list of keys to pull out of the aperture part of the light curve
# we also pull out the whole pixel mask, which looks something like:
#
# array([[65, 69, 69, 69, 69, 69, 69, 69, 69, 65, 65],
# [69, 69, 69, 69, 65, 65, 65, 65, 69, 69, 65],
# [65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65],
# [65, 65, 65, 65, 75, 75, 65, 65, 65, 69, 65],
# [65, 65, 65, 75, 75, 75, 75, 65, 65, 65, 65],
# [65, 65, 65, 75, 75, 75, 75, 65, 65, 69, 65],
# [65, 65, 65, 65, 75, 75, 65, 65, 65, 69, 65],
# [65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65],
# [69, 69, 69, 65, 69, 65, 65, 65, 69, 69, 65],
# [65, 69, 69, 69, 69, 69, 65, 69, 69, 65, 65],
# [65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65]], dtype=int32)
#
# FIXME: figure out what these values mean (probably flux-collected = 75 /
# flux-available = 69 / flux-in-stamp = 65). we use CDELT1 and CDELT2 below to
# get the pixel scale in arcsec/px
LCAPERTUREKEYS = ['NPIXSAP','NPIXMISS',
'CDELT1','CDELT2']
def read_tess_fitslc(lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
normalize=False,
appendto=None,
filterqualityflags=False,
nanfilter=None,
timestoignore=None):
'''This extracts the light curve from a single TESS .lc.fits file.
This works on the light curves available at MAST.
TODO: look at:
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
for details on the column descriptions and to fill in any other info we
need.
Parameters
----------
lcfits : str
        The filename of a MAST TESS light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (sectors/cameras/CCDs?). The appending
does not care about the time order. To consolidate light curves in time
order, use `consolidate_tess_fitslc` below.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
# now get the values we want from the header
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# get the info from the lcaperturehdr
    for key in apkeys:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
# if we're appending to another lcdict
if appendto and isinstance(appendto, dict):
lcdict = appendto
# update lcinfo
lcdict['lcinfo']['timesys'].append(hdrinfo['timesys'])
lcdict['lcinfo']['bjdoffset'].append(
hdrinfo['bjdrefi'] + hdrinfo['bjdreff']
)
lcdict['lcinfo']['lcaperture'].append(lcaperturedata)
lcdict['lcinfo']['aperpix_used'].append(hdrinfo['npixsap'])
lcdict['lcinfo']['aperpix_unused'].append(hdrinfo['npixmiss'])
lcdict['lcinfo']['pixarcsec'].append(
(np.abs(hdrinfo['cdelt1']) +
np.abs(hdrinfo['cdelt2']))*3600.0/2.0
)
lcdict['lcinfo']['ndet'].append(ndet)
lcdict['lcinfo']['exptime'].append(hdrinfo['exposure'])
lcdict['lcinfo']['sector'].append(hdrinfo['sector'])
lcdict['lcinfo']['camera'].append(hdrinfo['camera'])
lcdict['lcinfo']['ccd'].append(hdrinfo['ccd'])
lcdict['lcinfo']['date_obs_start'].append(hdrinfo['date-obs'])
lcdict['lcinfo']['date_obs_end'].append(hdrinfo['date-end'])
lcdict['lcinfo']['pixel_table_id'].append(hdrinfo['pxtable'])
lcdict['lcinfo']['origin'].append(hdrinfo['origin'])
lcdict['lcinfo']['datarelease'].append(hdrinfo['data_rel'])
lcdict['lcinfo']['procversion'].append(hdrinfo['procver'])
lcdict['lcinfo']['tic_version'].append(hdrinfo['ticver'])
lcdict['lcinfo']['cr_mitigation'].append(hdrinfo['crmiten'])
lcdict['lcinfo']['cr_blocksize'].append(hdrinfo['crblksz'])
lcdict['lcinfo']['cr_spocclean'].append(hdrinfo['crspoc'])
# update the varinfo for this light curve
lcdict['varinfo']['cdpp0_5'].append(hdrinfo['cdpp0_5'])
lcdict['varinfo']['cdpp1_0'].append(hdrinfo['cdpp1_0'])
lcdict['varinfo']['cdpp2_0'].append(hdrinfo['cdpp2_0'])
lcdict['varinfo']['pdcvar'].append(hdrinfo['pdcvar'])
lcdict['varinfo']['pdcmethod'].append(hdrinfo['pdcmethd'])
lcdict['varinfo']['target_flux_total_flux_ratio_in_aper'].append(
hdrinfo['crowdsap']
)
lcdict['varinfo']['target_flux_fraction_in_aper'].append(
hdrinfo['flfrcsap']
)
# update the light curve columns now
for key in datakeys:
if key.lower() in lcdict:
lcdict[key.lower()] = (
np.concatenate((lcdict[key.lower()], lcdata[key]))
)
for key in sapkeys:
if key.lower() in lcdict['sap']:
sapflux_median = np.nanmedian(lcdata['SAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key == 'SAP_FLUX':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_FLUX_ERR':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_BKG':
thislcdata = lcdata[key] / sapflux_median
elif normalize and key == 'SAP_BKG_ERR':
thislcdata = lcdata[key] / sapflux_median
else:
thislcdata = lcdata[key]
lcdict['sap'][key.lower()] = (
np.concatenate((lcdict['sap'][key.lower()], thislcdata))
)
for key in pdckeys:
if key.lower() in lcdict['pdc']:
pdcsap_flux_median = np.nanmedian(lcdata['PDCSAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key == 'PDCSAP_FLUX':
thislcdata = lcdata[key] / pdcsap_flux_median
elif normalize and key == 'PDCSAP_FLUX_ERR':
thislcdata = lcdata[key] / pdcsap_flux_median
else:
thislcdata = lcdata[key]
lcdict['pdc'][key.lower()] = (
np.concatenate((lcdict['pdc'][key.lower()], thislcdata))
)
# append some of the light curve information into existing numpy arrays
# so we can sort on them later
lcdict['exptime'] = np.concatenate(
(lcdict['exptime'],
np.full_like(lcdata['TIME'],
hdrinfo['exposure'],
dtype=np.float64))
)
lcdict['sector'] = np.concatenate(
(lcdict['sector'],
np.full_like(lcdata['TIME'],
hdrinfo['sector'],
dtype=np.int64))
)
lcdict['camera'] = np.concatenate(
(lcdict['camera'],
np.full_like(lcdata['TIME'],
hdrinfo['camera'],
dtype=np.int64))
)
lcdict['ccd'] = np.concatenate(
(lcdict['ccd'],
np.full_like(lcdata['TIME'],
hdrinfo['ccd'],
dtype=np.int64))
)
lcdict['pixel_table_id'] = np.concatenate(
(lcdict['pixel_table_id'],
np.full_like(lcdata['TIME'],
hdrinfo['pxtable'],
dtype=np.int64))
)
lcdict['origin'] = np.concatenate(
(lcdict['origin'],
np.full_like(lcdata['TIME'],
hdrinfo['origin'],
dtype='U100'))
)
lcdict['date_obs_start'] = np.concatenate(
(lcdict['date_obs_start'],
np.full_like(lcdata['TIME'],
hdrinfo['date-obs'],
dtype='U100'))
)
lcdict['date_obs_end'] = np.concatenate(
(lcdict['date_obs_end'],
np.full_like(lcdata['TIME'],
hdrinfo['date-end'],
dtype='U100'))
)
lcdict['procversion'] = np.concatenate(
(lcdict['procversion'],
np.full_like(lcdata['TIME'],
hdrinfo['procver'],
dtype='U255'))
)
lcdict['datarelease'] = np.concatenate(
(lcdict['datarelease'],
np.full_like(lcdata['TIME'],
hdrinfo['data_rel'],
dtype=np.int64))
)
# otherwise, this is a new lcdict
else:
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'objectid':hdrinfo['object'],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcaperture':[lcaperturedata],
'aperpix_used':[hdrinfo['npixsap']],
'aperpix_unused':[hdrinfo['npixmiss']],
'pixarcsec':[(np.abs(hdrinfo['cdelt1']) +
np.abs(hdrinfo['cdelt2']))*3600.0/2.0],
'ndet':[ndet],
'origin':[hdrinfo['origin']],
'procversion':[hdrinfo['procver']],
'datarelease':[hdrinfo['data_rel']],
'sector':[hdrinfo['sector']],
'camera':[hdrinfo['camera']],
'ccd':[hdrinfo['ccd']],
'pixel_table_id':[hdrinfo['pxtable']],
'date_obs_start':[hdrinfo['date-obs']],
'date_obs_end':[hdrinfo['date-end']],
'tic_version':[hdrinfo['ticver']],
'cr_mitigation':[hdrinfo['crmiten']],
'cr_blocksize':[hdrinfo['crblksz']],
'cr_spocclean':[hdrinfo['crspoc']],
},
'objectinfo':{
'objectid':hdrinfo['object'],
'ticid':hdrinfo['ticid'],
'tessmag':hdrinfo['tessmag'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'star_teff':hdrinfo['teff'],
'star_logg':hdrinfo['logg'],
'star_mh':hdrinfo['mh'],
'star_radius':hdrinfo['radius'],
'observatory':'TESS',
'telescope':'TESS photometer',
},
'varinfo':{
'cdpp0_5':[hdrinfo['cdpp0_5']],
'cdpp1_0':[hdrinfo['cdpp1_0']],
'cdpp2_0':[hdrinfo['cdpp2_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'target_flux_total_flux_ratio_in_aper':[hdrinfo['crowdsap']],
'target_flux_fraction_in_aper':[hdrinfo['flfrcsap']],
},
'sap':{},
'pdc':{},
}
# get the LC columns
for key in datakeys:
lcdict[key.lower()] = lcdata[key]
for key in sapkeys:
lcdict['sap'][key.lower()] = lcdata[key]
for key in pdckeys:
lcdict['pdc'][key.lower()] = lcdata[key]
# turn some of the light curve information into numpy arrays so we can
# sort on them later
lcdict['exptime'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['exptime'][0],
dtype=np.float64)
lcdict['sector'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['sector'][0],
dtype=np.int64)
lcdict['camera'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['camera'][0],
dtype=np.int64)
lcdict['ccd'] = np.full_like(lcdict['time'],
lcdict['lcinfo']['ccd'][0],
dtype=np.int64)
lcdict['pixel_table_id'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['pixel_table_id'][0],
dtype=np.int64,
)
lcdict['origin'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['origin'][0],
dtype='U100',
)
lcdict['date_obs_start'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['date_obs_start'][0],
dtype='U100',
)
lcdict['date_obs_end'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['date_obs_end'][0],
dtype='U100',
)
lcdict['procversion'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['procversion'][0],
dtype='U255',
)
lcdict['datarelease'] = np.full_like(
lcdict['time'],
lcdict['lcinfo']['datarelease'][0],
dtype=np.int64,
)
# normalize the SAP and PDCSAP fluxes, errs, and backgrounds if needed
if normalize:
sapflux_median = np.nanmedian(lcdict['sap']['sap_flux'])
pdcsap_flux_median = np.nanmedian(lcdict['pdc']['pdcsap_flux'])
lcdict['sap']['sap_flux'] = (
lcdict['sap']['sap_flux'] /
sapflux_median
)
lcdict['sap']['sap_flux_err'] = (
lcdict['sap']['sap_flux_err'] /
sapflux_median
)
lcdict['sap']['sap_bkg'] = (
lcdict['sap']['sap_bkg'] /
sapflux_median
)
lcdict['sap']['sap_bkg_err'] = (
lcdict['sap']['sap_bkg_err'] /
sapflux_median
)
lcdict['pdc']['pdcsap_flux'] = (
lcdict['pdc']['pdcsap_flux'] /
pdcsap_flux_median
)
lcdict['pdc']['pdcsap_flux_err'] = (
lcdict['pdc']['pdcsap_flux_err'] /
pdcsap_flux_median
)
## END OF LIGHT CURVE CONSTRUCTION ##
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in datakeys] +
['sap.%s' % x.lower() for x in sapkeys] +
['pdc.%s' % x.lower() for x in pdckeys] +
['exptime','sector','camera','ccd', 'pixel_table_id',
'origin', 'date_obs_start', 'date_obs_end',
'procversion', 'datarelease']
)
# update the ndet key in the objectinfo with the sum of all observations
lcdict['objectinfo']['ndet'] = sum(lcdict['lcinfo']['ndet'])
# filter the LC dict if requested
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
lcdict = filter_tess_lcdict(lcdict,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
# return the lcdict at the end
return lcdict
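# A minimal usage sketch for read_tess_fitslc (the FITS filenames below are
# hypothetical, and the keyword names are assumed from how the function is
# called in consolidate_tess_fitslc further down):
#
#   lcd = read_tess_fitslc('tess-s0001-0000000261136679_lc.fits',
#                          normalize=True,
#                          filterqualityflags=True)
#   # append a second sector's light curve to the same lcdict
#   lcd = read_tess_fitslc('tess-s0002-0000000261136679_lc.fits',
#                          appendto=lcd)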
def consolidate_tess_fitslc(lclist,
normalize=True,
filterqualityflags=False,
nanfilter=None,
timestoignore=None,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS):
'''This consolidates a list of LCs for a single TIC object.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
lclist : list of str, or str
`lclist` is either a list of actual light curve files or a string that
is valid for glob.glob to search for and generate a light curve list
based on the file glob. This is useful for consolidating LC FITS files
across different TESS sectors for a single TIC ID using a glob like
`*<TICID>*_lc.fits`.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,pdc,time', 'sap,time', 'pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# if the lclist is a string, assume that we're passing in a fileglob
if isinstance(lclist, str):
matching = glob.glob(lclist,
recursive=True)
LOGINFO('found %s LCs: %r' % (len(matching), matching))
if len(matching) == 0:
LOGERROR('could not find any TESS LC files matching glob: %s' %
lclist)
return None
# if the lclist is an actual list of LCs, then use it directly
else:
matching = lclist
# get the first file
consolidated = read_tess_fitslc(matching[0],
normalize=normalize,
headerkeys=headerkeys,
datakeys=datakeys,
sapkeys=sapkeys,
pdckeys=pdckeys,
topkeys=topkeys,
apkeys=apkeys)
# get the rest of the files
if len(matching) > 1:
for lcf in matching[1:]:
consolidated = read_tess_fitslc(lcf,
appendto=consolidated,
normalize=normalize,
headerkeys=headerkeys,
datakeys=datakeys,
sapkeys=sapkeys,
pdckeys=pdckeys,
topkeys=topkeys,
apkeys=apkeys)
# get the sort indices. we use time for the columns and sectors for the
# bits in lcinfo and varinfo
LOGINFO('sorting by time...')
# NOTE: nans in time will be sorted to the end of the array
finiteind = np.isfinite(consolidated['time'])
if np.sum(finiteind) < consolidated['time'].size:
LOGWARNING('some time values are nan! '
'measurements at these times will be '
'sorted to the end of the column arrays.')
# get the time sort index
column_sort_ind = np.argsort(consolidated['time'])
# sort the columns by time
for col in consolidated['columns']:
if '.' in col:
key, subkey = col.split('.')
consolidated[key][subkey] = (
consolidated[key][subkey][column_sort_ind]
)
else:
consolidated[col] = consolidated[col][column_sort_ind]
info_sort_ind = np.argsort(consolidated['lcinfo']['sector'])
# sort the keys in lcinfo
for key in consolidated['lcinfo']:
consolidated['lcinfo'][key] = (
np.array(consolidated['lcinfo'][key])[info_sort_ind].tolist()
)
# sort the keys in varinfo
for key in consolidated['varinfo']:
consolidated['varinfo'][key] = (
np.array(consolidated['varinfo'][key])[info_sort_ind].tolist()
)
# filter the LC dict if requested
# we do this at the end
if (filterqualityflags is not False or
nanfilter is not None or
timestoignore is not None):
consolidated = filter_tess_lcdict(consolidated,
filterqualityflags,
nanfilter=nanfilter,
timestoignore=timestoignore)
return consolidated
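# A minimal usage sketch for consolidate_tess_fitslc (the directory and TIC ID
# in the glob below are hypothetical, following the '*<TICID>*_lc.fits'
# pattern described in the docstring):
#
#   lcd = consolidate_tess_fitslc('/data/tess/**/*0000000261136679*_lc.fits',
#                                 normalize=True,
#                                 filterqualityflags=True,
#                                 nanfilter='sap,pdc,time')
#   print(lcd['objectinfo']['ndet'])  # total ndet summed over all sectors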
##################
## INPUT/OUTPUT ##
##################
def tess_lcdict_to_pkl(lcdict,
outfile=None):
'''This writes the `lcdict` to a Python pickle.
Parameters
----------
lcdict : lcdict
This is the input `lcdict` to write to a pickle.
outfile : str or None
If this is None, the object's ID (the TIC ID) will be determined from the
`lcdict` and used to form the filename of the output pickle file. If
this is a `str`, the provided filename will be used.
Returns
-------
str
The absolute path to the written pickle file.
'''
if not outfile:
outfile = '%s-tesslc.pkl' % lcdict['objectid'].replace(' ','')
# we're using pickle.HIGHEST_PROTOCOL here; this will make Py3 pickles
# unreadable for Python 2.7
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return os.path.abspath(outfile)
def read_tess_pklc(picklefile):
'''This turns the pickled lightcurve file back into an `lcdict`.
Parameters
----------
picklefile : str
The path to a previously written TESS LC picklefile generated by
`tess_lcdict_to_pkl` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
if picklefile.endswith('.gz'):
infd = gzip.open(picklefile, 'rb')
else:
infd = open(picklefile, 'rb')
try:
with infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with open(picklefile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % picklefile)
return lcdict
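# A minimal round-trip sketch for the two I/O functions above ('lcd' here is
# an lcdict from read_tess_fitslc or consolidate_tess_fitslc; when outfile is
# None the pickle filename is derived from lcd['objectid']):
#
#   pklpath = tess_lcdict_to_pkl(lcd)
#   lcd_restored = read_tess_pklc(pklpath)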
################################
## TESS LIGHTCURVE PROCESSING ##
################################
def filter_tess_lcdict(lcdict,
filterqualityflags=True,
nanfilter='sap,pdc,time',
timestoignore=None,
quiet=False):
'''This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,pdc,time', 'sap,time', 'pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
quiet : bool
If True, progress messages about the number of removed points will not be
logged.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterqualityflags:
nbefore = lcdict['time'].size
filterind = lcdict['quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('applied quality flag filter, '
'ndet before = %s, ndet after = %s'
% (nbefore, nafter))
if nanfilter and nanfilter == 'sap,pdc,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'sap,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'pdc,time':
notnanind = (
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter is None:
pass
else:
raise NotImplementedError
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = np.full_like(lcdict['time'],True).astype(bool)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1))
exclind = exclind & thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
return lcdict
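# A minimal usage sketch for filter_tess_lcdict (the interval in timestoignore
# is hypothetical and must be in the same units as lcdict['time']):
#
#   lcd = filter_tess_lcdict(lcd,
#                            filterqualityflags=True,
#                            nanfilter='sap,pdc,time',
#                            timestoignore=[(1345.0, 1350.0)])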
"""
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
import matplotlib.pyplot as plt
y_train = np.load("data/solutions_train.npy")
ra.y_train=y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
ra.num_valid = ra.num_train // 10  # integer division; this defines the validation set size
ra.num_train -= ra.num_valid
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
load_data.num_train=y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
BATCH_SIZE = 16 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 32 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_29epochs_next_nexttoLearningRate0.002.pkl"
DO_VALID = False # disable this to not bother with the validation set evaluation
DO_TEST = False # disable this to not generate predictions on the testset
DO_PRINT_FILTERS = True
IMAGE_OUTPUT_PATH = "filterImages_initialRand"
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
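# NOTE: the two ds_transforms above produce the two input representations fed
# to l0 and l0_45 below: both are downsampled to 69x69, and the second one is
# additionally rotated by 45 degrees.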
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
print("num test %s " % (num_test))
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
#l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=False)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
#l12 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=4)
#l3a = cc_layers.CudaConvnetConv2DLayer(l12, n_filters=64, filter_size=7, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
#l3 = cc_layers.CudaConvnetPooling2DLayer(l3a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
#l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=2) # 4) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
#l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
#l4bc = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
## l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
#l5 = layers.DenseLayer(l4bc, n_outputs=37, weights_std=0.01, init_bias_value=0.1, nonlinearity=layers.identity)
## l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
print("output shapes: l0 %s , l0r %s , l0s %s , l1a %s , l2a %s, l3 %s , j3 %s , l4 %s , l5 %s " % ( l0.get_output_shape(), l0r.get_output_shape(), l0s.get_output_shape(), l1a.get_output_shape(),l2a.get_output_shape(), l3.get_output_shape(), j3.get_output_shape(), l4.get_output_shape(), l5.get_output_shape()))
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
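# compute_output maps a batch index to the network's 37 outputs for that
# batch; the givens above slice rows idx*BATCH_SIZE:(idx+1)*BATCH_SIZE out of
# the shared input arrays, so each chunk only needs to be copied to the GPU
# once via set_value below.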
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
print(CHUNK_SIZE)
print(input_sizes)
#augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
#augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions; don't forget to cut off the zero padding at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# make predictions; don't forget to cut off the zero padding at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
if DO_PRINT_FILTERS:
if not os.path.isdir(IMAGE_OUTPUT_PATH): os.mkdir(IMAGE_OUTPUT_PATH)
plt.gray()
#os.chdir("..")
print "print filtered"
print "1st image"
print(test_gen)
print(valid_gen)
print(BATCH_SIZE)
chunk_data, chunk_length = test_gen.next()
print(chunk_length)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
predictions = compute_output(0)
l0_output = theano.function([idx], l0r.output(), givens=givens)
l1_output = theano.function([idx], l1a.output(), givens=givens)
l3_output = theano.function([idx], l2a.output(), givens=givens)
os.chdir(IMAGE_OUTPUT_PATH)
#filter of layer 1 , output format should be (32, 45, 45, 128)
input_img = l0_output(0)[0]
if not os.path.isdir("l0real"): os.mkdir("l0real")
for i in range(0,3):
plt.imshow(input_img[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l0real/%s.jpg" % i)
input_img = l0_output(0)[1]
if not os.path.isdir("l0real2"): os.mkdir("l0real2")
for i in range(0,3):
plt.imshow(input_img[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l0real2/%s.jpg" % i)
filters1 = l1_output(0).transpose(3,0,1,2)[0]
if not os.path.isdir("l1real"): os.mkdir("l1real")
for i in range(0,32):
plt.imshow(filters1[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1real/%s.jpg" % i)
filters2 = l1_output(0).transpose(3,0,1,2)[1]
if not os.path.isdir("l1real2"): os.mkdir("l1real2")
for i in range(0,32):
plt.imshow(filters2[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1real2/%s.jpg" % i)
filters3 = l3_output(0).transpose(3,0,1,2)[0]
if not os.path.isdir("l2real"): os.mkdir("l2real")
for i in range(0,64):
plt.imshow(filters3[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l2real/%s.jpg" % i)
print "2nd image"
# for e, (chunk_data, chunk_length) in enumerate(test_gen):
# if e>0: break
# xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
# xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
# print " load data onto GPU"
# for x_shared, x_chunk in zip(xs_shared, xs_chunk):
# x_shared.set_value(x_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# predictions = compute_output(8)
#l0_output = theano.function([idx], l0r.output(), givens=givens)
#l1_output = theano.function([idx], l1a.output(), givens=givens)
#l3_output = theano.function([idx], l2a.output(), givens=givens)
input_img = l0_output(0)[128/16]
if not os.path.isdir("l0real_2"): os.mkdir("l0real_2")
for i in range(0,3):
plt.imshow(input_img[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l0real_2/%s.jpg" % i)
input_img = l0_output(0)[128/16+1]
if not os.path.isdir("l0real2_2"): os.mkdir("l0real2_2")
for i in range(0,3):
plt.imshow(input_img[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l0real2_2/%s.jpg" % i)
#filter of layer 1 , output format should be (32, 45, 45, 128)
filters1 = l1_output(0).transpose(3,0,1,2)[128/16]
if not os.path.isdir("l1real_2"): os.mkdir("l1real_2")
for i in range(0,32):
plt.imshow(filters1[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1real_2/%s.jpg" % i)
filters2 = l1_output(0).transpose(3,0,1,2)[128/16+1]
if not os.path.isdir("l1real2_2"): os.mkdir("l1real2_2")
for i in range(0,32):
plt.imshow(filters2[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1real2_2/%s.jpg" % i)
filters3 = l3_output(0).transpose(3,0,1,2)[128/16]
if not os.path.isdir("l2real_2"): os.mkdir("l2real_2")
for i in range(0,64):
plt.imshow(filters3[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l2real_2/%s.jpg" % i)
print "3rd image"
# for e, (chunk_data, chunk_length) in enumerate(test_gen):
# if e>0: break
# xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
# xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
# print " load data onto GPU"
# for x_shared, x_chunk in zip(xs_shared, xs_chunk):
# x_shared.set_value(x_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
# predictions = compute_output(8)
#l0_output = theano.function([idx], l0r.output(), givens=givens)
#l1_output = theano.function([idx], l1a.output(), givens=givens)
#l3_output = theano.function([idx], l2a.output(), givens=givens)
input_img = l0_output(0)[2*128/16]
if not os.path.isdir("l0real_3"): os.mkdir("l0real_3")
for i in range(0,3):
plt.imshow(input_img[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l0real_3/%s.jpg" % i)
input_img = l0_output(0)[2*128/16+1]
if not os.path.isdir("l0real2_3"): os.mkdir("l0real2_3")
for i in range(0,3):
plt.imshow(input_img[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l0real2_3/%s.jpg" % i)
#filter of layer 1 , output format should be (32, 45, 45, 128)
filters1 = l1_output(0).transpose(3,0,1,2)[2*128/16]
if not os.path.isdir("l1real_3"): os.mkdir("l1real_3")
for i in range(0,32):
plt.imshow(filters1[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1real_3/%s.jpg" % i)
filters2 = l1_output(0).transpose(3,0,1,2)[2*128/16+1]
if not os.path.isdir("l1real2_3"): os.mkdir("l1real2_3")
for i in range(0,32):
plt.imshow(filters2[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1real2_3/%s.jpg" % i)
filters3 = l3_output(0).transpose(3,0,1,2)[2*128/16]
if not os.path.isdir("l2real_3"): os.mkdir("l2real_3")
for i in range(0,64):
plt.imshow(filters3[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l2real_3/%s.jpg" % i)
print "print filter"
print "black input"
# os.chdir(IMAGE_OUTPUT_PATH)
start_time = time.time()
inputBlack=np.zeros((BATCH_SIZE, NUM_INPUT_FEATURES, 69, 69),dtype=theano.config.floatX)
inputWhite=np.ones((BATCH_SIZE, NUM_INPUT_FEATURES, 69, 69),dtype=theano.config.floatX)
#black and white are switched!!!!
for x_shared in xs_shared:
x_shared.set_value(inputWhite)
# whitePrediction=compute_output(0)
# with open("blackPrediction.txt", 'w')as f:
# f.write(" %s " % (whitePrediction))
l1_output = theano.function([idx], l1a.output(), givens=givens)
l3_output = theano.function([idx], l2a.output(), givens=givens)
#filter of layer 1 , output format should be (32, 45, 45, 128)
filters1 = l1_output(0).transpose(3,0,1,2)[0]
if not os.path.isdir("l1black"): os.mkdir("l1black")
for i in range(0,32):
plt.imshow(filters1[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1black/%s.jpg" % i)
filters2 = l1_output(0).transpose(3,0,1,2)[1]
if not os.path.isdir("l1black2"): os.mkdir("l1black2")
for i in range(0,32):
plt.imshow(filters2[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1black2/%s.jpg" % i)
filters3 = l3_output(0).transpose(3,0,1,2)[0]
if not os.path.isdir("l2black"): os.mkdir("l2black")
for i in range(0,64):
plt.imshow(filters3[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l2black/%s.jpg" % i)
print "white input"
for x_shared in xs_shared:
x_shared.set_value(inputBlack)
# blackPrediction=compute_output(0)
# with open("whitePrediction.txt", 'w')as f:
# f.write(" %s " % (blackPrediction))
l1_output = theano.function([idx], l1a.output(), givens=givens)
l3_output = theano.function([idx], l2a.output(), givens=givens)
filters1 = l1_output(0).transpose(3,0,1,2)[0]
if not os.path.isdir("l1white"): os.mkdir("l1white")
for i in range(0,32):
# if i==0:
# print("writing one filter image as txt")
# with open("imageTest.txt", 'w')as f:
# np.savetxt(f,filters1[i])
plt.imshow(filters1[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1white/%s.jpg" % i)
filters2 = l1_output(0).transpose(3,0,1,2)[1]
if not os.path.isdir("l1white2"): os.mkdir("l1white2")
for i in range(0,32):
plt.imshow(filters2[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l1white2/%s.jpg" % i)
filters3 = l3_output(0).transpose(3,0,1,2)[0]
if not os.path.isdir("l2white"): os.mkdir("l2white")
for i in range(0,64):
plt.imshow(filters3[i],interpolation='none') #Needs to be in row,col order
plt.savefig("l2white/%s.jpg" % i)
print "end"
exit()