# -*- coding: utf-8 -*-
""" Sahana Eden Guided Tour Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@todo: update for new template path modules/template
"""
__all__ = ("S3GuidedTourModel",
"tour_rheader",
"tour_builder",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3GuidedTourModel(S3Model):
""" Details about which guided tours this Person has completed """
names = ("tour_config",
"tour_details",
"tour_user",
)
def model(self):
T = current.T
db = current.db
NONE = current.messages["NONE"]
s3 = current.response.s3
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
person_id = self.pr_person_id
# ---------------------------------------------------------------------
# Guided tours that are available
#
tablename = "tour_config"
define_table(tablename,
Field("name",
represent=lambda v: v or NONE,
label=T("Display name"),
requires = IS_NOT_EMPTY(),
),
Field("code",
length=255,
notnull=True,
unique=True,
represent=lambda v: v or NONE,
label=T("Unique code")),
Field("controller",
represent=lambda v: v or NONE,
label=T("Controller tour is activated")),
Field("function",
represent=lambda v: v or NONE,
label=T("Function tour is activated")),
Field("autostart", "boolean",
default=False,
represent=lambda v: \
T("Yes") if v else T("No"),
label=T("Auto start")),
Field("role", "string",
represent=lambda v: v or NONE,
label=T("User's role")),
* s3_meta_fields()
)
# CRUD strings
ADD_TOUR = T("Create Tour")
crud_strings[tablename] = Storage(
label_create = ADD_TOUR,
title_display = T("Tour Configuration"),
title_list = T("Tours"),
title_update = T("Edit Tour"),
label_list_button = T("List Tours"),
label_delete_button = T("Delete Tour"),
msg_record_created = T("Tour added"),
msg_record_modified = T("Tour updated"),
msg_record_deleted = T("Tour deleted"),
msg_list_empty = T("No Tours currently registered"))
represent = S3Represent(lookup=tablename, translate=True)
tour_config_id = S3ReusableField("tour_config_id", "reference %s" % tablename,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "tour_config.id",
represent,
sort=True)),
represent=represent,
label=T("Tour Name"),
ondelete="SET NULL")
# Components
add_components(tablename,
# Details
tour_details="tour_config_id",
# Users
tour_user="tour_config_id",
)
# ---------------------------------------------------------------------
# Details of the tour.
#
tablename = "tour_details"
define_table(tablename,
tour_config_id(empty = False),
Field("posn", "integer",
default=0,
label=T("Position in tour")),
Field("controller",
represent=lambda v: v or NONE,
label=T("Controller name")),
Field("function",
represent=lambda v: v or NONE,
label=T("Function name")),
Field("args",
represent=lambda v: v or NONE,
label=T("Arguments")),
Field("tip_title",
represent=lambda v: v or NONE,
label=T("Title")),
Field("tip_details",
represent=lambda v: v or NONE,
label=T("Details")),
Field("html_id",
represent=lambda v: v or NONE,
label=T("HTML ID")),
Field("html_class",
represent=lambda v: v or NONE,
label=T("HTML class")),
Field("button",
represent=lambda v: v or NONE,
label=T("Button name")),
Field("tip_location",
represent=lambda v: v or NONE,
label=T("Loctaion of tip")),
Field("datatable_id",
represent=lambda v: v or NONE,
label=T("DataTable ID")),
Field("datatable_row",
represent=lambda v: v or NONE,
label=T("DataTable row")),
Field("redirect",
represent=lambda v: v or NONE,
label=T("Redirect URL")),
)
# CRUD strings
ADD_DETAILS = T("Create Details")
crud_strings[tablename] = Storage(
label_create = ADD_DETAILS,
title_display = T("Tour Details"),
title_list = T("Details"),
title_update = T("Edit Details"),
label_list_button = T("List Details"),
label_delete_button = T("Delete Detail"),
msg_record_created = T("Detail added"),
msg_record_modified = T("Detail updated"),
msg_record_deleted = T("Detail deleted"),
msg_list_empty = T("No Details currently registered"))
configure(tablename,
orderby = "tour_details.tour_config_id,tour_details.posn"
)
# ---------------------------------------------------------------------
# Details of the tours that the user has taken.
#
tablename = "tour_user"
define_table(tablename,
person_id(label = T("Person"),
ondelete="CASCADE",
empty = False,
),
tour_config_id(),
Field("place",
represent=lambda v: v or NONE,
label=T("Where reached")),
Field("resume",
represent=lambda v: v or NONE,
label=T("URL to resume tour")),
Field("completed", "boolean",
default=False,
represent=lambda v: \
T("Yes") if v else T("No"),
label=T("Completed tour?")),
Field("trip_counter", "integer",
default=0,
label=T("Times Completed")),
)
# CRUD strings
ADD_USER = T("Create User")
crud_strings[tablename] = Storage(
label_create = ADD_USER,
title_display = T("Tour User"),
title_list = T("Users"),
title_update = T("Edit User"),
label_list_button = T("List Users"),
label_delete_button = T("Delete User"),
msg_record_created = T("User added"),
msg_record_modified = T("User updated"),
msg_record_deleted = T("User deleted"),
msg_list_empty = T("No users have taken a tour"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(tour_config_id = tour_config_id,
)
# =============================================================================
def tour_rheader(r):
""" Resource Header for Guided Tour """
if r.representation == "html":
tour = r.record
if tour:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Details"), "details"),
(T("People"), "user"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
tour.name,
),
TR(TH("%s: " % table.code.label),
tour.code,
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
def tour_builder(output):
"""
Helper function to attach a guided tour (if required) to the output
"""
auth = current.auth
db = current.db
s3db = current.s3db
request = current.request
s3 = current.response.s3
T = current.T
req_vars = request.vars
tour_id = req_vars.tour
# Now see if the details are on the database for this user
tour = None
user_id = None
if auth.is_logged_in():
user_id = auth.s3_logged_in_person()
# Find out if the user has done this tour before
utable = s3db.tour_user
uquery = (utable.person_id == user_id) & \
(utable.tour_config_id == tour_id)
tour = db(uquery).select(utable.id,
utable.completed,
utable.place,
utable.resume,
limitby=(0, 1)).first()
# If the tour has just been started (from the menu) then
# it may be necessary to redirect to a different controller
# @todo: does place need to be changed to controller and function?
if not req_vars.tour_running:
if (tour and not tour.completed and tour.place != request.controller):
redirect("%s?tour=%s" %(tour.resume, tour_id))
# get the details from the database
dtable = s3db.tour_details
dquery = (dtable.tour_config_id == tour_id) &\
(dtable.controller == request.controller) &\
(dtable.function == request.function)
details = db(dquery).select(dtable.args,
dtable.tip_title,
dtable.tip_details,
dtable.button,
dtable.tip_location,
dtable.html_id,
dtable.html_class,
dtable.datatable_id,
dtable.datatable_row,
dtable.redirect,
orderby=(dtable.posn)
)
# tour_filename = os.path.join(request.folder,
# "private",
# "tour",
# tour_name)
# tour_file = open (tour_filename, "rb")
# # now open the details of the guided_tour into a dictionary
# import csv
# tour_details = csv.DictReader(tour_file, skipinitialspace=True)
# load the list of tour items in the html
joyride_OL = OL(_id="joyrideID_1")
pre_step_data = []
post_step_data = []
post_ride_data = []
last_row = None
last_used = None
req_args = request.args
cnt = -1
for row in details:
if row.args:
args = row.args.split(",")
else:
args = []
# if the page has a nested login form then "login" will be added to
# the req_args list so it needs to be added to the args list as well
if "login" in req_args:
if "login" not in args:
args.append("login")
# The following will capture the actual id used for the req_arg
# Example org/organisation/10, where 10 is the id from the database
posn = 0
for arg in args:
if arg == "dt_id":
args[posn] = req_args[posn]
posn += 1
# Now check that the tour url matches the current url
if (args == req_args):
cnt += 1 # number of records used in this part of the tour
if row.datatable_id:
dt_id = row.datatable_id
# cols = []
# if "DataTable_columns" in row:
# cols = row["DataTable_columns"].split(",")
row_num = 0
if row.datatable_row:
row_num = row.datatable_row
# Now set this up for the pre-processor hook in joyride
pre_step_data.append([cnt, dt_id, row_num])
if row.redirect:
redirect_row = row.redirect.split(",")
if len(redirect_row) >= 3:
url = URL(c=redirect_row[0],
f=redirect_row[1],
args=redirect_row[2:],
vars={"tour_running":True,
"tour":tour_id}
)
if "dt_id" in redirect_row[2]:
post_step_data.append([cnt, url, dt_id, row_num])
elif len(redirect_row) == 2:
url = URL(c=redirect_row[0],
f=redirect_row[1],
vars={"tour_running":True,
"tour":tour_id}
)
post_step_data.append([cnt, url])
else:
url = URL(c=redirect_row[0],vars={"tour_running":True,
"tour":tour_id})
post_step_data.append([cnt, url])
extra = {}
if row.html_id:
extra["_data-id"] = row.html_id
elif row.html_class:
extra["_data-class"] = row.html_class
if row.button:
extra["_data-button"] = row.button
else:
extra["_data-button"] = "Next"
if row.tip_location:
extra["_data-options"] = "tipLocation:%s" % row.tip_location.lower()
else:
extra["_data-options"] = "tipLocation:right"
joyride_OL.append(LI(H2(T(row.tip_title)),
P(T(row.tip_details)),
**extra
)
)
last_used = row
last_row = row
# The following redirect will be triggered if the user has moved away
# from the tour, such as by clicking on a tab. However, if a tab
# is part of the tour we are unable to determine whether they have moved
# away or are just visiting as part of the tour, so it will continue.
if len(joyride_OL) == 0:
del request.vars.tour
redirect(URL(args=req_args,
vars=request.vars))
if (user_id != None) and (last_row == last_used):
# set up an AJAX call to record that the tour has been completed
post_ride_data = [cnt, tour_id]
joyride_div = DIV(joyride_OL,
_class="hidden")
# Add the javascript configuration data
from gluon.serializers import json as jsons
if pre_step_data:
joyride_div.append(INPUT(_type="hidden",
_id="prestep_data",
_name="prestep_data",
_value=jsons(pre_step_data))
)
if post_step_data:
joyride_div.append(INPUT(_type="hidden",
_id="poststep_data",
_name="poststep_data",
_value=jsons(post_step_data))
)
if post_ride_data:
joyride_div.append(INPUT(_type="hidden",
_id="postride_data",
_name="postride_data",
_value=jsons(post_ride_data))
)
# Now add the details to the tour_user table
if user_id != None:
if tour == None:
# this user has never done this tour before so create a new record
utable.insert(person_id = user_id,
tour_config_id = tour_id,
place = request.controller,
resume = request.url)
else:
# the user has done some of this tour so update the record
db(uquery).update(place = request.controller,
resume = request.url,
completed = False)
output["joyride_div"] = joyride_div
if s3.debug:
appname = request.application
s3.scripts.append("/%s/static/scripts/jquery.joyride.js" % appname)
s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.js" % appname)
s3.stylesheets.append("plugins/joyride.min.css")
else:
s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.min.js" % request.application)
s3.stylesheets.append("plugins/joyride.css")
return output
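# Hedged usage sketch (not part of this module): a controller that wants a
# guided tour attached would pass its output dict through tour_builder()
# whenever a tour request variable is present. The controller below is
# hypothetical and only illustrates the calling convention.
#
# def index():
#     output = {"title": current.T("Home")}
#     if current.request.vars.tour:
#         output = tour_builder(output)
#     return output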
# END =========================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
if platform.system() != "Windows":
# pylint: disable=wildcard-import,unused-import,g-import-not-at-top
from tensorflow.contrib.tpu.ops import gen_tpu_ops
from tensorflow.contrib.tpu.ops.gen_tpu_ops import *
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=wildcard-import,unused-import,g-import-not-at-top
_tpu_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_tpu_ops.so"))
def _create_default_group_assignment():
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"cross_replica_sum should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
group_assignment = [list(range(num_shards))]
return group_assignment
def all_to_all(x,
concat_dimension,
split_dimension,
split_count,
group_assignment=None,
name=None):
"""Exchange data across TPU replicas.
Args:
x: The local tensor.
concat_dimension: The dimension number to concatenate.
split_dimension: The dimension number to split.
split_count: The number of splits; this number must equal the sub-group
size (group_assignment.get_shape()[1]).
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is concatenated by data from different replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.all_to_all(
x,
group_assignment,
concat_dimension=concat_dimension,
split_dimension=split_dimension,
split_count=split_count,
name=name)
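# Hedged usage sketch (not part of this module): inside a replicated TPU
# computation, all_to_all can redistribute a per-replica tensor so that each
# replica ends up with the i-th slice from every replica in its group,
# concatenated along the concat dimension. The 2-replica group and the tensor
# shape assumed below are illustrative only.
def _all_to_all_example(x):
  # x: a per-replica tensor whose leading dimension is divisible by 2; with
  # two replicas in one group, splitting and concatenating along dim 0
  # exchanges the halves between the replicas.
  return all_to_all(x,
                    concat_dimension=0,
                    split_dimension=0,
                    split_count=2,
                    group_assignment=[[0, 1]])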
@ops.RegisterGradient("AllToAll")
def _all_to_all_grad(op, grad):
# The gradient of an all-to-all is also an all-to-all, but with the
# split_dimension and concat_dimension swapped.
# The gradient with respect to group_assignment is None.
return [
gen_tpu_ops.all_to_all(
grad,
op.inputs[1],
concat_dimension=op.get_attr("split_dimension"),
split_dimension=op.get_attr("concat_dimension"),
split_count=op.get_attr("split_count")), None
]
def cross_replica_sum(x, group_assignment=None, name=None):
"""Sum the input tensor across replicas according to group_assignment.
Args:
x: The local tensor to sum.
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is summed across replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)
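# Hedged usage sketch (not part of this module): summing a per-replica loss
# across all replicas in the shard context. With no group_assignment given,
# the default is a single group spanning every replica; the replica count
# passed in is an assumption supplied by the caller.
def _cross_replica_mean_example(per_replica_loss, num_replicas):
  # Divide the cross-replica sum by the replica count to obtain a mean.
  return cross_replica_sum(per_replica_loss) / num_replicas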
def collective_permute(x, source_target_pairs, name=None):
"""Permute the input tensor across replicas given source_target_pairs.
For each source_target_pair <a, b>, we send replica a's input to replica b.
Each replica id must only appear once in the source column. Also it must
only appear once in the target column.
For the replica id not in the target column, this op returns a zero tensor
with the same shape and dtype of the input x.
For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs:
`[0, A, B, C]`.
Args:
x: The local tensor to be permuted.
source_target_pairs: 2d int lists with shape [num_pairs, 2].
source_target_pairs[i][0] represents the source replica id and
source_target_pairs[i][1] represents the target replica id.
name: Optional op name.
Returns:
A `Tensor` which is permuted.
"""
return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name)
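# Hedged usage sketch (not part of this module): rotating data one replica to
# the right in an assumed 4-replica setup, matching the docstring example.
# Replica 0 is not a target here, so it receives a zero tensor with the same
# shape and dtype as x.
def _collective_permute_shift_example(x):
  return collective_permute(x, source_target_pairs=[[0, 1], [1, 2], [2, 3]])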
@ops.RegisterGradient("CrossReplicaSum")
def _cross_replica_sum_grad(op, grad):
# The gradient of a cross-replica sum is also a cross-replica sum.
# The gradient with respect to group_assignment is None.
return [gen_tpu_ops.cross_replica_sum(grad, op.inputs[1]), None]
# This extra type checking exists to give a more helpful error message in
# the common case that uint8 and int64 values are fed via infeed. Remove when both
# types are supported.
_SUPPORTED_INFEED_DTYPES = set([
dtypes.bool, dtypes.int32, dtypes.int64, dtypes.bfloat16, dtypes.float32,
dtypes.complex64
])
def infeed_dequeue(dtype, shape, name=None):
"""A placeholder op for a value that will be fed into the computation.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A tensor that will be provided using the infeed mechanism.
Raises:
TypeError: If `dtype` is not a supported infeed type.
"""
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)
# pylint: disable=redefined-outer-name
def infeed_dequeue_tuple(dtypes, shapes, name=None):
"""A placeholder op for values fed into the TPU simultaneously as a tuple.
Args:
dtypes: A list of `tf.DType`s that has length `>= 1`.
The element types of each element in `outputs`.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `outputs`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
A list of tensors that will be provided using the infeed mechanism.
Raises:
TypeError: If a type in `dtypes` is not a supported infeed type.
"""
for dtype in dtypes:
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)
# pylint: enable=redefined-outer-name
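# Hedged usage sketch (not part of this module): dequeuing a tuple of tensors
# inside the TPU computation. The dtypes and shapes below are assumptions for
# illustration and must match whatever the host-side infeed enqueue sends.
def _infeed_dequeue_tuple_example():
  images, labels = infeed_dequeue_tuple(
      dtypes=[dtypes.float32, dtypes.int32],
      shapes=[[128, 224, 224, 3], [128]])
  return images, labels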
# pylint: disable=protected-access
def send_tpu_embedding_gradients(inputs,
config,
learning_rates=None,
name=None):
"""A placeholder op for feeding per-sample gradients to the embedding layer.
Args:
inputs: A TensorList of gradients with which to update embedding tables.
Contains one tensor per embedding table in the model.
config: Serialized TPUEmbeddingConfiguration proto.
learning_rates: A TensorList of float32 scalars, one for each embedding
table, containing the learning rates for each table when dynamic
learning rate is enabled through the OptimizationParameters in
TPUEmbeddingConfiguration. When the learning rate is constant, the list
should be empty (optional).
name: A name for the operation (optional).
Returns:
A SendTPUEmbeddingGradients operation.
"""
if learning_rates is None:
learning_rates = []
return gen_tpu_ops._send_tpu_embedding_gradients(
inputs=inputs, learning_rates=learning_rates, config=config, name=name)
send_tpu_embedding_gradients.__doc__ = (
gen_tpu_ops._send_tpu_embedding_gradients.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_integer_batch(batch,
device_ordinal,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
batch: A list of 1D tensors, one for each embedding table, containing the
indices into the tables.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingIntegerBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops._enqueue_tpu_embedding_integer_batch(
batch=batch,
device_ordinal=device_ordinal,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_integer_batch.__doc__ = (
gen_tpu_ops._enqueue_tpu_embedding_integer_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_batch(sample_indices,
embedding_indices,
aggregation_weights,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
and feature to which the corresponding embedding_indices and
aggregation_weights values belong. sample_indices[i] must equal b * nf +
f, where nf is the number of features from the corresponding table, f is
in [0, nf), and b is in [0, batch size).
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables.
aggregation_weights: A list of rank 1 Tensors containing per sample --
i.e. per (training example, feature) -- aggregation weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops._enqueue_tpu_embedding_sparse_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_batch.__doc__ = (
gen_tpu_ops._enqueue_tpu_embedding_sparse_batch.__doc__)
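# Worked example of the sample_indices encoding described above (values are
# assumptions for illustration only): with nf = 2 features in a table and a
# batch size of 3, the slot for batch element b = 1 and feature f = 1 is
# b * nf + f = 1 * 2 + 1 = 3, so every lookup belonging to that
# (training example, feature) pair carries sample index 3.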
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices,
embedding_indices,
aggregation_weights,
table_ids,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
to which the corresponding embedding_indices and aggregation_weights values
belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. It corresponds to sp_ids.values in embedding_lookup_sparse().
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse().
table_ids: A list of integers specifying the identifier of the embedding
table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to
lookup the corresponding input. The ith input is looked up using
table_ids[i]. The size of the table_ids list must be equal to that of
sample_indices, embedding_indices and aggregation_weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseTensorBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops._enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
table_ids=table_ids,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_tensor_batch.__doc__ = (
gen_tpu_ops._enqueue_tpu_embedding_sparse_tensor_batch.__doc__)
else:
# We have already built the appropriate libraries into the binary via CMake
# if we have built contrib, so we don't need this
pass
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""PyTest fixtures"""
import pytest # pylint: disable=import-error
from lib import constants
from lib.page.widget import info_widget
from lib.constants.test import batch
from lib.service.rest_service import (ProgramsService, AuditsService,
ControlsService)
from lib.utils import conftest_utils
from lib.utils import test_utils
# pylint: disable=redefined-outer-name
pytest_plugins = "selenium", "xdist", "xvfb", "timeout", "flask", \
"rerunfailures", "timeout", "repeat", "pycharm"
@pytest.yield_fixture(scope="session")
def db_drop():
"""Reset the DB"""
# todo
pass
@pytest.yield_fixture(scope="session")
def db_migrate():
"""Make sure the DB is up to date"""
# todo
pass
@pytest.yield_fixture(scope="function")
def selenium(selenium):
"""Setup test resources for running test in headless mode"""
# todo: here we should use selenium.maximize_window() and env variables to
# set xvfb resolution in the docker container. Setting the env vars
# currently doesn't work for selenium-docker
selenium.set_window_size(1440, 900)
yield selenium
@pytest.yield_fixture(scope="function")
def new_control(selenium):
"""Creates a new control object.
Returns:
lib.page.widget.Controls
"""
control_info_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.CONTROLS)
yield control_info_page
@pytest.yield_fixture(scope="function")
def new_program(selenium, new_control):
"""Creates a new program object and returns the program info page with the
saved modal"""
# pylint: disable=redefined-outer-name
modal = conftest_utils.get_lhn_accordion(
selenium, constants.element.Lhn.PROGRAMS)\
.create_new()
test_utils.ModalNewPrograms.enter_test_data(modal)
modal.save_and_close()
program_info_page = info_widget.Programs(selenium)
yield modal, program_info_page
@pytest.yield_fixture(scope="function")
def new_org_group(selenium):
"""Creates a new org group object.
Returns:
lib.page.widget.OrgGroupInfo
"""
org_group_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.ORG_GROUPS)
yield org_group_page
@pytest.yield_fixture(scope="function")
def new_risk(selenium):
"""Creates a new risk group object.
Returns:
lib.page.widget.Risks
"""
risk_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.RISKS)
yield risk_page
@pytest.yield_fixture(scope="function")
def new_issue(selenium):
"""Creates a new issue object.
Returns:
lib.page.widget.IssueInfo
"""
issue_info_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.ISSUES)
yield issue_info_page
@pytest.yield_fixture(scope="function")
def new_process(selenium):
"""Creates a new process object.
Returns:
lib.page.widget.Processes
"""
process_info_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.PROCESSES)
yield process_info_page
@pytest.yield_fixture(scope="function")
def new_data_asset(selenium):
"""Creates a new data asset object.
Returns:
lib.page.widget.DataAssetInfo
"""
data_asset_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.DATA_ASSETS)
yield data_asset_page
@pytest.yield_fixture(scope="function")
def new_system(selenium):
"""Creates a new system object.
Returns:
lib.page.widget.IssueInfo
"""
system_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.SYSTEMS)
yield system_page
@pytest.yield_fixture(scope="function")
def new_product(selenium):
"""Creates a new product object.
Returns:
lib.page.widget.Product
"""
product_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.PRODUCTS)
yield product_page
@pytest.yield_fixture(scope="function")
def new_project(selenium):
"""Creates a new project object.
Returns:
lib.page.widget.ProjectInfo
"""
project_page = conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.PROJECTS)
yield project_page
@pytest.yield_fixture(scope="function")
def battery_of_controls(selenium):
"""Creates 3 control objects"""
controls = []
for _ in xrange(batch.BATTERY):
controls.append(conftest_utils.create_lhn_object(
selenium, constants.element.Lhn.CONTROLS))
yield controls
@pytest.yield_fixture(scope="function")
def new_program_rest():
"""Creates Program via REST API"""
service = ProgramsService()
yield service.create_programs(1)[0]
@pytest.yield_fixture(scope="function")
def new_audit_rest(new_program_rest):
"""Creates Audit via REST API"""
service = AuditsService()
yield service.create_audits(1, program=new_program_rest)[0]
@pytest.yield_fixture(scope="function")
def new_control_rest():
"""Creates Control via REST API"""
service = ControlsService()
yield service.create_controls(1)[0]
@pytest.yield_fixture(scope="function")
def battery_of_controls_rest(count=batch.BATTERY):
"""Creates batch of Controls via REST API"""
service = ControlsService()
yield service.create_controls(count=count)
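# Hedged usage sketch (not part of this conftest): a test module elsewhere can
# request any of these fixtures by name and pytest will resolve them here.
# The attribute accessed below is an assumption for illustration only.
#
# def test_control_created_via_rest(new_control_rest):
#     assert new_control_rest.title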
import os
####### Database config. This assumes Postgres #######
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'commcarehq',
'USER': os.environ['POSTGRES_ENV_POSTGRES_USER'],
'PASSWORD': os.environ['POSTGRES_ENV_POSTGRES_PASSWORD'],
'HOST': 'postgres',
'PORT': os.environ['POSTGRES_PORT_5432_TCP_PORT']
}
}
####### Couch Config ######
COUCH_HTTPS = False # recommended production value is True if enabling https
COUCH_SERVER_ROOT = 'couchdb:{}'.format(os.environ['COUCHDB_PORT_5984_TCP_PORT']) #6984 for https couch
COUCH_USERNAME = 'commcarehq'
COUCH_PASSWORD = 'commcarehq'
COUCH_DATABASE_NAME = 'commcarehq'
### Staging Replication Setup for Production/Staging
#Staging domains you want to replicate
STAGING_DOMAINS = []
#COUCHDB URI {http|https}://username:password@host:optionalport/dbname
PRODUCTION_COUCHDB_URI = ""
####### # Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "[email protected]"
EMAIL_PASSWORD = "******"
EMAIL_SMTP_HOST = "smtp.gmail.com"
EMAIL_SMTP_PORT = 587
# Print emails to console so there is no danger of spamming, but you can still get registration URLs
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ADMINS = (('HQ Dev Team', '[email protected]'),)
BUG_REPORT_RECIPIENTS = ['[email protected]']
NEW_DOMAIN_RECIPIENTS = ['[email protected]']
EXCHANGE_NOTIFICATION_RECIPIENTS = ['[email protected]']
SERVER_EMAIL = '[email protected]' #the physical server emailing - differentiate if needed
DEFAULT_FROM_EMAIL = '[email protected]'
SUPPORT_EMAIL = "[email protected]"
EMAIL_SUBJECT_PREFIX = '[commcarehq] '
SERVER_ENVIRONMENT = 'changeme' #Modify this value if you are deploying multiple environments of HQ to the same machine. Identify the target type of this running environment
####### Log/debug setup ########
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# log directories must exist and be writeable!
DJANGO_LOG_FILE = "/tmp/commcare-hq.django.log"
LOG_FILE = "/tmp/commcare-hq.log"
SEND_BROKEN_LINK_EMAILS = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_PERIODIC_QUEUE = 'celery' # change this to something else if you want a different queue for periodic tasks
CELERY_FLOWER_URL = 'http://127.0.0.1:5555'
####### Django Compressor ########
COMPRESS_ENABLED = False # this will allow less.js to compile less files on the client side
####### Bitly ########
BITLY_LOGIN = 'dimagi' # set to None to disable bitly app url shortening (useful offline)
BITLY_APIKEY = '*******'
####### Jar signing config ########
#_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
#JAR_SIGN = {
# 'jad_tool': os.path.join(_ROOT_DIR, "corehq", "apps", "app_manager", "JadTool.jar"),
# 'key_store': os.path.join(os.path.dirname(os.path.dirname(_ROOT_DIR)), "DimagiKeyStore"),
# 'key_alias': "javarosakey",
# 'store_pass': "*******",
# 'key_pass': "*******",
#}
####### SMS Config ########
# Mach
SMS_GATEWAY_URL = "http://gw1.promessaging.com/sms.php"
SMS_GATEWAY_PARAMS = "id=******&pw=******&dnr=%(phone_number)s&msg=%(message)s&snr=DIMAGI"
# Unicel
UNICEL_CONFIG = {"username": "Dimagi",
"password": "******",
"sender": "Promo" }
####### Domain sync / de-id ########
DOMAIN_SYNCS = {
"domain_name": {
"target": "target_db_name",
"transform": "corehq.apps.domainsync.transforms.deidentify_domain"
}
}
DOMAIN_SYNC_APP_NAME_MAP = {"app_name": "new_app_name"}
####### Touchforms config - for CloudCare #######
XFORMS_PLAYER_URL = 'http://127.0.0.1:4444'
# email and password for an admin django user, such as one created with
# ./manage.py bootstrap <project-name> <email> <password>
TOUCHFORMS_API_USER = '[email protected]'
TOUCHFORMS_API_PASSWORD = 'password'
####### Misc / HQ-specific Config ########
DEFAULT_PROTOCOL = "http" # or https
OVERRIDE_LOCATION = "https://www.commcarehq.org"
# Set to something like "192.168.1.5:8000" (with your IP address).
# See corehq/apps/builds/README.md for more information.
BASE_ADDRESS = '{}:8000'.format(os.environ.get('BASE_HOST', 'localhost'))
# Set your analytics IDs here for GA and pingdom RUM
ANALYTICS_IDS = {
'GOOGLE_ANALYTICS_ID': '*******',
'PINGDOM_ID': '*****',
'ANALYTICS_ID_PUBLIC_COMMCARE': '*****',
'KISSMETRICS_KEY': '*****',
}
ANALYTICS_CONFIG = {
"HQ_INSTANCE": '', # e.g. "www" or "staging"
}
AXES_LOCK_OUT_AT_FAILURE = False
LUCENE_ENABLED = True
PREVIEWER_RE = r'^.*@dimagi\.com$'
GMAPS_API_KEY = '******'
MAPS_LAYERS = {
'Maps': {
'family': 'mapbox',
'args': {
'apikey': '*****'
}
},
'Satellite': {
'family': 'mapbox',
'args': {
'apikey': '*****'
}
},
}
FORMTRANSLATE_TIMEOUT = 5
LOCAL_APPS = (
# 'django_coverage', # Adds `python manage.py test_coverage` (settings below)
# 'debug_toolbar', # Adds a retractable panel to every page giving profiling & debugging info
# 'couchdebugpanel', # Adds couch info to said toolbar
# 'devserver', # Adds improved dev server that also prints SQL on the console (for AJAX, etc, when you cannot use debug_toolbar)
# 'django_cpserver', # Another choice for a replacement server
# 'dimagi.utils'
)
# list of domains to enable ADM reporting on
ADM_ENABLED_PROJECTS = []
# prod settings
SOIL_DEFAULT_CACHE = "redis"
SOIL_BACKEND = "soil.CachedDownload"
# reports cache
REPORT_CACHE = 'default' # or e.g. 'redis'
redis_cache = {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': 'redis:{}:0'.format(os.environ['REDIS_PORT_6379_TCP_PORT']),
'OPTIONS': {},
}
CACHES = {
'default': redis_cache,
'redis': redis_cache,
}
# on both a local and a distributed environment this should be localhost
ELASTICSEARCH_HOST = 'elasticsearch'
ELASTICSEARCH_PORT = 9200
# our production logstash aggregation
LOGSTASH_DEVICELOG_PORT = 10777
LOGSTASH_COUCHLOG_PORT = 10888
LOGSTASH_AUDITCARE_PORT = 10999
LOGSTASH_HOST = 'localhost'
LOCAL_PILLOWTOPS = {
# 'my_pillows': ['some.pillow.Class', ],
# 'and_more': []
}
# If there are existing doc_ids and case_ids you want to check directly, reference them
# in your localsettings for more accurate direct checks; otherwise view-based checks are used, which can be inaccurate.
ES_CASE_CHECK_DIRECT_DOC_ID = None
ES_XFORM_CHECK_DIRECT_DOC_ID = None
####### API throttling #####
CCHQ_API_THROTTLE_REQUESTS = 200 # number of requests allowed per timeframe
# Use a lower value in production. This is set
# to 200 to prevent AssertionError: 429 != 200
# test failures in development environments.
CCHQ_API_THROTTLE_TIMEFRAME = 10 # seconds
####### django-coverage config ########
COVERAGE_REPORT_HTML_OUTPUT_DIR='coverage-html'
COVERAGE_MODULE_EXCLUDES= ['tests$', 'settings$', 'urls$', 'locale$',
'common.views.test', '^django', 'management', 'migrations',
'^south', '^djcelery', '^debug_toolbar', '^rosetta']
####### Selenium tests config ########
SELENIUM_SETUP = {
# Firefox, Chrome, Ie, or Remote
'BROWSER': 'Chrome',
# Necessary if using Remote selenium driver
'REMOTE_URL': None,
# If not using Remote, allows you to open browsers in a hidden virtual X Server
'USE_XVFB': True,
'XVFB_DISPLAY_SIZE': (1024, 768),
}
SELENIUM_USERS = {
# 'WEB_USER' is optional; if not set, some tests that want a web user will
# try to use ADMIN instead
'ADMIN': {
'USERNAME': '[email protected]',
'PASSWORD': 'password',
'URL': 'http://localhost:8000',
'PROJECT': 'project_name',
'IS_SUPERUSER': False
},
'WEB_USER': {
'USERNAME': '[email protected]',
'PASSWORD': 'password',
'URL': 'http://localhost:8000',
'PROJECT': 'mike',
'IS_SUPERUSER': False
},
'MOBILE_WORKER': {
'USERNAME': 'user@project_name.commcarehq.org',
'PASSWORD': 'password',
'URL': 'http://localhost:8000'
}
}
SELENIUM_APP_SETTINGS = {
'reports': {
'MAX_PRELOAD_TIME': 20,
'MAX_LOAD_TIME': 30,
},
}
INTERNAL_DATA = {
"business_unit": [],
"product": ["CommCare", "CommConnect", "CommCare Supply", "RapidSMS", "Custom"],
"services": [],
"account_types": [],
"initiatives": [],
"contract_type": [],
"area": [
{
"name": "Health",
"sub_areas": ["Maternal, Newborn, & Child Health", "Family Planning", "HIV/AIDS"]
},
{
"name": "Other",
"sub_areas": ["Emergency Response"]
},
],
"country": ["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua & Deps", "Argentina", "Armenia",
"Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus",
"Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia Herzegovina", "Botswana", "Brazil",
"Brunei", "Bulgaria", "Burkina", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape Verde",
"Central African Rep", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo",
"Congo {Democratic Rep}", "Costa Rica", "Croatia", "Cuba", "Cyprus", "Czech Republic", "Denmark",
"Djibouti", "Dominica", "Dominican Republic", "East Timor", "Ecuador", "Egypt", "El Salvador",
"Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Fiji", "Finland", "France", "Gabon", "Gambia",
"Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana",
"Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland {Republic}",
"Israel", "Italy", "Ivory Coast", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati",
"Korea North", "Korea South", "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho",
"Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg", "Macedonia", "Madagascar", "Malawi",
"Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Mauritania", "Mauritius", "Mexico",
"Micronesia", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar, {Burma}",
"Namibia", "Nauru", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "Norway",
"Oman", "Pakistan", "Palau", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland",
"Portugal", "Qatar", "Romania", "Russian Federation", "Rwanda", "St Kitts & Nevis", "St Lucia",
"Saint Vincent & the Grenadines", "Samoa", "San Marino", "Sao Tome & Principe", "Saudi Arabia",
"Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia",
"Solomon Islands", "Somalia", "South Africa", "South Sudan", "Spain", "Sri Lanka", "Sudan", "Suriname",
"Swaziland", "Sweden", "Switzerland", "Syria", "Taiwan", "Tajikistan", "Tanzania", "Thailand", "Togo",
"Tonga", "Trinidad & Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine",
"United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu",
"Vatican City", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"]
}
#!/usr/bin/env python
from collections import namedtuple
import hashlib
import math
import unittest
from ct.crypto import error
from ct.crypto import merkle
class TreeHasherTest(unittest.TestCase):
sha256_empty_hash = ("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495"
"991b7852b855")
sha256_leaves = [
("",
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"),
("101112131415161718191a1b1c1d1e1f",
"3bfb960453ebaebf33727da7a1f4db38acc051d381b6da20d6d4e88f0eabfd7a")
]
sha256_nodes = [
("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
"202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f",
"1a378704c17da31e2d05b6d121c2bb2c7d76f6ee6fa8f983e596c2d034963c57")]
# array of bytestrings of the following literals in hex
test_vector_leaves = ["".join(chr(int(n, 16)) for n in s.split()) for s in [
"",
"00",
"10",
"20 21",
"30 31",
"40 41 42 43",
"50 51 52 53 54 55 56 57",
"60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f",
]]
test_vector_hashes = [
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
"fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125",
"aeb6bcfe274b70a14fb067a5e5578264db0fa9b51af5e0ba159158f329e06e77",
"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7",
"4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
"ddb89be403809e325750d3d263cd78929c2942b7942a34b77e122c9594a74c8c",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
]
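# Reference note (hedged, not used by the tests): these expected values follow
# the RFC 6962 hashing convention that merkle.TreeHasher is assumed to
# implement -- a leaf hash is SHA-256 over a 0x00 prefix plus the leaf data,
# and an interior node hash is SHA-256 over a 0x01 prefix plus the
# concatenation of the two child hashes. For example, the first entry above is
# hashlib.sha256(chr(0) + "").hexdigest() ==
# "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d".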
def test_empty_hash(self):
hasher = merkle.TreeHasher()
self.assertEqual(hasher.hash_empty().encode("hex"),
TreeHasherTest.sha256_empty_hash)
def test_hash_leaves(self):
hasher = merkle.TreeHasher()
for leaf, val in TreeHasherTest.sha256_leaves:
self.assertEqual(hasher.hash_leaf(leaf.decode("hex")).encode("hex"),
val)
def test_hash_children(self):
hasher = merkle.TreeHasher()
for left, right, val in TreeHasherTest.sha256_nodes:
self.assertEqual(hasher.hash_children(
left.decode("hex"), right.decode("hex")).encode("hex"), val)
def test_hash_full_invalid_index(self):
hasher = merkle.TreeHasher()
self.assertRaises(IndexError, hasher._hash_full, "abcd", -5, -1)
self.assertRaises(IndexError, hasher._hash_full, "abcd", -1, 1)
self.assertRaises(IndexError, hasher._hash_full, "abcd", 1, 5)
self.assertRaises(IndexError, hasher._hash_full, "abcd", 2, 1)
def test_hash_full_empty(self):
hasher = merkle.TreeHasher()
for i in xrange(0, 5):
self.assertEqual(hasher._hash_full("abcd", i, i)[0].encode("hex"),
TreeHasherTest.sha256_empty_hash)
def test_hash_full_tree(self):
hasher = merkle.TreeHasher()
self.assertEqual(hasher.hash_full_tree([]), hasher.hash_empty())
l = iter(hasher.hash_leaf(c) for c in "abcde").next
h = hasher.hash_children
root_hash = h(h(h(l(), l()), h(l(), l())), l())
self.assertEqual(hasher.hash_full_tree("abcde"), root_hash)
def test_hash_full_tree_test_vector(self):
hasher = merkle.TreeHasher()
for i in xrange(len(TreeHasherTest.test_vector_leaves)):
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
expected_hash = TreeHasherTest.test_vector_hashes[i].decode("hex")
self.assertEqual(hasher.hash_full_tree(test_vector), expected_hash)
class HexTreeHasher(merkle.TreeHasher):
def __init__(self, hashfunc=hashlib.sha256):
self.hasher = merkle.TreeHasher(hashfunc)
def hash_empty(self):
return self.hasher.hash_empty().encode("hex")
def hash_leaf(self, data):
return self.hasher.hash_leaf(data.decode("hex")).encode("hex")
def hash_children(self, left, right):
return self.hasher.hash_children(left.decode("hex"),
right.decode("hex")).encode("hex")
class CompactMerkleTreeTest(unittest.TestCase):
def setUp(self):
self.tree = merkle.CompactMerkleTree(HexTreeHasher())
def test_extend_from_empty(self):
for i in xrange(len(TreeHasherTest.test_vector_leaves)):
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
expected_hash = TreeHasherTest.test_vector_hashes[i]
self.tree = merkle.CompactMerkleTree()
self.tree.extend(test_vector)
self.assertEqual(self.tree.root_hash().encode("hex"), expected_hash)
def test_push_subtree_1(self):
for i in xrange(len(TreeHasherTest.test_vector_leaves)):
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
self.tree = merkle.CompactMerkleTree()
self.tree.extend(test_vector)
self.tree._push_subtree(["test leaf"])
self.assertEqual(len(self.tree), len(test_vector) + 1)
def test_extend_from_partial(self):
z = len(TreeHasherTest.test_vector_leaves)
for i in xrange(z):
self.tree = merkle.CompactMerkleTree()
# add up to i
test_vector = TreeHasherTest.test_vector_leaves[:i+1]
expected_hash = TreeHasherTest.test_vector_hashes[i]
self.tree.extend(test_vector)
self.assertEqual(self.tree.root_hash().encode("hex"), expected_hash)
# add up to z
test_vector = TreeHasherTest.test_vector_leaves[i+1:]
expected_hash = TreeHasherTest.test_vector_hashes[z-1]
self.tree.extend(test_vector)
self.assertEqual(self.tree.root_hash().encode("hex"), expected_hash)
class MerkleVerifierTest(unittest.TestCase):
# (old_tree_size, new_tree_size, old_root, new_root, proof)
# Test vectors lifted from the C++ branch.
sha256_proofs = [
(1, 1,
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
[]),
(1, 8,
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
["96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7",
"5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e",
"6b47aaf29ee3c2af9af889bc1fb9254dabd31177f16232dd6aab035ca39bf6e4"]),
(6, 8,
"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
["0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]),
(2, 5,
"fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125",
"4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
["5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e",
"bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"])
]
# Data for leaf inclusion proof test
sha256_audit_path = [
"1a208aeebcd1b39fe2de247ee8db9454e1e93a312d206b87f6ca9cc6ec6f1ddd",
"0a1b78b383f580856f433c01a5741e160d451c185910027f6cc9f828687a40c4",
"3d1745789bc63f2da15850de1c12a5bf46ed81e1cc90f086148b1662e79aab3d",
"9095b61e14d8990acf390905621e62b1714fb8e399fbb71de5510e0aef45affe",
"0a332b91b8fab564e6afd1dd452449e04619b18accc0ff9aa8393cd4928451f2",
"2336f0181d264aed6d8f3a6507ca14a8d3b3c3a23791ac263e845d208c1ee330",
"b4ce56e300590500360c146c6452edbede25d4ed83919278749ee5dbe178e048",
"933f6ddc848ea562e4f9c5cfb5f176941301dad0c6fdb9d1fbbe34fac1be6966",
"b95a6222958a86f74c030be27c44f57dbe313e5e7c7f4ffb98bcbd3a03bb52f2",
"daeeb3ce5923defd0faeb8e0c210b753b85b809445d7d3d3cd537a9aabaa9c45",
"7fadd0a13e9138a2aa6c3fdec4e2275af233b94812784f66bcca9aa8e989f2bc",
"1864e6ba3e32878610546539734fb5eeae2529991f130c575c73a7e25a2a7c56",
"12842d1202b1dc6828a17ab253c02e7ce9409b5192430feba44189f39cc02d66",
"29af64b16fa3053c13d02ac63aa75b23aa468506e44c3a2315edc85d2dc22b11",
"b527b99934a0bd9edd154e449b0502e2c499bba783f3bc3dfe23364b6b532009",
"4584db8ae8e351ace08e01f306378a92bfd43611714814f3d834a2842d69faa8",
"86a9a41573b0d6e4292f01e93243d6cc65b30f06606fc6fa57390e7e90ed580f",
"a88b98fbe84d4c6aae8db9d1605dfac059d9f03fe0fcb0d5dff1295dacba09e6",
"06326dc617a6d1f7021dc536026dbfd5fffc6f7c5531d48ef6ccd1ed1569f2a1",
"f41fe8fdc3a2e4e8345e30216e7ebecffee26ff266eeced208a6c2a3cf08f960",
"40cf5bde8abb76983f3e98ba97aa36240402975674e120f234b3448911090f8d",
"b3222dc8658538079883d980d7fdc2bef9285344ea34338968f736b04aeb387a"]
raw_hex_leaf = (
"00000000013de9d2b29b000000055b308205573082043fa00302010202072b777b56df"
"7bc5300d06092a864886f70d01010505003081ca310b30090603550406130255533110"
"300e060355040813074172697a6f6e61311330110603550407130a53636f7474736461"
"6c65311a3018060355040a1311476f44616464792e636f6d2c20496e632e3133303106"
"0355040b132a687474703a2f2f6365727469666963617465732e676f64616464792e63"
"6f6d2f7265706f7369746f72793130302e06035504031327476f204461646479205365"
"637572652043657274696669636174696f6e20417574686f726974793111300f060355"
"040513083037393639323837301e170d3133303131343038353035305a170d31353031"
"31343038353035305a305331163014060355040a130d7777772e69646e65742e6e6574"
"3121301f060355040b1318446f6d61696e20436f6e74726f6c2056616c696461746564"
"311630140603550403130d7777772e69646e65742e6e657430820122300d06092a8648"
"86f70d01010105000382010f003082010a0282010100d4e4a4b1bbc981c9b8166f0737"
"c113000aa5370b21ad86a831a379de929db258f056ba0681c50211552b249a02ec00c5"
"37e014805a5b5f4d09c84fdcdfc49310f4a9f9004245d119ce5461bc5c42fd99694b88"
"388e035e333ac77a24762d2a97ea15622459cc4adcd37474a11c7cff6239f810120f85"
"e014d2066a3592be604b310055e84a74c91c6f401cb7f78bdb45636fb0b1516b04c5ee"
"7b3fa1507865ff885d2ace21cbb28fdaa464efaa1d5faab1c65e4c46d2139175448f54"
"b5da5aea956719de836ac69cd3a74ca049557cee96f5e09e07ba7e7b4ebf9bf167f4c3"
"bf8039a4cab4bec068c899e997bca58672bd7686b5c85ea24841e48c46f76830390203"
"010001a38201b6308201b2300f0603551d130101ff04053003010100301d0603551d25"
"0416301406082b0601050507030106082b06010505070302300e0603551d0f0101ff04"
"04030205a030330603551d1f042c302a3028a026a0248622687474703a2f2f63726c2e"
"676f64616464792e636f6d2f676473312d38332e63726c30530603551d20044c304a30"
"48060b6086480186fd6d010717013039303706082b06010505070201162b687474703a"
"2f2f6365727469666963617465732e676f64616464792e636f6d2f7265706f7369746f"
"72792f30818006082b0601050507010104743072302406082b06010505073001861868"
"7474703a2f2f6f6373702e676f64616464792e636f6d2f304a06082b06010505073002"
"863e687474703a2f2f6365727469666963617465732e676f64616464792e636f6d2f72"
"65706f7369746f72792f67645f696e7465726d6564696174652e637274301f0603551d"
"23041830168014fdac6132936c45d6e2ee855f9abae7769968cce730230603551d1104"
"1c301a820d7777772e69646e65742e6e6574820969646e65742e6e6574301d0603551d"
"0e041604144d3ae8a87ddcf046764021b87e7d8d39ddd18ea0300d06092a864886f70d"
"01010505000382010100ad651b199f340f043732a71178c0af48e22877b9e5d99a70f5"
"d78537c31d6516e19669aa6bfdb8b2cc7a145ba7d77b35101f9519e03b58e692732314"
"1383c3ab45dc219bd5a584a2b6333b6e1bbef5f76e89b3c187ef1d3b853b4910e895a4"
"57dbe7627e759f56c8484c30b22a74fb00f7b1d7c41533a1fd176cd2a2b06076acd7ca"
"ddc6ca6d0c2a815f9eb3ef0d03d27e7eebd7824c78fdb51679c03278cfbb2d85ae65a4"
"7485cb733fc1c7407834f7471ababd68f140983817c6f388b2f2e2bfe9e26608f9924f"
"16473462d136427d1f2801e4b870b078c20ec4ba21e22ab32a00b76522d523825bcabb"
"8c7b6142d624be8d2af69ecc36fb5689572a0f59c00000")
leaf_hash = (
"7a395c866d5ecdb0cccb623e011dbc392cd348d1d1d72776174e127a24b09c78")
leaf_index = 848049
tree_size = 3630887
expected_root_hash = (
"78316a05c9bcf14a3a4548f5b854a9adfcd46a4c034401b3ce7eb7ac2f1d0ecb")
def setUp(self):
self.verifier = merkle.MerkleVerifier(HexTreeHasher())
self.STH = namedtuple("STH", ["sha256_root_hash", "tree_size"])
self.ones = "11" * 32
self.zeros = "00" * 32
def test_verify_tree_consistency(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
for test_vector in MerkleVerifierTest.sha256_proofs:
self.assertTrue(verifier.verify_tree_consistency(*test_vector))
def test_verify_tree_consistency_always_accepts_empty_tree(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
# Give some bogus proof too; it should be ignored.
self.assertTrue(verifier.verify_tree_consistency(
0, 1,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
["6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"]
))
def test_verify_tree_consistency_for_equal_tree_sizes(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
# Equal tree sizes and hashes, and a bogus proof that should be ignored.
self.assertTrue(verifier.verify_tree_consistency(
3, 3,
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
["6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"]
))
# Equal tree sizes but different hashes.
self.assertRaises(
error.ConsistencyError, verifier.verify_tree_consistency, 3, 3,
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01e",
"6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
[])
def test_verify_tree_consistency_newer_tree_is_smaller(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
self.assertRaises(
ValueError, verifier.verify_tree_consistency, 5, 2,
"4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4",
"fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125",
["5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e",
"bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"]
)
def test_verify_tree_consistency_proof_too_short(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
self.assertRaises(
error.ProofError, verifier.verify_tree_consistency, 6, 8,
"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
["0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0"]
)
def test_verify_tree_consistency_bad_second_hash(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
# A bit has been flipped in the second hash.
self.assertRaises(
error.ProofError, verifier.verify_tree_consistency, 6, 8,
"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604329",
["0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]
)
def test_verify_tree_consistency_both_hashes_bad(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
# A bit has been flipped in both hashes.
self.assertRaises(
error.ProofError, verifier.verify_tree_consistency, 6, 8,
"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ee",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604329",
["0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]
)
def test_verify_tree_consistency_bad_first_hash(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
# A bit has been flipped in the first hash.
self.assertRaises(
error.ConsistencyError, verifier.verify_tree_consistency, 6, 8,
"76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ee",
"5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328",
["0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a",
"ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0",
"d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"]
)
def test_calculate_root_hash_good_proof(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
self.assertEqual(
verifier._calculate_root_hash_from_audit_path(
self.leaf_hash, self.leaf_index, self.sha256_audit_path[:],
self.tree_size),
self.expected_root_hash)
def test_calculate_root_too_short_proof(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
leaf_index = self.leaf_index + int(
math.pow(2, len(self.sha256_audit_path) + 1))
self.assertRaises(
error.ProofError,
verifier._calculate_root_hash_from_audit_path,
self.leaf_hash, leaf_index, self.sha256_audit_path[:],
self.tree_size)
def test_verify_leaf_inclusion_good_proof(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
sth = self.STH(self.expected_root_hash, self.tree_size)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.raw_hex_leaf, self.leaf_index, self.sha256_audit_path,
sth))
def test_verify_leaf_inclusion_bad_proof(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
        # Provide an incorrect root hash so that verification fails.
sth = self.STH(self.zeros, self.tree_size)
self.assertRaises(
error.ProofError, verifier.verify_leaf_inclusion,
self.raw_hex_leaf, self.leaf_index, self.sha256_audit_path, sth)
def test_verify_leaf_inclusion_incorrect_length_proof(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
sth = self.STH(self.zeros, 4)
# Too long a proof
self.assertRaises(
error.ProofError, verifier.verify_leaf_inclusion,
self.ones, 0, [self.zeros, self.zeros, self.zeros], sth)
# Too short a proof
self.assertRaises(
error.ProofError, verifier.verify_leaf_inclusion,
self.ones, 0, [self.zeros], sth)
def test_verify_leaf_inclusion_single_node_in_tree(self):
# If there is only one entry in the tree, the tree root hash should be
# equal to the leaf hash.
verifier = merkle.MerkleVerifier(HexTreeHasher())
sth = self.STH(self.leaf_hash, 1)
self.assertTrue(
verifier.verify_leaf_inclusion(self.raw_hex_leaf, 0, [], sth))
def test_verify_leaf_inclusion_rightmost_node_in_tree(self):
# Show that verify_leaf_inclusion works when required to check a proof
# for the right-most node: In a tree of 8 nodes, ask for inclusion
# proof check for leaf 7.
verifier = merkle.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_c3 = hh.hash_children(self.zeros, h_s1)
h_c2 = hh.hash_children(self.zeros, h_c3)
h_root = hh.hash_children(self.zeros, h_c2)
sth = self.STH(h_root, 8)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.ones, 7, [self.zeros, self.zeros, self.zeros], sth))
def test_verify_leaf_inclusion_rightmost_node_in_unbalanced_odd_tree(
self):
# Show that verify_leaf_inclusion works when required to check a proof
# for the right-most, even-indexed node: In a tree of 5 nodes, ask for
# inclusion proof check for leaf 4 (the 5th).
verifier = merkle.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_root = hh.hash_children(self.zeros, h_s1)
sth = self.STH(h_root, 5)
self.assertTrue(
verifier.verify_leaf_inclusion(self.ones, 4, [self.zeros, ], sth))
def test_verify_leaf_inclusion_rightmost_node_in_unbalanced_tree_odd_node(
self):
# Show that verify_leaf_inclusion works when required to check a proof
# for the right-most, odd-indexed node: In a tree of 6 nodes, ask for
# inclusion proof check for leaf 5 (the 6th).
verifier = merkle.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_l2 = hh.hash_children(self.zeros, h_s1)
h_root = hh.hash_children(self.zeros, h_l2)
sth = self.STH(h_root, 6)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.ones, 5, [self.zeros, self.zeros], sth))
def test_verify_leaf_inclusion_rightmost_node_in_unbalanced_even_tree(
self):
# Show that verify_leaf_inclusion works when required to check a proof
        # for the right-most, even-indexed node: In a tree of 6 nodes, ask for
# inclusion proof check for leaf 4 (the 5th).
verifier = merkle.MerkleVerifier(HexTreeHasher())
hh = HexTreeHasher()
h_s1 = hh.hash_leaf(self.ones)
h_l2 = hh.hash_children(h_s1, self.zeros)
h_root = hh.hash_children(self.zeros, h_l2)
sth = self.STH(h_root, 6)
self.assertTrue(
verifier.verify_leaf_inclusion(
self.ones, 4, [self.zeros, self.zeros], sth))
def test_verify_leaf_inclusion_throws_on_bad_indices(self):
verifier = merkle.MerkleVerifier(HexTreeHasher())
sth = self.STH("", 6)
self.assertRaises(ValueError,
verifier.verify_leaf_inclusion, "", -3, [], sth)
negative_sth = self.STH("", -3)
self.assertRaises(ValueError,
verifier.verify_leaf_inclusion, "", 3, [], negative_sth)
def test_verify_leaf_inclusion_all_nodes_all_tree_sizes_up_to_4(self):
leaves = ["aa", "bb", "cc", "dd"]
hh = HexTreeHasher()
leaf_hashes = [hh.hash_leaf(l) for l in leaves]
hc = hh.hash_children
proofs_per_tree_size = {
1: [[] ],
2: [[leaf_hashes[1]], [leaf_hashes[0]]],
3: [[leaf_hashes[1], leaf_hashes[2]], # leaf 0
[leaf_hashes[0], leaf_hashes[2]], # leaf 1
[hc(leaf_hashes[0], leaf_hashes[1])]], # leaf 2
4: [[leaf_hashes[1], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 0
[leaf_hashes[0], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 1
[leaf_hashes[3], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 2
[leaf_hashes[2], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 3
]
}
tree = merkle.CompactMerkleTree(hasher=HexTreeHasher())
verifier = merkle.MerkleVerifier(HexTreeHasher())
# Increase the tree by one leaf each time
for i in range(4):
tree.append(leaves[i])
tree_size = i + 1
# ... and check inclusion proof validates for each node
# of the tree
for j in range(tree_size):
proof = proofs_per_tree_size[tree_size][j]
sth = self.STH(tree.root_hash(), tree_size)
self.assertTrue(
verifier.verify_leaf_inclusion(
leaves[j], j, proof, sth))
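    # Illustrative sketch (not a test): the proofs_per_tree_size table above can
    # be checked by hand. This helper recomputes the 4-leaf root from leaf 2 and
    # its audit path, mirroring what the verifier's
    # _calculate_root_hash_from_audit_path does for this particular tree shape.
    def _example_recompute_root_for_leaf_2_of_4(self):
        hh = HexTreeHasher()
        leaf_hashes = [hh.hash_leaf(l) for l in ["aa", "bb", "cc", "dd"]]
        audit_path = [leaf_hashes[3],
                      hh.hash_children(leaf_hashes[0], leaf_hashes[1])]
        # Leaf 2 is a left child, so its sibling (leaf 3) is hashed in on the
        # right; the resulting parent is a right child, so the remaining proof
        # entry is hashed in on the left.
        node = hh.hash_children(leaf_hashes[2], audit_path[0])
        return hh.hash_children(audit_path[1], node)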
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
import multiprocessing
#import concurrent.futures
import logging
import weakref
import functools
import claripy
import sys
from ..sim_state import SimState
l = logging.getLogger("angr.surveyor")
#
# Surveyor debugging
#
STOP_RUNS = False
PAUSE_RUNS = False
def enable_singlestep():
global PAUSE_RUNS
PAUSE_RUNS = True
def disable_singlestep():
global PAUSE_RUNS
PAUSE_RUNS = False
def stop_analyses():
global STOP_RUNS
STOP_RUNS = True
def resume_analyses():
global STOP_RUNS
STOP_RUNS = False
import signal
def handler(signum, frame): # pylint: disable=W0613,
if signum == signal.SIGUSR1:
stop_analyses()
elif signum == signal.SIGUSR2:
enable_singlestep()
try:
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGUSR2, handler)
except AttributeError:
l.warning("Platform doesn't support SIGUSR")
# function that produces unpredictable results that should appease pylint's
# static analysis and stop giving us those awful errors!!!!
def dummy_func(*args, **kwargs):
    return list(args) + list(kwargs)
#
# Surveyor list
#
class Surveyors(object):
def __init__(self, project):
self._project = project
self.started = [ ]
self.Explorer = dummy_func
self.Caller = dummy_func
self.Escaper = dummy_func
for surveyor_name,surveyor in all_surveyors.items():
setattr(self, surveyor_name, functools.partial(self._start_surveyor, surveyor))
def _surveyor_finished(self, proxy):
self.started.remove(proxy)
def _start_surveyor(self, surveyor, *args, **kwargs):
"""
        Calls a surveyor and adds the result to the .started list. See the
        specific surveyor's documentation for its arguments.
"""
s = surveyor(self._project, *args, **kwargs)
self.started.append(weakref.proxy(s, self._surveyor_finished))
return s
def __getstate__(self):
return self._project
def __setstate__(self, s):
self.__init__(s)
class Surveyor(object):
"""
The surveyor class eases the implementation of symbolic analyses. This
provides a base upon which analyses can be implemented.
Surveyors provide at least the following members:
:ivar active: The paths that are still active in the analysis.
    :ivar deadended: The paths that have deadended (produced no successors).
    :ivar spilled: The paths that have been suspended ("spilled") because too many paths were active.
:ivar errored: The paths that have at least one error-state exit.
:ivar pruned: The paths that were pruned because their ancestors were unsat.
:ivar unconstrained: The paths that have a successor with an unconstrained instruction pointer.
    A Surveyor has the following overloadable properties and methods:
:ivar done: returns True if the analysis is done (by default, this is when self.active is empty).
:ivar run: runs a loop of tick()ing and spill()ing until self.done is True.
:ivar tick: ticks all paths forward. The default implementation calls tick_path() on every path.
    A Surveyor has the following overloadable functions:
:func:`tick_path` moves a provided path forward, returning a set of new paths.
    :func:`spill` spills/unspills paths, in-place. The default implementation calls :func:`spill_paths` with the
    current active and spilled lists and adopts whatever it returns.
    :func:`spill_paths` takes the active and spilled paths and returns the new (active, spilled) pair.
    An analysis can overload either the specific sub-portions of the surveyor
    (i.e., the tick_path and spill_paths functions) or bigger and bigger pieces
    to implement more and more customizable analyses. A minimal example
    subclass is sketched at the end of this module.
"""
# TODO: what about errored? It's a problem cause those paths are duplicates, and could cause confusion...
path_lists = ['active', 'deadended', 'spilled', 'errored', 'unconstrained', 'suspended', 'pruned' ]
def __init__(self, project, start=None, max_active=None, max_concurrency=None, pickle_paths=None,
save_deadends=None, enable_veritesting=False, veritesting_options=None, keep_pruned=None):
"""
Creates the Surveyor.
:param project: the angr.Project to analyze.
:param start: a path (or set of paths) to start the analysis from
:param max_active: the maximum number of paths to explore at a time
:param max_concurrency: the maximum number of worker threads
:param pickle_paths: pickle spilled paths to save memory
:param save_deadends: save deadended paths
:param enable_veritesting: use static symbolic execution to speed up exploration
:param veritesting_options: special options to be passed to Veritesting
:param keep_pruned: keep pruned unsat states
"""
self._project = project
self._max_concurrency = 1 if max_concurrency is None else max_concurrency
self._max_active = multiprocessing.cpu_count() if max_active is None else max_active
self._pickle_paths = False if pickle_paths is None else pickle_paths
self._save_deadends = True if save_deadends is None else save_deadends
self._keep_pruned = False if keep_pruned is None else keep_pruned
self._enable_veritesting = enable_veritesting
self._veritesting_options = { } if veritesting_options is None else veritesting_options
# the paths
self.active = []
self.deadended = []
self.spilled = []
self.errored = []
self.pruned = []
self.suspended = []
self.unconstrained = []
self.split_paths = {}
self._current_step = 0
self._hierarchy = StateHierarchy()
if isinstance(start, SimState):
self.active.append(start)
elif isinstance(start, (tuple, list, set)):
self.active.extend(start)
elif start is None:
self.active.append(self._project.factory.entry_state())
else:
raise AngrError('invalid "start" argument')
#
# Quick list access
#
@property
def _a(self):
return self.active[0]
@property
def _d(self):
return self.deadended[0]
@property
def _spl(self):
return self.spilled[0]
@property
def _e(self):
return self.errored[0]
#
# Overall analysis.
#
def pre_tick(self):
"""
Provided for analyses to use for pre-tick actions.
"""
pass
def post_tick(self):
"""
        Provided for analyses to use for post-tick actions.
"""
pass
def step(self):
"""
Takes one step in the analysis (called by run()).
"""
self.pre_tick()
self.tick()
#self.filter()
self.spill()
self.post_tick()
self._current_step += 1
l.debug("After iteration: %s", self)
return self
def run(self, n=None):
"""
Runs the analysis through completion (until done() returns True) or, if n is provided, n times.
:param n: the maximum number of ticks
:returns: itself for chaining
"""
global STOP_RUNS, PAUSE_RUNS # pylint: disable=W0602,
# We do a round of filtering first
self.active = self.filter_paths(self.active)
while not self.done and (n is None or n > 0):
self.step()
if STOP_RUNS:
l.warning("%s stopping due to STOP_RUNS being set.", self)
l.warning("... please call resume_analyses() and then this.run() if you want to resume execution.")
break
if PAUSE_RUNS:
l.warning("%s pausing due to PAUSE_RUNS being set.", self)
l.warning("... please call disable_singlestep() before continuing if you don't want to single-step.")
try:
import ipdb as pdb # pylint: disable=F0401,
except ImportError:
import pdb
pdb.set_trace()
if n is not None:
n -= 1
return self
@property
def done(self):
"""
True if the analysis is done.
"""
return len(self.active) == 0
#
# Utility functions.
#
def __repr__(self):
return "%d active, %d spilled, %d deadended, %d errored, %d unconstrained" % (
len(self.active), len(self.spilled), len(self.deadended), len(self.errored), len(self.unconstrained))
#
# Analysis progression
#
def tick(self):
"""
Takes one step in the analysis. Typically, this moves all active paths forward.
:return: itself, for chaining
"""
new_active = []
#with concurrent.futures.ThreadPoolExecutor(max_workers=self._max_concurrency) as executor:
# future_to_path = {executor.submit(self.safe_tick_path, p): p for p in self.active}
# for future in concurrent.futures.as_completed(future_to_path):
# p = future_to_path[future]
# successors = future.result()
for state in self.active:
#if state.errored:
# if isinstance(state.error, PathUnreachableError):
# if self._keep_pruned:
# self.pruned.append(state)
# else:
# self._hierarchy.unreachable_state(state)
# self._hierarchy.simplify()
# self.errored.append(state)
# continue
try:
all_successors = self._step_path(state)
except (SimUnsatError, claripy.UnsatError, PathUnreachableError):
if self._keep_pruned:
self.pruned.append(state)
self._hierarchy.unreachable_state(state)
self._hierarchy.simplify()
continue
except (AngrError, SimError, claripy.ClaripyError) as e:
self.errored.append(ErrorRecord(state, e, sys.exc_info()[2]))
continue
except (TypeError, ValueError, ArithmeticError, MemoryError) as e:
self.errored.append(ErrorRecord(state, e, sys.exc_info()[2]))
continue
if not all_successors.flat_successors and not all_successors.unconstrained_successors:
l.debug("State %s has deadended.", state)
self.suspend_path(state)
self.deadended.append(state)
else:
if self._enable_veritesting: # and len(p.successors) > 1:
# Try to use Veritesting!
if hasattr(self, '_find') and hasattr(self, '_avoid'):
# pylint: disable=no-member
boundaries = [ ]
if self._find is not None:
boundaries.extend(list(self._find))
if self._avoid is not None:
boundaries.extend(list(self._avoid))
veritesting = self._project.analyses.Veritesting(state,
boundaries=boundaries,
**self._veritesting_options)
else:
veritesting = self._project.analyses.Veritesting(state,
**self._veritesting_options)
if veritesting.result and veritesting.final_manager:
pg = veritesting.final_manager
self.deadended.extend(pg.deadended)
self.errored.extend(pg.errored)
succ_list = pg.successful + pg.deviated
for suc in succ_list:
l.info('Veritesting yields a new IP: 0x%x', suc.addr)
successors = self._tick_path(state, successors=succ_list)
else:
successors = self.tick_path(state, successors=all_successors.flat_successors)
else:
successors = self.tick_path(state, successors=all_successors.flat_successors)
new_active.extend(successors)
if len(all_successors.unconstrained_successors) > 0:
self.unconstrained.append(state)
self.active = new_active
return self
def _step_path(self, state): #pylint:disable=no-self-use
return self._project.factory.successors(state)
def _tick_path(self, state, successors=None):
if successors is None:
successors = self._step_path(state).flat_successors
elif type(successors) not in (list, tuple, set):
successors = successors.flat_successors
l.debug("Ticking state %s", state)
for s in successors:
self._hierarchy.add_state(s)
self._hierarchy.simplify()
l.debug("... state %s has produced %d successors.", state, len(successors))
l.debug("... addresses: %s", ["%#x" % s.addr for s in successors])
filtered_successors = self.filter_paths(successors)
l.debug("Remaining: %d successors out of %d", len(filtered_successors), len(successors))
# track the path ID for visualization
# TODO: what on earth do we do about this
#if len(filtered_successors) == 1:
# filtered_successors[0].path_id = p.path_id
#else:
# self.split_paths[p.path_id] = [sp.path_id for sp in filtered_successors]
return filtered_successors
def tick_path(self, state, **kwargs):
"""
        Ticks a single state forward. Returns the list of successor states that survived filtering.
"""
return self._tick_path(state, **kwargs)
def prune(self):
"""
Prune unsat paths.
"""
for state in list(self.active):
if not state.satisfiable():
self._hierarchy.unreachable_state(state)
self._hierarchy.simplify()
self.active.remove(state)
self.pruned.append(state)
for state in list(self.spilled):
if not state.satisfiable():
self._hierarchy.unreachable_state(state)
self._hierarchy.simplify()
self.spilled.remove(state)
self.pruned.append(state)
###
### Path termination.
###
def filter_path(self, state): # pylint: disable=W0613,R0201
"""
Returns True if the given path should be kept in the analysis, False
otherwise.
"""
return True
def filter_paths(self, states):
"""
        Given a list of paths, filters them and returns those that should be kept.
"""
return [state for state in states if self.filter_path(state)]
#def filter(self):
# """
# Filters the active paths, in-place.
# """
# old_active = self.active[ :: ]
# l.debug("before filter: %d paths", len(self.active))
# self.active = self.filter_paths(self.active)
# l.debug("after filter: %d paths", len(self.active))
# for a in old_active:
# if a not in self.active:
# self.deadended.append(a)
###
### State explosion control (spilling paths).
###
def path_comparator(self, a, b): # pylint: disable=W0613,R0201
"""
This function should compare paths a and b, to determine which should
have a higher priority in the analysis. It's used as the cmp argument
to sort.
"""
return 0
def prioritize_paths(self, paths):
"""
This function is called to sort a list of paths, to prioritize
the analysis of paths. Should return a list of paths, with higher-
priority paths first.
"""
paths.sort(cmp=self.path_comparator)
return paths
def spill_paths(self, active, spilled): # pylint: disable=R0201
"""
Called with the currently active and spilled paths to spill some
paths. Should return the new active and spilled paths.
"""
l.debug("spill_paths received %d active and %d spilled paths.", len(active), len(spilled))
prioritized = self.prioritize_paths(active + spilled)
new_active = prioritized[:self._max_active]
new_spilled = prioritized[self._max_active:]
l.debug("... %d active and %d spilled paths.", len(new_active), len(new_spilled))
return new_active, new_spilled
def spill(self):
"""
Spills/unspills paths, in-place.
"""
new_active, new_spilled = self.spill_paths(self.active, self.spilled)
num_suspended = 0
num_resumed = 0
for p in new_active:
if p in self.spilled:
num_resumed += 1
#p.resume(self._project)
for p in new_spilled:
if p in self.active:
num_suspended += 1
self.suspend_path(p)
l.debug("resumed %d and suspended %d", num_resumed, num_suspended)
self.active, self.spilled = new_active, new_spilled
def suspend_path(self, state): #pylint:disable=no-self-use
"""
Suspends and returns a state.
:param state: the state
:returns: the state
"""
# TODO: Path doesn't provide suspend() now. What should we replace it with?
# p.suspend(do_pickle=self._pickle_paths)
# TODO: that todo was from... at least 3 or 4 refactors ago, what is this supposed to do
state.downsize()
return state
from ..errors import AngrError, PathUnreachableError, SimUnsatError, SimError
from ..state_hierarchy import StateHierarchy
from . import all_surveyors
from ..manager import ErrorRecord
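# Illustrative sketch (not part of the module): the Surveyor docstring above
# describes the intended extension points. A minimal analysis might override
# only filter_path() and path_comparator(), leaving tick()/spill() untouched.
# The address bound used here is a made-up parameter, purely for illustration.
class _ExampleBoundedSurveyor(Surveyor):
    """Keeps only states whose instruction pointer stays below a fixed bound,
    and prefers lower-address states when spilling."""
    def __init__(self, project, max_addr, **kwargs):
        super(_ExampleBoundedSurveyor, self).__init__(project, **kwargs)
        self._max_addr = max_addr
    def filter_path(self, state):
        # Drop states that wandered past the (hypothetical) bound.
        return state.addr < self._max_addr
    def path_comparator(self, a, b):
        # Lower addresses sort first; spill_paths() then keeps the first
        # self._max_active entries active and spills the rest.
        return (a.addr > b.addr) - (a.addr < b.addr)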
|
|
"""Unit test for lvm - Linux Volume Manager
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
import mock
import treadmill
from treadmill import lvm
class LVMTest(unittest.TestCase):
"""Tests for teadmill.fs."""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_pvcreate(self):
"""Test LVM Physical Volume creation"""
lvm.pvcreate('some_blockdev')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'pvcreate',
'--force',
'--yes',
'some_blockdev',
]
)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_pvremove(self):
"""Test LVM Physical Volume removal"""
lvm.pvremove('some_blockdev')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'pvremove',
'--force',
'some_blockdev',
]
)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_vgcreate(self):
"""Test LVM Volume Group creation"""
lvm.vgcreate('some_group', 'some_blockdev')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'vgcreate',
'--autobackup', 'n',
'some_group',
'some_blockdev',
]
)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_vgremove(self):
"""Test LVM Volume Group deletion"""
lvm.vgremove('some_group')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'vgremove',
'--force',
'some_group',
]
)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_vgactivate(self):
"""Test LVM Volume Group activation"""
lvm.vgactivate('some_group')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'vgchange',
'--activate', 'y',
'some_group',
]
)
@mock.patch('treadmill.subproc.check_output', mock.Mock())
def test_vgdisplay(self):
"""Test display of LVM group information.
"""
treadmill.subproc.check_output.return_value = (
' test:r/w:772:-1:0:0:0:-1:0:1:1:98304:4096:'
'24:0:24:Vsj4xA-45Ad-v4Rp-VOOf-XzEf-Gxwr-erL7Zu\n'
)
vg = lvm.vgdisplay('test')
treadmill.subproc.check_output.assert_called_with(
[
'lvm',
'vgdisplay',
'--colon',
'test',
]
)
self.assertEqual(
vg,
{
'access': 'r/w',
'extent_alloc': 0,
'extent_free': 24,
'extent_nb': 24,
'extent_size': 4096,
'lv_cur': 0,
'lv_max': 0,
'lv_open_count': 0,
'max_size': -1,
'name': 'test',
'number': -1,
'pv_actual': 1,
'pv_cur': 1,
'pv_max': 0,
'size': 98304,
'status': '772',
'uuid': 'Vsj4xA-45Ad-v4Rp-VOOf-XzEf-Gxwr-erL7Zu',
},
)
@mock.patch('treadmill.subproc.check_output', mock.Mock())
def test_vgsdisplay(self):
"""Test display of list of LVM groups informations.
"""
treadmill.subproc.check_output.return_value = (
' test:r/w:772:-1:0:0:0:-1:0:1:1:98304:4096:'
'24:0:24:Vsj4xA-45Ad-v4Rp-VOOf-XzEf-Gxwr-erL7Zu\n'
' treadmill:r/w:772:-1:0:5:5:-1:0:1:1:35467264:4096:'
'8659:1711:6948:MXvxzQ-gnXF-BXia-1pVo-KOH1-aJ4m-pIfnY8\n'
)
vgs = lvm.vgsdisplay()
treadmill.subproc.check_output.assert_called_with(
[
'lvm',
'vgdisplay',
'--colon',
]
)
self.assertEqual(
vgs,
[
{
'access': 'r/w',
'extent_alloc': 0,
'extent_free': 24,
'extent_nb': 24,
'extent_size': 4096,
'lv_cur': 0,
'lv_max': 0,
'lv_open_count': 0,
'max_size': -1,
'name': 'test',
'number': -1,
'pv_actual': 1,
'pv_cur': 1,
'pv_max': 0,
'size': 98304,
'status': '772',
'uuid': 'Vsj4xA-45Ad-v4Rp-VOOf-XzEf-Gxwr-erL7Zu',
},
{
'access': 'r/w',
'extent_alloc': 1711,
'extent_free': 6948,
'extent_nb': 8659,
'extent_size': 4096,
'lv_cur': 5,
'lv_max': 0,
'lv_open_count': 5,
'max_size': -1,
'name': 'treadmill',
'number': -1,
'pv_actual': 1,
'pv_cur': 1,
'pv_max': 0,
'size': 35467264,
'status': '772',
'uuid': 'MXvxzQ-gnXF-BXia-1pVo-KOH1-aJ4m-pIfnY8',
},
]
)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_lvcreate(self):
"""Test LVM Logical Volume creation.
"""
lvm.lvcreate('some_volume', '1024', 'some_group')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'lvcreate',
'--autobackup', 'n',
'--wipesignatures', 'y',
'--size', '1024B',
'--name', 'some_volume',
'some_group',
]
)
@mock.patch('treadmill.subproc.check_call', mock.Mock())
def test_lvremove(self):
"""Test LVM Logical Volume deletion.
"""
lvm.lvremove('some_volume', 'some_group')
treadmill.subproc.check_call.assert_called_with(
[
'lvm', 'lvremove',
'--autobackup', 'n',
'--force',
'some_group/some_volume',
]
)
@mock.patch('treadmill.subproc.check_output', mock.Mock())
def test_lvdisplay(self):
"""Test display of LVM volume information.
"""
treadmill.subproc.check_output.return_value = (
' /dev/test/test-lv:test:3:1:-1:0:24576:'
'3:-1:0:-1:253:5\n'
)
lv = lvm.lvdisplay('test-lv', 'test')
treadmill.subproc.check_output.assert_called_with(
[
'lvm',
'lvdisplay',
'--colon',
'test/test-lv',
]
)
self.assertEqual(
lv,
{
'block_dev': '/dev/test/test-lv',
'dev_major': 253,
'dev_minor': 5,
'extent_alloc': -1,
'extent_size': 3,
'group': 'test',
'name': 'test-lv',
'open_count': 0,
},
)
@mock.patch('treadmill.subproc.check_output', mock.Mock())
def test_lvsdisplay(self):
"""Test display of list of LVM volumes informations.
"""
treadmill.subproc.check_output.return_value = (
' /dev/test/test-lv:test:3:1:-1:0:24576:'
'3:-1:0:-1:253:5\n'
' /dev/treadmill/oRHxZN5QldMdz:treadmill:3:1:-1:1:10485760:'
'1280:-1:0:-1:253:0\n'
' /dev/treadmill/ESE0g3hyf7nxv:treadmill:3:1:-1:1:2097152:'
'256:-1:0:-1:253:1\n'
' /dev/treadmill/p8my37oRJGcd5:treadmill:3:1:-1:1:204800:'
'25:-1:0:-1:253:2\n'
' /dev/treadmill/njZhRefmf6jQp:treadmill:3:1:-1:1:1024000:'
'125:-1:0:-1:253:3\n'
' /dev/treadmill/yRImNK9cnix2T:treadmill:3:1:-1:1:204800:'
'25:-1:0:-1:253:4\n'
)
lvs = lvm.lvsdisplay()
treadmill.subproc.check_output.assert_called_with(
[
'lvm',
'lvdisplay',
'--colon',
]
)
self.assertEqual(
lvs,
[
{
'block_dev': '/dev/test/test-lv',
'dev_major': 253,
'dev_minor': 5,
'extent_alloc': -1,
'extent_size': 3,
'group': 'test',
'name': 'test-lv',
'open_count': 0,
},
{
'block_dev': '/dev/treadmill/oRHxZN5QldMdz',
'dev_major': 253,
'dev_minor': 0,
'extent_alloc': -1,
'extent_size': 1280,
'group': 'treadmill',
'name': 'oRHxZN5QldMdz',
'open_count': 1,
},
{
'block_dev': '/dev/treadmill/ESE0g3hyf7nxv',
'dev_major': 253,
'dev_minor': 1,
'extent_alloc': -1,
'extent_size': 256,
'group': 'treadmill',
'name': 'ESE0g3hyf7nxv',
'open_count': 1,
},
{
'block_dev': '/dev/treadmill/p8my37oRJGcd5',
'dev_major': 253,
'dev_minor': 2,
'extent_alloc': -1,
'extent_size': 25,
'group': 'treadmill',
'name': 'p8my37oRJGcd5',
'open_count': 1,
},
{
'block_dev': '/dev/treadmill/njZhRefmf6jQp',
'dev_major': 253,
'dev_minor': 3,
'extent_alloc': -1,
'extent_size': 125,
'group': 'treadmill',
'name': 'njZhRefmf6jQp',
'open_count': 1,
},
{
'block_dev': '/dev/treadmill/yRImNK9cnix2T',
'dev_major': 253,
'dev_minor': 4,
'extent_alloc': -1,
'extent_size': 25,
'group': 'treadmill',
'name': 'yRImNK9cnix2T',
'open_count': 1,
},
]
)
if __name__ == '__main__':
unittest.main()
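# Illustrative sketch (not treadmill's implementation): the vgdisplay tests
# above imply a fixed mapping from `vgdisplay --colon` fields to dict keys.
# The field order below is inferred from the sample output and the expected
# dicts in test_vgdisplay/test_vgsdisplay; treadmill.lvm itself may differ.
def _example_parse_vg_colon_line(line):
    fields = line.strip().split(':')
    return {
        'name': fields[0],
        'access': fields[1],
        'status': fields[2],
        'number': int(fields[3]),
        'lv_max': int(fields[4]),
        'lv_cur': int(fields[5]),
        'lv_open_count': int(fields[6]),
        'max_size': int(fields[7]),
        'pv_max': int(fields[8]),
        'pv_cur': int(fields[9]),
        'pv_actual': int(fields[10]),
        'size': int(fields[11]),
        'extent_size': int(fields[12]),
        'extent_nb': int(fields[13]),
        'extent_alloc': int(fields[14]),
        'extent_free': int(fields[15]),
        'uuid': fields[16],
    }
# e.g. _example_parse_vg_colon_line(' test:r/w:772:-1:0:0:0:-1:0:1:1:98304:'
#                                   '4096:24:0:24:Vsj4xA-...') yields the dict
# asserted in test_vgdisplay above.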
|
|
"""
NOTE: for now this is just intended to be run as python -m tests from the
compiler directory (parent directory of this file).
"""
import logging
import os
import select
import subprocess
import unittest
class CompilerTestBase(unittest.TestCase):
"""Contains the code that actually runs tests and reports their results.
"""
# TODO: make this a command line argument
maxDiff = None
@classmethod
def setUpClass(cls):
# Static Configuration
# TODO: Move static configuration to a config file.
cls.verbose = False
cls.compiler_name = "cradle"
cls.paths_to_test = ["./", "../"]
# End static configuration
cls.compiler_path = cls.get_compiler_path(cls.compiler_name)
# Command that is used to run the compiler.
cls.run_compiler_string = "{}".format(cls.compiler_path)
        cls.file_extension = "pas"
        # This is the command and arguments that will build the compiler.
        cls.build_compiler_commands = ["fpc", "{}.{}".format(
            cls.compiler_path,
            cls.file_extension)]
if cls is CompilerTestBase:
cls._build_compiler()
def potential_paths(cls, potential_paths):
for path in potential_paths:
yield "{}{}".format(path, cls.compiler_name)
@classmethod
def get_compiler_path(cls, compiler_name):
"""
# TODO: change into a class attribute function.
"""
# Currently only supporting calling from test directory and project
# directory.
for compiler_path in cls.potential_paths(cls, cls.paths_to_test):
if os.path.isfile(compiler_path):
return compiler_path
raise RuntimeError("No compiler could be found to test. Either the"
"compiler paths are incorrectly configured or the compiler name"
"is incorrectly configured.")
@classmethod
def _build_compiler(cls):
"""Runs the compliation step before running any tests. If this fails,
abort the tests.
Subprocess handling code from
https://stackoverflow.com/questions/18273962/python-subprocess-call-hangs
# TODO: COMPILE USING THE PROJECT MAKEFILE!!!
# No, really, that needs to be the next workflow improvement.
"""
logger = logging.getLogger(__name__)
is_running = lambda: compile_process.poll() is None
compile_process = subprocess.Popen(
cls.build_compiler_commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
# Grab all the output from stdout and stderr and log it
while is_running():
rlist, wlist, xlist = select.select(
[compile_process.stdout, compile_process.stderr], [], [], 1)
# Log stdout, but don't spam the log
if compile_process.stdout in rlist and cls.verbose:
# Adjust the number of bytes read however you like, 1024 seems to work
# pretty well for me.
logger.debug(compile_process.stdout.read(1024))
# Log stderr, always
if compile_process.stderr in rlist:
# Same as with stdout, adjust the bytes read as needed.
logger.error(compile_process.stderr.read(1024))
def clean_formatting(self, raw_text):
"""Simple string cleaner that ignores all prior whitespace and left
hand side whitespace.
"""
raw_text = raw_text.decode("utf-8")
# Remove all whitespace leading up to actual text.
raw_text = raw_text.strip()
split_text = raw_text.split("\n")
cleaned_text = [line.lstrip() for line in split_text]
return "\n".join(cleaned_text)
def run_test(self, test_program, expected_assembly):
sub_process = subprocess.Popen(
[self.run_compiler_string],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT)
        try:
            test_program = bytes(test_program, "utf-8")
        except (TypeError, AttributeError):
            # Already bytes; leave as-is.
            pass
actual_assembly = sub_process.communicate(test_program)
actual_assembly = self.clean_formatting(actual_assembly[0])
expected_assembly = self.clean_formatting(expected_assembly)
if self.verbose:
# TODO: switch to logging statements.
print("------- actual assembly ----------")
print(actual_assembly)
print("------- expected assembly ----------")
print(expected_assembly)
print("------- end ----------")
self.assertEqual(actual_assembly, expected_assembly)
class TestInitialization(CompilerTestBase):
def test_single_literal(self):
test_program = "1"
expected_assembly = b"""
.text
.globl _main
_main:
subq $8, %rsp
movq $1, %rax
movq $0, %rdi
call _exit
"""
self.run_test(test_program, expected_assembly)
def test_blank_program(self):
test_program = ""
expected_assembly = b"""
.text
.globl _main
_main:
subq $8, %rsp
movq $0, %rdi
call _exit
"""
self.run_test(test_program, expected_assembly)
class TestArithmetic(CompilerTestBase):
division_assembly = b"""
.text
.globl _main
_main:
subq $8, %rsp
movq $2, %rax
push %rax
movq $2, %rax
movq %rax, %rbx
pop %rax
xor %rdx, %rdx
div %rbx
movq $0, %rdi
call _exit
"""
multiplication_assembly = b"""
.text
.globl _main
_main:
subq $8, %rsp
movq $1, %rax
push %rax
movq $2, %rax
pop %rbx
mul %rbx
movq $0, %rdi
call _exit
"""
addition_assembly = b"""
.text
.globl _main
_main:
subq $8, %rsp
movq $1, %rax
push %rax
movq $2, %rax
pop %rbx
add %rax, %rbx
movq $0, %rdi
call _exit
"""
subtraction_assembly = b"""
.text
.globl _main
_main:
subq $8, %rsp
movq $1, %rax
push %rax
movq $2, %rax
pop %rbx
sub %rax, %rbx
movq $0, %rdi
call _exit
"""
def test_addition_no_space(self):
test_program = "1+2"
self.run_test(
test_program,
expected_assembly=self.addition_assembly)
@unittest.skip("Spaces not included yet.")
def test_addition_spaces(self):
test_program = "1 + 2"
self.run_test(
test_program,
expected_assembly=self.addition_assembly)
@unittest.skip("Errors not included yet.")
def test_addition_missing_left_literal(self):
test_program = "+2"
expected_assembly = b"Error: Addop Expected"
self.run_test(test_program, expected_assembly)
@unittest.skip("Errors not included yet.")
def test_addition_missing_right_literal(self):
test_program = "1+"
expected_assembly = b"Error: Addop Expected"
self.run_test(test_program, expected_assembly)
def test_subtraction_no_space(self):
test_program = "1-2"
self.run_test(
test_program,
expected_assembly=self.subtraction_assembly)
@unittest.skip("Spaces not included yet.")
def test_subtraction_spaces(self):
test_program = "1 - 2"
self.run_test(
test_program,
expected_assembly=self.subtraction_assembly)
@unittest.skip("Errors not included yet.")
def test_subtraction_missing_right_literal(self):
test_program = "1-"
expected_assembly = b"Error: Subop Expected"
self.run_test(test_program, expected_assembly)
    def test_multiplication_no_spaces(self):
test_program = "1*2"
self.run_test(
test_program,
expected_assembly=self.multiplication_assembly)
@unittest.skip("Spaces not included yet.")
    def test_multiplication_spaces(self):
test_program = "1 * 2"
self.run_test(
test_program,
expected_assembly=self.multiplication_assembly)
@unittest.skip("Errors not included yet.")
def test_invalid_mul_operation(self):
test_program = "1*"
expected_assembly = b"Error: Mulop Expected"
self.run_test(test_program, expected_assembly)
@unittest.skip("Spaces not included yet.")
def test_division_spaces(self):
test_program = "2 / 2"
self.run_test(
test_program,
expected_assembly=self.division_assembly)
def test_division_no_spaces(self):
test_program = "2/2"
self.run_test(
test_program,
expected_assembly=self.division_assembly)
@unittest.skip("Errors not included yet.")
def test_invalid_div_operation(self):
test_program = "1/"
expected_assembly = b"Error: Divop Expected"
self.run_test(test_program, expected_assembly)
class TestParentheses(CompilerTestBase):
pre_boiler_plate = b"""
.text
.globl _main
_main:
subq $8, %rsp""".lstrip()
post_boiler_plate = b"""
movq $0, %rdi
call _exit""".lstrip()
def add_boiler_plate(self, assembly):
boilered_assembly = self.pre_boiler_plate
boilered_assembly += assembly
boilered_assembly += self.post_boiler_plate
return boilered_assembly
    def constant_assembly(self, integer):
        constant_string = "\n        movq ${}, %rax\n        ".format(integer)
        constant_assembly = self.add_boiler_plate(bytes(constant_string, "utf-8"))
        return constant_assembly
def addition_assembly(self, left_int, right_int):
addition_string = """
movq ${}, %rax
push %rax
movq ${}, %rax
pop %rbx
add %rax, %rbx
""".format(left_int, right_int)
addition_assembly = bytes(addition_string, "utf-8")
return addition_assembly
def test_parens_no_expression(self):
test_program = "()"
expected_assembly = self.pre_boiler_plate
expected_assembly += b"\n"
expected_assembly += self.post_boiler_plate
self.run_test(test_program, expected_assembly)
    def test_parens_single_digit(self):
test_program = "(1)"
expected_assembly = self.constant_assembly(1)
self.run_test(test_program, expected_assembly)
    def test_parens_single_expression(self):
test_program = "(1+2)"
expected_assembly = self.addition_assembly(1, 2)
expected_assembly = self.add_boiler_plate(expected_assembly)
self.run_test(test_program, expected_assembly)
def test_parens_nested_expression(self):
test_program = "(1(2+(3)))"
expected_assembly = self.addition_assembly(2, 3)
expected_assembly += self.addition_assembly(1, 5)
expected_assembly = self.add_boiler_plate(expected_assembly)
self.run_test(test_program, expected_assembly)
def test_parens_broad_expressions(self):
test_program = "(1+(2+3))+4"
expected_assembly = self.addition_assembly(2, 3)
expected_assembly += self.addition_assembly(1, 5)
expected_assembly += self.addition_assembly(6, 5)
expected_assembly = self.add_boiler_plate(expected_assembly)
self.run_test(test_program, expected_assembly)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
|
|
from django.contrib.auth import get_user_model
from rest_framework import status
from datetime import datetime, timedelta
from time import sleep
from coupons.tests.base import BasicTest
class CouponRedeemTests(BasicTest):
def setUp(self):
u = get_user_model()
u.objects.create_superuser('admin', '[email protected]', self.PW)
self.user = u.objects.create_user('user', '[email protected]', self.PW)
def test_cant_redeem_expired(self):
"""
Verify that if a coupon is expired, it can't be redeemed.
"""
future = datetime.utcnow() + timedelta(seconds=5)
coupon = {
'code': 'ASDF',
'type': 'percent',
'expires': str(future),
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
self.logout()
# sleep until it's expired.
sleep(5)
self.login(username='admin')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.logout()
def test_cant_redeem_wrong_user(self):
"""
Verify that you can't redeem a coupon that is bound to another user.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'bound': True,
'user': self.user.id,
'repeat': 1,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
self.logout()
coupon['code_l'] = coupon['code'].lower()
self.verify_built(coupon, response.data)
self.login(username='admin')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.logout()
def test_can_redeem_nonbound(self):
"""
Verify that you can redeem a coupon that isn't bound to a specific user.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
self.logout()
self.login(username='admin')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
def test_can_redeem_bound_to_you(self):
"""
Verify that you can redeem a bound coupon if it's bound to you.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'bound': True,
'user': self.user.id,
'repeat': 1,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
self.logout()
coupon['code_l'] = coupon['code'].lower()
self.verify_built(coupon, response.data)
self.login(username='user')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
def test_cant_redeem_beyond_repeat(self):
"""
Verify you can't redeem a coupon more than allowed.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'repeat': 2,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.logout()
def test_cant_redeem_beyond_repeat_singleuse(self):
"""
Verify you can't redeem a coupon more than allowed. No huge difference for this, but just in case.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'repeat': 1,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.logout()
def test_cant_redeem_beyond_repeat_multiple_users(self):
"""
Verify that it only takes into account your claims and not other users.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'repeat': 1,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
self.logout()
self.login(username='admin')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
self.login(username='user')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
self.login(username='user')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.logout()
def test_can_redeem_repeat_infinite(self):
"""
Verify that it does support repeat being 0.
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'repeat': 0,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
self.logout()
self.login(username='admin')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
self.login(username='user')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
self.login(username='user')
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.logout()
def test_can_redeem_beyond_repeat_singleuse_after_coupon_updated(self):
"""
Verify if the coupon is updated, you can claim it more if they increase the count. :)
"""
coupon = {
'code': 'ASDF',
'type': 'percent',
'repeat': 1,
}
with self.settings(ROOT_URLCONF='coupons.urls'):
self.login(username='admin')
response = self.client.post('/coupon', coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
coupon_id = response.data['id']
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.get('/coupon/%s' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
coupon = response.data
coupon['repeat'] = 2
del coupon['created']
del coupon['updated']
del coupon['id']
del coupon['expires']
response = self.client.put('/coupon/%s' % coupon_id, coupon, format='json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.put('/coupon/%s/redeem' % coupon_id, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.logout()
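# Illustrative sketch (not the coupons application code): the redemption rules
# exercised by the tests above, expressed as a single predicate. The function
# name, signature and the repeat default are hypothetical; the real rules are
# enforced server-side by the redeem endpoint.
def _example_can_redeem(coupon, user, user_claim_count, now):
    """Return True if `user` may redeem `coupon` one more time.
    - an expired coupon can never be redeemed;
    - a coupon bound to a user can only be redeemed by that user;
    - repeat == 0 means unlimited redemptions, otherwise each user may redeem
      at most `repeat` times (claims are counted per user, not globally).
    """
    if coupon.get('expires') is not None and coupon['expires'] <= now:
        return False
    if coupon.get('bound') and coupon.get('user') != user.id:
        return False
    repeat = coupon.get('repeat', 0)
    if repeat and user_claim_count >= repeat:
        return False
    return True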
|
|
import pytest
from tests.utils import async
import io
import os
import json
import base64
import hashlib
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.core.provider import build_url
from waterbutler.providers.github import GitHubProvider
from waterbutler.providers.github import settings as github_settings
from waterbutler.providers.github.metadata import GitHubRevision
from waterbutler.providers.github.metadata import GitHubFileTreeMetadata
from waterbutler.providers.github.metadata import GitHubFolderTreeMetadata
from waterbutler.providers.github.metadata import GitHubFileContentMetadata
from waterbutler.providers.github.metadata import GitHubFolderContentMetadata
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': '[email protected]',
}
@pytest.fixture
def credentials():
return {'token': 'naps'}
@pytest.fixture
def settings():
return {
'owner': 'cat',
'repo': 'food',
}
@pytest.fixture
def file_content():
return b'hungry'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def upload_response():
return {
"content": {
"name": "hello.txt",
"path": "notes/hello.txt",
"sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"size": 9,
"url": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
"html_url": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt",
"git_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"type": "file",
"_links": {
"self": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
"git": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"html": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt"
}
},
"commit": {
"sha": "7638417db6d59f3c431d3e1f261cc637155684cd",
"url": "https://api.github.com/repos/octocat/Hello-World/git/commits/7638417db6d59f3c431d3e1f261cc637155684cd",
"html_url": "https://github.com/octocat/Hello-World/git/commit/7638417db6d59f3c431d3e1f261cc637155684cd",
"author": {
"date": "2010-04-10T14:10:01-07:00",
"name": "Scott Chacon",
"email": "[email protected]"
},
"committer": {
"date": "2010-04-10T14:10:01-07:00",
"name": "Scott Chacon",
"email": "[email protected]"
},
"message": "my commit message",
"tree": {
"url": "https://api.github.com/repos/octocat/Hello-World/git/trees/691272480426f78a0138979dd3ce63b77f706feb",
"sha": "691272480426f78a0138979dd3ce63b77f706feb"
},
"parents": [
{
"url": "https://api.github.com/repos/octocat/Hello-World/git/commits/1acc419d4d6a9ce985db7be48c6349a0475975b5",
"html_url": "https://github.com/octocat/Hello-World/git/commit/1acc419d4d6a9ce985db7be48c6349a0475975b5",
"sha": "1acc419d4d6a9ce985db7be48c6349a0475975b5"
}
]
}
}
@pytest.fixture
def create_folder_response():
return {
"content": {
"name": ".gitkeep",
"path": "i/like/trains/.gitkeep",
"sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"size": 9,
"url": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
"html_url": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt",
"git_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"type": "file",
"_links": {
"self": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
"git": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"html": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt"
}
},
"commit": {
"sha": "7638417db6d59f3c431d3e1f261cc637155684cd",
"url": "https://api.github.com/repos/octocat/Hello-World/git/commits/7638417db6d59f3c431d3e1f261cc637155684cd",
"html_url": "https://github.com/octocat/Hello-World/git/commit/7638417db6d59f3c431d3e1f261cc637155684cd",
"author": {
"date": "2010-04-10T14:10:01-07:00",
"name": "Scott Chacon",
"email": "[email protected]"
},
"committer": {
"date": "2010-04-10T14:10:01-07:00",
"name": "Scott Chacon",
"email": "[email protected]"
},
"message": "my commit message",
"tree": {
"url": "https://api.github.com/repos/octocat/Hello-World/git/trees/691272480426f78a0138979dd3ce63b77f706feb",
"sha": "691272480426f78a0138979dd3ce63b77f706feb"
},
"parents": [
{
"url": "https://api.github.com/repos/octocat/Hello-World/git/commits/1acc419d4d6a9ce985db7be48c6349a0475975b5",
"html_url": "https://github.com/octocat/Hello-World/git/commit/1acc419d4d6a9ce985db7be48c6349a0475975b5",
"sha": "1acc419d4d6a9ce985db7be48c6349a0475975b5"
}
]
}
}
@pytest.fixture
def repo_metadata():
return {
'full_name': 'octocat/Hello-World',
'permissions': {
'push': False,
'admin': False,
'pull': True
},
'has_downloads': True,
'notifications_url': 'https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}',
'releases_url': 'https://api.github.com/repos/octocat/Hello-World/releases{/id}',
'downloads_url': 'https://api.github.com/repos/octocat/Hello-World/downloads',
'merges_url': 'https://api.github.com/repos/octocat/Hello-World/merges',
'owner': {
'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
'organizations_url': 'https://api.github.com/users/octocat/orgs',
'type': 'User',
'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
'url': 'https://api.github.com/users/octocat',
'html_url': 'https://github.com/octocat',
'received_events_url': 'https://api.github.com/users/octocat/received_events',
'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
'site_admin': False,
'gravatar_id': '',
'repos_url': 'https://api.github.com/users/octocat/repos',
'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
'id': 583231,
'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
'login': 'octocat',
'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
'followers_url': 'https://api.github.com/users/octocat/followers'
},
'html_url': 'https://github.com/octocat/Hello-World',
'comments_url': 'https://api.github.com/repos/octocat/Hello-World/comments{/number}',
'git_url': 'git://github.com/octocat/Hello-World.git',
'ssh_url': '[email protected]:octocat/Hello-World.git',
'language': None,
'pulls_url': 'https://api.github.com/repos/octocat/Hello-World/pulls{/number}',
'subscribers_count': 1850,
'forks_count': 1085,
'watchers_count': 1407,
'id': 1296269,
'keys_url': 'https://api.github.com/repos/octocat/Hello-World/keys{/key_id}',
'default_branch': 'master',
'stargazers_count': 1407,
'tags_url': 'https://api.github.com/repos/octocat/Hello-World/tags',
'clone_url': 'https://github.com/octocat/Hello-World.git',
'homepage': '',
'forks_url': 'https://api.github.com/repos/octocat/Hello-World/forks',
'branches_url': 'https://api.github.com/repos/octocat/Hello-World/branches{/branch}',
'url': 'https://api.github.com/repos/octocat/Hello-World',
'contents_url': 'https://api.github.com/repos/octocat/Hello-World/contents/{+path}',
'hooks_url': 'https://api.github.com/repos/octocat/Hello-World/hooks',
'git_tags_url': 'https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}',
'statuses_url': 'https://api.github.com/repos/octocat/Hello-World/statuses/{sha}',
'trees_url': 'https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}',
'contributors_url': 'https://api.github.com/repos/octocat/Hello-World/contributors',
'open_issues': 126,
'has_pages': False,
'pushed_at': '2014-06-11T21:51:23Z',
'network_count': 1085,
'commits_url': 'https://api.github.com/repos/octocat/Hello-World/commits{/sha}',
'git_commits_url': 'https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}',
'svn_url': 'https://github.com/octocat/Hello-World',
'forks': 1085,
'fork': False,
'subscription_url': 'https://api.github.com/repos/octocat/Hello-World/subscription',
'archive_url': 'https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}',
'subscribers_url': 'https://api.github.com/repos/octocat/Hello-World/subscribers',
'description': 'This your first repo!',
'blobs_url': 'https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}',
'teams_url': 'https://api.github.com/repos/octocat/Hello-World/teams',
'compare_url': 'https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}',
'issues_url': 'https://api.github.com/repos/octocat/Hello-World/issues{/number}',
'stargazers_url': 'https://api.github.com/repos/octocat/Hello-World/stargazers',
'private': False,
'created_at': '2011-01-26T19:01:12Z',
'issue_comment_url': 'https://api.github.com/repos/octocat/Hello-World/issues/comments/{number}',
'has_issues': True,
'milestones_url': 'https://api.github.com/repos/octocat/Hello-World/milestones{/number}',
'issue_events_url': 'https://api.github.com/repos/octocat/Hello-World/issues/events{/number}',
'languages_url': 'https://api.github.com/repos/octocat/Hello-World/languages',
'name': 'Hello-World',
'mirror_url': None,
'has_wiki': True,
'updated_at': '2014-12-12T16:45:49Z',
'watchers': 1407,
'open_issues_count': 126,
'labels_url': 'https://api.github.com/repos/octocat/Hello-World/labels{/name}',
'collaborators_url': 'https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}',
'assignees_url': 'https://api.github.com/repos/octocat/Hello-World/assignees{/user}',
'size': 558,
'git_refs_url': 'https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}',
'events_url': 'https://api.github.com/repos/octocat/Hello-World/events'
}
@pytest.fixture
def branch_metadata():
return {
'commit': {
'html_url': 'https://github.com/octocat/Hello-World/commit/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
'url': 'https://api.github.com/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
'committer': {
'html_url': 'https://github.com/octocat',
'login': 'octocat',
'type': 'User',
'gravatar_id': '',
'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
'received_events_url': 'https://api.github.com/users/octocat/received_events',
'id': 583231,
'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
'organizations_url': 'https://api.github.com/users/octocat/orgs',
'url': 'https://api.github.com/users/octocat',
'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
'followers_url': 'https://api.github.com/users/octocat/followers',
'repos_url': 'https://api.github.com/users/octocat/repos',
'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
'site_admin': False
},
'parents': [{
'html_url': 'https://github.com/octocat/Hello-World/commit/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e',
'url': 'https://api.github.com/repos/octocat/Hello-World/commits/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e',
'sha': '553c2077f0edc3d5dc5d17262f6aa498e69d6f8e'
}, {
'html_url': 'https://github.com/octocat/Hello-World/commit/762941318ee16e59dabbacb1b4049eec22f0d303',
'url': 'https://api.github.com/repos/octocat/Hello-World/commits/762941318ee16e59dabbacb1b4049eec22f0d303',
'sha': '762941318ee16e59dabbacb1b4049eec22f0d303'
}],
'sha': '7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
'author': {
'html_url': 'https://github.com/octocat',
'login': 'octocat',
'type': 'User',
'gravatar_id': '',
'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
'received_events_url': 'https://api.github.com/users/octocat/received_events',
'id': 583231,
'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
'organizations_url': 'https://api.github.com/users/octocat/orgs',
'url': 'https://api.github.com/users/octocat',
'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
'followers_url': 'https://api.github.com/users/octocat/followers',
'repos_url': 'https://api.github.com/users/octocat/repos',
'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
'site_admin': False
},
'comments_url': 'https://api.github.com/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d/comments',
'commit': {
'url': 'https://api.github.com/repos/octocat/Hello-World/git/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
'message': 'Merge pull request #6 from Spaceghost/patch-1\n\nNew line at end of file.',
'committer': {
'email': '[email protected]',
'date': '2012-03-06T23:06:50Z',
'name': 'The Octocat'
},
'tree': {
'url': 'https://api.github.com/repos/octocat/Hello-World/git/trees/b4eecafa9be2f2006ce1b709d6857b07069b4608',
'sha': 'b4eecafa9be2f2006ce1b709d6857b07069b4608'
},
'comment_count': 51,
'author': {
'email': '[email protected]',
'date': '2012-03-06T23:06:50Z',
'name': 'The Octocat'
}
}
},
'_links': {
'html': 'https://github.com/octocat/Hello-World/tree/master',
'self': 'https://api.github.com/repos/octocat/Hello-World/branches/master'
},
'name': 'master'
}
@pytest.fixture
def content_repo_metadata_root():
return [
{
'path': 'file.txt',
'type': 'file',
'html_url': 'https://github.com/icereval/test/blob/master/file.txt',
'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'url': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'_links': {
'git': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'self': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
'html': 'https://github.com/icereval/test/blob/master/file.txt'
},
'name': 'file.txt',
'size': 0,
'download_url': 'https://raw.githubusercontent.com/icereval/test/master/file.txt'
}, {
'path': 'level1',
'type': 'dir',
'html_url': 'https://github.com/icereval/test/tree/master/level1',
'git_url': 'https://api.github.com/repos/icereval/test/git/trees/bc1087ebfe8354a684bf9f8b75517784143dde86',
'url': 'https://api.github.com/repos/icereval/test/contents/level1?ref=master',
'sha': 'bc1087ebfe8354a684bf9f8b75517784143dde86',
'_links': {
'git': 'https://api.github.com/repos/icereval/test/git/trees/bc1087ebfe8354a684bf9f8b75517784143dde86',
'self': 'https://api.github.com/repos/icereval/test/contents/level1?ref=master',
'html': 'https://github.com/icereval/test/tree/master/level1'
},
'name': 'level1',
'size': 0,
'download_url': None
}, {
'path': 'test.rst',
'type': 'file',
'html_url': 'https://github.com/icereval/test/blob/master/test.rst',
'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
'url': 'https://api.github.com/repos/icereval/test/contents/test.rst?ref=master',
'sha': 'ca39bcbf849231525ce9e775935fcb18ed477b5a',
'_links': {
'git': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
'self': 'https://api.github.com/repos/icereval/test/contents/test.rst?ref=master',
'html': 'https://github.com/icereval/test/blob/master/test.rst'
},
'name': 'test.rst',
'size': 190,
'download_url': 'https://raw.githubusercontent.com/icereval/test/master/test.rst'
}
]
@pytest.fixture
def repo_tree_metadata_root():
return {
'tree': [
{
'url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'size': 0,
'type': 'blob',
'path': 'file.txt',
'mode': '100644',
'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
},
{
'type': 'tree',
'url': 'https://api.github.com/repos/icereval/test/git/trees/05353097666f449344b7f69036c70a52dc504088',
'path': 'level1',
'mode': '040000',
'sha': '05353097666f449344b7f69036c70a52dc504088'
},
{
'url': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
'size': 190,
'type': 'blob',
'path': 'test.rst',
'mode': '100644',
'sha': 'ca39bcbf849231525ce9e775935fcb18ed477b5a'
}
],
'url': 'https://api.github.com/repos/icereval/test/git/trees/cd83e4a08261a54f1c4630fbb1de34d1e48f0c8a',
'truncated': False,
'sha': 'cd83e4a08261a54f1c4630fbb1de34d1e48f0c8a'
}
@pytest.fixture
def content_repo_metadata_root_file_txt():
return {
'_links': {
'git': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'self': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
'html': 'https://github.com/icereval/test/blob/master/file.txt'
},
'content': '',
'url': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
'html_url': 'https://github.com/icereval/test/blob/master/file.txt',
'download_url': 'https://raw.githubusercontent.com/icereval/test/master/file.txt',
'name': 'file.txt',
'type': 'file',
'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'encoding': 'base64',
'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
'path': 'file.txt',
'size': 0
}
@pytest.fixture
def provider(auth, credentials, settings, repo_metadata):
provider = GitHubProvider(auth, credentials, settings)
provider._repo = repo_metadata
provider.default_branch = repo_metadata['default_branch']
return provider
class TestHelpers:
def test_build_repo_url(self, provider, settings):
expected = provider.build_url('repos', settings['owner'], settings['repo'], 'contents')
assert provider.build_repo_url('contents') == expected
def test_committer(self, auth, provider):
expected = {
'name': auth['name'],
'email': auth['email'],
}
assert provider.committer == expected
class TestValidatePath:
@async
def test_validate_path(self, provider):
path = yield from provider.validate_path('/this/is/my/path')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == (provider.default_branch, None)
@async
def test_validate_path_passes_branch(self, provider):
path = yield from provider.validate_path('/this/is/my/path', branch='NotMaster')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == ('NotMaster', None)
@async
def test_validate_path_passes_ref(self, provider):
path = yield from provider.validate_path('/this/is/my/path', ref='NotMaster')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == ('NotMaster', None)
@async
def test_validate_path_passes_file_sha(self, provider):
path = yield from provider.validate_path('/this/is/my/path', fileSha='Thisisasha')
assert path.is_dir is False
assert path.is_file is True
assert path.name == 'path'
assert isinstance(path.identifier, tuple)
assert path.identifier == (provider.default_branch, 'Thisisasha')
class TestCRUD:
# @async
# @pytest.mark.aiohttpretty
# def test_download_by_file_sha(self, provider, content_repo_metadata_root_file_txt):
# ref = hashlib.sha1().hexdigest()
# url = provider.build_repo_url('git', 'refs', 'heads', 'master')
# path = WaterButlerPath('/file.txt', _ids=(None, ('master', ref)))
# aiohttpretty.register_uri('GET', url, body=b'delicious')
# aiohttpretty.register_json_uri('GET', url, body={'object': {'sha': ref}})
# result = yield from provider.download(path)
# content = yield from result.read()
# assert content == b'delicious'
@async
@pytest.mark.aiohttpretty
def test_download_by_path(self, provider, repo_tree_metadata_root):
ref = hashlib.sha1().hexdigest()
file_sha = repo_tree_metadata_root['tree'][0]['sha']
path = yield from provider.validate_path('/file.txt')
url = provider.build_repo_url('git', 'blobs', file_sha)
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
latest_sha_url = provider.build_repo_url('git', 'refs', 'heads', path.identifier[0])
aiohttpretty.register_uri('GET', url, body=b'delicious')
aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
aiohttpretty.register_json_uri('GET', latest_sha_url, body={'object': {'sha': ref}})
result = yield from provider.download(path)
content = yield from result.read()
assert content == b'delicious'
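# Note: the happy-path download above exercises the three GitHub API calls the
# test registers with aiohttpretty: GET git/refs/heads/<branch> to resolve the
# latest commit sha, GET git/trees/<sha>?recursive=1 to map the path to a blob
# sha, and GET git/blobs/<sha> to stream the file contents.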
@async
@pytest.mark.aiohttpretty
def test_download_by_path_ref_branch(self, provider, repo_tree_metadata_root):
ref = hashlib.sha1().hexdigest()
file_sha = repo_tree_metadata_root['tree'][0]['sha']
path = yield from provider.validate_path('/file.txt', branch='other_branch')
url = provider.build_repo_url('git', 'blobs', file_sha)
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
latest_sha_url = provider.build_repo_url('git', 'refs', 'heads', path.identifier[0])
aiohttpretty.register_uri('GET', url, body=b'delicious')
aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
aiohttpretty.register_json_uri('GET', latest_sha_url, body={'object': {'sha': ref}})
result = yield from provider.download(path)
content = yield from result.read()
assert content == b'delicious'
# @async
# @pytest.mark.aiohttpretty
# def test_download_bad_status(self, provider):
# ref = hashlib.sha1().hexdigest()
# url = provider.build_repo_url('git', 'blobs', ref)
# aiohttpretty.register_uri('GET', url, body=b'delicious', status=418)
# with pytest.raises(exceptions.DownloadError):
# yield from provider.download('', fileSha=ref)
# @async
# @pytest.mark.aiohttpretty
# def test_upload_create(self, provider, upload_response, file_content, file_stream):
# message = 'so hungry'
# path = upload_response['content']['path'][::-1]
# metadata_url = provider.build_repo_url('contents', os.path.dirname(path))
# aiohttpretty.register_json_uri('GET', metadata_url, body=[upload_response['content']], status=200)
# upload_url = provider.build_repo_url('contents', path)
# aiohttpretty.register_json_uri('PUT', upload_url, body=upload_response, status=201)
# yield from provider.upload(file_stream, path, message)
# expected_data = {
# 'path': path,
# 'message': message,
# 'content': base64.b64encode(file_content).decode('utf-8'),
# 'committer': provider.committer,
# }
# assert aiohttpretty.has_call(method='GET', uri=metadata_url)
# assert aiohttpretty.has_call(method='PUT', uri=upload_url, data=json.dumps(expected_data))
#
# @async
# @pytest.mark.aiohttpretty
# def test_upload_update(self, provider, upload_response, file_content, file_stream):
# message = 'so hungry'
# sha = upload_response['content']['sha']
# path = '/' + upload_response['content']['path']
#
# upload_url = provider.build_repo_url('contents', provider.build_path(path))
# metadata_url = provider.build_repo_url('contents', os.path.dirname(path))
#
# aiohttpretty.register_json_uri('PUT', upload_url, body=upload_response)
# aiohttpretty.register_json_uri('GET', metadata_url, body=[upload_response['content']])
#
# yield from provider.upload(file_stream, path, message)
#
# expected_data = {
# 'path': path,
# 'message': message,
# 'content': base64.b64encode(file_content).decode('utf-8'),
# 'committer': provider.committer,
# 'sha': sha,
# }
#
# assert aiohttpretty.has_call(method='GET', uri=metadata_url)
# assert aiohttpretty.has_call(method='PUT', uri=upload_url, data=json.dumps(expected_data))
# @async
# @pytest.mark.aiohttpretty
# def test_delete_with_branch(self, provider, repo_contents):
# path = os.path.join('/', repo_contents[0]['path'])
# sha = repo_contents[0]['sha']
# branch = 'master'
# message = 'deleted'
# url = provider.build_repo_url('contents', path)
# aiohttpretty.register_json_uri('DELETE', url)
# yield from provider.delete(path, message, sha, branch=branch)
# expected_data = {
# 'message': message,
# 'sha': sha,
# 'committer': provider.committer,
# 'branch': branch,
# }
#
# assert aiohttpretty.has_call(method='DELETE', uri=url, data=json.dumps(expected_data))
#
# @async
# @pytest.mark.aiohttpretty
# def test_delete_without_branch(self, provider, repo_contents):
# path = repo_contents[0]['path']
# sha = repo_contents[0]['sha']
# message = 'deleted'
# url = provider.build_repo_url('contents', path)
# aiohttpretty.register_json_uri('DELETE', url)
# yield from provider.delete(path, message, sha)
# expected_data = {
# 'message': message,
# 'sha': sha,
# 'committer': provider.committer,
# }
#
# assert aiohttpretty.has_call(method='DELETE', uri=url, data=json.dumps(expected_data))
class TestMetadata:
@async
@pytest.mark.aiohttpretty
def test_metadata_file(self, provider, repo_metadata, repo_tree_metadata_root):
ref = hashlib.sha1().hexdigest()
path = yield from provider.validate_path('/file.txt')
tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
latest_sha_url = provider.build_repo_url('git', 'refs', 'heads', path.identifier[0])
aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
aiohttpretty.register_json_uri('GET', latest_sha_url, body={'object': {'sha': ref}})
result = yield from provider.metadata(path)
item = repo_tree_metadata_root['tree'][0]
web_view = provider._web_view(path=path)
assert result == GitHubFileTreeMetadata(item, web_view=web_view)
# TODO: Additional Tests
# def test_metadata_root_file_txt_branch(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
# def test_metadata_root_file_txt_commit_sha(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
@async
@pytest.mark.aiohttpretty
def test_metadata_folder_root(self, provider, repo_metadata, content_repo_metadata_root):
path = yield from provider.validate_path('/')
url = provider.build_repo_url('contents', path.path, ref=provider.default_branch)
aiohttpretty.register_json_uri('GET', url, body=content_repo_metadata_root)
result = yield from provider.metadata(path)
ret = []
for item in content_repo_metadata_root:
if item['type'] == 'dir':
ret.append(GitHubFolderContentMetadata(item))
else:
ret.append(GitHubFileContentMetadata(item, web_view=item['html_url']))
assert result == ret
# TODO: Additional Tests
# def test_metadata_non_root_folder(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
# def test_metadata_non_root_folder_branch(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
# def test_metadata_non_root_folder_commit_sha(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
class TestCreateFolder:
@async
@pytest.mark.aiohttpretty
def test_errors_out(self, provider, repo_metadata):
path = yield from provider.validate_path('/Imarealboy/')
url = provider.build_repo_url('contents', path.child('.gitkeep').path)
aiohttpretty.register_uri('PUT', url, status=400)
with pytest.raises(exceptions.CreateFolderError) as e:
yield from provider.create_folder(path)
assert e.value.code == 400
@async
@pytest.mark.aiohttpretty
def test_must_be_folder(self, provider, repo_metadata):
path = yield from provider.validate_path('/Imarealboy')
with pytest.raises(exceptions.CreateFolderError) as e:
yield from provider.create_folder(path)
@async
@pytest.mark.aiohttpretty
def test_already_exists(self, provider, repo_metadata):
path = yield from provider.validate_path('/Imarealboy/')
url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
aiohttpretty.register_json_uri('PUT', url, status=422, body={
'message': 'Invalid request.\n\n"sha" wasn\'t supplied.'
})
with pytest.raises(exceptions.FolderNamingConflict) as e:
yield from provider.create_folder(path)
assert e.value.code == 409
assert e.value.message == 'Cannot create folder "Imarealboy" because a file or folder already exists at path "/Imarealboy/"'
@async
@pytest.mark.aiohttpretty
def test_raises_other_422(self, provider, repo_metadata):
path = yield from provider.validate_path('/Imarealboy/')
url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
aiohttpretty.register_json_uri('PUT', url, status=422, body={
'message': 'github no likey'
})
with pytest.raises(exceptions.CreateFolderError) as e:
yield from provider.create_folder(path)
assert e.value.code == 422
assert e.value.data == {'message': 'github no likey'}
@async
@pytest.mark.aiohttpretty
def test_returns_metadata(self, provider, repo_metadata, create_folder_response):
path = yield from provider.validate_path('/i/like/trains/')
url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
aiohttpretty.register_json_uri('PUT', url, status=201, body=create_folder_response)
metadata = yield from provider.create_folder(path)
assert metadata.kind == 'folder'
assert metadata.name == 'trains'
assert metadata.path == '/i/like/trains/'
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import glob
import os
import sys
import time
import unittest
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir',
default='gen-py',
help='include this local directory in sys.path for locating generated code')
parser.add_option("--port", type="int", dest="port",
help="connect to server at port")
parser.add_option("--host", type="string", dest="host",
help="connect to server")
parser.add_option("--zlib", action="store_true", dest="zlib",
help="use zlib wrapper for compressed transport")
parser.add_option("--ssl", action="store_true", dest="ssl",
help="use SSL for encrypted transport")
parser.add_option("--http", dest="http_path",
help="Use the HTTP transport with the specified path")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.add_option('--protocol', dest="proto", type="string",
help="protocol to use, one of: accel, binary, compact, json")
parser.add_option('--transport', dest="trans", type="string",
help="transport to use, one of: buffered, framed")
parser.set_defaults(framed=False, http_path=None, verbose=1, host='localhost', port=9090, proto='binary')
options, args = parser.parse_args()
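# Example invocation (assuming this script is saved as TestClient.py; the flags
# correspond to the options defined above):
#   python TestClient.py --host=localhost --port=9090 --protocol=binary --transport=framed --zlib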
script_dir = os.path.abspath(os.path.dirname(__file__))
lib_dir = os.path.join(os.path.dirname(os.path.dirname(script_dir)), 'lib', 'py', 'build', 'lib.*')
sys.path.insert(0, os.path.join(script_dir, options.genpydir))
sys.path.insert(0, glob.glob(lib_dir)[0])
from ThriftTest import ThriftTest, SecondService
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.transport import TZlibTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import TJSONProtocol
class AbstractTest(unittest.TestCase):
def setUp(self):
if options.http_path:
self.transport = THttpClient.THttpClient(options.host, port=options.port, path=options.http_path)
else:
if options.ssl:
from thrift.transport import TSSLSocket
socket = TSSLSocket.TSSLSocket(options.host, options.port, validate=False)
else:
socket = TSocket.TSocket(options.host, options.port)
# Choose a framed or buffered transport depending on the --transport argument
self.transport = TTransport.TBufferedTransport(socket)
if options.trans == 'framed':
self.transport = TTransport.TFramedTransport(socket)
elif options.trans == 'buffered':
self.transport = TTransport.TBufferedTransport(socket)
elif options.trans == '':
raise AssertionError('Unknown --transport option: %s' % options.trans)
if options.zlib:
self.transport = TZlibTransport.TZlibTransport(self.transport, 9)
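# Resulting transport stack (bottom-up): socket -> framed/buffered -> optional
# zlib wrapper; the protocol object created below is layered on top of it.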
self.transport.open()
protocol = self.protocol_factory.getProtocol(self.transport)
self.client = ThriftTest.Client(protocol)
def tearDown(self):
# Close!
self.transport.close()
def testVoid(self):
print('testVoid')
self.client.testVoid()
def testString(self):
print('testString')
self.assertEqual(self.client.testString('Python' * 20), 'Python' * 20)
self.assertEqual(self.client.testString(''), '')
def testBool(self):
print('testBool')
self.assertEqual(self.client.testBool(True), True)
self.assertEqual(self.client.testBool(False), False)
def testByte(self):
print('testByte')
self.assertEqual(self.client.testByte(63), 63)
self.assertEqual(self.client.testByte(-127), -127)
def testI32(self):
print('testI32')
self.assertEqual(self.client.testI32(-1), -1)
self.assertEqual(self.client.testI32(0), 0)
def testI64(self):
print('testI64')
self.assertEqual(self.client.testI64(1), 1)
self.assertEqual(self.client.testI64(-34359738368), -34359738368)
def testDouble(self):
print('testDouble')
self.assertEqual(self.client.testDouble(-5.235098235), -5.235098235)
self.assertEqual(self.client.testDouble(0), 0)
self.assertEqual(self.client.testDouble(-1), -1)
def testBinary(self):
if isinstance(self, JSONTest):
self.skipTest('JSON protocol does not handle binary correctly.')
print('testBinary')
val = bytearray([i for i in range(0, 256)])
self.assertEqual(bytearray(self.client.testBinary(bytes(val))), val)
def testStruct(self):
print('testStruct')
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = self.client.testStruct(x)
self.assertEqual(y, x)
def testNest(self):
print('testNest')
inner = Xtruct(string_thing="Zero", byte_thing=1, i32_thing=-3, i64_thing=-5)
x = Xtruct2(struct_thing=inner, byte_thing=0, i32_thing=0)
y = self.client.testNest(x)
self.assertEqual(y, x)
def testMap(self):
print('testMap')
x = {0:1, 1:2, 2:3, 3:4, -1:-2}
y = self.client.testMap(x)
self.assertEqual(y, x)
def testSet(self):
print('testSet')
x = set([8, 1, 42])
y = self.client.testSet(x)
self.assertEqual(y, x)
def testList(self):
print('testList')
x = [1, 4, 9, -42]
y = self.client.testList(x)
self.assertEqual(y, x)
def testEnum(self):
print('testEnum')
x = Numberz.FIVE
y = self.client.testEnum(x)
self.assertEqual(y, x)
def testTypedef(self):
print('testTypedef')
x = 0xffffffffffffff # 7 bytes of 0xff
y = self.client.testTypedef(x)
self.assertEqual(y, x)
@unittest.skip('Cannot use dict as dict key')
def testMapMap(self):
print('testMapMap')
# does not work: dict is not hashable, so a dict cannot be used as a set member (or as a key in another dict)
x = {{1: 10, 2: 20}, {1: 100, 2: 200, 3: 300}, {1: 1000, 2: 2000, 3: 3000, 4: 4000}}
y = self.client.testMapMap(x)
self.assertEqual(y, x)
def testMulti(self):
print('testMulti')
xpected = Xtruct(string_thing='Hello2', byte_thing=74, i32_thing=0xff00ff, i64_thing=0xffffffffd0d0)
y = self.client.testMulti(xpected.byte_thing,
xpected.i32_thing,
xpected.i64_thing,
{ 0:'abc' },
Numberz.FIVE,
0xf0f0f0)
self.assertEqual(y, xpected)
def testException(self):
print('testException')
self.client.testException('Safe')
try:
self.client.testException('Xception')
self.fail("should have gotten exception")
except Xception as x:
self.assertEqual(x.errorCode, 1001)
self.assertEqual(x.message, 'Xception')
# TODO ensure same behavior for repr within generated python variants
# ensure exception's repr method works
#x_repr = repr(x)
#self.assertEqual(x_repr, 'Xception(errorCode=1001, message=\'Xception\')')
try:
self.client.testException('TException')
self.fail("should have gotten exception")
except TException as x:
pass
# Should not throw
self.client.testException('success')
def testMultiException(self):
print('testMultiException')
try:
self.client.testMultiException('Xception', 'ignore')
except Xception as ex:
self.assertEqual(ex.errorCode, 1001)
self.assertEqual(ex.message, 'This is an Xception')
try:
self.client.testMultiException('Xception2', 'ignore')
except Xception2 as ex:
self.assertEqual(ex.errorCode, 2002)
self.assertEqual(ex.struct_thing.string_thing, 'This is an Xception2')
y = self.client.testMultiException('success', 'foobar')
self.assertEqual(y.string_thing, 'foobar')
def testOneway(self):
print('testOneway')
start = time.time()
self.client.testOneway(1) # type is int, not float
end = time.time()
self.assertTrue(end - start < 3,
"oneway sleep took %f sec" % (end - start))
def testOnewayThenNormal(self):
print('testOnewayThenNormal')
self.client.testOneway(1) # type is int, not float
self.assertEqual(self.client.testString('Python'), 'Python')
class NormalBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class CompactTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class JSONTest(AbstractTest):
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
if options.proto == 'binary': # look for --proto on cmdline
suite.addTest(loader.loadTestsFromTestCase(NormalBinaryTest))
elif options.proto == 'accel':
suite.addTest(loader.loadTestsFromTestCase(AcceleratedBinaryTest))
elif options.proto == 'compact':
suite.addTest(loader.loadTestsFromTestCase(CompactTest))
elif options.proto == 'json':
suite.addTest(loader.loadTestsFromTestCase(JSONTest))
else:
raise AssertionError('Unknown protocol given with --protocol: %s' % options.proto)
return suite
class OwnArgsTestProgram(unittest.TestProgram):
def parseArgs(self, argv):
if args:
self.testNames = args
else:
self.testNames = (self.defaultTest,)
self.createTests()
if __name__ == "__main__":
OwnArgsTestProgram(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=1))
|
|
"""
Unit tests for optimization routines from optimize.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
pytest test_optimize.py
"""
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_,
assert_almost_equal, assert_warns,
assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import optimize
def test_check_grad():
# Verify that check_grad can estimate the derivative of the
# logistic function.
def logit(x):
return 1 / (1 + np.exp(-x))
def der_logit(x):
return np.exp(-x) / (1 + np.exp(-x))**2
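# Sanity check of the analytic derivative used here:
# d/dx [1 / (1 + exp(-x))] = exp(-x) / (1 + exp(-x))**2, which is exactly
# what der_logit returns.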
x0 = np.array([1.5])
r = optimize.check_grad(logit, der_logit, x0)
assert_almost_equal(r, 0)
r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
assert_almost_equal(r, 0)
# Check that the epsilon parameter is actually taken into account.
r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
assert_(r > 1e-7)
class CheckOptimize(object):
""" Base test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setup_method(self):
self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(x)
return f
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
class CheckOptimizeParameterized(CheckOptimize):
def test_cg(self):
# conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='CG', jac=self.grad,
options=opts)
params, fopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams,
self.grad, (), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_cg_cornercase(self):
def f(r):
return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
# Check several initial guesses. (Too far away from the
# minimum, the function ends up in the flat region of exp.)
for x0 in np.linspace(-0.75, 3, 71):
sol = optimize.minimize(f, [x0], method='CG')
assert_(sol.success)
assert_allclose(sol.x, [0.5], rtol=1e-5)
def test_bfgs(self):
# Broyden-Fletcher-Goldfarb-Shanno optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS', args=(),
options=opts)
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_infinite(self):
# Test corner case where -Inf is the minimum. See gh-2019.
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
olderr = np.seterr(over='ignore')
try:
if self.use_wrapper:
opts = {'disp': self.disp}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
assert_(not np.isfinite(func(x)))
finally:
np.seterr(**olderr)
def test_powell(self):
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, floating-point
# computations are not bit-for-bit reproducible across
# machines, and with e.g. MKL, data alignment etc. also
# affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[34:39],
[[0.72949016, -0.44156936, 0.47100962],
[0.72949016, -0.44156936, 0.48052496],
[1.45898031, -0.88313872, 0.95153458],
[0.72949016, -0.44156936, 0.47576729],
[1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_neldermead(self):
# Nelder-Mead simplex algorithm
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag, final_simplex = (
res['x'], res['fun'], res['nit'], res['nfev'],
res['status'], res['final_simplex'])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[76:78],
[[0.1928968, -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex(self):
# Nelder-Mead simplex algorithm
simplex = np.zeros((4, 3))
simplex[...] = self.startparams
for j in range(3):
simplex[j+1,j] += 0.1
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': True, 'initial_simplex': simplex}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = \
res['x'], res['fun'], res['nit'], res['nfev'], \
res['status']
assert_allclose(res['allvecs'][0], simplex[0])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.17.0. Don't allow them to increase.
assert_(self.funccalls == 100, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.15.0
assert_allclose(self.trace[50:52],
[[0.14687474, -0.5103282, 0.48252111],
[0.14474003, -0.5282084, 0.48743951]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex_bad(self):
# Check that it fails with bad simplices
bad_simplices = []
simplex = np.zeros((3, 2))
simplex[...] = self.startparams[:2]
for j in range(2):
simplex[j+1,j] += 0.1
bad_simplices.append(simplex)
simplex = np.zeros((3, 3))
bad_simplices.append(simplex)
for simplex in bad_simplices:
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False, 'initial_simplex': simplex}
assert_raises(ValueError,
optimize.minimize, self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
else:
assert_raises(ValueError, optimize.fmin, self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
def test_ncg_negative_maxiter(self):
# Regression test for gh-8241
opts = {'maxiter': -1}
result = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)
assert_(result.status == 1)
def test_ncg(self):
# line-search Newton conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0
#assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self):
# Newton conjugate gradient with Hessian
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess=self.hess,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess=self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self):
# Newton conjugate gradient with Hessian times a vector p.
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp=self.hessp,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p=self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_obj_func_returns_scalar():
match = ("The user-provided "
"objective function must "
"return a scalar value.")
with assert_raises(ValueError, match=match):
optimize.minimize(lambda x: x, np.array([1, 1]))
def test_neldermead_xatol_fatol():
# gh4484
# test we can call with fatol, xatol specified
func = lambda x: x[0]**2 + x[1]**2
optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
xatol=1e-3, fatol=1e-3)
assert_warns(DeprecationWarning,
optimize._minimize._minimize_neldermead,
func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2)
def test_neldermead_adaptive():
func = lambda x: np.sum(x**2)
p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, 0.32308456,
0.9692297, 0.4471682, 0.77411992, 0.80441652, 0.35994957, 0.75487856,
0.99973421, 0.65063887, 0.09626474]
res = optimize.minimize(func, p0, method='Nelder-Mead')
assert_equal(res.success, False)
res = optimize.minimize(func, p0, method='Nelder-Mead',
options={'adaptive':True})
assert_equal(res.success, True)
class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = True
class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = False
class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = True
class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = False
class TestOptimizeSimple(CheckOptimize):
def test_bfgs_nan(self):
# Test corner case where nan is fed to optimizer. See gh-2067.
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
with np.errstate(over='ignore', invalid='ignore'):
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
def test_bfgs_nan_return(self):
# Test corner cases where fun returns NaN. See gh-4793.
# First case: NaN from first call.
func = lambda x: np.nan
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
# Second case: NaN from second call.
func = lambda x: 0 if x == 0 else np.nan
fprime = lambda x: np.ones_like(x) # Steer away from zero.
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0, jac=fprime)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
def test_bfgs_numerical_jacobian(self):
# BFGS with numerical jacobian and a vector epsilon parameter.
# define the epsilon parameter using a random vector
epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_bfgs_gh_2169(self):
def f(x):
if x < 0:
return 1.79769313e+308
else:
return x + 1./x
xs = optimize.fmin_bfgs(f, [10.], disp=False)
assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
def test_bfgs_double_evaluations(self):
# check bfgs does not evaluate twice in a row at same point
def f(x):
xp = float(x)
assert xp not in seen
seen.add(xp)
return 10*x**2, 20*x
seen = set()
optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)
def test_l_bfgs_b(self):
# limited-memory bound-constrained BFGS algorithm
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[0., -0.52489628, 0.48753042],
[0., -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
# L-BFGS-B with numerical jacobian
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with combined objective function and jacobian
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_maxiter(self):
# gh7854
# Ensure that not more than maxiters are ever run.
class Callback(object):
def __init__(self):
self.nit = 0
self.fun = None
self.x = None
def __call__(self, x):
self.x = x
self.fun = optimize.rosen(x)
self.nit += 1
c = Callback()
res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
callback=c, options={'maxiter': 5})
assert_equal(res.nit, 5)
assert_almost_equal(res.x, c.x)
assert_almost_equal(res.fun, c.fun)
assert_equal(res.status, 1)
assert_(res.success is False)
assert_equal(res.message.decode(), 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')
def test_minimize_l_bfgs_b(self):
# Minimize with L-BFGS-B method
opts = {'disp': False, 'maxiter': self.maxiter}
r = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
assert_allclose(self.func(r.x), self.func(self.solution),
atol=1e-6)
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', options=opts)
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
# check that function evaluations in approximate jacobian are counted
assert_(ra.nfev > r.nfev)
def test_minimize_l_bfgs_b_ftol(self):
# Check that the `ftol` parameter in l_bfgs_b works as expected
v0 = None
for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
sol = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
v = self.func(sol.x)
if v0 is None:
v0 = v
else:
assert_(v < v0)
assert_allclose(v, self.func(self.solution), rtol=tol)
def test_minimize_l_bfgs_maxls(self):
# check that the maxls is passed down to the Fortran routine
sol = optimize.minimize(optimize.rosen, np.array([-1.2,1.0]),
method='L-BFGS-B', jac=optimize.rosen_der,
options={'disp': False, 'maxls': 1})
assert_(not sol.success)
def test_minimize_l_bfgs_b_maxfun_interruption(self):
# gh-6162
f = optimize.rosen
g = optimize.rosen_der
values = []
x0 = np.full(7, 1000)
def objfun(x):
value = f(x)
values.append(value)
return value
# Look for an interesting test case.
# Request a maxfun that stops at a particularly bad function
# evaluation somewhere between 100 and 300 evaluations.
low, medium, high = 30, 100, 300
optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
v, k = max((y, i) for i, y in enumerate(values[medium:]))
maxfun = medium + k
# If the minimization strategy is reasonable,
# the minimize() result should not be worse than the best
# of the first 30 function evaluations.
target = min(values[:low])
xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
assert_array_less(fmin, target)
def test_custom(self):
# This function comes from the documentation example.
def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = x0
besty = fun(x0)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for dim in range(np.size(x0)):
for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
testx = np.copy(bestx)
testx[dim] = s
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
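# custmin above is a simple axis-aligned coordinate search: it probes each
# dimension at +/- stepsize, keeps any improvement, and stops once no move
# helps or the iteration/evaluation budget is exhausted.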
x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
res = optimize.minimize(optimize.rosen, x0, method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
def test_minimize_tol_parameter(self):
# Check that the minimize() tol= argument does something
def func(z):
x, y = z
return x**2*y**2 + x**4 + 1
def dfunc(z):
x, y = z
return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyla', 'slsqp']:
if method in ('nelder-mead', 'powell', 'cobyla'):
jac = None
else:
jac = dfunc
sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
method=method)
sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
method=method)
assert_(func(sol1.x) < func(sol2.x),
"%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
@pytest.mark.parametrize('method', ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
'fmin_slsqp',
'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B',
'TNC', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov'])
def test_minimize_callback_copies_array(self, method):
# Check that arrays passed to callbacks are not modified
# in place by the optimizer afterward
if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
else:
func = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
x0 = np.zeros(10)
# Set options
kwargs = {}
if method.startswith('fmin'):
routine = getattr(optimize, method)
if method == 'fmin_slsqp':
kwargs['iter'] = 5
elif method == 'fmin_tnc':
kwargs['maxfun'] = 100
else:
kwargs['maxiter'] = 5
else:
def routine(*a, **kw):
kw['method'] = method
return optimize.minimize(*a, **kw)
if method == 'TNC':
kwargs['options'] = dict(maxiter=100)
else:
kwargs['options'] = dict(maxiter=5)
if method in ('fmin_ncg',):
kwargs['fprime'] = jac
elif method in ('Newton-CG',):
kwargs['jac'] = jac
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = jac
kwargs['hess'] = hess
# Run with callback
results = []
def callback(x, *args, **kwargs):
results.append((x, np.copy(x)))
sol = routine(func, x0, callback=callback, **kwargs)
# Check returned arrays coincide with their copies and have no memory overlap
assert_(len(results) > 2)
assert_(all(np.all(x == y) for x, y in results))
assert_(not any(np.may_share_memory(x[0], y[0]) for x, y in itertools.combinations(results, 2)))
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'])
def test_no_increase(self, method):
# Check that the solver doesn't return a value worse than the
# initial point.
def func(x):
return (x - 1)**2
def bad_grad(x):
# purposefully invalid gradient function, simulates a case
# where line searches start failing
return 2*(x - 1) * (-1) - 2
x0 = np.array([2.0])
f0 = func(x0)
jac = bad_grad
if method in ['nelder-mead', 'powell', 'cobyla']:
jac = None
sol = optimize.minimize(func, x0, jac=jac, method=method,
options=dict(maxiter=20))
assert_equal(func(sol.x), sol.fun)
if method == 'slsqp':
pytest.xfail("SLSQP returns slightly worse")
assert_(func(sol.x) <= f0)
def test_slsqp_respect_bounds(self):
# Regression test for gh-3108
def f(x):
return sum((x - np.array([1., 2., 3., 4.]))**2)
def cons(x):
a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
x0 = np.array([0.5, 1., 1.5, 2.])
res = optimize.minimize(f, x0, method='slsqp',
constraints={'type': 'ineq', 'fun': cons})
assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
def test_minimize_automethod(self):
def f(x):
return x**2
def cons(x):
return x - 2
x0 = np.array([10.])
sol_0 = optimize.minimize(f, x0)
sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}])
sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)])
sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)])
for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
assert_(sol.success)
assert_allclose(sol_0.x, 0, atol=1e-7)
assert_allclose(sol_1.x, 2, atol=1e-7)
assert_allclose(sol_2.x, 5, atol=1e-7)
assert_allclose(sol_3.x, 5, atol=1e-7)
assert_allclose(sol_4.x, 2, atol=1e-7)
def test_minimize_coerce_args_param(self):
# Regression test for gh-3503
def Y(x, c):
return np.sum((x-c)**2)
def dY_dx(x, c=None):
return 2*(x-c)
c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
xinit = np.random.randn(len(c))
optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
def test_initial_step_scaling(self):
# Check that optimizer initial step is not huge even if the
# function and gradients are
scales = [1e-50, 1, 1e50]
methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
def f(x):
if first_step_size[0] is None and x[0] != x0[0]:
first_step_size[0] = abs(x[0] - x0[0])
if abs(x).max() > 1e4:
raise AssertionError("Optimization stepped far away!")
return scale*(x[0] - 1)**2
def g(x):
return np.array([scale*(x[0] - 1)])
for scale, method in itertools.product(scales, methods):
if method in ('CG', 'BFGS'):
options = dict(gtol=scale*1e-8)
else:
options = dict()
if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
# XXX: these methods return the initial point if they see a small gradient
continue
x0 = [-1.0]
first_step_size = [None]
res = optimize.minimize(f, x0, jac=g, method=method,
options=options)
err_msg = "{0} {1}: {2}: {3}".format(method, scale, first_step_size,
res)
assert_(res.success, err_msg)
assert_allclose(res.x, [1.0], err_msg=err_msg)
assert_(res.nit <= 3, err_msg)
if scale > 1e-10:
if method in ('CG', 'BFGS'):
assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
else:
# Newton-CG and L-BFGS-B use different logic for the first step,
# but are both scaling invariant with step sizes ~ 1
assert_(first_step_size[0] > 0.5 and first_step_size[0] < 3,
err_msg)
else:
# step size has upper bound of ||grad||, so line
# search makes many small steps
pass
class TestLBFGSBBounds(object):
def setup_method(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return 1.0 / p * (x[0]**p + x[1]**p)
def jac(self, x, p=2.0):
return x**(p - 1)
def fj(self, x, p=2.0):
return self.fun(x, p), self.jac(x, p)
def test_l_bfgs_b_bounds(self):
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
fprime=self.jac,
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with fun and jac combined and extra arguments
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_l_bfgs_b_bounds(self):
# Minimize with method='L-BFGS-B' with bounds
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=self.bounds)
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar(object):
def setup_method(self):
self.solution = 1.5
def fun(self, x, a=1.5):
"""Objective function"""
return (x - a)**2 - 0.8
def test_brent(self):
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_golden(self):
x = optimize.golden(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, tol=0)
assert_allclose(x, self.solution)
maxiter_test_cases = [0, 1, 5]
for maxiter in maxiter_test_cases:
x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
nfev0, nfev = x0[2], x[2]
assert_equal(nfev - nfev0, maxiter)
def test_fminbound(self):
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
with pytest.raises(ValueError, match='.*must be scalar.*'):
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent')
assert_(x.success)
x = optimize.minimize_scalar(self.fun, method='Brent',
options=dict(maxiter=3))
assert_(not x.success)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
np.array([5])),
args=(np.array([1.5]), ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(5, 1), method='bounded', args=(1.5, ))
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar_custom(self):
# This function comes from the documentation example.
def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = (bracket[1] + bracket[0]) / 2.0
besty = fun(bestx)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for testx in [bestx - stepsize, bestx + stepsize]:
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, self.solution, atol=1e-6)
def test_minimize_scalar_coerce_args_param(self):
# Regression test for gh-3503
optimize.minimize_scalar(self.fun, args=1.5)
def test_brent_negative_tolerance():
assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
class TestNewtonCg(object):
def test_rosenbrock(self):
x0 = np.array([-1.2, 1.0])
sol = optimize.minimize(optimize.rosen, x0,
jac=optimize.rosen_der,
hess=optimize.rosen_hess,
tol=1e-5,
method='Newton-CG')
assert_(sol.success, sol.message)
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
def test_himmelblau(self):
x0 = np.array(himmelblau_x0)
sol = optimize.minimize(himmelblau,
x0,
jac=himmelblau_grad,
hess=himmelblau_hess,
method='Newton-CG',
tol=1e-6)
assert_(sol.success, sol.message)
assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
class TestRosen(object):
def test_hess(self):
# Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
def himmelblau(p):
"""
R^2 -> R^1 test function for optimization. The function has four local
minima where himmelblau(xopt) == 0.
"""
x, y = p
a = x*x + y - 11
b = x + y*y - 7
return a*a + b*b
def himmelblau_grad(p):
x, y = p
return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])
def himmelblau_hess(p):
x, y = p
return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
[4*x + 4*y, 4*x + 12*y**2 - 26]])
himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
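# The analytic derivatives above are easy to get wrong, so a finite-difference
# sanity check can be useful. This is an illustrative sketch (not part of the
# original test suite) using scipy.optimize.check_grad, which returns the
# 2-norm of the difference between the analytic and numerically estimated
# gradient at the given point.
def _himmelblau_gradient_sanity_check():
    err = optimize.check_grad(himmelblau, himmelblau_grad, himmelblau_x0)
    assert err < 1e-4, "analytic gradient disagrees with finite differences"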
def test_minimize_multiple_constraints():
# Regression test for gh-4240.
def func(x):
return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
def func1(x):
return np.array([x[1]])
def func2(x):
return np.array([x[2]])
cons = ({'type': 'ineq', 'fun': func},
{'type': 'ineq', 'fun': func1},
{'type': 'ineq', 'fun': func2})
f = lambda x: -1 * (x[0] + x[1] + x[2])
res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
assert_allclose(res.x, [125, 0, 0], atol=1e-10)
class TestOptimizeResultAttributes(object):
# Test that all minimizers return an OptimizeResult containing
# all the OptimizeResult attributes
def setup_method(self):
self.x0 = [5, 5]
self.func = optimize.rosen
self.jac = optimize.rosen_der
self.hess = optimize.rosen_hess
self.hessp = optimize.rosen_hess_prod
self.bounds = [(0., 10.), (0., 10.)]
def test_attributes_present(self):
methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg',
'trust-ncg']
attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
'message']
skip = {'COBYLA': ['nit']}
for method in methods:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Method .+ does not use (gradient|Hessian.*) information")
res = optimize.minimize(self.func, self.x0, method=method,
jac=self.jac, hess=self.hess,
hessp=self.hessp)
for attribute in attributes:
if method in skip and attribute in skip[method]:
continue
assert_(hasattr(res, attribute))
assert_(attribute in dir(res))
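# The attributes exercised above are common to every OptimizeResult, so
# downstream code can rely on them independently of the solver. A minimal
# sketch (illustrative, not part of the tests):
def _optimize_result_attributes_sketch():
    res = optimize.minimize(optimize.rosen, [5, 5], method='BFGS')
    return (res.x, res.fun, res.nit, res.nfev, res.status, res.success,
            res.message)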
def f1(z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
def f2(z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
def f3(z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
def brute_func(z, *params):
return f1(z, *params) + f2(z, *params) + f3(z, *params)
class TestBrute:
# Test the "brute force" method
def setup_method(self):
self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
self.solution = np.array([-1.05665192, 1.80834843])
def brute_func(self, z, *params):
        # an instance method to optimize
return brute_func(z, *params)
def test_brute(self):
# test fmin
resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True, finish=optimize.fmin)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
atol=1e-3)
# test minimize
resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
atol=1e-3)
        # test that brute can optimize an instance method (the other tests use
        # a non-class-based function)
resbrute = optimize.brute(self.brute_func, self.rranges,
args=self.params, full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
def test_1D(self):
# test that for a 1D problem the test function is passed an array,
# not a scalar.
def f(x):
assert_(len(x.shape) == 1)
assert_(x.shape[0] == 1)
return x ** 2
optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
def test_workers(self):
# check that parallel evaluation works
resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True, finish=None)
resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True, finish=None, workers=2)
assert_allclose(resbrute1[-1], resbrute[-1])
assert_allclose(resbrute1[0], resbrute[0])
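# Besides an integer worker count, `optimize.brute` also accepts a map-like
# callable for its `workers` argument (SciPy >= 1.3). A hedged sketch, assuming
# a ranges/params pair like the ones used in TestBrute:
#
#     from multiprocessing import Pool
#     with Pool(2) as pool:
#         resbrute = optimize.brute(brute_func, rranges, args=params,
#                                   full_output=True, finish=None,
#                                   workers=pool.map)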
class TestIterationLimits(object):
    # Tests that optimisation does not give up before trying the requested
    # number of iterations or evaluations, and that it does not succeed
    # by exceeding the limits.
def setup_method(self):
self.funcalls = 0
def slow_func(self, v):
self.funcalls += 1
r,t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0],v[1])
return np.sin(r*20 + t)+r*0.5
def test_neldermead_limit(self):
self.check_limits("Nelder-Mead", 200)
def test_powell_limit(self):
self.check_limits("powell", 1000)
def check_limits(self, method, default_iters):
for start_v in [[0.1,0.1], [1,1], [2,2]]:
for mfev in [50, 500, 5000]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev)
else:
assert_(res["nfev"] >= mfev)
for mit in [50, 500,5000]:
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit})
if res["success"]:
assert_(res["nit"] <= mit)
else:
assert_(res["nit"] >= mit)
for mfev,mit in [[50,50], [5000,5000],[5000,np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit, "maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev and res["nit"] <= mit)
else:
assert_(res["nfev"] >= mfev or res["nit"] >= mit)
for mfev,mit in [[np.inf,None], [None,np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method, options={"maxiter":mit, "maxfev":mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
if mfev is None:
assert_(res["nfev"] < default_iters*2)
else:
assert_(res["nit"] <= default_iters*2)
else:
assert_(res["nfev"] >= default_iters*2 or
res["nit"] >= default_iters*2)
|
|
import sys
from functools import partial
import pluginmanager
import plugin
from utilities.GeneralUtilities import warning, error, executable_exists
class PluginManager(object):
"""
Frontend for pluginmanager
https://github.com/benhoff/pluginmanager
Also handles plugin.PluginComposed
"""
def __init__(self):
import pluginmanager.module_manager
self._backend = pluginmanager.PluginInterface()
# patch to ignore import exception
_load_source = pluginmanager.module_manager.load_source
def patched_load_source(*args):
try:
return _load_source(*args)
except ImportError as e:
print(e)
import sys
return sys
pluginmanager.module_manager.load_source = patched_load_source
self._plugin_dependency = PluginDependency()
self._cache = None
self._plugins_loaded = 0
self._cache_disabled = []
# blacklist files
def __ends_with_py(s):
return [x for x in s if x.endswith(".py")]
self._backend.set_file_filters(__ends_with_py)
self._backend.add_blacklisted_directories("jarviscli/packages/aiml")
self._backend.add_blacklisted_directories("jarviscli/packages/memory")
def add_directory(self, path):
"""Add directory to search path for plugins"""
self._backend.add_plugin_directories(path)
self._cache = None
def add_plugin(self, plugin):
"""Add singe plugin-instance"""
self._backend.add_plugins(plugin)
def _load(self):
"""lazy load"""
if self._cache is not None:
# cache clean!
return
self._cache = plugin.PluginStorage()
self._backend.collect_plugins()
(enabled, disabled) = self._validate_plugins(self._backend.get_plugins())
for plugin_to_add in enabled:
self._load_plugin(plugin_to_add, self._cache)
self._cache_disabled = self._filter_duplicated_disabled(
enabled, disabled)
self._plugins_loaded = len(enabled)
def _validate_plugins(self, plugins):
def partition(plugins):
plugins_valid = []
plugins_incompatible = []
for plugin_to_validate in plugins:
if not is_plugin(plugin_to_validate):
continue
compability_check_result = self._plugin_dependency.check(
plugin_to_validate)
if compability_check_result is True:
plugins_valid.append(plugin_to_validate)
else:
item = (
plugin_to_validate.get_name(),
compability_check_result)
plugins_incompatible.append(item)
return (plugins_valid, plugins_incompatible)
def is_plugin(plugin_to_validate):
if not isinstance(plugin_to_validate, pluginmanager.IPlugin):
return False
if plugin_to_validate.get_name() == "plugin":
return False
return True
return partition(plugins)
def _load_plugin(self, plugin_to_add, plugin_storage):
def handle_aliases(plugin_to_add):
add_plugin(
plugin_to_add.get_name().split(' '),
plugin_to_add,
plugin_storage)
for name in plugin_to_add.alias():
add_plugin(
name.lower().split(' '),
plugin_to_add,
plugin_storage)
def add_plugin(name, plugin_to_add, parent):
if len(name) == 1:
add_plugin_single(name[0], plugin_to_add, parent)
else:
add_plugin_compose(name[0], name[1:], plugin_to_add, parent)
def add_plugin_single(name, plugin_to_add, parent):
plugin_existing = parent.get_plugins(name)
if plugin_existing is None:
parent.add_plugin(name, plugin_to_add)
else:
if not plugin_existing.is_callable_plugin():
plugin_existing.change_with(plugin_to_add)
parent.add_plugin(name, plugin_to_add)
else:
error("Duplicated plugin {}!".format(name))
def add_plugin_compose(
name_first,
name_remaining,
plugin_to_add,
parent):
plugin_existing = parent.get_plugins(name_first)
if plugin_existing is None:
plugin_existing = plugin.Plugin()
plugin_existing._name = name_first
plugin_existing.__doc__ = ''
parent.add_plugin(name_first, plugin_existing)
add_plugin(name_remaining, plugin_to_add, plugin_existing)
return handle_aliases(plugin_to_add)
def _filter_duplicated_disabled(self, enabled_list, disabled_list):
enabled_names = []
for plugin_enabled in enabled_list:
enabled_names.append(plugin_enabled.get_name())
enabled_names.extend(plugin_enabled.alias())
disabled_unique = {}
for plugin_name, disable_reason in disabled_list:
if plugin_name in enabled_names:
continue
if plugin_name in disabled_unique:
disabled_unique[plugin_name].append(disable_reason)
else:
disabled_unique[plugin_name] = [disable_reason]
return disabled_unique
def get_plugins(self):
"""
        Returns all loaded plugins as a dictionary
        Key: name
        Value: plugin instance
"""
self._load()
return self._cache.get_plugins()
def get_disabled(self):
"""
        Returns all disabled plugin names as a dictionary
Key: name
Value: List of reasons why disabled
"""
self._load()
return self._cache_disabled
def get_number_plugins_loaded(self):
self._load()
return self._plugins_loaded
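# Typical usage of PluginManager (illustrative sketch; the plugin directory is
# an assumption, not taken from this file):
#
#     manager = PluginManager()
#     manager.add_directory("custom/plugins")
#     plugins = manager.get_plugins()          # {name: plugin instance}
#     disabled = manager.get_disabled()        # {name: [reasons it was disabled]}
#     count = manager.get_number_plugins_loaded()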
class PluginDependency(object):
"""
    Plugins may have requirements, specified by require().
    Please refer to the plugin documentation.
    This class checks whether those dependencies are fulfilled.
"""
def __init__(self):
        # plugin should match these requirements
self._requirement_has_network = True
if sys.platform == "darwin":
self._requirement_platform = plugin.MACOS
elif sys.platform == "win32":
self._requirement_platform = plugin.WINDOWS
elif sys.platform.startswith("linux"):
self._requirement_platform = plugin.LINUX
else:
self._requirement_platform = None
warning("Unsupported platform {}".format(sys.platform))
def _plugin_get_requirements(self, requirements_iter):
plugin_requirements = {
"platform": [],
"network": [],
"native": []
}
# parse requirements
for requirement in requirements_iter:
key = requirement[0]
values = requirement[1]
if isinstance(values, str) or isinstance(values, bool):
values = [values]
if key in plugin_requirements:
plugin_requirements[key].extend(values)
else:
warning("{}={}: No supported requirement".format(key, values))
return plugin_requirements
def check(self, plugin):
"""
        Parses plugin.require(). Please refer to the plugin.Plugin documentation.
"""
plugin_requirements = self._plugin_get_requirements(plugin.require())
if not self._check_platform(plugin_requirements["platform"]):
required_platform = ", ".join(plugin_requirements["platform"])
return "Requires os {}".format(required_platform)
if not self._check_network(plugin_requirements["network"], plugin):
return "Requires networking"
natives_ok = self._check_native(plugin_requirements["native"], plugin)
if natives_ok is not True:
return natives_ok
return True
def _check_platform(self, values):
if not values:
return True
if plugin.UNIX in values:
values += [plugin.LINUX, plugin.MACOS]
return self._requirement_platform in values
def _check_network(self, values, plugin):
if True in values:
if not self._requirement_has_network:
return False
self._plugin_patch_network_error_message(plugin)
return True
return True
def _check_native(self, values, plugin):
missing = ""
for native in values:
if native.startswith('!'):
# native should not exist
requirement_ok = not executable_exists(native[1:])
else:
requirement_ok = executable_exists(native)
if not requirement_ok:
missing += native
missing += " "
if not missing:
return True
message = "Missing native executables {}"
return message.format(missing)
def _plugin_patch_network_error_message(self, plugin):
if "plugin._network_error_patched" not in plugin.__dict__:
plugin.run = partial(
plugin._plugin_run_with_network_error, plugin.run)
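# For reference, PluginDependency expects plugin.require() to yield (key, value)
# pairs using the keys parsed above. A hedged sketch of what a compatible plugin
# might declare (values are illustrative):
#
#     def require(self):
#         yield ("platform", plugin.LINUX)   # or plugin.UNIX / plugin.MACOS / plugin.WINDOWS
#         yield ("network", True)            # needs internet access
#         yield ("native", "ffmpeg")         # required executable; "!foo" means foo must NOT exist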
|
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for various parts of L{twisted.web}.
"""
from cStringIO import StringIO
from zope.interface import implements
from twisted.trial import unittest
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.web import server, resource, util
from twisted.internet import defer, interfaces, error, task
from twisted.web import http, http_headers
from twisted.python import log
class DummyRequest:
"""
Represents a dummy or fake request.
@ivar _finishedDeferreds: C{None} or a C{list} of L{Deferreds} which will
be called back with C{None} when C{finish} is called or which will be
errbacked if C{processingFailed} is called.
@type headers: C{dict}
@ivar headers: A mapping of header name to header value for all request
headers.
@type outgoingHeaders: C{dict}
@ivar outgoingHeaders: A mapping of header name to header value for all
response headers.
@type responseCode: C{int}
@ivar responseCode: The response code which was passed to
C{setResponseCode}.
@type written: C{list} of C{str}
@ivar written: The bytes which have been written to the request.
"""
uri = 'http://dummy/'
method = 'GET'
client = None
def registerProducer(self, prod,s):
self.go = 1
while self.go:
prod.resumeProducing()
def unregisterProducer(self):
self.go = 0
def __init__(self, postpath, session=None):
self.sitepath = []
self.written = []
self.finished = 0
self.postpath = postpath
self.prepath = []
self.session = None
self.protoSession = session or server.Session(0, self)
self.args = {}
self.outgoingHeaders = {}
self.responseHeaders = http_headers.Headers()
self.responseCode = None
self.headers = {}
self._finishedDeferreds = []
def getHeader(self, name):
"""
Retrieve the value of a request header.
@type name: C{str}
@param name: The name of the request header for which to retrieve the
value. Header names are compared case-insensitively.
@rtype: C{str} or L{NoneType}
@return: The value of the specified request header.
"""
return self.headers.get(name.lower(), None)
def setHeader(self, name, value):
"""TODO: make this assert on write() if the header is content-length
"""
self.outgoingHeaders[name.lower()] = value
def getSession(self):
if self.session:
return self.session
assert not self.written, "Session cannot be requested after data has been written."
self.session = self.protoSession
return self.session
def write(self, data):
self.written.append(data)
def notifyFinish(self):
"""
Return a L{Deferred} which is called back with C{None} when the request
is finished. This will probably only work if you haven't called
C{finish} yet.
"""
finished = Deferred()
self._finishedDeferreds.append(finished)
return finished
def finish(self):
"""
        Record that the request is finished and callback any L{Deferred}s
        waiting for notification of this.
"""
self.finished = self.finished + 1
if self._finishedDeferreds is not None:
observers = self._finishedDeferreds
self._finishedDeferreds = None
for obs in observers:
obs.callback(None)
def processingFailed(self, reason):
"""
        Errback any L{Deferred}s waiting for finish notification.
"""
if self._finishedDeferreds is not None:
observers = self._finishedDeferreds
self._finishedDeferreds = None
for obs in observers:
obs.errback(reason)
def addArg(self, name, value):
self.args[name] = [value]
def setResponseCode(self, code):
"""
        Set the HTTP status response code, taking care that this is called
        before any data is written.
"""
assert not self.written, "Response code cannot be set after data has been written: %s." % "@@@@".join(self.written)
self.responseCode = code
def setLastModified(self, when):
assert not self.written, "Last-Modified cannot be set after data has been written: %s." % "@@@@".join(self.written)
def setETag(self, tag):
assert not self.written, "ETag cannot be set after data has been written: %s." % "@@@@".join(self.written)
def getClientIP(self):
"""
Return the IPv4 address of the client which made this request, if there
is one, otherwise C{None}.
"""
if isinstance(self.client, IPv4Address):
return self.client.host
return None
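# DummyRequest stands in for a real server.Request throughout these tests.
# A minimal usage sketch (SomeResource is a placeholder):
#
#     request = DummyRequest([''])
#     child = resource.getChildForRequest(SomeResource(), request)
#     child.render(request)
#     "".join(request.written)    # everything the resource wrote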
class ResourceTestCase(unittest.TestCase):
def testListEntities(self):
r = resource.Resource()
self.failUnlessEqual([], r.listEntities())
class SimpleResource(resource.Resource):
def render(self, request):
if http.CACHED in (request.setLastModified(10),
request.setETag('MatchingTag')):
return ''
else:
return "correct"
class DummyChannel:
class TCP:
port = 80
def __init__(self):
self.written = StringIO()
def getPeer(self):
return IPv4Address("TCP", '192.168.1.1', 12344)
def write(self, bytes):
assert isinstance(bytes, str)
self.written.write(bytes)
def writeSequence(self, iovec):
map(self.write, iovec)
def getHost(self):
return IPv4Address("TCP", '10.0.0.1', self.port)
class SSL(TCP):
implements(interfaces.ISSLTransport)
site = server.Site(resource.Resource())
def __init__(self):
self.transport = self.TCP()
def requestDone(self, request):
pass
class SiteTest(unittest.TestCase):
def test_simplestSite(self):
"""
L{Site.getResourceFor} returns the C{""} child of the root resource it
is constructed with when processing a request for I{/}.
"""
sres1 = SimpleResource()
sres2 = SimpleResource()
sres1.putChild("",sres2)
site = server.Site(sres1)
self.assertIdentical(
site.getResourceFor(DummyRequest([''])),
sres2, "Got the wrong resource.")
class SessionTest(unittest.TestCase):
def setUp(self):
"""
Set up a session using a simulated scheduler. Creates a
C{times} attribute which specifies the return values of the
session's C{_getTime} method.
"""
clock = self.clock = task.Clock()
times = self.times = []
class MockSession(server.Session):
"""
A mock L{server.Session} object which fakes out scheduling
with the C{clock} attribute and fakes out the current time
to be the elements of L{SessionTest}'s C{times} attribute.
"""
def loopFactory(self, *a, **kw):
"""
Create a L{task.LoopingCall} which uses
L{SessionTest}'s C{clock} attribute.
"""
call = task.LoopingCall(*a, **kw)
call.clock = clock
return call
def _getTime(self):
return times.pop(0)
self.site = server.Site(SimpleResource())
self.site.sessionFactory = MockSession
def test_basicExpiration(self):
"""
Test session expiration: setup a session, and simulate an expiration
time.
"""
self.times.extend([0, server.Session.sessionTimeout + 1])
session = self.site.makeSession()
hasExpired = [False]
def cbExpire():
hasExpired[0] = True
session.notifyOnExpire(cbExpire)
self.clock.advance(server.Site.sessionCheckTime - 1)
# Looping call should not have been executed
self.failIf(hasExpired[0])
self.clock.advance(1)
self.failUnless(hasExpired[0])
def test_delayedCallCleanup(self):
"""
Checking to make sure Sessions do not leave extra DelayedCalls.
"""
self.times.extend([0, 100])
session = self.site.makeSession()
loop = session.checkExpiredLoop
session.touch()
self.failUnless(loop.running)
session.expire()
self.failIf(self.clock.calls)
self.failIf(loop.running)
# Conditional requests:
# If-None-Match, If-Modified-Since
# make conditional request:
# normal response if condition succeeds
# if condition fails:
# response code
# no body
def httpBody(whole):
return whole.split('\r\n\r\n', 1)[1]
def httpHeader(whole, key):
key = key.lower()
headers = whole.split('\r\n\r\n', 1)[0]
for header in headers.split('\r\n'):
if header.lower().startswith(key):
return header.split(':', 1)[1].strip()
return None
def httpCode(whole):
l1 = whole.split('\r\n', 1)[0]
return int(l1.split()[1])
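# Example of what these helpers extract from a raw response (illustrative):
#
#     raw = "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\ncorrect"
#     httpCode(raw)                     # -> 200
#     httpHeader(raw, "Content-Type")   # -> "text/html"
#     httpBody(raw)                     # -> "correct"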
class ConditionalTest(unittest.TestCase):
"""web.server's handling of conditional requests for cache validation."""
# XXX: test web.distrib.
def setUp(self):
self.resrc = SimpleResource()
self.resrc.putChild('', self.resrc)
self.site = server.Site(self.resrc)
self.site.logFile = log.logfile
# HELLLLLLLLLLP! This harness is Very Ugly.
self.channel = self.site.buildProtocol(None)
self.transport = http.StringTransport()
self.transport.close = lambda *a, **kw: None
self.transport.disconnecting = lambda *a, **kw: 0
self.transport.getPeer = lambda *a, **kw: "peer"
self.transport.getHost = lambda *a, **kw: "host"
self.channel.makeConnection(self.transport)
for l in ["GET / HTTP/1.1",
"Accept: text/html"]:
self.channel.lineReceived(l)
def tearDown(self):
self.channel.connectionLost(None)
def test_modified(self):
"""If-Modified-Since cache validator (positive)"""
self.channel.lineReceived("If-Modified-Since: %s"
% http.datetimeToString(1))
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.OK)
self.failUnlessEqual(httpBody(result), "correct")
def test_unmodified(self):
"""If-Modified-Since cache validator (negative)"""
self.channel.lineReceived("If-Modified-Since: %s"
% http.datetimeToString(100))
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
self.failUnlessEqual(httpBody(result), "")
def test_etagMatchedNot(self):
"""If-None-Match ETag cache validator (positive)"""
self.channel.lineReceived("If-None-Match: unmatchedTag")
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.OK)
self.failUnlessEqual(httpBody(result), "correct")
def test_etagMatched(self):
"""If-None-Match ETag cache validator (negative)"""
self.channel.lineReceived("If-None-Match: MatchingTag")
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpHeader(result, "ETag"), "MatchingTag")
self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
self.failUnlessEqual(httpBody(result), "")
from twisted.web import google
class GoogleTestCase(unittest.TestCase):
def testCheckGoogle(self):
raise unittest.SkipTest("no violation of google ToS")
d = google.checkGoogle('site:www.twistedmatrix.com twisted')
d.addCallback(self.assertEquals, 'http://twistedmatrix.com/')
return d
from twisted.web import static
from twisted.web import script
class StaticFileTest(unittest.TestCase):
def testStaticPaths(self):
import os
dp = os.path.join(self.mktemp(),"hello")
ddp = os.path.join(dp, "goodbye")
tp = os.path.abspath(os.path.join(dp,"world.txt"))
tpy = os.path.join(dp,"wyrld.rpy")
os.makedirs(dp)
f = open(tp,"wb")
f.write("hello world")
f = open(tpy, "wb")
f.write("""
from twisted.web.static import Data
resource = Data('dynamic world','text/plain')
""")
f = static.File(dp)
f.processors = {
'.rpy': script.ResourceScript,
}
f.indexNames = f.indexNames + ['world.txt']
self.assertEquals(f.getChild('', DummyRequest([''])).path,
tp)
self.assertEquals(f.getChild('wyrld.rpy', DummyRequest(['wyrld.rpy'])
).__class__,
static.Data)
f = static.File(dp)
wtextr = DummyRequest(['world.txt'])
wtext = f.getChild('world.txt', wtextr)
self.assertEquals(wtext.path, tp)
wtext.render(wtextr)
self.assertEquals(wtextr.outgoingHeaders.get('content-length'),
str(len('hello world')))
self.assertNotEquals(f.getChild('', DummyRequest([''])).__class__,
static.File)
def testIgnoreExt(self):
f = static.File(".")
f.ignoreExt(".foo")
self.assertEquals(f.ignoredExts, [".foo"])
f = static.File(".")
self.assertEquals(f.ignoredExts, [])
f = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEquals(f.ignoredExts, [".bar", ".baz"])
def testIgnoredExts(self):
import os
dp = os.path.join(self.mktemp(), 'allYourBase')
fp = os.path.join(dp, 'AreBelong.ToUs')
os.makedirs(dp)
open(fp, 'wb').write("Take off every 'Zig'!!")
f = static.File(dp)
f.ignoreExt('.ToUs')
dreq = DummyRequest([''])
child_without_ext = f.getChild('AreBelong', dreq)
self.assertNotEquals(child_without_ext, f.childNotFound)
class TestRequest(unittest.TestCase):
def testChildLink(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.childLink('baz'), 'bar/baz')
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar/', 'HTTP/1.0')
self.assertEqual(request.childLink('baz'), 'baz')
def testPrePathURLSimple(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
request.setHost('example.com', 80)
self.assertEqual(request.prePathURL(), 'http://example.com/foo/bar')
def testPrePathURLNonDefault(self):
d = DummyChannel()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com:81/foo/bar')
def testPrePathURLSSLPort(self):
d = DummyChannel()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost('example.com', 443)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com:443/foo/bar')
def testPrePathURLSSLPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost('example.com', 443)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com/foo/bar')
def testPrePathURLHTTPPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 80
request = server.Request(d, 1)
request.setHost('example.com', 80)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com:80/foo/bar')
def testPrePathURLSSLNonDefault(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com:81/foo/bar')
def testPrePathURLSetSSLHost(self):
d = DummyChannel()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('foo.com', 81, 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://foo.com:81/foo/bar')
def test_prePathURLQuoting(self):
"""
L{Request.prePathURL} quotes special characters in the URL segments to
preserve the original meaning.
"""
d = DummyChannel()
request = server.Request(d, 1)
request.setHost('example.com', 80)
request.gotLength(0)
request.requestReceived('GET', '/foo%2Fbar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com/foo%2Fbar')
def testNotifyFinishConnectionLost(self):
d = DummyChannel()
request = server.Request(d, 1)
finished = request.notifyFinish()
request.connectionLost(error.ConnectionDone("Connection done"))
return self.assertFailure(finished, error.ConnectionDone)
class RootResource(resource.Resource):
isLeaf=0
def getChildWithDefault(self, name, request):
request.rememberRootURL()
return resource.Resource.getChildWithDefault(self, name, request)
def render(self, request):
return ''
class RememberURLTest(unittest.TestCase):
def createServer(self, r):
chan = DummyChannel()
chan.site = server.Site(r)
return chan
def testSimple(self):
r = resource.Resource()
r.isLeaf=0
rr = RootResource()
r.putChild('foo', rr)
rr.putChild('', rr)
rr.putChild('bar', resource.Resource())
chan = self.createServer(r)
for url in ['/foo/', '/foo/bar', '/foo/bar/baz', '/foo/bar/']:
request = server.Request(chan, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', url, 'HTTP/1.0')
self.assertEqual(request.getRootURL(), "http://example.com/foo")
def testRoot(self):
rr = RootResource()
rr.putChild('', rr)
rr.putChild('bar', resource.Resource())
chan = self.createServer(rr)
for url in ['/', '/bar', '/bar/baz', '/bar/']:
request = server.Request(chan, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', url, 'HTTP/1.0')
self.assertEqual(request.getRootURL(), "http://example.com/")
class NewRenderResource(resource.Resource):
def render_GET(self, request):
return "hi hi"
def render_HEH(self, request):
return "ho ho"
class NewRenderTestCase(unittest.TestCase):
def _getReq(self):
d = DummyChannel()
d.site.resource.putChild('newrender', NewRenderResource())
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
return request
def testGoodMethods(self):
req = self._getReq()
req.requestReceived('GET', '/newrender', 'HTTP/1.0')
self.assertEquals(req.transport.getvalue().splitlines()[-1], 'hi hi')
req = self._getReq()
req.requestReceived('HEH', '/newrender', 'HTTP/1.0')
self.assertEquals(req.transport.getvalue().splitlines()[-1], 'ho ho')
def testBadMethods(self):
req = self._getReq()
req.requestReceived('CONNECT', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 501)
req = self._getReq()
req.requestReceived('hlalauguG', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 501)
def testImplicitHead(self):
req = self._getReq()
req.requestReceived('HEAD', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 200)
self.assertEquals(-1, req.transport.getvalue().find('hi hi'))
class SDResource(resource.Resource):
def __init__(self,default): self.default=default
def getChildWithDefault(self,name,request):
d=defer.succeed(self.default)
return util.DeferredResource(d).getChildWithDefault(name, request)
class SDTest(unittest.TestCase):
def testDeferredResource(self):
r = resource.Resource()
r.isLeaf = 1
s = SDResource(r)
d = DummyRequest(['foo', 'bar', 'baz'])
resource.getChildForRequest(s, d)
self.assertEqual(d.postpath, ['bar', 'baz'])
class DummyRequestForLogTest(DummyRequest):
uri = '/dummy' # parent class uri has "http://", which doesn't really happen
code = 123
clientproto = 'HTTP/1.0'
sentLength = None
client = IPv4Address('TCP', '1.2.3.4', 12345)
class TestLogEscaping(unittest.TestCase):
def setUp(self):
self.site = http.HTTPFactory()
self.site.logFile = StringIO()
self.request = DummyRequestForLogTest(self.site, False)
def testSimple(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "-"\n')
def testMethodQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.method = 'G"T'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')
def testRequestQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.uri='/dummy"withquote'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')
def testProtoQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.clientproto='HT"P/1.0'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')
def testRefererQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers['referer'] = 'http://malicious" ".website.invalid'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "http://malicious\\" \\".website.invalid" "-"\n')
def testUserAgentQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers['user-agent'] = 'Malicious Web" Evil'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
|
|
from collections import Counter
from unittest.mock import patch, Mock
from django.test import SimpleTestCase, TestCase
from casexml.apps.case.mock import CaseStructure, CaseIndex, CaseFactory
from corehq.apps.domain.shortcuts import create_user
from corehq.apps.registry.exceptions import RegistryAccessException
from corehq.apps.registry.helper import DataRegistryHelper
from corehq.apps.registry.models import DataRegistry
from corehq.apps.registry.schema import RegistrySchemaBuilder
from corehq.apps.registry.tests.utils import create_registry_for_test, Invitation
from corehq.apps.users.models import Permissions
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.models import CommCareCase
from corehq.form_processor.tests.utils import FormProcessorTestUtils
class TestDataRegistryHelper(SimpleTestCase):
def setUp(self):
self.registry = DataRegistry(
schema=[{"case_type": "a"}]
)
self.registry.get_granted_domains = _mock_get_granted_domain
self.helper = DataRegistryHelper("domain1", registry=self.registry)
self.log_data_access_patch = patch.object(self.helper, "log_data_access")
self.log_data_access = self.log_data_access_patch.start()
def tearDown(self):
self.log_data_access_patch.stop()
def test_get_case(self):
mock_case = _mock_case("a", "domain1")
with patch.object(CommCareCase.objects, 'get_case', return_value=mock_case):
case = self.helper.get_case("case1", _mock_user(), "app")
self.assertEqual(case, mock_case)
self.log_data_access.assert_called_with("user", "domain1", "app", filters={
"case_type": "a",
"case_id": "case1"
})
def test_get_case_type_not_in_registry(self):
mock_case = _mock_case("other-type", "domain1")
with patch.object(CommCareCase.objects, 'get_case', return_value=mock_case), \
self.assertRaisesMessage(RegistryAccessException, "'other-type' not available in registry"):
self.helper.get_case("case1", _mock_user(), "app")
        self.log_data_access.assert_not_called()
def test_get_case_not_found(self):
with self.assertRaises(CaseNotFound), \
patch.object(CommCareCase.objects, 'get_case', side_effect=CaseNotFound):
self.helper.get_case("case1", _mock_user(), "app")
        self.log_data_access.assert_not_called()
def test_get_case_domain_not_in_registry(self):
mock_case = _mock_case("a", "other-domain")
with self.assertRaisesMessage(RegistryAccessException, "Data not available in registry"), \
patch.object(CommCareCase.objects, 'get_case', return_value=mock_case):
self.helper.get_case("case1", _mock_user(), "app")
        self.log_data_access.assert_not_called()
def test_get_case_access_to_current_domain_allowed_even_if_user_has_no_permission(self):
mock_case = _mock_case("a", "domain1")
mock_user = _mock_user(has_permission=False)
with patch.object(CommCareCase.objects, 'get_case', return_value=mock_case):
self.helper.get_case("case1", mock_user, "app")
self.log_data_access.assert_called_with("user", "domain1", "app", filters={
"case_type": "a",
"case_id": "case1"
})
def test_get_case_access_to_other_domain_not_allowed_if_user_has_no_permission(self):
mock_case = _mock_case("a", "domain2")
mock_user = _mock_user(has_permission=False)
with self.assertRaisesMessage(RegistryAccessException, "User not permitted to access registry data"),\
patch.object(CommCareCase.objects, 'get_case', return_value=mock_case):
self.helper.get_case("case1", mock_user, "app")
def test_get_case_access_to_other_domain_allowed_if_user_has_permission(self):
mock_case = _mock_case("a", "domain2")
mock_user = _mock_user(has_permission=True)
with patch.object(CommCareCase.objects, 'get_case', return_value=mock_case):
self.helper.get_case("case1", mock_user, "app")
self.log_data_access.assert_called_with("user", "domain2", "app", filters={
"case_type": "a",
"case_id": "case1"
})
class TestGetCaseHierarchy(TestCase):
domain = 'data-registry-case-hierarchy'
invited_domain = "reg-domain2"
@classmethod
def setUpTestData(cls):
cls.user = create_user("marg", "hairspray")
cls.registry = create_registry_for_test(cls.user, cls.domain, invitations=[
Invitation(cls.invited_domain)
])
cls.registry.schema = RegistrySchemaBuilder(["grandparent", "parent", "child", "extension"]).build()
cls.registry.save()
cls.helper = DataRegistryHelper(cls.domain, cls.registry.slug)
"""
springfield <--ext--
mona <-------
abraham(closed) <------- homer <------- bart
<--ext-- beer
"""
cls.host_case_id = 'springfield'
cls.grand_parent_case_id = 'mona'
cls.grand_parent_case_id_closed = 'abraham'
cls.parent_case_id = 'homer'
cls.child_case_id = 'bart'
cls.extension_case_id = 'beer'
host_case = CaseStructure(
case_id=cls.host_case_id,
attrs={'create': True, 'case_type': 'town'},
)
grand_parent_case = CaseStructure(
case_id=cls.grand_parent_case_id,
attrs={'create': True, 'case_type': 'grandparent'},
)
grand_parent_case_closed = CaseStructure(
case_id=cls.grand_parent_case_id_closed,
attrs={'create': True, 'case_type': 'grandparent', 'close': True},
)
parent_case = CaseStructure(
case_id=cls.parent_case_id,
attrs={'create': True, 'case_type': 'parent'},
indices=[
CaseIndex(grand_parent_case, identifier='mother'),
CaseIndex(grand_parent_case_closed, identifier='father'),
CaseIndex(host_case, identifier='host', relationship='extension'),
],
)
child_case = CaseStructure(
case_id=cls.child_case_id,
attrs={'create': True, 'case_type': 'child'},
indices=[CaseIndex(parent_case, identifier='parent')],
)
extension_case = CaseStructure(
case_id=cls.extension_case_id,
attrs={'create': True, 'case_type': 'extension'},
indices=[CaseIndex(parent_case, identifier='host', relationship='extension')],
walk_related=False
)
cls.cases = CaseFactory(cls.domain).create_or_update_cases([child_case, extension_case])
# create some cases in the 'invited domain'
cls.invited_domain_parent_id = "alternate homer"
cls.invited_domain_child_id = "alternate bart"
invited_domain_parent = CaseStructure(
case_id=cls.invited_domain_parent_id,
attrs={'create': True, 'case_type': 'parent'},
)
        cls.invited_domain_cases = CaseFactory(cls.invited_domain).create_or_update_cases([
CaseStructure(
case_id=cls.invited_domain_child_id,
attrs={'create': True, 'case_type': 'child'},
indices=[CaseIndex(invited_domain_parent, identifier='host', relationship='extension')],
)
])
@classmethod
def tearDownClass(cls):
cls.user.delete()
cls.registry.delete()
FormProcessorTestUtils.delete_all_sql_forms(cls.domain)
FormProcessorTestUtils.delete_all_sql_cases(cls.domain)
super().tearDownClass()
@patch.object(DataRegistryHelper, '_check_user_has_access', new=Mock())
def test_get_case_hierarchy(self):
case = CommCareCase.objects.get_case(self.parent_case_id)
cases = self.helper.get_case_hierarchy(self.domain, None, [case])
self.assertEqual({case.case_id for case in cases}, {
self.grand_parent_case_id_closed, self.host_case_id, self.grand_parent_case_id,
self.parent_case_id, self.extension_case_id
})
@patch.object(DataRegistryHelper, '_check_user_has_access', new=Mock())
def test_get_case_hierarchy_multiple_cases_no_duplicates(self):
starting_cases = [
CommCareCase.objects.get_case(self.parent_case_id),
CommCareCase.objects.get_case(self.extension_case_id)
]
all_cases = self.helper.get_case_hierarchy(self.domain, None, starting_cases)
counter = Counter([c.case_id for c in all_cases])
self.assertEqual(set(counter), {
self.grand_parent_case_id_closed, self.host_case_id, self.grand_parent_case_id,
self.parent_case_id, self.extension_case_id
})
duplicates = [case_id for case_id, count in counter.items() if count > 1]
self.assertEqual([], duplicates)
@patch.object(DataRegistryHelper, '_check_user_has_access', new=Mock())
def test_get_case_hierarchy_across_domains(self):
starting_cases = [
CommCareCase.objects.get_case(self.parent_case_id),
CommCareCase.objects.get_case(self.invited_domain_child_id)
]
all_cases = self.helper.get_multi_domain_case_hierarchy(None, starting_cases)
counter = Counter([c.case_id for c in all_cases])
self.assertEqual(set(counter), {
self.grand_parent_case_id_closed, self.host_case_id, self.grand_parent_case_id,
self.parent_case_id, self.extension_case_id,
self.invited_domain_child_id, self.invited_domain_parent_id
})
duplicates = [case_id for case_id, count in counter.items() if count > 1]
self.assertEqual([], duplicates)
def _mock_get_granted_domain(domain):
return {"domain1", "domain2"}
def _mock_case(case_type, domain):
return Mock(type=case_type, domain=domain, spec_set=["type", "domain"])
def _mock_user(has_permission=True):
mock_role = Mock(permissions=Permissions(view_data_registry_contents=has_permission))
return Mock(get_role=Mock(return_value=mock_role), get_django_user=Mock(return_value="user"))
|
|
'''
Created on 2016/9/22
:author: hubo
'''
from vlcp.config import Configurable, config
from vlcp.protocol.zookeeper import ZooKeeper, ZooKeeperRetryException,\
ZooKeeperConnectionStateEvent, ZooKeeperResponseEvent, ZooKeeperWatcherEvent,\
ZooKeeperSessionExpiredException
from random import shuffle, random, randrange
from vlcp.event.connection import Client
from vlcp.event.event import Event, withIndices, M_
import logging
import vlcp.utils.zookeeper as zk
try:
from itertools import izip_longest
except Exception:
from itertools import zip_longest as izip_longest
from time import time
from vlcp.event.future import RoutineFuture
from contextlib import closing
from namedstruct import dump
import json
@withIndices('state', 'client', 'sessionid')
class ZooKeeperSessionStateChanged(Event):
CREATED = 'created'
DISCONNECTED = 'disconnected'
RECONNECTED = 'reconnected'
AUTHFAILED = 'authfailed'
EXPIRED = 'expired'
@withIndices('client', 'sessionid', 'restore')
class ZooKeeperRestoreWatches(Event):
pass
class ZooKeeperSessionUnavailable(Exception):
def __init__(self, state):
Exception.__init__(self, "ZooKeeper state is '%r'" % (state,))
self.state = state
class ZooKeeperIllegalPathException(ValueError):
pass
_MAX_SETWATCHES_SIZE = 128 * 1024
_should_add_watch = set(((zk.CREATED_EVENT_DEF, zk.ZOO_ERR_NONODE),))
@config('zookeeperclient')
class ZooKeeperClient(Configurable):
"""
ZooKeeper client to send requests to a cluster
"""
    # Default ZooKeeper server list; should be a list containing connection URLs
_default_serverlist = []
# Chroot to a child node instead of the root node. All paths used in the program
# will be mapped to *chroot_path*/*path*
_default_chroot = '/'
# Extra authentications, should be list of tuples [(scheme1, auth1), (scheme2, auth2), ...]
_default_auth = []
# Zookeeper session timeout
_default_sessiontimeout = 20
# If not None, ZooKeeperClient will disconnect from the server and reconnect to a random server
# to make sure the connections to ZooKeeper servers are balanced. It sometimes causes problems,
# so it is disabled by default.
_default_rebalancetime = None
_logger = logging.getLogger(__name__ + '.ZooKeeperClient')
def __init__(self, container, serverlist = None, chroot = None, protocol = None, readonly = False,
restart_session = True):
if serverlist is not None:
self.serverlist = list(serverlist)
else:
self.serverlist = list(self.serverlist)
shuffle(self.serverlist)
self.nextptr = 0
self.current_connection = None
if protocol is None:
self.protocol = ZooKeeper()
else:
self.protocol = protocol
self.protocol.persist = False
if chroot:
self.chroot = chroot
else:
self.chroot = self.chroot
if not isinstance(self.chroot, bytes):
self.chroot = self.chroot.encode('utf-8')
if self.chroot is None or self.chroot == b'/':
self.chroot = b''
self.chroot = self.chroot.rstrip(b'/')
self._container = container
self.readonly = readonly
self.auth_set = set(self.auth)
self.restart_session = restart_session
self.session_id = 0
self.session_state = ZooKeeperSessionStateChanged.EXPIRED
self._shutdown = False
self.key = None
self.certificate = None
self.ca_certs = None
self._last_zxid = 0
self._last_watch_zxid = 0
def start(self, asyncstart = False):
self._connmanage_routine = self._container.subroutine(self._connection_manage(), asyncstart)
def reset(self):
'''
Discard current session and start a new one
'''
self._connmanage_routine.close()
self._shutdown = False
self.start()
async def shutdown(self):
self._connmanage_routine.close()
async def _connection_manage(self):
try:
failed = 0
self._last_zxid = last_zxid = 0
self._last_watch_zxid = last_watch_zxid = 0
session_id = 0
passwd = b'\x00' * 16
last_conn_time = None
while True:
self.currentserver = self.serverlist[self.nextptr]
np = self.nextptr + 1
if np >= len(self.serverlist):
np = 0
self.nextptr = np
conn = Client(self.currentserver, self.protocol, self._container.scheduler,
self.key, self.certificate, self.ca_certs)
self.current_connection = conn
conn_up = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.UP,
conn)
conn_nc = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.NOTCONNECTED,
conn)
conn.start()
try:
_, m = await M_(conn_up, conn_nc)
if m is conn_nc:
self._logger.warning('Connect to %r failed, try next server', self.currentserver)
if failed > 5:
# Wait for a small amount of time to prevent a busy loop
# Socket may be rejected, it may fail very quick
await self._container.wait_with_timeout(min((failed - 5) * 0.1, 1.0))
failed += 1
continue
try:
# Handshake
set_watches = []
if self.session_state == ZooKeeperSessionStateChanged.DISCONNECTED:
await self._container.wait_for_send(ZooKeeperRestoreWatches(self,
self.session_id,
True,
restore_watches = (set(), set(), set())))
ev = await ZooKeeperRestoreWatches.createMatcher(self)
data_watches, exists_watches, child_watches = \
ev.restore_watches
if data_watches or exists_watches or child_watches:
restore_watch_zxid = last_watch_zxid
if restore_watch_zxid > 1:
restore_watch_zxid = restore_watch_zxid - 1
current_set_watches = zk.SetWatches(relativeZxid = restore_watch_zxid)
current_length = 0
for d, e, c in izip_longest(data_watches, exists_watches, child_watches):
if d is not None:
current_set_watches.dataWatches.append(d)
current_length += 4 + len(d)
if e is not None:
current_set_watches.existWatches.append(e)
current_length += 4 + len(e)
if c is not None:
current_set_watches.childWatches.append(c)
current_length += 4 + len(c)
if current_length > _MAX_SETWATCHES_SIZE:
# Split set_watches
set_watches.append(current_set_watches)
current_set_watches = zk.SetWatches(relativeZxid = restore_watch_zxid)
current_length = 0
if current_set_watches.dataWatches or current_set_watches.existWatches \
or current_set_watches.childWatches:
set_watches.append(current_set_watches)
auth_list = list(self.auth_set)
extra_requests = [zk.AuthPacket(scheme = a[0], auth = a[1]) for a in auth_list] + set_watches
timeout, handshake_result = await self._container.execute_with_timeout(
10,
self.protocol.handshake(
conn,
zk.ConnectRequest(lastZxidSeen = last_zxid,
timeOut = int(self.sessiontimeout * 1000.0),
sessionId = session_id,
passwd = passwd,
readOnly = self.readonly),
self._container,
extra_requests
)
)
if timeout:
raise IOError
except ZooKeeperSessionExpiredException:
self._logger.warning('Session expired.')
# Session expired
self.session_state = ZooKeeperSessionStateChanged.EXPIRED
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.EXPIRED,
self,
session_id
)
)
if self.restart_session:
failed = 0
last_zxid = 0
session_id = 0
passwd = b'\x00' * 16
last_conn_time = None
continue
else:
break
except Exception:
self._logger.warning('Handshake failed to %r, try next server', self.currentserver)
if failed > 5:
# There is a bug ZOOKEEPER-1159 that ZooKeeper server does not respond
# for session expiration, but directly close the connection.
# This is a workaround: we store the time that we disconnected from the server,
# if we have exceeded the session expiration time, we declare the session is expired
if last_conn_time is not None and last_conn_time + self.sessiontimeout * 2 < time():
self._logger.warning('Session expired detected from client time.')
# Session expired
self.session_state = ZooKeeperSessionStateChanged.EXPIRED
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.EXPIRED,
self,
session_id
)
)
if self.restart_session:
failed = 0
last_zxid = 0
session_id = 0
passwd = b'\x00' * 16
last_conn_time = None
continue
else:
break
else:
# Wait for a small amount of time to prevent a busy loop
await self._container.wait_with_timeout(min((failed - 5) * 0.1, 1.0))
failed += 1
else:
failed = 0
conn_resp, auth_resp = handshake_result
if conn_resp.timeOut <= 0:
# Session expired
# Currently should not happen because handshake() should raise an exception
self._logger.warning('Session expired detected from handshake packet')
self.session_state = ZooKeeperSessionStateChanged.EXPIRED
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.EXPIRED,
self,
session_id
)
)
if self.restart_session:
failed = 0
last_zxid = 0
last_conn_time = None
session_id = 0
passwd = b'\x00' * 16
continue
else:
break
else:
session_id = conn_resp.sessionId
passwd = conn_resp.passwd
# Authentication result check
auth_failed = any(a.err == zk.ZOO_ERR_AUTHFAILED for a in auth_resp)
if auth_failed:
self._logger.warning('ZooKeeper authentication failed for following auth: %r',
[a for a,r in zip(auth_list, auth_resp) if r.err == zk.ZOO_ERR_AUTHFAILED])
self.session_state = ZooKeeperSessionStateChanged.AUTHFAILED
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.AUTHFAILED,
self,
session_id
)
)
# Not retrying
break
else:
# Check other failures
failed_results = [[r,a] for r,a in zip(extra_requests, auth_resp) if a.err != zk.ZOO_ERR_OK]
if failed_results:
# What's wrong? log them and ignored
self._logger.warning('Some handshake packets have error result:\n%s',
json.dumps(dump(failed_results, tostr=True), indent=2))
self.session_readonly = getattr(conn_resp, 'readOnly', False)
self.session_id = session_id
if self.session_state == ZooKeeperSessionStateChanged.EXPIRED:
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.CREATED,
self,
session_id
)
)
else:
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.RECONNECTED,
self,
session_id
)
)
self.session_state = ZooKeeperSessionStateChanged.CREATED
if conn.connected:
conn_down = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.DOWN,
conn,
conn.connmark
)
auth_failed = ZooKeeperResponseEvent.createMatcher(zk.AUTH_XID,
conn,
conn.connmark,
                                                                       _ismatch = lambda x: x.message.err == zk.ZOO_ERR_AUTHFAILED)
while True:
rebalancetime = self.rebalancetime
if rebalancetime is not None:
rebalancetime += random() * 60
timeout, ev, m = await self._container.wait_with_timeout(rebalancetime, conn_down, auth_failed)
if timeout:
# Rebalance
if conn.zookeeper_requests:
# There are still requests not processed, wait longer
for _ in range(0, 3):
longer_time = random() * 10
timeout, ev, m = await self._container.wait_with_timeout(
longer_time, conn_down, auth_failed)
if not timeout:
# Connection is down, or auth failed
break
if not conn.zookeeper_requests:
break
else:
# There is still requests, skip for this time
continue
# Rebalance to a random server
if timeout:
self.nextptr = randrange(len(self.serverlist))
break
if m is auth_failed:
self._logger.warning('ZooKeeper authentication failed, shutdown the connection')
self.session_state = ZooKeeperSessionStateChanged.AUTHFAILED
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.AUTHFAILED,
self,
session_id
)
)
# Not retrying
break
else:
# Connection is down, try other servers
if not timeout:
self._logger.warning('Connection lost to %r, try next server', self.currentserver)
else:
self._logger.info('Rebalance to next server')
self._last_zxid = last_zxid = conn.zookeeper_lastzxid
self._last_watch_zxid = last_watch_zxid = conn.zookeeper_last_watch_zxid
last_conn_time = time()
self.session_state = ZooKeeperSessionStateChanged.DISCONNECTED
await self._container.wait_for_send(
ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.DISCONNECTED,
self,
session_id
)
)
finally:
conn.subroutine(conn.shutdown(True), False)
self.current_connection = None
finally:
self._shutdown = True
if self.session_state != ZooKeeperSessionStateChanged.EXPIRED and self.session_state != ZooKeeperSessionStateChanged.AUTHFAILED:
self.session_state = ZooKeeperSessionStateChanged.EXPIRED
self._container.scheduler.emergesend(ZooKeeperSessionStateChanged(
ZooKeeperSessionStateChanged.EXPIRED,
self,
session_id
))
def chroot_path(self, path):
return self.chroot + path
def unchroot_path(self, path):
return path[len(self.chroot):]
def _analyze(self, request):
if request.type in (zk.ZOO_EXISTS_OP, zk.ZOO_GETDATA_OP, zk.ZOO_GETACL_OP, zk.ZOO_GETCHILDREN_OP,
zk.ZOO_SYNC_OP, zk.ZOO_PING_OP, zk.ZOO_GETCHILDREN2_OP, zk.ZOO_SETAUTH_OP):
# These requests can be retried even if they are already sent
can_retry = True
else:
can_retry = False
watch_type = None
if request.type == zk.ZOO_MULTI_OP:
# chroot sub ops
for op in request.requests:
if hasattr(op, 'path'):
op.path = self.chroot_path(op.path)
else:
if hasattr(request, 'path'):
request.path = self.chroot_path(request.path)
if getattr(request, 'watch', False):
if request.type == zk.ZOO_GETDATA_OP:
watch_type = zk.CHANGED_EVENT_DEF
elif request.type == zk.ZOO_EXISTS_OP:
watch_type = zk.CREATED_EVENT_DEF
elif request.type == zk.ZOO_GETCHILDREN_OP or request.type == zk.ZOO_GETCHILDREN2_OP:
watch_type = zk.CHILD_EVENT_DEF
return (request, can_retry, watch_type)
async def watch_path(self, path, watch_type, container = None):
'''
Watch the specified path for the specified watch type and return the watcher event when it fires
'''
if watch_type == zk.CHANGED_EVENT_DEF:
watch_matchers = (ZooKeeperWatcherEvent.createMatcher(None, None, self.protocol, zk.CHANGED_EVENT_DEF, None, path),
ZooKeeperWatcherEvent.createMatcher(None, None, self.protocol, zk.DELETED_EVENT_DEF, None, path))
else:
watch_matchers = (ZooKeeperWatcherEvent.createMatcher(None, None, self.protocol, watch_type, None, path),)
# If the session expires, raise exception and exit
session_state = ZooKeeperSessionStateChanged.createMatcher(ZooKeeperSessionStateChanged.EXPIRED,
self,
self.session_id)
auth_failed = ZooKeeperSessionStateChanged.createMatcher(ZooKeeperSessionStateChanged.AUTHFAILED,
self,
self.session_id)
# If the watchers are restored, restore the matchers
restore_matcher = ZooKeeperRestoreWatches.createMatcher(self, self.session_id, True)
while True:
ev, m = await M_(session_state, auth_failed, restore_matcher, *watch_matchers)
if m is session_state or m is auth_failed:
raise ZooKeeperSessionUnavailable(ev.state)
elif m is restore_matcher:
ev.restore_watches[{zk.CHANGED_EVENT_DEF : 0,
zk.CREATED_EVENT_DEF : 1,
zk.CHILD_EVENT_DEF : 2}[watch_type]].add(path)
else:
watcher_event = ev.message
if watcher_event.path:
watcher_event.path = self.unchroot_path(watcher_event.path)
return watcher_event
async def requests(self, requests, container, timeout = None, session_lock = None, callback = None, priority = 0):
'''
Similar to vlcp.protocol.zookeeper.ZooKeeper.requests, but:

1. Returns an extra item *watchers*, which is a list of objects corresponding to each request:
   if the request has watch=True, the corresponding object is a RoutineFuture object;
   if the request has watch=False or does not support watch, the corresponding object is None.
   Use watcher.wait() to get the watch event and watcher.close() to discard the watcher.
2. If the connection is lost during the requests, this method waits for reconnection until the
   timeout expires, the session expires, or the response of a non-read-only request is lost.

:param requests: sequence of requests
:param container: container of the current routine
:param timeout: if not None, wait only for the specified time. Notice that this is not an exact
                limit; it won't stop the execution unless the connection is lost
:param session_lock: if not None, only execute if the session_id == session_lock
:param callback: if not None, callback(request, response) is called immediately after any response is received
:return: a (result, lost_responses, retry_requests, watchers) tuple; the first three are the
         same as ZooKeeper.requests, and the last item *watchers* is a list of RoutineFuture objects
'''
if self._shutdown:
raise ZooKeeperSessionUnavailable(self.session_state)
if session_lock is not None and self.session_id != session_lock:
raise ZooKeeperSessionUnavailable(ZooKeeperSessionStateChanged.EXPIRED)
start_time = time()
if timeout is not None:
end_time = start_time + timeout
def left_time():
if timeout is None:
return None
else:
return max(end_time - time(), 0)
def has_time_left():
t = left_time()
return t is None or t > 0
result = {}
lost_responses = []
analysis = dict((v[0], (v[1], v[2])) for v in (self._analyze(r) for r in requests))
retry_requests = list(requests)
watchers = {}
def requests_callback(request, response):
watch_type = analysis[request][1]
if watch_type is not None and (response.err == zk.ZOO_ERR_OK or \
(watch_type, response.err) in _should_add_watch):
watchers[request] = RoutineFuture(self.watch_path(request.path, watch_type, container), container)
if callback is not None:
callback(request, response)
def unchroot_response(resp):
if resp.zookeeper_request_type == zk.ZOO_MULTI_OP:
for r in resp.responses:
if hasattr(r, 'path'):
r.path = self.unchroot_path(r.path)
elif hasattr(resp, 'path'):
resp.path = self.unchroot_path(resp.path)
return resp
while has_time_left() and not lost_responses and retry_requests:
if self.session_state != ZooKeeperSessionStateChanged.CREATED:
async def wait_for_connect():
state_change = ZooKeeperSessionStateChanged.createMatcher(None, self)
while True:
ev = await state_change
if ev.state in (ZooKeeperSessionStateChanged.CREATED, ZooKeeperSessionStateChanged.RECONNECTED):
break
elif self._shutdown:
raise ZooKeeperSessionUnavailable(self.session_state)
elif session_lock is not None and (ev.sessionid != session_lock or \
ev.state == ZooKeeperSessionStateChanged.EXPIRED):
raise ZooKeeperSessionUnavailable(ZooKeeperSessionStateChanged.EXPIRED)
try:
timeout_, _ = await container.execute_with_timeout(left_time(), wait_for_connect())
except ZooKeeperSessionUnavailable:
if len(retry_requests) == len(requests):
raise
else:
break
if timeout_:
if len(retry_requests) == len(requests):
raise ZooKeeperSessionUnavailable(ZooKeeperSessionStateChanged.DISCONNECTED)
else:
break
# retry all the requests
new_result, new_lost, new_retry = await self.protocol.requests(self.current_connection, retry_requests, container,
requests_callback, priority=priority)
# Save the results
result.update((k,unchroot_response(v)) for k,v in zip(retry_requests, new_result) if v is not None)
if new_lost:
# Some responses are lost
for i in range(len(new_lost) - 1, -1, -1):
if not analysis[new_lost[i]][0]:
# This request can not be retried
break
else:
i = -1
new_retry = new_lost[i+1:] + new_retry
new_lost = new_lost[:i+1]
if new_lost:
# Some requests can not be retried, this is as far as we go
lost_responses = new_lost
retry_requests = new_retry
break
retry_requests = new_retry
return ([result.get(r, None) for r in requests],
lost_responses,
retry_requests,
[watchers.get(r, None) for r in requests])
def get_last_zxid(self):
'''
Return the latest zxid seen from servers
'''
if not self.current_connection:
return self._last_zxid
else:
return getattr(self.current_connection, 'zookeeper_lastzxid', self._last_zxid)
def get_last_watch_zxid(self):
'''
Return the latest watch zxid seen from servers
'''
if not self.current_connection:
return self._last_watch_zxid
else:
return getattr(self.current_connection, 'zookeeper_last_watch_zxid', self._last_watch_zxid)
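# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original client): the
# requests()/watch_path() API above returns a
# (results, lost_responses, retry_requests, watchers) tuple, and each watcher
# is a RoutineFuture that resolves to the watch event. The example below is a
# minimal, hypothetical illustration; it assumes `client` is an instance of
# this session class and that a `zk.getdata(path, watch=True)` request builder
# exists (an assumption -- only the zk.ZOO_*_OP constants are referenced above).
async def _example_read_with_watch(client, container):
    # Send a single GetData request with a watch attached
    results, lost, retries, watchers = await client.requests(
        [zk.getdata(b'/example/node', watch=True)], container, timeout=30)
    if lost or retries:
        # The response was lost or could not be retried before the timeout
        return None
    watcher = watchers[0]
    try:
        if results[0].err == zk.ZOO_ERR_OK and watcher is not None:
            # Block until the node is changed or deleted, then return the event
            return await watcher.wait()
        return None
    finally:
        if watcher is not None:
            # Discard the watcher once it is no longer needed
            watcher.close()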
|
|
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo.config import cfg
from oslo import messaging
from oslo.utils import excutils
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import config # noqa
from neutron.plugins.ibm.common import constants
from neutron.plugins.ibm.common import exceptions as sdnve_exc
from neutron.plugins.ibm import sdnve_api as sdnve
from neutron.plugins.ibm import sdnve_api_fake as sdnve_fake
LOG = logging.getLogger(__name__)
class SdnveRpcCallbacks(object):
def __init__(self, notifier):
self.notifier = notifier # used to notify the agent
def sdnve_info(self, rpc_context, **kwargs):
'''Relay updated information to all listening agents and return it.'''
info = kwargs.get('info')
# Notify all other listening agents
self.notifier.info_update(rpc_context, info)
return info
class AgentNotifierApi(object):
'''Agent side of the SDN-VE rpc API.'''
def __init__(self, topic):
target = messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
self.topic_info_update = topics.get_topic_name(topic,
constants.INFO,
topics.UPDATE)
def info_update(self, context, info):
cctxt = self.client.prepare(topic=self.topic_info_update, fanout=True)
cctxt.cast(context, 'info_update', info=info)
def _ha(func):
'''Supports the high availability feature of the controller.'''
@functools.wraps(func)
def hawrapper(self, *args, **kwargs):
'''This wrapper sets the new controller if necessary.

When a controller is detected as not responding and a new
controller is chosen to be used in its place, this decorator
makes sure the existing integration bridges are re-pointed
to the new controller by calling the set_controller method.
'''
ret_func = func(self, *args, **kwargs)
self.set_controller(args[0])
return ret_func
return hawrapper
class SdnvePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
portbindings_db.PortBindingMixin,
l3_gwmode_db.L3_NAT_db_mixin,
agents_db.AgentDbMixin,
):
'''
Implement the Neutron abstractions using SDN-VE SDN Controller.
'''
__native_bulk_support = False
__native_pagination_support = False
__native_sorting_support = False
supported_extension_aliases = ["binding", "router", "external-net",
"agent", "quotas"]
def __init__(self, configfile=None):
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
portbindings.VIF_DETAILS: {portbindings.CAP_PORT_FILTER: False}}
super(SdnvePluginV2, self).__init__()
self.setup_rpc()
self.sdnve_controller_select()
if self.fake_controller:
self.sdnve_client = sdnve_fake.FakeClient()
else:
self.sdnve_client = sdnve.Client()
def sdnve_controller_select(self):
self.fake_controller = cfg.CONF.SDNVE.use_fake_controller
def setup_rpc(self):
# RPC support
self.topic = topics.PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.notifier = AgentNotifierApi(topics.AGENT)
self.endpoints = [SdnveRpcCallbacks(self.notifier),
agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def _update_base_binding_dict(self, tenant_type):
if tenant_type == constants.TENANT_TYPE_OVERLAY:
self.base_binding_dict[
portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE
if tenant_type == constants.TENANT_TYPE_OF:
self.base_binding_dict[
portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
def set_controller(self, context):
LOG.info(_LI("Set a new controller if needed."))
new_controller = self.sdnve_client.sdnve_get_controller()
if new_controller:
self.notifier.info_update(
context,
{'new_controller': new_controller})
LOG.info(_LI("Set the controller to a new controller: %s"),
new_controller)
def _process_request(self, request, current):
new_request = dict(
(k, v) for k, v in request.items()
if v != current.get(k))
msg = _("Original SDN-VE HTTP request: %(orig)s; New request: %(new)s")
LOG.debug(msg, {'orig': request, 'new': new_request})
return new_request
#
# Network
#
@_ha
def create_network(self, context, network):
LOG.debug("Create network in progress: %r", network)
session = context.session
tenant_id = self._get_tenant_id_for_create(context, network['network'])
# Create a new SDN-VE tenant if need be
sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant(
tenant_id)
if sdnve_tenant is None:
raise sdnve_exc.SdnveException(
msg=_('Create net failed: no SDN-VE tenant.'))
with session.begin(subtransactions=True):
net = super(SdnvePluginV2, self).create_network(context, network)
self._process_l3_create(context, net, network['network'])
# Create SDN-VE network
(res, data) = self.sdnve_client.sdnve_create('network', net)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_network(context, net['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create net failed in SDN-VE: %s') % res))
LOG.debug("Created network: %s", net['id'])
return net
@_ha
def update_network(self, context, id, network):
LOG.debug("Update network in progress: %r", network)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_network = super(SdnvePluginV2, self).get_network(
context, id)
processed_request['network'] = self._process_request(
network['network'], original_network)
net = super(SdnvePluginV2, self).update_network(
context, id, network)
self._process_l3_update(context, net, network['network'])
if processed_request['network']:
(res, data) = self.sdnve_client.sdnve_update(
'network', id, processed_request['network'])
if res not in constants.HTTP_ACCEPTABLE:
net = super(SdnvePluginV2, self).update_network(
context, id, {'network': original_network})
raise sdnve_exc.SdnveException(
msg=(_('Update net failed in SDN-VE: %s') % res))
return net
@_ha
def delete_network(self, context, id):
LOG.debug("Delete network in progress: %s", id)
session = context.session
with session.begin(subtransactions=True):
self._process_l3_delete(context, id)
super(SdnvePluginV2, self).delete_network(context, id)
(res, data) = self.sdnve_client.sdnve_delete('network', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(
_LE("Delete net failed after deleting the network in DB: %s"),
res)
@_ha
def get_network(self, context, id, fields=None):
LOG.debug("Get network in progress: %s", id)
return super(SdnvePluginV2, self).get_network(context, id, fields)
@_ha
def get_networks(self, context, filters=None, fields=None, sorts=None,
limit=None, marker=None, page_reverse=False):
LOG.debug("Get networks in progress")
return super(SdnvePluginV2, self).get_networks(
context, filters, fields, sorts, limit, marker, page_reverse)
#
# Port
#
@_ha
def create_port(self, context, port):
LOG.debug("Create port in progress: %r", port)
session = context.session
# Set port status as 'ACTIVE' to avoid needing the agent
port['port']['status'] = n_const.PORT_STATUS_ACTIVE
port_data = port['port']
with session.begin(subtransactions=True):
port = super(SdnvePluginV2, self).create_port(context, port)
if 'id' not in port:
return port
# If the tenant_id is set to '' by create_port, add the id to
# the request being sent to the controller as the controller
# requires a tenant id
tenant_id = port.get('tenant_id')
if not tenant_id:
LOG.debug("Create port does not have tenant id info")
original_network = super(SdnvePluginV2, self).get_network(
context, port['network_id'])
original_tenant_id = original_network['tenant_id']
port['tenant_id'] = original_tenant_id
LOG.debug(
"Create port does not have tenant id info; "
"obtained is: %s",
port['tenant_id'])
os_tenant_id = tenant_id
id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid(
os_tenant_id)
self._update_base_binding_dict(tenant_type)
self._process_portbindings_create_and_update(context,
port_data, port)
# NOTE(mb): Remove this block when controller is updated
# Remove the information that the controller does not accept
sdnve_port = port.copy()
sdnve_port.pop('device_id', None)
sdnve_port.pop('device_owner', None)
(res, data) = self.sdnve_client.sdnve_create('port', sdnve_port)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_port(context, port['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create port failed in SDN-VE: %s') % res))
LOG.debug("Created port: %s", port.get('id', 'id not found'))
return port
@_ha
def update_port(self, context, id, port):
LOG.debug("Update port in progress: %r", port)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_port = super(SdnvePluginV2, self).get_port(
context, id)
processed_request['port'] = self._process_request(
port['port'], original_port)
updated_port = super(SdnvePluginV2, self).update_port(
context, id, port)
os_tenant_id = updated_port['tenant_id']
id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid(
os_tenant_id)
self._update_base_binding_dict(tenant_type)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
if processed_request['port']:
(res, data) = self.sdnve_client.sdnve_update(
'port', id, processed_request['port'])
if res not in constants.HTTP_ACCEPTABLE:
updated_port = super(SdnvePluginV2, self).update_port(
context, id, {'port': original_port})
raise sdnve_exc.SdnveException(
msg=(_('Update port failed in SDN-VE: %s') % res))
return updated_port
@_ha
def delete_port(self, context, id, l3_port_check=True):
LOG.debug("Delete port in progress: %s", id)
# if needed, check to see if this is a port owned by
# an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
self.disassociate_floatingips(context, id)
super(SdnvePluginV2, self).delete_port(context, id)
(res, data) = self.sdnve_client.sdnve_delete('port', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(
_LE("Delete port operation failed in SDN-VE "
"after deleting the port from DB: %s"), res)
#
# Subnet
#
@_ha
def create_subnet(self, context, subnet):
LOG.debug("Create subnet in progress: %r", subnet)
new_subnet = super(SdnvePluginV2, self).create_subnet(context, subnet)
# Note(mb): Use of null string currently required by controller
sdnve_subnet = new_subnet.copy()
if subnet.get('gateway_ip') is None:
sdnve_subnet['gateway_ip'] = 'null'
(res, data) = self.sdnve_client.sdnve_create('subnet', sdnve_subnet)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_subnet(context,
new_subnet['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create subnet failed in SDN-VE: %s') % res))
LOG.debug("Subnet created: %s", new_subnet['id'])
return new_subnet
@_ha
def update_subnet(self, context, id, subnet):
LOG.debug("Update subnet in progress: %r", subnet)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_subnet = super(SdnvePluginV2, self).get_subnet(
context, id)
processed_request['subnet'] = self._process_request(
subnet['subnet'], original_subnet)
updated_subnet = super(SdnvePluginV2, self).update_subnet(
context, id, subnet)
if processed_request['subnet']:
# Note(mb): Use of string containing null required by controller
if 'gateway_ip' in processed_request['subnet']:
if processed_request['subnet'].get('gateway_ip') is None:
processed_request['subnet']['gateway_ip'] = 'null'
(res, data) = self.sdnve_client.sdnve_update(
'subnet', id, processed_request['subnet'])
if res not in constants.HTTP_ACCEPTABLE:
for key in subnet['subnet'].keys():
subnet['subnet'][key] = original_subnet[key]
super(SdnvePluginV2, self).update_subnet(
context, id, subnet)
raise sdnve_exc.SdnveException(
msg=(_('Update subnet failed in SDN-VE: %s') % res))
return updated_subnet
@_ha
def delete_subnet(self, context, id):
LOG.debug("Delete subnet in progress: %s", id)
super(SdnvePluginV2, self).delete_subnet(context, id)
(res, data) = self.sdnve_client.sdnve_delete('subnet', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(_LE("Delete subnet operation failed in SDN-VE after "
"deleting the subnet from DB: %s"), res)
#
# Router
#
@_ha
def create_router(self, context, router):
LOG.debug("Create router in progress: %r", router)
if router['router']['admin_state_up'] is False:
LOG.warning(_LW('Ignoring admin_state_up=False for router=%r. '
'Overriding with True'), router)
router['router']['admin_state_up'] = True
tenant_id = self._get_tenant_id_for_create(context, router['router'])
# Create a new SDN-VE tenant if need be
sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant(
tenant_id)
if sdnve_tenant is None:
raise sdnve_exc.SdnveException(
msg=_('Create router failed: no SDN-VE tenant.'))
new_router = super(SdnvePluginV2, self).create_router(context, router)
# Create SDN-VE router
(res, data) = self.sdnve_client.sdnve_create('router', new_router)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_router(context, new_router['id'])
raise sdnve_exc.SdnveException(
msg=(_('Create router failed in SDN-VE: %s') % res))
LOG.debug("Router created: %r", new_router)
return new_router
@_ha
def update_router(self, context, id, router):
LOG.debug("Update router in progress: id=%(id)s "
"router=%(router)r",
{'id': id, 'router': router})
session = context.session
processed_request = {}
if not router['router'].get('admin_state_up', True):
raise n_exc.NotImplementedError(_('admin_state_up=False '
'routers are not '
'supported.'))
with session.begin(subtransactions=True):
original_router = super(SdnvePluginV2, self).get_router(
context, id)
processed_request['router'] = self._process_request(
router['router'], original_router)
updated_router = super(SdnvePluginV2, self).update_router(
context, id, router)
if processed_request['router']:
egw = processed_request['router'].get('external_gateway_info')
# Check for existing empty set (different from None) in request
if egw == {}:
processed_request['router'][
'external_gateway_info'] = {'network_id': 'null'}
(res, data) = self.sdnve_client.sdnve_update(
'router', id, processed_request['router'])
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).update_router(
context, id, {'router': original_router})
raise sdnve_exc.SdnveException(
msg=(_('Update router failed in SDN-VE: %s') % res))
return updated_router
@_ha
def delete_router(self, context, id):
LOG.debug("Delete router in progress: %s", id)
super(SdnvePluginV2, self).delete_router(context, id)
(res, data) = self.sdnve_client.sdnve_delete('router', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(
_LE("Delete router operation failed in SDN-VE after "
"deleting the router in DB: %s"), res)
@_ha
def add_router_interface(self, context, router_id, interface_info):
LOG.debug("Add router interface in progress: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r",
{'router_id': router_id, 'interface_info': interface_info})
new_interface = super(SdnvePluginV2, self).add_router_interface(
context, router_id, interface_info)
LOG.debug(
"SdnvePluginV2.add_router_interface called. Port info: %s",
new_interface)
request_info = interface_info.copy()
request_info['port_id'] = new_interface['port_id']
# Add the subnet_id to the request sent to the controller
if 'subnet_id' not in interface_info:
request_info['subnet_id'] = new_interface['subnet_id']
(res, data) = self.sdnve_client.sdnve_update(
'router', router_id + '/add_router_interface', request_info)
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).remove_router_interface(
context, router_id, interface_info)
raise sdnve_exc.SdnveException(
msg=(_('Update router-add-interface failed in SDN-VE: %s') %
res))
LOG.debug("Added router interface: %r", new_interface)
return new_interface
def _add_router_interface_only(self, context, router_id, interface_info):
LOG.debug("Add router interface only called: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r",
{'router_id': router_id, 'interface_info': interface_info})
port_id = interface_info.get('port_id')
if port_id:
(res, data) = self.sdnve_client.sdnve_update(
'router', router_id + '/add_router_interface', interface_info)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(_LE("SdnvePluginV2._add_router_interface_only: "
"failed to add the interface in the roll back."
" of a remove_router_interface operation"))
@_ha
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug("Remove router interface in progress: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r",
{'router_id': router_id, 'interface_info': interface_info})
subnet_id = interface_info.get('subnet_id')
port_id = interface_info.get('port_id')
if not subnet_id:
if not port_id:
raise sdnve_exc.BadInputException(msg=_('No port ID'))
myport = super(SdnvePluginV2, self).get_port(context, port_id)
LOG.debug("SdnvePluginV2.remove_router_interface port: %s",
myport)
myfixed_ips = myport.get('fixed_ips')
if not myfixed_ips:
raise sdnve_exc.BadInputException(msg=_('No fixed IP'))
subnet_id = myfixed_ips[0].get('subnet_id')
if subnet_id:
interface_info['subnet_id'] = subnet_id
LOG.debug(
"SdnvePluginV2.remove_router_interface subnet_id: %s",
subnet_id)
else:
if not port_id:
# The backend requires port id info in the request
subnet = super(SdnvePluginV2, self).get_subnet(context,
subnet_id)
df = {'device_id': [router_id],
'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF],
'network_id': [subnet['network_id']]}
ports = self.get_ports(context, filters=df)
if ports:
pid = ports[0]['id']
interface_info['port_id'] = pid
msg = ("SdnvePluginV2.remove_router_interface "
"subnet_id: %(sid)s port_id: %(pid)s")
LOG.debug(msg, {'sid': subnet_id, 'pid': pid})
(res, data) = self.sdnve_client.sdnve_update(
'router', router_id + '/remove_router_interface', interface_info)
if res not in constants.HTTP_ACCEPTABLE:
raise sdnve_exc.SdnveException(
msg=(_('Update router-remove-interface failed SDN-VE: %s') %
res))
session = context.session
with session.begin(subtransactions=True):
try:
info = super(SdnvePluginV2, self).remove_router_interface(
context, router_id, interface_info)
except Exception:
with excutils.save_and_reraise_exception():
self._add_router_interface_only(context,
router_id, interface_info)
return info
#
# Floating Ip
#
@_ha
def create_floatingip(self, context, floatingip):
LOG.debug("Create floatingip in progress: %r",
floatingip)
new_floatingip = super(SdnvePluginV2, self).create_floatingip(
context, floatingip)
(res, data) = self.sdnve_client.sdnve_create(
'floatingip', {'floatingip': new_floatingip})
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).delete_floatingip(
context, new_floatingip['id'])
raise sdnve_exc.SdnveException(
msg=(_('Creating floating ip operation failed '
'in SDN-VE controller: %s') % res))
LOG.debug("Created floatingip : %r", new_floatingip)
return new_floatingip
@_ha
def update_floatingip(self, context, id, floatingip):
LOG.debug("Update floatingip in progress: %r", floatingip)
session = context.session
processed_request = {}
with session.begin(subtransactions=True):
original_floatingip = super(
SdnvePluginV2, self).get_floatingip(context, id)
processed_request['floatingip'] = self._process_request(
floatingip['floatingip'], original_floatingip)
updated_floatingip = super(
SdnvePluginV2, self).update_floatingip(context, id, floatingip)
if processed_request['floatingip']:
(res, data) = self.sdnve_client.sdnve_update(
'floatingip', id,
{'floatingip': processed_request['floatingip']})
if res not in constants.HTTP_ACCEPTABLE:
super(SdnvePluginV2, self).update_floatingip(
context, id, {'floatingip': original_floatingip})
raise sdnve_exc.SdnveException(
msg=(_('Update floating ip failed in SDN-VE: %s') % res))
return updated_floatingip
@_ha
def delete_floatingip(self, context, id):
LOG.debug("Delete floatingip in progress: %s", id)
super(SdnvePluginV2, self).delete_floatingip(context, id)
(res, data) = self.sdnve_client.sdnve_delete('floatingip', id)
if res not in constants.HTTP_ACCEPTABLE:
LOG.error(_LE("Delete floatingip failed in SDN-VE: %s"), res)
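# ---------------------------------------------------------------------------
# Editor's note: the create_*/update_*/delete_* methods above all follow the
# same pattern -- write to the Neutron DB first, push the change to the SDN-VE
# controller, and undo the DB write when the controller rejects it. A minimal
# sketch of that pattern as a stand-alone helper (hypothetical; the plugin
# itself repeats the pattern inline in each method):
def _create_with_rollback(sdnve_client, resource, body, create_in_db, delete_in_db):
    # 1. Persist the resource in the Neutron DB
    obj = create_in_db(body)
    # 2. Push the same resource to the SDN-VE controller
    (res, data) = sdnve_client.sdnve_create(resource, obj)
    if res not in constants.HTTP_ACCEPTABLE:
        # 3. Roll back the DB write and surface the controller failure
        delete_in_db(obj['id'])
        raise sdnve_exc.SdnveException(
            msg=(_('Create %(resource)s failed in SDN-VE: %(status)s')
                 % {'resource': resource, 'status': res}))
    return obj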
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.apps import apps
from django.core import checks
from django.core.checks import Error, Warning
from django.core.checks.registry import CheckRegistry
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import models
from django.test import SimpleTestCase
from django.test.utils import (
isolate_apps, override_settings, override_system_checks,
)
from django.utils.encoding import force_text
from django.utils.six import StringIO
from .models import SimpleModel
class DummyObj(object):
def __repr__(self):
return "obj"
class SystemCheckFrameworkTests(SimpleTestCase):
def test_register_and_run_checks(self):
def f(**kwargs):
calls[0] += 1
return [1, 2, 3]
def f2(**kwargs):
return [4, ]
def f3(**kwargs):
return [5, ]
calls = [0]
# test register as decorator
registry = CheckRegistry()
registry.register()(f)
registry.register("tag1", "tag2")(f2)
registry.register("tag2", deploy=True)(f3)
# test register as function
registry2 = CheckRegistry()
registry2.register(f)
registry2.register(f2, "tag1", "tag2")
registry2.register(f3, "tag2", deploy=True)
# check results
errors = registry.run_checks()
errors2 = registry2.run_checks()
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [1, 2, 3, 4])
self.assertEqual(calls[0], 2)
errors = registry.run_checks(tags=["tag1"])
errors2 = registry2.run_checks(tags=["tag1"])
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [4])
errors = registry.run_checks(tags=["tag1", "tag2"], include_deployment_checks=True)
errors2 = registry2.run_checks(tags=["tag1", "tag2"], include_deployment_checks=True)
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [4, 5])
class MessageTests(SimpleTestCase):
def test_printing(self):
e = Error("Message", hint="Hint", obj=DummyObj())
expected = "obj: Message\n\tHINT: Hint"
self.assertEqual(force_text(e), expected)
def test_printing_no_hint(self):
e = Error("Message", obj=DummyObj())
expected = "obj: Message"
self.assertEqual(force_text(e), expected)
def test_printing_no_object(self):
e = Error("Message", hint="Hint")
expected = "?: Message\n\tHINT: Hint"
self.assertEqual(force_text(e), expected)
def test_printing_with_given_id(self):
e = Error("Message", hint="Hint", obj=DummyObj(), id="ID")
expected = "obj: (ID) Message\n\tHINT: Hint"
self.assertEqual(force_text(e), expected)
def test_printing_field_error(self):
field = SimpleModel._meta.get_field('field')
e = Error("Error", obj=field)
expected = "check_framework.SimpleModel.field: Error"
self.assertEqual(force_text(e), expected)
def test_printing_model_error(self):
e = Error("Error", obj=SimpleModel)
expected = "check_framework.SimpleModel: Error"
self.assertEqual(force_text(e), expected)
def test_printing_manager_error(self):
manager = SimpleModel.manager
e = Error("Error", obj=manager)
expected = "check_framework.SimpleModel.manager: Error"
self.assertEqual(force_text(e), expected)
def simple_system_check(**kwargs):
simple_system_check.kwargs = kwargs
return []
def tagged_system_check(**kwargs):
tagged_system_check.kwargs = kwargs
return [checks.Warning('System Check')]
tagged_system_check.tags = ['simpletag']
def deployment_system_check(**kwargs):
deployment_system_check.kwargs = kwargs
return [checks.Warning('Deployment Check')]
deployment_system_check.tags = ['deploymenttag']
class CheckCommandTests(SimpleTestCase):
def setUp(self):
simple_system_check.kwargs = None
tagged_system_check.kwargs = None
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
def tearDown(self):
sys.stdout, sys.stderr = self.old_stdout, self.old_stderr
@override_system_checks([simple_system_check, tagged_system_check])
def test_simple_call(self):
call_command('check')
self.assertEqual(simple_system_check.kwargs, {'app_configs': None})
self.assertEqual(tagged_system_check.kwargs, {'app_configs': None})
@override_system_checks([simple_system_check, tagged_system_check])
def test_given_app(self):
call_command('check', 'auth', 'admin')
auth_config = apps.get_app_config('auth')
admin_config = apps.get_app_config('admin')
self.assertEqual(simple_system_check.kwargs, {'app_configs': [auth_config, admin_config]})
self.assertEqual(tagged_system_check.kwargs, {'app_configs': [auth_config, admin_config]})
@override_system_checks([simple_system_check, tagged_system_check])
def test_given_tag(self):
call_command('check', tags=['simpletag'])
self.assertIsNone(simple_system_check.kwargs)
self.assertEqual(tagged_system_check.kwargs, {'app_configs': None})
@override_system_checks([simple_system_check, tagged_system_check])
def test_invalid_tag(self):
with self.assertRaises(CommandError):
call_command('check', tags=['missingtag'])
@override_system_checks([simple_system_check])
def test_list_tags_empty(self):
call_command('check', list_tags=True)
self.assertEqual('\n', sys.stdout.getvalue())
@override_system_checks([tagged_system_check])
def test_list_tags(self):
call_command('check', list_tags=True)
self.assertEqual('simpletag\n', sys.stdout.getvalue())
@override_system_checks([tagged_system_check], deployment_checks=[deployment_system_check])
def test_list_deployment_check_omitted(self):
call_command('check', list_tags=True)
self.assertEqual('simpletag\n', sys.stdout.getvalue())
@override_system_checks([tagged_system_check], deployment_checks=[deployment_system_check])
def test_list_deployment_check_included(self):
call_command('check', deploy=True, list_tags=True)
self.assertEqual('deploymenttag\nsimpletag\n', sys.stdout.getvalue())
@override_system_checks([tagged_system_check], deployment_checks=[deployment_system_check])
def test_tags_deployment_check_omitted(self):
msg = 'There is no system check with the "deploymenttag" tag.'
with self.assertRaisesMessage(CommandError, msg):
call_command('check', tags=['deploymenttag'])
@override_system_checks([tagged_system_check], deployment_checks=[deployment_system_check])
def test_tags_deployment_check_included(self):
call_command('check', deploy=True, tags=['deploymenttag'])
self.assertIn('Deployment Check', sys.stderr.getvalue())
@override_system_checks([tagged_system_check])
def test_fail_level(self):
with self.assertRaises(CommandError):
call_command('check', fail_level='WARNING')
def custom_error_system_check(app_configs, **kwargs):
return [Error('Error', id='myerrorcheck.E001')]
def custom_warning_system_check(app_configs, **kwargs):
return [Warning('Warning', id='mywarningcheck.E001')]
class SilencingCheckTests(SimpleTestCase):
def setUp(self):
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
self.stdout, self.stderr = StringIO(), StringIO()
sys.stdout, sys.stderr = self.stdout, self.stderr
def tearDown(self):
sys.stdout, sys.stderr = self.old_stdout, self.old_stderr
@override_settings(SILENCED_SYSTEM_CHECKS=['myerrorcheck.E001'])
@override_system_checks([custom_error_system_check])
def test_silenced_error(self):
out = StringIO()
err = StringIO()
call_command('check', stdout=out, stderr=err)
self.assertEqual(out.getvalue(), 'System check identified no issues (1 silenced).\n')
self.assertEqual(err.getvalue(), '')
@override_settings(SILENCED_SYSTEM_CHECKS=['mywarningcheck.E001'])
@override_system_checks([custom_warning_system_check])
def test_silenced_warning(self):
out = StringIO()
err = StringIO()
call_command('check', stdout=out, stderr=err)
self.assertEqual(out.getvalue(), 'System check identified no issues (1 silenced).\n')
self.assertEqual(err.getvalue(), '')
class CheckFrameworkReservedNamesTests(SimpleTestCase):
@isolate_apps('check_framework', kwarg_name='apps')
@override_system_checks([checks.model_checks.check_all_models])
def test_model_check_method_not_shadowed(self, apps):
class ModelWithAttributeCalledCheck(models.Model):
check = 42
class ModelWithFieldCalledCheck(models.Model):
check = models.IntegerField()
class ModelWithRelatedManagerCalledCheck(models.Model):
pass
class ModelWithDescriptorCalledCheck(models.Model):
check = models.ForeignKey(ModelWithRelatedManagerCalledCheck, models.CASCADE)
article = models.ForeignKey(
ModelWithRelatedManagerCalledCheck,
models.CASCADE,
related_name='check',
)
errors = checks.run_checks(app_configs=apps.get_app_configs())
expected = [
Error(
"The 'ModelWithAttributeCalledCheck.check()' class method is "
"currently overridden by 42.",
obj=ModelWithAttributeCalledCheck,
id='models.E020'
),
Error(
"The 'ModelWithRelatedManagerCalledCheck.check()' class method is "
"currently overridden by %r." % ModelWithRelatedManagerCalledCheck.check,
obj=ModelWithRelatedManagerCalledCheck,
id='models.E020'
),
Error(
"The 'ModelWithDescriptorCalledCheck.check()' class method is "
"currently overridden by %r." % ModelWithDescriptorCalledCheck.check,
obj=ModelWithDescriptorCalledCheck,
id='models.E020'
),
]
self.assertEqual(errors, expected)
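# ---------------------------------------------------------------------------
# Editor's note: outside of these tests, checks are normally attached to the
# global registry with the django.core.checks.register decorator instead of a
# local CheckRegistry. A minimal sketch (the tag and message are illustrative
# only):
@checks.register('example')
def example_system_check(app_configs, **kwargs):
    # Return a list of messages; an empty list means the check passed
    return [checks.Warning('Example warning', id='example.W001')]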
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'LogIdenticalProduct.basemodel_ptr'
db.delete_column(u'catalog_logidenticalproduct', u'basemodel_ptr_id')
# Adding field 'LogIdenticalProduct.id'
db.execute('ALTER TABLE "catalog_logidenticalproduct" ADD COLUMN "id" SERIAL NOT NULL PRIMARY KEY')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'LogIdenticalProduct.basemodel_ptr'
raise RuntimeError("Cannot reverse this migration. 'LogIdenticalProduct.basemodel_ptr' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Adding field 'LogIdenticalProduct.basemodel_ptr'
db.add_column(u'catalog_logidenticalproduct', u'basemodel_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.BaseModel'], unique=True, primary_key=True),
keep_default=False)
# Deleting field 'LogIdenticalProduct.id'
db.delete_column(u'catalog_logidenticalproduct', u'id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.abstractlike': {
'Meta': {'object_name': 'AbstractLike', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'liked_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.abstracttop': {
'Meta': {'object_name': 'AbstractTop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {})
},
'catalog.basemodel': {
'Meta': {'object_name': 'BaseModel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'})
},
'catalog.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"})
},
'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproducttutorial': {
'Meta': {'object_name': 'LikeProductTutorial', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.list': {
'Meta': {'object_name': 'List', '_ormbases': ['catalog.BaseModel']},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.location': {
'Meta': {'object_name': 'Location', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.note': {
'Meta': {'object_name': 'Note', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog']
|
|
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
import pytest
from django.core.exceptions import ValidationError
from awx.main.utils import decrypt_field
from awx.main.models import Credential, CredentialType
from rest_framework import serializers
EXAMPLE_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nxyz==\n-----END PRIVATE KEY-----'
EXAMPLE_ENCRYPTED_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nxyz==\n-----END PRIVATE KEY-----'
PKCS8_PRIVATE_KEY = '''-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD0uyqyUHELQ25B
8lNBu/ZfVx8fPFT6jvAUscxfWLqsZCJrR8BWadXMa/0ALMaUuZbZ8Ug27jztOSO8
w8hJ6dqHaQ2gfbwsfbF6XHaetap0OoAFtnaiULSvljOkoWG+WSyfvJ73ZwEP3KzW
0JbNX24zGFdTFzX1W+8BbLpEIw3XiP9iYPtu0uit6VradMrt2Kdu+VKlQzbG1+89
g70IyFkvopynnWAkA+YXNo08dxOzmci7/G0Cp1Lwh4IAH++HbE2E4odWm5zoCaT7
gcZzKuZs/kkDHaS9O5VjsWGrZ+mp3NgeABbFRP0jDhCtS8QRa94RC6mobtnYoRd7
C1Iz3cdjAgMBAAECggEAb5p9BZUegBrviH5YDmWHnIHP7QAn5p1RibZtM1v0wRHn
ClJNuXqJJ7BlT3Ob2Y3q55ebLYWmXi4NCJOl3mMZJ2A2eSZtrkJhsaHB7G1+/oMB
B9nmLu4r/9i4005PEy16ZpvvSHZ+KvwhC9NSufRXflCO3hL7JdmXXGh3ZwQvV0a7
mP1RIQKIcLynPBTbTH1w30Znj2M4bSjUlsLbOYhwg2YQxa1qKuCtata5qdAVbgny
JYPruBhcHLPGvC0FBcd8zoYWLvQ52hcXNxrl0iN1KY7zIEYmU+3gbuBIoVl2Qo/p
zmH01bo9h9p5DdkjQ6MdjvrOX8aT93S1g9y8WqtoXQKBgQD7E2+RZ/XNIFts9cqG
2S7aywIydkgEmaOJl1fzebutJPPQXJDpQZtEenr+CG7KsRPf8nJ3jc/4OHIsnHYD
WBgXLQz0QWEgXwTRicXsxsARzHKV2Lb8IsXK5vfia+i9fxZV3WwkKVXOmTJHcVl1
XD5zfbAlrQ4r+Uo618zgpchsBQKBgQD5h+A+PX+3PdUPNkHdCltMwaSsXjBcYYoF
uZGR4v8jRQguGD5h8Eyk/cS3VVryYRKiYJCvaPFXTzN6GAsQoSnMW+37GKsbL+oK
5JYoSiCY6BpaJO3uo/UwvitV8EjHdaArb5oBjx1yiobRqhVJ+iH1PKxgnQFI5RgO
4AhnnYMqRwKBgQDUX+VQXlp5LzSGXwX3uH+8jFmIa6qRUZAWU1EO3tqUI5ykk5fz
5g27B8s/U8y7YLuKA581Z1wR/1T8TUA5peuCtxWtChxo8Fa4E0y68ocGxyPpgk2N
yq/56BKnkFVm7Lfs24WctOYjAkyYR9W+ws8Ei71SsSY6pfxW97ESGMkGLQKBgAlW
ABnUCzc75QDQst4mSQwyIosgawbJz3QvYTboG0uihY/T8GGRsAxsQjPpyaFP6HaS
zlcBwiXWHMLwq1lP7lRrDBhc7+nwfP0zWDrhqx6NcI722sAW+lF8i/qHJvHvgLKf
Vk/AnwVuEWU+y9UcurCGOJzUwvuLNr83upjF1+Z5AoGAP91XiBCorJLRJaryi6zt
iCjRxoVsrN6NvAh+MQ1yfAopO4RhxEXM/uUOBkulNhlnp+evSxUwDnFNOWzsZVn9
B6yXdJ9BTWXFX7YhEkosRZCXnNWX4Dz+DGU/yvSHQR/JYj8mRav98TmJU6lK6Vw/
YukmWPxNB+x4Ym3RNPrLpU4=
-----END PRIVATE KEY-----'''
PKCS8_ENCRYPTED_PRIVATE_KEY = '''-----BEGIN ENCRYPTED PRIVATE KEY-----
MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQIC4E/DX+33rACAggA
MB0GCWCGSAFlAwQBAgQQbeAsQdsEKoztekP5JXmHFASCBNAmNAMGSnycmN4sYleT
NS9r/ph9v58dv0/hzbE6TCt/i6nmA/D8mtuYB8gm30E/DOuN/dnL3z2gpyvr478P
FjoRnueuwMdLcfEpzEXotJdc7vmUsSjTFq99oh84JHdCfWSRtxkDu64dwp3GPC9+
f1qqg6o4/bPkjni+bCMgq9vgr4K+vuaKzaJqUTEQFuT3CirDGoWGpfRDtDoBmlg8
8esEXoA6RD2DNv6fQrOu9Q4Fc0YkzcoIfY6EJxu+f75LF/NUVpmeJ8QDjj6VFVuX
35ChPYolhBSC/MHBHAVVrn17FAdpLkiz7hIR7KBIg2nuu8oUnPMzDff/CeehYzNb
OH12P9zaHZa3DZHuu27oI6yUdgs8HYNLtBzXH/DbyAeW9alg1Ofber5DO62ieL3E
LqBd4R7qqDSTQmiA6B8LkVIrFrIOqn+nWoM9gHhIrTI409A/oTbpen87sZ4MIQk4
Vjw/A/D5OYhnjOEVgMXrNpKzFfRJPdKh8LYjAaytsLKZk/NOWKpBOcIPhBG/agmx
CX2NE2tpwNo+uWSOG6qTqc8xiQFDsQmbz9YEuux13J3Hg5gVMOJQNMvYpxgFD156
Z82QBMdrY1tRIA91kW97UDj6OEAyz8HnmL+rCiRLGJXKUnZsSET+VHs9+uhBggX8
GxliP35pYlmdejqGWHjiYlGF2+WKrd5axx/m1DcfZdXSaF1IdLKafnNXzUZbOnOM
7RbKHDhBKr/vkBV1SGYgDLNn4hflFzhdI65AKxO2KankzaWxF09/0kRZlmxm+tZX
8r0fHe9IO1KQR/52Kfg1vAQdt2KiyAziw5+tcqQT28knSDboNKpD2Du8BAoH9xG7
0Ca57oBHh/VGzM/niJBjI4EMOPZKuRJsxZF7wOOO6NTh/XFf3LpzsR1y3qoXN4cR
n+/jLUO/3kSGsqso6DT9C0o1pTrnORaJb4aF05jljFx9LYiQUOoLujp8cVW7XxQB
pTgJEFxTN5YA//cwYu3GOJ1AggSeF/WkHCDfCTpTfnO/WTZ0oc+nNyC1lBVfcZ67
GCH8COsfmhusrYiJUN6vYZIr4MfylVg53PUKYbLKYad9bIIaYYuu3MP4CtKDWHvk
8q+GzpjVUCPwjjsea56RMav+xDPvmgIayDptae26Fv+mRPcwqORYMFNtVRG6DUXo
+lrWlaDlkfyfZlQ6sK5c1cJNI8pSPocP/c9TBhP+xFROiWxvMOxhM7DmDl8rhAxU
ttZSukCg7n38AFsUqg5eLLq9sT+P6VmX8d3YflPBIkvNgK7nKUTwgrpbuADo07b0
sVlAY/9SmtHvOCibxphvPYUOhwWo97PzzAsdVGz/xRvH8mzI/Iftbc1U2C2La8FJ
xjaAFwWK/CjQSwnCB8raWo9FUavV6xdb2K0G4VBVDvZO9EJBzX0m6EqQx3XMZf1s
crP0Dp9Ee66vVOlj+XnyyTkUADSYHr8/42Aohv96fJEMjy5gbBl4QQm2QKzAkq9n
lrHvQpCxPixUUAEI0ZL1Y74hcMecnfbpGibrUvSp+cyDCOG92KKxLXEgVYCbXHZu
bOlOanZF3vC6I9dUC2d8I5B87b2K+y57OkWpmS3zxCEpsBqQmn8Te50DnlkPJPBj
GLqbpJyX2r3p/Rmo6mLY71SqpA==
-----END ENCRYPTED PRIVATE KEY-----'''
@pytest.mark.django_db
def test_default_cred_types():
assert sorted(CredentialType.defaults.keys()) == [
'aim',
'aws',
'azure_kv',
'azure_rm',
'cloudforms',
'conjur',
'gce',
'github_token',
'gitlab_token',
'hashivault_kv',
'hashivault_ssh',
'insights',
'kubernetes_bearer_token',
'net',
'openstack',
'rhv',
'satellite6',
'scm',
'ssh',
'tower',
'vault',
'vmware',
]
for type_ in CredentialType.defaults.values():
assert type_().managed_by_tower is True
@pytest.mark.django_db
def test_credential_creation(organization_factory):
org = organization_factory('test').organization
type_ = CredentialType(
kind='cloud',
name='SomeCloud',
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': 'Username for SomeCloud',
'type': 'string'
}]
}
)
type_.save()
cred = Credential(credential_type=type_, name="Bob's Credential",
inputs={'username': 'bob'}, organization=org)
cred.save()
cred.full_clean()
assert isinstance(cred, Credential)
assert cred.name == "Bob's Credential"
assert cred.inputs['username'] == 'bob'
@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['ssh', 'net', 'scm'])
@pytest.mark.parametrize('ssh_key_data, ssh_key_unlock, valid', [
[EXAMPLE_PRIVATE_KEY, None, True], # unencrypted key, no unlock pass
[EXAMPLE_PRIVATE_KEY, 'super-secret', False], # unencrypted key, unlock pass
[EXAMPLE_ENCRYPTED_PRIVATE_KEY, 'super-secret', True], # encrypted key, unlock pass
[EXAMPLE_ENCRYPTED_PRIVATE_KEY, None, False], # encrypted key, no unlock pass
[PKCS8_ENCRYPTED_PRIVATE_KEY, 'passme', True], # encrypted PKCS8 key, unlock pass
[PKCS8_ENCRYPTED_PRIVATE_KEY, None, False], # encrypted PKCS8 key, no unlock pass
[PKCS8_PRIVATE_KEY, None, True], # unencrypted PKCS8 key, no unlock pass
[PKCS8_PRIVATE_KEY, 'passme', False], # unencrypted PKCS8 key, unlock pass
[None, None, True], # no key, no unlock pass
['INVALID-KEY-DATA', None, False], # invalid key data
[EXAMPLE_PRIVATE_KEY.replace('=', '\u003d'), None, True], # automatically fix JSON-encoded GCE keys
])
def test_ssh_key_data_validation(organization, kind, ssh_key_data, ssh_key_unlock, valid):
inputs = {'username': 'joe-user'}
if ssh_key_data:
inputs['ssh_key_data'] = ssh_key_data
if ssh_key_unlock:
inputs['ssh_key_unlock'] = ssh_key_unlock
cred_type = CredentialType.defaults[kind]()
cred_type.save()
cred = Credential(
credential_type=cred_type,
name="Best credential ever",
inputs=inputs,
organization=organization
)
cred.save()
if valid:
cred.full_clean()
else:
with pytest.raises(Exception) as e:
cred.full_clean()
assert e.type in (ValidationError, serializers.ValidationError)
@pytest.mark.django_db
@pytest.mark.parametrize('inputs, valid', [
({'vault_password': 'some-pass'}, True),
({}, True),
({'vault_password': 'dev-pass', 'vault_id': 'dev'}, True),
({'vault_password': 'dev-pass', 'vault_id': 'dev@prompt'}, False), # @ not allowed
])
def test_vault_validation(organization, inputs, valid):
cred_type = CredentialType.defaults['vault']()
cred_type.save()
cred = Credential(
credential_type=cred_type,
name="Best credential ever",
inputs=inputs,
organization=organization
)
cred.save()
if valid:
cred.full_clean()
else:
with pytest.raises(Exception) as e:
cred.full_clean()
assert e.type in (ValidationError, serializers.ValidationError)
@pytest.mark.django_db
@pytest.mark.parametrize('become_method, valid', [
('', True),
('sudo', True),
('custom-plugin', True),
])
def test_choices_validity(become_method, valid, organization):
inputs = {'become_method': become_method}
cred_type = CredentialType.defaults['ssh']()
cred_type.save()
cred = Credential(
credential_type=cred_type,
name="Best credential ever",
inputs=inputs,
organization=organization
)
cred.save()
if valid:
cred.full_clean()
else:
with pytest.raises(serializers.ValidationError) as e:
cred.full_clean()
assert "'%s' is not one of" % become_method in str(e)
@pytest.mark.django_db
def test_credential_encryption(organization_factory, credentialtype_ssh):
org = organization_factory('test').organization
cred = Credential(
credential_type=credentialtype_ssh,
name="Bob's Credential",
inputs={'password': 'testing123'},
organization=org
)
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
assert cred.inputs['password'].startswith('$encrypted$')
assert decrypt_field(cred, 'password') == 'testing123'
@pytest.mark.django_db
def test_credential_encryption_with_ask(organization_factory, credentialtype_ssh):
org = organization_factory('test').organization
cred = Credential(
credential_type=credentialtype_ssh,
name="Bob's Credential",
inputs={'password': 'ASK'},
organization=org
)
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
assert cred.inputs['password'] == 'ASK'
@pytest.mark.django_db
def test_credential_with_multiple_secrets(organization_factory, credentialtype_ssh):
org = organization_factory('test').organization
cred = Credential(
credential_type=credentialtype_ssh,
name="Bob's Credential",
inputs={'ssh_key_data': 'SOMEKEY', 'ssh_key_unlock': 'testing123'},
organization=org
)
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
assert cred.inputs['ssh_key_data'].startswith('$encrypted$')
assert decrypt_field(cred, 'ssh_key_data') == 'SOMEKEY'
assert cred.inputs['ssh_key_unlock'].startswith('$encrypted$')
assert decrypt_field(cred, 'ssh_key_unlock') == 'testing123'
@pytest.mark.django_db
def test_credential_update(organization_factory, credentialtype_ssh):
org = organization_factory('test').organization
cred = Credential(
credential_type=credentialtype_ssh,
name="Bob's Credential",
inputs={'password': 'testing123'},
organization=org
)
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
cred.inputs['password'] = 'newpassword'
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
assert cred.inputs['password'].startswith('$encrypted$')
assert decrypt_field(cred, 'password') == 'newpassword'
@pytest.mark.django_db
def test_credential_update_with_prior(organization_factory, credentialtype_ssh):
org = organization_factory('test').organization
cred = Credential(
credential_type=credentialtype_ssh,
name="Bob's Credential",
inputs={'password': 'testing123'},
organization=org
)
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
cred.inputs['username'] = 'joe'
cred.inputs['password'] = '$encrypted$'
cred.save()
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
assert cred.inputs['username'] == 'joe'
assert cred.inputs['password'].startswith('$encrypted$')
assert decrypt_field(cred, 'password') == 'testing123'
@pytest.mark.django_db
def test_credential_get_input(organization_factory):
organization = organization_factory('test').organization
type_ = CredentialType(
kind='vault',
name='somevault',
managed_by_tower=True,
inputs={
'fields': [{
'id': 'vault_password',
'type': 'string',
'secret': True,
}, {
'id': 'vault_id',
'type': 'string',
'secret': False
}, {
'id': 'secret',
'type': 'string',
'secret': True,
}]
}
)
type_.save()
cred = Credential(
organization=organization,
credential_type=type_,
name="Bob's Credential",
inputs={'vault_password': 'testing321'}
)
cred.save()
cred.full_clean()
assert isinstance(cred, Credential)
# verify expected exception is raised when attempting to access an unset
# input without providing a default
with pytest.raises(AttributeError):
cred.get_input('vault_id')
# verify that the provided default is used for unset inputs
assert cred.get_input('vault_id', default='foo') == 'foo'
# verify expected exception is raised when attempting to access an undefined
# input without providing a default
with pytest.raises(AttributeError):
cred.get_input('field_not_on_credential_type')
# verify that the provided default is used for undefined inputs
assert cred.get_input('field_not_on_credential_type', default='bar') == 'bar'
# verify expected exception is raised when attempting to access an unset secret
# input without providing a default
with pytest.raises(AttributeError):
cred.get_input('secret')
    # verify that the provided default is used for unset secret inputs
assert cred.get_input('secret', default='fiz') == 'fiz'
# verify return values for encrypted secret fields are decrypted
assert cred.inputs['vault_password'].startswith('$encrypted$')
assert cred.get_input('vault_password') == 'testing321'
|
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import netaddr
from neutron_lib import constants as n_consts
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.common import utils
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
CT_STATES = [
ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED]
FLOW_FIELD_FOR_IPVER_AND_DIRECTION = {
(n_consts.IP_VERSION_4, firewall.EGRESS_DIRECTION): 'nw_dst',
(n_consts.IP_VERSION_6, firewall.EGRESS_DIRECTION): 'ipv6_dst',
(n_consts.IP_VERSION_4, firewall.INGRESS_DIRECTION): 'nw_src',
(n_consts.IP_VERSION_6, firewall.INGRESS_DIRECTION): 'ipv6_src',
}
FORBIDDEN_PREFIXES = (n_consts.IPv4_ANY, n_consts.IPv6_ANY)
def is_valid_prefix(ip_prefix):
    # IPv6 has multiple ways to describe the ::/0 network; converting to
    # IPNetwork and back to a string unifies them.
return (ip_prefix and
str(netaddr.IPNetwork(ip_prefix)) not in FORBIDDEN_PREFIXES)
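# Illustrative note (not from the original module): netaddr normalizes every
# spelling of the "match everything" prefix, so for example:
#   is_valid_prefix('0.0.0.0/0')   -> False
#   is_valid_prefix('::0/0')       -> False (normalized to '::/0')
#   is_valid_prefix('10.0.0.0/8')  -> True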
def _assert_mergeable_rules(rule_conj_list):
"""Assert a given (rule, conj_ids) list has mergeable rules.
The given rules must be the same except for port_range_{min,max}
differences.
"""
rule_tmpl = rule_conj_list[0][0].copy()
rule_tmpl.pop('port_range_min', None)
rule_tmpl.pop('port_range_max', None)
for rule, conj_id in rule_conj_list[1:]:
rule1 = rule.copy()
rule1.pop('port_range_min', None)
rule1.pop('port_range_max', None)
if rule_tmpl != rule1:
raise RuntimeError(
"Incompatible SG rules detected: %(rule1)s and %(rule2)s. "
"They cannot be merged. This should not happen." %
{'rule1': rule_tmpl, 'rule2': rule})
def merge_common_rules(rule_conj_list):
"""Take a list of (rule, conj_id) and merge elements with the same rules.
Return a list of (rule, conj_id_list).
"""
if len(rule_conj_list) == 1:
rule, conj_id = rule_conj_list[0]
return [(rule, [conj_id])]
_assert_mergeable_rules(rule_conj_list)
rule_conj_map = collections.defaultdict(list)
for rule, conj_id in rule_conj_list:
rule_conj_map[(rule.get('port_range_min'),
rule.get('port_range_max'))].append(conj_id)
result = []
rule_tmpl = rule_conj_list[0][0]
rule_tmpl.pop('port_range_min', None)
rule_tmpl.pop('port_range_max', None)
for (port_min, port_max), conj_ids in rule_conj_map.items():
rule = rule_tmpl.copy()
if port_min is not None:
rule['port_range_min'] = port_min
if port_max is not None:
rule['port_range_max'] = port_max
result.append((rule, conj_ids))
return result
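# Illustrative sketch (assumed inputs, not from the original module): two TCP
# rules identical except for their conj_id are grouped by (min, max), e.g.
#   [({'protocol': 6, 'port_range_min': 22, 'port_range_max': 22}, 8),
#    ({'protocol': 6, 'port_range_min': 22, 'port_range_max': 22}, 16)]
# merges to
#   [({'protocol': 6, 'port_range_min': 22, 'port_range_max': 22}, [8, 16])]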
def _merge_port_ranges_helper(port_range_item):
    # Sort by port, but 'min' entries must come before 'max' entries at
    # the same port.
port, m, dummy = port_range_item
return port * 2 + (0 if m == 'min' else 1)
def merge_port_ranges(rule_conj_list):
"""Take a list of (rule, conj_id) and transform into a list
whose rules don't overlap. Return a list of (rule, conj_id_list).
"""
if len(rule_conj_list) == 1:
rule, conj_id = rule_conj_list[0]
return [(rule, [conj_id])]
_assert_mergeable_rules(rule_conj_list)
port_ranges = []
for rule, conj_id in rule_conj_list:
port_ranges.append((rule.get('port_range_min', 1), 'min', conj_id))
port_ranges.append((rule.get('port_range_max', 65535), 'max', conj_id))
port_ranges.sort(key=_merge_port_ranges_helper)
# The idea here is to scan the port_ranges list in an ascending order,
# keeping active conjunction IDs and range in cur_conj and cur_range_min.
# A 'min' port_ranges item means an addition to cur_conj, while a 'max'
# item means a removal.
result = []
rule_tmpl = rule_conj_list[0][0]
cur_conj = set()
cur_range_min = None
for port, m, conj_id in port_ranges:
if m == 'min':
if cur_conj and cur_range_min != port:
rule = rule_tmpl.copy()
rule['port_range_min'] = cur_range_min
rule['port_range_max'] = port - 1
result.append((rule, list(cur_conj)))
cur_range_min = port
cur_conj.add(conj_id)
else:
if cur_range_min <= port:
rule = rule_tmpl.copy()
rule['port_range_min'] = cur_range_min
rule['port_range_max'] = port
result.append((rule, list(cur_conj)))
            # The next port range starts from (port + 1); it no longer
            # includes 'port'.
cur_range_min = port + 1
cur_conj.remove(conj_id)
if (len(result) == 1 and result[0][0]['port_range_min'] == 1 and
result[0][0]['port_range_max'] == 65535):
del result[0][0]['port_range_min']
del result[0][0]['port_range_max']
return result
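# Illustrative sketch (assumed inputs, not from the original module):
# overlapping ranges are split into non-overlapping segments carrying every
# matching conjunction ID, e.g. rules with (ports 80-90, conj_id 8) and
# (ports 85-100, conj_id 16) become
#   [(80-84, [8]), (85-90, [8, 16]), (91-100, [16])]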
def flow_priority_offset(rule, conjunction=False):
"""Calculate flow priority offset from rule.
    Whether the rule belongs to conjunction flows is decided by the
    presence of rule['remote_group_id'], but can be forced to True
    with the optional conjunction arg.
"""
conj_offset = 0 if 'remote_group_id' in rule or conjunction else 4
protocol = rule.get('protocol')
if protocol is None:
return conj_offset
if protocol in [n_consts.PROTO_NUM_ICMP, n_consts.PROTO_NUM_IPV6_ICMP]:
if 'port_range_min' not in rule:
return conj_offset + 1
elif 'port_range_max' not in rule:
return conj_offset + 2
return conj_offset + 3
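# Illustrative summary (derived from the logic above): the returned offsets
# are, for conjunction/remote-group rules vs. plain rules respectively:
#   no protocol                              -> 0 / 4
#   ICMP without a type                      -> 1 / 5
#   ICMP with a type but no code             -> 2 / 6
#   ICMP with type and code, other protocols -> 3 / 7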
def create_flows_from_rule_and_port(rule, port, conjunction=False):
"""Create flows from given args.
For description of the optional conjunction arg, see flow_priority_offset.
"""
ethertype = rule['ethertype']
direction = rule['direction']
dst_ip_prefix = rule.get('dest_ip_prefix')
src_ip_prefix = rule.get('source_ip_prefix')
flow_template = {
'priority': 70 + flow_priority_offset(rule, conjunction),
'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype],
'reg_port': port.ofport,
}
if is_valid_prefix(dst_ip_prefix):
flow_template[FLOW_FIELD_FOR_IPVER_AND_DIRECTION[(
utils.get_ip_version(dst_ip_prefix), firewall.EGRESS_DIRECTION)]
] = dst_ip_prefix
if is_valid_prefix(src_ip_prefix):
flow_template[FLOW_FIELD_FOR_IPVER_AND_DIRECTION[(
utils.get_ip_version(src_ip_prefix), firewall.INGRESS_DIRECTION)]
] = src_ip_prefix
flows = create_protocol_flows(direction, flow_template, port, rule)
return flows
def populate_flow_common(direction, flow_template, port):
"""Initialize common flow fields."""
if direction == firewall.INGRESS_DIRECTION:
flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE
flow_template['actions'] = "output:{:d}".format(port.ofport)
elif direction == firewall.EGRESS_DIRECTION:
flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE
        # Traffic accepted by egress rules may also be ingress traffic for
        # another port on this host, so resubmit to check whether ingress
        # rules should also be applied.
flow_template['actions'] = 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE)
return flow_template
def create_protocol_flows(direction, flow_template, port, rule):
flow_template = populate_flow_common(direction,
flow_template.copy(),
port)
protocol = rule.get('protocol')
if protocol is not None:
flow_template['nw_proto'] = protocol
if protocol in [n_consts.PROTO_NUM_ICMP, n_consts.PROTO_NUM_IPV6_ICMP]:
flows = create_icmp_flows(flow_template, rule)
else:
flows = create_port_range_flows(flow_template, rule)
return flows or [flow_template]
def create_port_range_flows(flow_template, rule):
protocol = ovsfw_consts.REVERSE_IP_PROTOCOL_MAP_WITH_PORTS.get(
rule.get('protocol'))
if protocol is None:
return []
flows = []
src_port_match = '{:s}_src'.format(protocol)
src_port_min = rule.get('source_port_range_min')
src_port_max = rule.get('source_port_range_max')
dst_port_match = '{:s}_dst'.format(protocol)
dst_port_min = rule.get('port_range_min')
dst_port_max = rule.get('port_range_max')
dst_port_range = []
if dst_port_min and dst_port_max:
dst_port_range = utils.port_rule_masking(dst_port_min, dst_port_max)
src_port_range = []
if src_port_min and src_port_max:
src_port_range = utils.port_rule_masking(src_port_min, src_port_max)
for port in src_port_range:
flow = flow_template.copy()
flow[src_port_match] = port
if dst_port_range:
for port in dst_port_range:
dst_flow = flow.copy()
dst_flow[dst_port_match] = port
flows.append(dst_flow)
else:
flows.append(flow)
else:
for port in dst_port_range:
flow = flow_template.copy()
flow[dst_port_match] = port
flows.append(flow)
return flows
def create_icmp_flows(flow_template, rule):
icmp_type = rule.get('port_range_min')
if icmp_type is None:
return
flow = flow_template.copy()
flow['icmp_type'] = icmp_type
icmp_code = rule.get('port_range_max')
if icmp_code is not None:
flow['icmp_code'] = icmp_code
return [flow]
def _flow_priority_offset_from_conj_id(conj_id):
"Return a flow priority offset encoded in a conj_id."
# A base conj_id, which is returned by ConjIdMap.get_conj_id, is a
# multiple of 8, and we use 2 conj_ids per offset.
return conj_id % 8 // 2
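# Illustrative arithmetic (assumed conj_id values): base IDs are multiples of
# 8 and each offset consumes two IDs, so conj_id 8 -> offset 0, 10 -> 1,
# 13 -> 2 and 15 -> 3.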
def create_flows_for_ip_address(ip_address, direction, ethertype,
vlan_tag, conj_ids):
"""Create flows from a rule and an ip_address derived from
remote_group_id
"""
# Group conj_ids per priority.
conj_id_lists = [[] for i in range(4)]
for conj_id in conj_ids:
conj_id_lists[
_flow_priority_offset_from_conj_id(conj_id)].append(conj_id)
ip_prefix = str(netaddr.IPNetwork(ip_address).cidr)
flow_template = {
'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype],
'reg_net': vlan_tag, # needed for project separation
}
ip_ver = utils.get_ip_version(ip_prefix)
if direction == firewall.EGRESS_DIRECTION:
flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE
elif direction == firewall.INGRESS_DIRECTION:
flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE
flow_template[FLOW_FIELD_FOR_IPVER_AND_DIRECTION[(
ip_ver, direction)]] = ip_prefix
result = []
for offset, conj_id_list in enumerate(conj_id_lists):
if not conj_id_list:
continue
flow_template['priority'] = 70 + offset
result.extend(
substitute_conjunction_actions([flow_template], 1, conj_id_list))
return result
def create_accept_flows(flow):
flow['ct_state'] = CT_STATES[0]
result = [flow.copy()]
flow['ct_state'] = CT_STATES[1]
if flow['table'] == ovs_consts.RULES_INGRESS_TABLE:
flow['actions'] = (
'ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s}'.format(
ovsfw_consts.REG_NET, flow['actions']))
result.append(flow)
return result
def substitute_conjunction_actions(flows, dimension, conj_ids):
result = []
for flow in flows:
for i in range(2):
new_flow = flow.copy()
new_flow['ct_state'] = CT_STATES[i]
new_flow['actions'] = ','.join(
["conjunction(%d,%d/2)" % (s + i, dimension)
for s in conj_ids])
result.append(new_flow)
return result
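# Illustrative sketch (assumed conj_ids, not from the original module): with
# conj_ids=[8, 16] and dimension=1, each input flow is duplicated per
# ct_state and gets actions "conjunction(8,1/2),conjunction(16,1/2)" for the
# first copy and "conjunction(9,1/2),conjunction(17,1/2)" for the second.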
def create_conj_flows(port, conj_id, direction, ethertype):
"""Generate "accept" flows for a given conjunction ID."""
flow_template = {
'priority': 70 + _flow_priority_offset_from_conj_id(conj_id),
'conj_id': conj_id,
'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype],
# This reg_port matching is for delete_all_port_flows.
# The matching is redundant as it has been done by
# conjunction(...,2/2) flows and flows can be summarized
# without this.
'reg_port': port.ofport,
}
flow_template = populate_flow_common(direction, flow_template, port)
flows = create_accept_flows(flow_template)
flows[1]['conj_id'] += 1
return flows
|
|
"""Main test file for the pyrabbit Client."""
import json
try:
#python 2.x
import unittest2 as unittest
except ImportError:
#python 3.x
import unittest
import sys
sys.path.append('..')
import pyrabbit
from mock import Mock, patch
class TestClient(unittest.TestCase):
def setUp(self):
self.client = pyrabbit.api.Client('localhost:55672', 'guest', 'guest')
def tearDown(self):
del self.client
def test_server_init_200(self):
self.assertIsInstance(self.client, pyrabbit.api.Client)
self.assertEqual(self.client.host, 'localhost:55672')
def test_server_is_alive_default_vhost(self):
response = {'status': 'ok'}
self.client.http.do_call = Mock(return_value=response)
with patch.object(pyrabbit.api.Client, 'has_admin_rights') as mock_rights:
mock_rights.__get__ = Mock(return_value=True)
self.assertTrue(self.client.is_alive())
def test_get_vhosts_200(self):
self.client.http.do_call = Mock(return_value=[])
vhosts = self.client.get_all_vhosts()
self.assertIsInstance(vhosts, list)
def test_get_all_queues(self):
self.client.http.do_call = Mock(return_value=[])
queues = self.client.get_queues()
self.assertIsInstance(queues, list)
def test_get_nodes(self):
self.client.http.do_call = Mock(return_value=[])
nodes = self.client.get_nodes()
self.assertIsInstance(nodes, list)
def test_purge_queues(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.purge_queues(['q1', 'q2']))
def test_get_queue(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_queue('', 'q1'))
def test_get_all_exchanges(self):
xchs = [{'name': 'foo', 'vhost': '/', 'type': 'direct',
'durable': False, 'auto_delete': False, 'internal': False,
'arguments': {}},
{'name': 'bar', 'vhost': '/', 'type': 'direct',
'durable': False, 'auto_delete': False, 'internal': False,
'arguments': {}},]
self.client.http.do_call = Mock(return_value=xchs)
xlist = self.client.get_exchanges()
self.assertIsInstance(xlist, list)
self.assertEqual(len(xlist), 2)
def test_get_named_exchange(self):
xch = {'name': 'foo', 'vhost': '/', 'type': 'direct',
'durable': False, 'auto_delete': False, 'internal': False,
'arguments': {}}
self.client.http.do_call = Mock(return_value=xch)
myexch = self.client.get_exchange('%2F', 'foo')
self.assertEqual(myexch['name'], 'foo')
@patch.object(pyrabbit.api.Client, 'has_admin_rights')
def test_get_users_noprivs(self, has_rights):
has_rights.__get__ = Mock(return_value=False)
self.assertRaises(pyrabbit.api.PermissionError, self.client.get_users)
@patch.object(pyrabbit.api.Client, 'has_admin_rights')
def test_get_users_withprivs(self, has_rights):
has_rights.return_value = True
with patch('pyrabbit.http.HTTPClient.do_call') as do_call:
self.assertTrue(self.client.get_users())
def test_get_queue_depth(self):
q = {'messages': 4}
self.client.http.do_call = Mock(return_value=q)
depth = self.client.get_queue_depth('/', 'test')
self.assertEqual(depth, q['messages'])
def test_get_queue_depth_2(self):
"""
An integration test that includes the HTTP client's do_call
method and json decoding operations.
"""
q = {'messages': 8}
json_q = json.dumps(q)
with patch('httplib2.Response') as resp:
resp.reason = 'response reason here'
resp.status = 200
self.client.http.client.request = Mock(return_value=(resp, json_q))
depth = self.client.get_queue_depth('/', 'test')
self.assertEqual(depth, q['messages'])
def test_purge_queue(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.purge_queue('vname', 'qname'))
def test_create_queue(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_queue('qname', 'vname'))
def test_get_connections(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_connections())
def test_get_connection(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_connection('cname'))
def test_delete_connection(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_connection('127.0.0.1:1234 -> 127.0.0.1:5678 (1)'))
def test_get_channels(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_channels())
def test_get_channel(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_channel('127.0.0.1:1234 -> 127.0.0.1:5678 (1)'))
def test_get_bindings(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_bindings())
def test_create_binding(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_binding('vhost',
'exch',
'queue',
'rt_key'))
def test_delete_binding(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_binding('vhost',
'exch',
'queue',
'rt_key'))
def test_publish(self):
self.client.http.do_call = Mock(return_value={'routed': 'true'})
self.assertTrue(self.client.publish('vhost', 'xname', 'rt_key',
'payload'))
def test_create_vhost(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_vhost('vname'))
def test_delete_vhost(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_vhost('vname'))
def test_create_user(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.create_user('user', 'password'))
def test_delete_user(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_user('user'))
def test_get_permissions(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_permissions())
def test_get_vhost_permissions(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_vhost_permissions('vname'))
def test_get_user_permissions(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_user_permissions('username'))
def test_delete_permission(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.delete_permission('vname', 'username'))
def test_get_permission(self):
self.client.http.do_call = Mock(return_value=True)
self.assertTrue(self.client.get_permission('vname', 'username'))
@patch.object(pyrabbit.api.Client, 'has_admin_rights')
def test_is_alive_withprivs(self, mock_rights):
mock_rights.__get__ = Mock(return_value=True)
with patch('pyrabbit.http.HTTPClient.do_call') as do_call:
do_call.return_value = {'status': 'ok'}
self.assertTrue(self.client.is_alive())
def test_is_alive_noprivs(self):
with patch.object(pyrabbit.api.Client, 'has_admin_rights') as mock_rights:
mock_rights.__get__ = Mock(return_value=False)
self.assertRaises(pyrabbit.api.PermissionError, self.client.is_alive)
def test_has_admin_rights(self):
response = {
'auth_backend': 'rabbit_auth_backend_internal',
'name': 'guest',
'tags': 'administrator',
}
self.client.get_whoami = Mock(return_value=response)
with patch.object(pyrabbit.api.Client, 'get_whoami') as mock_whoami:
mock_whoami.__get__ = Mock(return_value=True)
self.assertTrue(self.client.has_admin_rights)
@unittest.skip
class TestLiveServer(unittest.TestCase):
def setUp(self):
self.rabbit = pyrabbit.api.Client('localhost:15672', 'guest', 'guest')
self.vhost_name = 'pyrabbit_test_vhost'
self.exchange_name = 'pyrabbit_test_exchange'
self.queue_name = 'pyrabbit_test_queue'
self.rt_key = 'pyrabbit-roundtrip'
self.payload = 'pyrabbit test message payload'
self.user = 'guest'
def test_round_trip(self):
"""
This does a 'round trip' test, which consists of the following steps:
* Create a vhost, and verify creation
* Give 'guest' all perms on vhost
* Create an exchange in that vhost, verify creation
* Create a queue
* Create a binding between the queue and exchange
* Publish a message to the exchange that makes it to the queue
* Grab that message from the queue (verify it's the same message)
* Delete binding and verify we don't receive messages
* Delete the exchange
* Delete the vhost
"""
# create a vhost, verify creation, and grant all perms to 'guest'.
self.rabbit.create_vhost(self.vhost_name)
vhosts = [i['name'] for i in self.rabbit.get_all_vhosts()]
self.assertIn(self.vhost_name, vhosts)
self.rabbit.set_vhost_permissions(self.vhost_name, self.user,
'.*', '.*', '.*')
# create an exchange, and verify creation.
self.rabbit.create_exchange(self.vhost_name,
self.exchange_name,
'direct')
self.assertEqual(self.exchange_name,
self.rabbit.get_exchange(self.vhost_name,
self.exchange_name)['name'])
# create a queue and verify it was created
        self.rabbit.create_queue(self.vhost_name, self.queue_name)
self.assertEqual(self.queue_name,
self.rabbit.get_queue(self.vhost_name,
self.queue_name)['name'])
# bind the queue and exchange
self.rabbit.create_binding(self.vhost_name, self.exchange_name,
self.queue_name, self.rt_key)
# publish a message, and verify by getting it back.
self.rabbit.publish(self.vhost_name, self.exchange_name, self.rt_key,
self.payload)
messages = self.rabbit.get_messages(self.vhost_name, self.queue_name)
self.assertEqual(messages[0]['payload'], self.payload)
# delete binding and verify we don't get the message
self.rabbit.delete_binding(self.vhost_name, self.exchange_name,
self.queue_name, self.rt_key)
self.rabbit.publish(self.vhost_name, self.exchange_name, self.rt_key,
self.payload)
messages = self.rabbit.get_messages(self.vhost_name, self.queue_name)
self.assertIsNone(messages)
# Clean up.
self.rabbit.delete_exchange(self.vhost_name, self.exchange_name)
self.rabbit.delete_vhost(self.vhost_name)
if __name__ == "__main__":
log = open('test_out.log', 'w')
unittest.main(testRunner=unittest.TextTestRunner(log))
|
|
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Validators for ``hw`` namespaced extra specs."""
from nova.api.validation.extra_specs import base
realtime_validators = [
base.ExtraSpecValidator(
name='hw:cpu_realtime',
description=(
'Determine whether realtime mode should be enabled for the '
'instance or not. Only supported by the libvirt driver.'
),
value={
'type': bool,
'description': 'Whether to enable realtime priority.',
},
),
base.ExtraSpecValidator(
name='hw:cpu_realtime_mask',
description=(
            'An exclusion mask of CPUs that should not be enabled for realtime.'
),
value={
'type': str,
# NOTE(stephenfin): Yes, these things *have* to start with '^'
'pattern': r'\^\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
},
),
]
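# Illustrative note (not part of the validator definitions): the mask pattern
# above accepts values such as '^0' or '^0-1,^3', i.e. a list of CPUs to
# exclude from realtime scheduling.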
hide_hypervisor_id_validator = [
base.ExtraSpecValidator(
name='hw:hide_hypervisor_id',
description=(
'Determine whether the hypervisor ID should be hidden from the '
'guest. Only supported by the libvirt driver.'
),
value={
'type': bool,
'description': 'Whether to hide the hypervisor ID.',
},
)
]
cpu_policy_validators = [
base.ExtraSpecValidator(
name='hw:cpu_policy',
description=(
'The policy to apply when determining what host CPUs the guest '
'CPUs can run on. If ``shared`` (default), guest CPUs can be '
'overallocated but cannot float across host cores. If '
'``dedicated``, guest CPUs cannot be overallocated but are '
'individually pinned to their own host core.'
),
value={
'type': str,
'description': 'The CPU policy.',
'enum': [
'dedicated',
'shared'
],
},
),
base.ExtraSpecValidator(
name='hw:cpu_thread_policy',
description=(
'The policy to apply when determining whether the destination '
'host can have hardware threads enabled or not. If ``prefer`` '
'(default), hosts with hardware threads will be preferred. If '
'``require``, hosts with hardware threads will be required. If '
'``isolate``, hosts with hardware threads will be forbidden.'
),
value={
'type': str,
'description': 'The CPU thread policy.',
'enum': [
'prefer',
'isolate',
'require',
],
},
),
base.ExtraSpecValidator(
name='hw:emulator_threads_policy',
description=(
'The policy to apply when determining whether emulator threads '
'should be offloaded to a separate isolated core or to a pool '
'of shared cores. If ``share``, emulator overhead threads will '
'be offloaded to a pool of shared cores. If ``isolate``, '
'emulator overhead threads will be offloaded to their own core.'
),
value={
'type': str,
'description': 'The emulator thread policy.',
'enum': [
'isolate',
'share',
],
},
),
]
hugepage_validators = [
base.ExtraSpecValidator(
name='hw:mem_page_size',
description=(
            'The size of memory pages to allocate to the guest. Can be one '
            'of the three aliases ``large``, ``small`` or ``any``, or an '
            'actual size. Only supported by the libvirt virt driver.'
),
value={
'type': str,
'description': 'The size of memory page to allocate',
'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)',
},
),
]
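# Illustrative note (not part of the validator definitions): the page size
# pattern above accepts the aliases as well as explicit sizes such as
# '2048', '2MB' or '1GB'.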
numa_validators = [
base.ExtraSpecValidator(
name='hw:numa_nodes',
description=(
            'The number of virtual NUMA nodes to configure the guest with. '
            'Each virtual NUMA node will be mapped to a unique host NUMA '
            'node. Only supported by the libvirt virt driver.'
),
value={
'type': int,
'description': 'The number of virtual NUMA nodes to allocate',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:numa_cpus.{id}',
description=(
'A mapping of **guest** CPUs to the **guest** NUMA node '
'identified by ``{id}``. This can be used to provide asymmetric '
'CPU-NUMA allocation and is necessary where the number of guest '
'NUMA nodes is not a factor of the number of guest CPUs.'
),
parameters=[
{
'name': 'id',
'pattern': r'\d+', # positive integers
'description': 'The ID of the **guest** NUMA node.',
},
],
value={
'type': str,
'description': (
'The guest CPUs, in the form of a CPU map, to allocate to the '
'guest NUMA node identified by ``{id}``.'
),
'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
},
),
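    # Illustrative note (not part of the validator definitions): a flavor
    # would typically set these as e.g. ``hw:numa_cpus.0=0-3`` and
    # ``hw:numa_cpus.1=4-7`` alongside matching ``hw:numa_mem.{id}`` entries.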
base.ExtraSpecValidator(
name='hw:numa_mem.{id}',
description=(
'A mapping of **guest** memory to the **guest** NUMA node '
'identified by ``{id}``. This can be used to provide asymmetric '
'memory-NUMA allocation and is necessary where the number of '
'guest NUMA nodes is not a factor of the total guest memory.'
),
parameters=[
{
'name': 'id',
'pattern': r'\d+', # positive integers
'description': 'The ID of the **guest** NUMA node.',
},
],
value={
'type': int,
'description': (
'The guest memory, in MB, to allocate to the guest NUMA node '
'identified by ``{id}``.'
),
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:pci_numa_affinity_policy',
description=(
'The NUMA affinity policy of any PCI passthrough devices or '
'SR-IOV network interfaces attached to the instance.'
),
value={
'type': str,
'description': 'The PCI NUMA affinity policy',
'enum': [
'required',
'preferred',
'legacy',
],
},
),
]
cpu_topology_validators = [
base.ExtraSpecValidator(
name='hw:cpu_sockets',
description=(
            'The number of virtual CPU sockets to emulate in the guest '
            'CPU topology.'
),
value={
'type': int,
            'description': 'A number of virtual CPU sockets',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:cpu_cores',
description=(
'The number of virtual CPU cores to emulate per socket in the '
'guest CPU topology.'
),
value={
'type': int,
'description': 'A number of virtual CPU cores',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:cpu_threads',
description=(
'The number of virtual CPU threads to emulate per core in the '
'guest CPU topology.'
),
value={
'type': int,
'description': 'A number of virtual CPU threads',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:max_cpu_sockets',
description=(
            'The max number of virtual CPU sockets to emulate in the '
'guest CPU topology. This is used to limit the topologies that '
'can be requested by an image and will be used to validate the '
'``hw_cpu_sockets`` image metadata property.'
),
value={
'type': int,
'description': 'A number of virtual CPU sockets',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:max_cpu_cores',
description=(
'The max number of virtual CPU cores to emulate per socket in the '
'guest CPU topology. This is used to limit the topologies that '
'can be requested by an image and will be used to validate the '
'``hw_cpu_cores`` image metadata property.'
),
value={
'type': int,
'description': 'A number of virtual CPU cores',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:max_cpu_threads',
description=(
'The max number of virtual CPU threads to emulate per core in the '
'guest CPU topology. This is used to limit the topologies that '
'can be requested by an image and will be used to validate the '
'``hw_cpu_threads`` image metadata property.'
),
value={
'type': int,
'description': 'A number of virtual CPU threads',
'min': 1,
},
),
]
feature_flag_validators = [
# TODO(stephenfin): Consider deprecating and moving this to the 'os:'
# namespace
base.ExtraSpecValidator(
name='hw:boot_menu',
description=(
'Whether to show a boot menu when booting the guest.'
),
value={
'type': bool,
'description': 'Whether to enable the boot menu',
},
),
base.ExtraSpecValidator(
name='hw:vif_multiqueue_enabled',
description=(
'Whether to enable the virtio-net multiqueue feature. '
'When set, the driver sets the number of queues equal to the '
'number of guest vCPUs. This makes the network performance scale '
'across a number of vCPUs. This requires guest support and is '
'only supported by the libvirt driver.'
),
value={
'type': bool,
'description': 'Whether to enable multiqueue',
},
),
base.ExtraSpecValidator(
name='hw:mem_encryption',
description=(
'Whether to enable memory encryption for the guest. Only '
'supported by the libvirt driver on hosts with AMD SEV support.'
),
value={
'type': bool,
'description': 'Whether to enable memory encryption',
},
),
base.ExtraSpecValidator(
name='hw:pmem',
description=(
'A comma-separated list of ``$LABEL``\\ s defined in config for '
'vPMEM devices.'
),
value={
'type': str,
'description': (
'A comma-separated list of valid resource class names.'
),
'pattern': '([a-zA-Z0-9_]+(,)?)+',
},
),
base.ExtraSpecValidator(
name='hw:pmu',
description=(
            'Whether to enable the Performance Monitoring Unit (PMU) for the '
'guest. Only supported by the libvirt driver.'
),
value={
'type': bool,
'description': 'Whether to enable the PMU',
},
),
base.ExtraSpecValidator(
name='hw:serial_port_count',
description=(
'The number of serial ports to allocate to the guest. Only '
'supported by the libvirt virt driver.'
),
value={
'type': int,
'min': 0,
'description': 'The number of serial ports to allocate',
},
),
base.ExtraSpecValidator(
name='hw:watchdog_action',
description=(
'The action to take when the watchdog timer is kicked. Only '
'supported by the libvirt virt driver.'
),
value={
'type': str,
'description': 'The action to take',
'enum': [
'none',
'pause',
'poweroff',
'reset',
'disabled',
],
},
),
]
def register():
return (
realtime_validators +
hide_hypervisor_id_validator +
cpu_policy_validators +
hugepage_validators +
numa_validators +
cpu_topology_validators +
feature_flag_validators
)
|
|
# Copyright 2015 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pep8
import textwrap
from magnum.hacking import checks
from magnum.tests import base
class HackingTestCase(base.TestCase):
"""Hacking test class.
This class tests the hacking checks in magnum.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method the return will be False/None if
there is no violation in the sample input. If there is an error a tuple is
returned with a position in the line, and a message. So to check the result
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_policy_enforce_decorator(self):
code = """
@some_other_decorator
@policy.enforce_wsgi("bay", "create")
def my_method():
pass
"""
self._assert_has_errors(code, checks.check_policy_enforce_decorator,
expected_errors=[(2, 0, "M301")])
def test_assert_equal_in(self):
errors = [(1, 0, "M338")]
check = checks.assert_equal_in
code = "self.assertEqual(a in b, True)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual('str' in 'string', True)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(any(a==1 for a in b), True)"
self._assert_has_no_errors(code, check)
code = "self.assertEqual(True, a in b)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(True, 'str' in 'string')"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(True, any(a==1 for a in b))"
self._assert_has_no_errors(code, check)
code = "self.assertEqual(a in b, False)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual('str' in 'string', False)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(any(a==1 for a in b), False)"
self._assert_has_no_errors(code, check)
code = "self.assertEqual(False, a in b)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(False, 'str' in 'string')"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(False, any(a==1 for a in b))"
self._assert_has_no_errors(code, check)
def test_assert_equal_none(self):
errors = [(1, 0, "M318")]
check = checks.assert_equal_none
code = "self.assertEqual(A, None)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(None, A)"
self._assert_has_errors(code, check, errors)
code = "self.assertIsNone()"
self._assert_has_no_errors(code, check)
def test_assert_equal_true_or_false(self):
errors = [(1, 0, "M323")]
check = checks.assert_equal_true_or_false
code = "self.assertEqual(True, A)"
self._assert_has_errors(code, check, errors)
code = "self.assertEqual(False, A)"
self._assert_has_errors(code, check, errors)
code = "self.assertTrue()"
self._assert_has_no_errors(code, check)
code = "self.assertFalse()"
self._assert_has_no_errors(code, check)
def test_no_mutable_default_args(self):
errors = [(1, 0, "M322")]
check = checks.no_mutable_default_args
code = "def get_info_from_bdm(virt_type, bdm, mapping=[])"
self._assert_has_errors(code, check, errors)
code = "defined = []"
self._assert_has_no_errors(code, check)
code = "defined, undefined = [], {}"
self._assert_has_no_errors(code, check)
def test_assert_is_not_none(self):
errors = [(1, 0, "M302")]
check = checks.assert_equal_not_none
code = "self.assertEqual(A is not None)"
self._assert_has_errors(code, check, errors)
code = "self.assertIsNone()"
self._assert_has_no_errors(code, check)
def test_assert_true_isinstance(self):
errors = [(1, 0, "M316")]
check = checks.assert_true_isinstance
code = "self.assertTrue(isinstance(e, exception.BuilAbortException))"
self._assert_has_errors(code, check, errors)
code = "self.assertTrue()"
self._assert_has_no_errors(code, check)
    def test_use_timeutils_utcnow(self):
errors = [(1, 0, "M310")]
check = checks.use_timeutils_utcnow
filename = "magnum/api/controller/v1/baymodel.py"
code = "datetime.now"
self._assert_has_errors(code, check, errors, filename)
code = "datetime.utcnow"
self._assert_has_errors(code, check, errors, filename)
code = "datetime.aa"
self._assert_has_no_errors(code, check, filename)
code = "aaa"
self._assert_has_no_errors(code, check, filename)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.bucket_by_sequence_length()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _element_length_fn(x, y=None):
del y
return array_ops.shape(x)[0]
def _to_sparse_tensor(record):
return sparse_tensor.SparseTensor(**record)
def _format_record(array, sparse):
if sparse:
return {
"values": array,
"indices": [[i] for i in range(len(array))],
"dense_shape": (len(array),)
}
return array
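# Illustrative example (assumed input): _format_record([5, 6, 7], sparse=True)
# returns {"values": [5, 6, 7], "indices": [[0], [1], [2]],
# "dense_shape": (3,)}, which _to_sparse_tensor() turns into a SparseTensor.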
def _get_record_type(sparse):
if sparse:
return {
"values": dtypes.int64,
"indices": dtypes.int64,
"dense_shape": dtypes.int64
}
return dtypes.int32
def _get_record_shape(sparse):
if sparse:
return {
"values": tensor_shape.TensorShape([None,]),
"indices": tensor_shape.TensorShape([None, 1]),
"dense_shape": tensor_shape.TensorShape([1,])
}
return tensor_shape.TensorShape([None])
class BucketBySequenceLengthTest(test_base.DatasetTestBase):
def testBucket(self):
boundaries = [10, 20, 30]
batch_sizes = [10, 8, 4, 2]
lengths = [8, 13, 25, 35]
def build_dataset(sparse):
def _generator():
# Produce 1 batch for each bucket
elements = []
for batch_size, length in zip(batch_sizes, lengths):
record_len = length - 1
for _ in range(batch_size):
elements.append([1] * record_len)
record_len = length
random.shuffle(elements)
for el in elements:
yield (_format_record(el, sparse),)
dataset = dataset_ops.Dataset.from_generator(
_generator,
(_get_record_type(sparse),),
(_get_record_shape(sparse),))
if sparse:
dataset = dataset.map(lambda x: (_to_sparse_tensor(x),))
return dataset
def _test_bucket_by_padding(no_padding):
dataset = build_dataset(sparse=no_padding)
dataset = dataset.apply(
grouping.bucket_by_sequence_length(
_element_length_fn,
boundaries,
batch_sizes,
no_padding=no_padding))
batch, = dataset_ops.make_one_shot_iterator(dataset).get_next()
with self.cached_session() as sess:
batches = []
for _ in range(4):
batches.append(self.evaluate(batch))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(batch)
batch_sizes_val = []
lengths_val = []
for batch in batches:
shape = batch.dense_shape if no_padding else batch.shape
batch_size = shape[0]
length = shape[1]
batch_sizes_val.append(batch_size)
lengths_val.append(length)
sum_check = batch.values.sum() if no_padding else batch.sum()
self.assertEqual(sum_check, batch_size * length - 1)
self.assertEqual(sum(batch_sizes_val), sum(batch_sizes))
self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val))
self.assertEqual(sorted(lengths), sorted(lengths_val))
for no_padding in (True, False):
_test_bucket_by_padding(no_padding)
def testPadToBoundary(self):
boundaries = [10, 20, 30]
batch_sizes = [10, 8, 4, 2]
lengths = [8, 13, 25]
def element_gen():
# Produce 1 batch for each bucket
elements = []
for batch_size, length in zip(batch_sizes[:-1], lengths):
for _ in range(batch_size):
elements.append([1] * length)
random.shuffle(elements)
for el in elements:
yield (el,)
for _ in range(batch_sizes[-1]):
el = [1] * (boundaries[-1] + 5)
yield (el,)
element_len = lambda el: array_ops.shape(el)[0]
dataset = dataset_ops.Dataset.from_generator(
element_gen, (dtypes.int64,), ([None],)).apply(
grouping.bucket_by_sequence_length(
element_len, boundaries, batch_sizes,
pad_to_bucket_boundary=True))
batch, = dataset_ops.make_one_shot_iterator(dataset).get_next()
with self.cached_session() as sess:
batches = []
for _ in range(3):
batches.append(self.evaluate(batch))
with self.assertRaisesOpError("bucket_boundaries"):
self.evaluate(batch)
batch_sizes_val = []
lengths_val = []
for batch in batches:
batch_size = batch.shape[0]
length = batch.shape[1]
batch_sizes_val.append(batch_size)
lengths_val.append(length)
batch_sizes = batch_sizes[:-1]
self.assertEqual(sum(batch_sizes_val), sum(batch_sizes))
self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val))
self.assertEqual([boundary - 1 for boundary in sorted(boundaries)],
sorted(lengths_val))
def testPadToBoundaryNoExtraneousPadding(self):
boundaries = [3, 7, 11]
batch_sizes = [2, 2, 2, 2]
lengths = range(1, 11)
def element_gen():
for length in lengths:
yield ([1] * length,)
element_len = lambda element: array_ops.shape(element)[0]
dataset = dataset_ops.Dataset.from_generator(
element_gen, (dtypes.int64,), ([None],)).apply(
grouping.bucket_by_sequence_length(
element_len, boundaries, batch_sizes,
pad_to_bucket_boundary=True))
batch, = dataset_ops.make_one_shot_iterator(dataset).get_next()
with self.cached_session() as sess:
batches = []
for _ in range(5):
batches.append(self.evaluate(batch))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(batch)
self.assertAllEqual(batches[0], [[1, 0],
[1, 1]])
self.assertAllEqual(batches[1], [[1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0]])
self.assertAllEqual(batches[2], [[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1]])
self.assertAllEqual(batches[3], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
self.assertAllEqual(batches[4], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
def testTupleElements(self):
def build_dataset(sparse):
def _generator():
text = [[1, 2, 3], [3, 4, 5, 6, 7], [1, 2], [8, 9, 0, 2, 3]]
label = [1, 2, 1, 2]
for x, y in zip(text, label):
yield (_format_record(x, sparse), y)
dataset = dataset_ops.Dataset.from_generator(
generator=_generator,
output_types=(_get_record_type(sparse), dtypes.int32),
output_shapes=(_get_record_shape(sparse),
tensor_shape.TensorShape([])))
if sparse:
dataset = dataset.map(lambda x, y: (_to_sparse_tensor(x), y))
return dataset
def _test_tuple_elements_by_padding(no_padding):
dataset = build_dataset(sparse=no_padding)
dataset = dataset.apply(grouping.bucket_by_sequence_length(
element_length_func=_element_length_fn,
bucket_batch_sizes=[2, 2, 2],
bucket_boundaries=[0, 8],
no_padding=no_padding))
shapes = dataset.output_shapes
self.assertEqual([None, None], shapes[0].as_list())
self.assertEqual([None], shapes[1].as_list())
for no_padding in (True, False):
_test_tuple_elements_by_padding(no_padding)
def testBucketSparse(self):
"""Tests bucketing of sparse tensors (case where `no_padding` == True).
Test runs on following dataset:
[
[0],
[0, 1],
[0, 1, 2]
...
[0, ..., max_len - 1]
]
Sequences are bucketed by length and batched with
`batch_size` < `bucket_size`.
"""
min_len = 0
max_len = 100
batch_size = 7
bucket_size = 10
def _build_dataset():
input_data = [range(i+1) for i in range(min_len, max_len)]
def generator_fn():
for record in input_data:
yield _format_record(record, sparse=True)
dataset = dataset_ops.Dataset.from_generator(
generator=generator_fn,
output_types=_get_record_type(sparse=True))
dataset = dataset.map(_to_sparse_tensor)
return dataset
def _compute_expected_batches():
"""Computes expected batch outputs and stores in a set."""
all_expected_sparse_tensors = set()
for bucket_start_len in range(min_len, max_len, bucket_size):
for batch_offset in range(0, bucket_size, batch_size):
batch_start_len = bucket_start_len + batch_offset
batch_end_len = min(batch_start_len + batch_size,
bucket_start_len + bucket_size)
expected_indices = []
expected_values = []
for length in range(batch_start_len, batch_end_len):
for val in range(length + 1):
expected_indices.append((length - batch_start_len, val))
expected_values.append(val)
expected_sprs_tensor = (tuple(expected_indices),
tuple(expected_values))
all_expected_sparse_tensors.add(expected_sprs_tensor)
return all_expected_sparse_tensors
def _compute_batches(dataset):
"""Computes actual batch outputs of dataset and stores in a set."""
batch = dataset_ops.make_one_shot_iterator(dataset).get_next()
all_sparse_tensors = set()
with self.cached_session() as sess:
with self.assertRaises(errors.OutOfRangeError):
while True:
output = self.evaluate(batch)
sprs_tensor = (tuple([tuple(idx) for idx in output.indices]),
tuple(output.values))
all_sparse_tensors.add(sprs_tensor)
return all_sparse_tensors
dataset = _build_dataset()
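# With min_len = 0, max_len = 100 and bucket_size = 10, the boundaries
# computed below are [11, 21, ..., 91], so each bucket covers bucket_size
# consecutive sequence lengths and is batched with batch_size = 7 < bucket_size.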
boundaries = range(min_len + bucket_size + 1, max_len, bucket_size)
dataset = dataset.apply(grouping.bucket_by_sequence_length(
_element_length_fn,
boundaries,
[batch_size] * (len(boundaries) + 1),
no_padding=True))
batches = _compute_batches(dataset)
expected_batches = _compute_expected_batches()
self.assertEqual(batches, expected_batches)
if __name__ == "__main__":
test.main()
|
|
"""HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts).
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- send server version
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
- are request names really case sensitive?
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
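#
# An illustrative log line in that format (hypothetical values):
#
# | host.example.com - - [08/Mar/1995:12:34:56] "GET /index.html HTTP/1.0" 200 1234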
__version__ = "0.2"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
import mimetools
import SocketServer
# Default error message
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
class HTTPServer(SocketServer.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0". <path> is encoded
using the URL encoding scheme (using %xx to signify the ASCII
character with hex code xx).
The protocol is vague about whether lines are separated by LF
characters or by CRLF pairs -- for compatibility with the widest
range of clients, both should be accepted. Similarly, whitespace
in the request line should be treated sensibly (allowing multiple
spaces between components and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.0 protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version (always "HTTP/1.0"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return value is 1 for success, 0 for failure; on failure, an
error is sent back.
"""
self.request_version = version = "HTTP/0.9" # Default
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%s)" % `version`)
return 0
elif len(words) == 2:
[command, path] = words
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%s)" % `command`)
return 0
else:
self.send_error(400, "Bad request syntax (%s)" % `requestline`)
return 0
self.command, self.path, self.request_version = command, path, version
self.headers = self.MessageClass(self.rfile, 0)
return 1
def handle(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%s)" % `self.command`)
return
method = getattr(self, mname)
method()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if not message:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
self.send_response(code, message)
self.end_headers()
self.wfile.write(self.error_message_format %
{'code': code,
'message': message,
'explain': explain})
error_message_format = DEFAULT_ERROR_MESSAGE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if self.responses.has_key(code):
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %s %s\r\n" %
(self.protocol_version, str(code), message))
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
apply(self.log_message, args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self):
"""Return the current date and time formatted for a message header."""
now = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Don't override unless you know what you're doing (hint: incoming
# requests are required to have exactly this version string).
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
responses = {
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Partial information', 'Request fulfilled from cache'),
204: ('No response', 'Request fulfilled, nothing follows'),
301: ('Moved', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('Method', 'Object moved -- see Method and URL list'),
304: ('Not modified',
'Document has not changed since given time'),
400: ('Bad request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not found', 'Nothing matches the given URI'),
500: ('Internal error', 'Server got itself in trouble'),
501: ('Not implemented',
'Server does not support this operation'),
502: ('Service temporarily overloaded',
'The server cannot process the request due to a high load'),
503: ('Gateway timeout',
'The gateway server did not receive a timely response'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
test()
|
|
from sqlalchemy.test.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy.orm import scoped_session
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.test.schema import Table, Column
from sqlalchemy.orm import mapper, relation, query
from sqlalchemy.test.testing import eq_
from test.orm import _base
class _ScopedTest(_base.MappedTest):
"""Adds another lookup bucket to emulate Session globals."""
run_setup_mappers = 'once'
_artifact_registries = (
_base.MappedTest._artifact_registries + ('scoping',))
@classmethod
def setup_class(cls):
cls.scoping = _base.adict()
super(_ScopedTest, cls).setup_class()
@classmethod
def teardown_class(cls):
cls.scoping.clear()
super(_ScopedTest, cls).teardown_class()
class ScopedSessionTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('table1', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)))
Table('table2', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('someid', None, ForeignKey('table1.id')))
@testing.resolve_artifact_names
def test_basic(self):
Session = scoped_session(sa.orm.sessionmaker())
class CustomQuery(query.Query):
pass
class SomeObject(_base.ComparableEntity):
query = Session.query_property()
class SomeOtherObject(_base.ComparableEntity):
query = Session.query_property()
custom_query = Session.query_property(query_cls=CustomQuery)
mapper(SomeObject, table1, properties={
'options':relation(SomeOtherObject)})
mapper(SomeOtherObject, table2)
s = SomeObject(id=1, data="hello")
sso = SomeOtherObject()
s.options.append(sso)
Session.add(s)
Session.commit()
Session.refresh(sso)
Session.remove()
eq_(SomeObject(id=1, data="hello", options=[SomeOtherObject(someid=1)]),
Session.query(SomeObject).one())
eq_(SomeObject(id=1, data="hello", options=[SomeOtherObject(someid=1)]),
SomeObject.query.one())
eq_(SomeOtherObject(someid=1),
SomeOtherObject.query.filter(
SomeOtherObject.someid == sso.someid).one())
assert isinstance(SomeOtherObject.query, query.Query)
assert not isinstance(SomeOtherObject.query, CustomQuery)
assert isinstance(SomeOtherObject.custom_query, query.Query)
class ScopedMapperTest(_ScopedTest):
@classmethod
def define_tables(cls, metadata):
Table('table1', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)))
Table('table2', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('someid', None, ForeignKey('table1.id')))
@classmethod
def setup_classes(cls):
class SomeObject(_base.ComparableEntity):
pass
class SomeOtherObject(_base.ComparableEntity):
pass
@classmethod
@testing.uses_deprecated()
@testing.resolve_artifact_names
def setup_mappers(cls):
Session = scoped_session(sa.orm.create_session)
Session.mapper(SomeObject, table1, properties={
'options':relation(SomeOtherObject)
})
Session.mapper(SomeOtherObject, table2)
cls.scoping['Session'] = Session
@classmethod
@testing.resolve_artifact_names
def insert_data(cls):
s = SomeObject()
s.id = 1
s.data = 'hello'
sso = SomeOtherObject()
s.options.append(sso)
Session.flush()
Session.expunge_all()
@testing.resolve_artifact_names
def test_query(self):
sso = SomeOtherObject.query().first()
assert SomeObject.query.filter_by(id=1).one().options[0].id == sso.id
@testing.uses_deprecated()
@testing.resolve_artifact_names
def test_query_compiles(self):
class Foo(object):
pass
Session.mapper(Foo, table2)
assert hasattr(Foo, 'query')
ext = sa.orm.MapperExtension()
class Bar(object):
pass
Session.mapper(Bar, table2, extension=[ext])
assert hasattr(Bar, 'query')
class Baz(object):
pass
Session.mapper(Baz, table2, extension=ext)
assert hasattr(Baz, 'query')
@testing.uses_deprecated()
@testing.resolve_artifact_names
def test_default_constructor_state_not_shared(self):
scope = scoped_session(sa.orm.sessionmaker())
class A(object):
pass
class B(object):
def __init__(self):
pass
scope.mapper(A, table1)
scope.mapper(B, table2)
A(foo='bar')
assert_raises(TypeError, B, foo='bar')
scope = scoped_session(sa.orm.sessionmaker())
class C(object):
def __init__(self):
pass
class D(object):
pass
scope.mapper(C, table1)
scope.mapper(D, table2)
assert_raises(TypeError, C, foo='bar')
D(foo='bar')
@testing.uses_deprecated()
@testing.resolve_artifact_names
def test_validating_constructor(self):
s2 = SomeObject(someid=12)
s3 = SomeOtherObject(someid=123, bogus=345)
class ValidatedOtherObject(object): pass
Session.mapper(ValidatedOtherObject, table2, validate=True)
v1 = ValidatedOtherObject(someid=12)
assert_raises(sa.exc.ArgumentError, ValidatedOtherObject,
someid=12, bogus=345)
@testing.uses_deprecated()
@testing.resolve_artifact_names
def test_dont_clobber_methods(self):
class MyClass(object):
def expunge(self):
return "an expunge !"
Session.mapper(MyClass, table2)
assert MyClass().expunge() == "an expunge !"
class ScopedMapperTest2(_ScopedTest):
@classmethod
def define_tables(cls, metadata):
Table('table1', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)),
Column('type', String(30)))
Table('table2', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('someid', None, ForeignKey('table1.id')),
Column('somedata', String(30)))
@classmethod
def setup_classes(cls):
class BaseClass(_base.ComparableEntity):
pass
class SubClass(BaseClass):
pass
@classmethod
@testing.uses_deprecated()
@testing.resolve_artifact_names
def setup_mappers(cls):
Session = scoped_session(sa.orm.sessionmaker())
Session.mapper(BaseClass, table1,
polymorphic_identity='base',
polymorphic_on=table1.c.type)
Session.mapper(SubClass, table2,
polymorphic_identity='sub',
inherits=BaseClass)
cls.scoping['Session'] = Session
@testing.resolve_artifact_names
def test_inheritance(self):
def expunge_list(l):
for x in l:
Session.expunge(x)
return l
b = BaseClass(data='b1')
s = SubClass(data='s1', somedata='somedata')
Session.commit()
Session.expunge_all()
eq_(expunge_list([BaseClass(data='b1'),
SubClass(data='s1', somedata='somedata')]),
BaseClass.query.all())
eq_(expunge_list([SubClass(data='s1', somedata='somedata')]),
SubClass.query.all())
|
|
"""Alexa capabilities."""
import logging
from typing import List, Optional
from homeassistant.components import (
cover,
fan,
image_processing,
input_number,
light,
timer,
vacuum,
)
from homeassistant.components.alarm_control_panel import ATTR_CODE_FORMAT, FORMAT_NUMBER
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
import homeassistant.components.climate.const as climate
import homeassistant.components.media_player.const as media_player
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
from homeassistant.core import State
import homeassistant.util.color as color_util
import homeassistant.util.dt as dt_util
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
Inputs,
)
from .errors import UnsupportedProperty
from .resources import (
AlexaCapabilityResource,
AlexaGlobalCatalog,
AlexaModeResource,
AlexaPresetResource,
AlexaSemantics,
)
_LOGGER = logging.getLogger(__name__)
class AlexaCapability:
"""Base class for Alexa capability interfaces.
The Smart Home Skills API defines a number of "capability interfaces",
roughly analogous to domains in Home Assistant. The supported interfaces
describe what actions can be performed on a particular device.
https://developer.amazon.com/docs/device-apis/message-guide.html
"""
supported_locales = {"en-US"}
def __init__(self, entity: State, instance: Optional[str] = None):
"""Initialize an Alexa capability."""
self.entity = entity
self.instance = instance
def name(self) -> str:
"""Return the Alexa API name of this interface."""
raise NotImplementedError
@staticmethod
def properties_supported() -> List[dict]:
"""Return what properties this entity supports."""
return []
@staticmethod
def properties_proactively_reported() -> bool:
"""Return True if properties asynchronously reported."""
return False
@staticmethod
def properties_retrievable() -> bool:
"""Return True if properties can be retrieved."""
return False
@staticmethod
def properties_non_controllable() -> bool:
"""Return True if non controllable."""
return None
@staticmethod
def get_property(name):
"""Read and return a property.
Return value should be a dict, or raise UnsupportedProperty.
Properties can also have a timeOfSample and uncertaintyInMilliseconds,
but returning that metadata is not yet implemented.
"""
raise UnsupportedProperty(name)
@staticmethod
def supports_deactivation():
"""Applicable only to scenes."""
return None
@staticmethod
def capability_proactively_reported():
"""Return True if the capability is proactively reported.
Set properties_proactively_reported() for proactively reported properties.
Applicable to DoorbellEventSource.
"""
return None
@staticmethod
def capability_resources():
"""Return the capability object.
Applicable to ToggleController, RangeController, and ModeController interfaces.
"""
return []
@staticmethod
def configuration():
"""Return the configuration object.
Applicable to the ThermostatController, SecurityControlPanel, ModeController, RangeController,
and EventDetectionSensor.
"""
return []
@staticmethod
def configurations():
"""Return the configurations object.
The plural configurations object is different from the singular configuration object.
Applicable to EqualizerController interface.
"""
return []
@staticmethod
def inputs():
"""Applicable only to media players."""
return []
@staticmethod
def semantics():
"""Return the semantics object.
Applicable to ToggleController, RangeController, and ModeController interfaces.
"""
return []
@staticmethod
def supported_operations():
"""Return the supportedOperations object."""
return []
@staticmethod
def camera_stream_configurations():
"""Applicable only to CameraStreamController."""
return None
def serialize_discovery(self):
"""Serialize according to the Discovery API."""
result = {"type": "AlexaInterface", "interface": self.name(), "version": "3"}
instance = self.instance
if instance is not None:
result["instance"] = instance
properties_supported = self.properties_supported()
if properties_supported:
result["properties"] = {
"supported": self.properties_supported(),
"proactivelyReported": self.properties_proactively_reported(),
"retrievable": self.properties_retrievable(),
}
proactively_reported = self.capability_proactively_reported()
if proactively_reported is not None:
result["proactivelyReported"] = proactively_reported
non_controllable = self.properties_non_controllable()
if non_controllable is not None:
result["properties"]["nonControllable"] = non_controllable
supports_deactivation = self.supports_deactivation()
if supports_deactivation is not None:
result["supportsDeactivation"] = supports_deactivation
capability_resources = self.capability_resources()
if capability_resources:
result["capabilityResources"] = capability_resources
configuration = self.configuration()
if configuration:
result["configuration"] = configuration
# The plural configurations object is different than the singular configuration object above.
configurations = self.configurations()
if configurations:
result["configurations"] = configurations
semantics = self.semantics()
if semantics:
result["semantics"] = semantics
supported_operations = self.supported_operations()
if supported_operations:
result["supportedOperations"] = supported_operations
inputs = self.inputs()
if inputs:
result["inputs"] = inputs
camera_stream_configurations = self.camera_stream_configurations()
if camera_stream_configurations:
result["cameraStreamConfigurations"] = camera_stream_configurations
return result
def serialize_properties(self):
"""Return properties serialized for an API response."""
for prop in self.properties_supported():
prop_name = prop["name"]
try:
prop_value = self.get_property(prop_name)
except UnsupportedProperty:
raise
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Unexpected error getting %s.%s property from %s",
self.name(),
prop_name,
self.entity,
)
prop_value = None
if prop_value is None:
continue
result = {
"name": prop_name,
"namespace": self.name(),
"value": prop_value,
"timeOfSample": dt_util.utcnow().strftime(DATE_FORMAT),
"uncertaintyInMilliseconds": 0,
}
instance = self.instance
if instance is not None:
result["instance"] = instance
yield result
class Alexa(AlexaCapability):
"""Implements Alexa Interface.
Although endpoints implement this interface implicitly,
the API suggests you should explicitly include this interface.
https://developer.amazon.com/docs/device-apis/alexa-interface.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa"
class AlexaEndpointHealth(AlexaCapability):
"""Implements Alexa.EndpointHealth.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.EndpointHealth"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "connectivity"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "connectivity":
raise UnsupportedProperty(name)
if self.entity.state == STATE_UNAVAILABLE:
return {"value": "UNREACHABLE"}
return {"value": "OK"}
class AlexaPowerController(AlexaCapability):
"""Implements Alexa.PowerController.
https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PowerController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "powerState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "powerState":
raise UnsupportedProperty(name)
if self.entity.domain == climate.DOMAIN:
is_on = self.entity.state != climate.HVAC_MODE_OFF
elif self.entity.domain == vacuum.DOMAIN:
is_on = self.entity.state == vacuum.STATE_CLEANING
elif self.entity.domain == timer.DOMAIN:
is_on = self.entity.state != STATE_IDLE
else:
is_on = self.entity.state != STATE_OFF
return "ON" if is_on else "OFF"
class AlexaLockController(AlexaCapability):
"""Implements Alexa.LockController.
https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"es-US",
"fr-CA",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.LockController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "lockState"}]
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "lockState":
raise UnsupportedProperty(name)
if self.entity.state == STATE_LOCKED:
return "LOCKED"
if self.entity.state == STATE_UNLOCKED:
return "UNLOCKED"
return "JAMMED"
class AlexaSceneController(AlexaCapability):
"""Implements Alexa.SceneController.
https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, supports_deactivation):
"""Initialize the entity."""
super().__init__(entity)
self.supports_deactivation = lambda: supports_deactivation
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.SceneController"
class AlexaBrightnessController(AlexaCapability):
"""Implements Alexa.BrightnessController.
https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.BrightnessController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "brightness"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "brightness":
raise UnsupportedProperty(name)
if "brightness" in self.entity.attributes:
return round(self.entity.attributes["brightness"] / 255.0 * 100)
return 0
class AlexaColorController(AlexaCapability):
"""Implements Alexa.ColorController.
https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ColorController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "color"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "color":
raise UnsupportedProperty(name)
hue, saturation = self.entity.attributes.get(light.ATTR_HS_COLOR, (0, 0))
return {
"hue": hue,
"saturation": saturation / 100.0,
"brightness": self.entity.attributes.get(light.ATTR_BRIGHTNESS, 0) / 255.0,
}
class AlexaColorTemperatureController(AlexaCapability):
"""Implements Alexa.ColorTemperatureController.
https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ColorTemperatureController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "colorTemperatureInKelvin"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "colorTemperatureInKelvin":
raise UnsupportedProperty(name)
if "color_temp" in self.entity.attributes:
return color_util.color_temperature_mired_to_kelvin(
self.entity.attributes["color_temp"]
)
return None
class AlexaPercentageController(AlexaCapability):
"""Implements Alexa.PercentageController.
https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PercentageController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "percentage"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "percentage":
raise UnsupportedProperty(name)
if self.entity.domain == fan.DOMAIN:
speed = self.entity.attributes.get(fan.ATTR_SPEED)
return PERCENTAGE_FAN_MAP.get(speed, 0)
if self.entity.domain == cover.DOMAIN:
return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
return 0
class AlexaSpeaker(AlexaCapability):
"""Implements Alexa.Speaker.
https://developer.amazon.com/docs/device-apis/alexa-speaker.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.Speaker"
def properties_supported(self):
"""Return what properties this entity supports."""
properties = [{"name": "volume"}]
supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if supported & media_player.SUPPORT_VOLUME_MUTE:
properties.append({"name": "muted"})
return properties
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name == "volume":
current_level = self.entity.attributes.get(
media_player.ATTR_MEDIA_VOLUME_LEVEL
)
if current_level is not None:
return round(float(current_level) * 100)
if name == "muted":
return bool(
self.entity.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
)
return None
class AlexaStepSpeaker(AlexaCapability):
"""Implements Alexa.StepSpeaker.
https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"it-IT",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.StepSpeaker"
class AlexaPlaybackController(AlexaCapability):
"""Implements Alexa.PlaybackController.
https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US", "fr-FR"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PlaybackController"
def supported_operations(self):
"""Return the supportedOperations object.
Supported Operations: FastForward, Next, Pause, Play, Previous, Rewind, StartOver, Stop
"""
supported_features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
operations = {
media_player.SUPPORT_NEXT_TRACK: "Next",
media_player.SUPPORT_PAUSE: "Pause",
media_player.SUPPORT_PLAY: "Play",
media_player.SUPPORT_PREVIOUS_TRACK: "Previous",
media_player.SUPPORT_STOP: "Stop",
}
return [
value
for operation, value in operations.items()
if operation & supported_features
]
class AlexaInputController(AlexaCapability):
"""Implements Alexa.InputController.
https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.InputController"
def inputs(self):
"""Return the list of valid supported inputs."""
source_list = self.entity.attributes.get(
media_player.ATTR_INPUT_SOURCE_LIST, []
)
return AlexaInputController.get_valid_inputs(source_list)
@staticmethod
def get_valid_inputs(source_list):
"""Return list of supported inputs."""
input_list = []
for source in source_list:
formatted_source = (
source.lower().replace("-", "").replace("_", "").replace(" ", "")
)
if formatted_source in Inputs.VALID_SOURCE_NAME_MAP:
input_list.append(
{"name": Inputs.VALID_SOURCE_NAME_MAP[formatted_source]}
)
return input_list
class AlexaTemperatureSensor(AlexaCapability):
"""Implements Alexa.TemperatureSensor.
https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.TemperatureSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "temperature"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "temperature":
raise UnsupportedProperty(name)
unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
temp = self.entity.state
if self.entity.domain == climate.DOMAIN:
unit = self.hass.config.units.temperature_unit
temp = self.entity.attributes.get(climate.ATTR_CURRENT_TEMPERATURE)
if temp in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
return None
try:
temp = float(temp)
except ValueError:
_LOGGER.warning("Invalid temp value %s for %s", temp, self.entity.entity_id)
return None
return {"value": temp, "scale": API_TEMP_UNITS[unit]}
class AlexaContactSensor(AlexaCapability):
"""Implements Alexa.ContactSensor.
The Alexa.ContactSensor interface describes the properties and events used
to report the state of an endpoint that detects contact between two
surfaces. For example, a contact sensor can report whether a door or window
is open.
https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-IN",
"en-US",
"es-ES",
"it-IT",
"ja-JP",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ContactSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "detectionState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "detectionState":
raise UnsupportedProperty(name)
if self.entity.state == STATE_ON:
return "DETECTED"
return "NOT_DETECTED"
class AlexaMotionSensor(AlexaCapability):
"""Implements Alexa.MotionSensor.
https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-IN",
"en-US",
"es-ES",
"it-IT",
"ja-JP",
"pt-BR",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.MotionSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "detectionState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "detectionState":
raise UnsupportedProperty(name)
if self.entity.state == STATE_ON:
return "DETECTED"
return "NOT_DETECTED"
class AlexaThermostatController(AlexaCapability):
"""Implements Alexa.ThermostatController.
https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
"pt-BR",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ThermostatController"
def properties_supported(self):
"""Return what properties this entity supports."""
properties = [{"name": "thermostatMode"}]
supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if supported & climate.SUPPORT_TARGET_TEMPERATURE:
properties.append({"name": "targetSetpoint"})
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
properties.append({"name": "lowerSetpoint"})
properties.append({"name": "upperSetpoint"})
return properties
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if self.entity.state == STATE_UNAVAILABLE:
return None
if name == "thermostatMode":
preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)
if preset in API_THERMOSTAT_PRESETS:
mode = API_THERMOSTAT_PRESETS[preset]
else:
mode = API_THERMOSTAT_MODES.get(self.entity.state)
if mode is None:
_LOGGER.error(
"%s (%s) has unsupported state value '%s'",
self.entity.entity_id,
type(self.entity),
self.entity.state,
)
raise UnsupportedProperty(name)
return mode
unit = self.hass.config.units.temperature_unit
if name == "targetSetpoint":
temp = self.entity.attributes.get(ATTR_TEMPERATURE)
elif name == "lowerSetpoint":
temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_LOW)
elif name == "upperSetpoint":
temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_HIGH)
else:
raise UnsupportedProperty(name)
if temp is None:
return None
try:
temp = float(temp)
except ValueError:
_LOGGER.warning(
"Invalid temp value %s for %s in %s", temp, name, self.entity.entity_id
)
return None
return {"value": temp, "scale": API_TEMP_UNITS[unit]}
def configuration(self):
"""Return configuration object.
Translates climate HVAC_MODES and PRESETS to supported Alexa ThermostatMode Values.
ThermostatMode Value must be AUTO, COOL, HEAT, ECO, OFF, or CUSTOM.
"""
supported_modes = []
hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES)
for mode in hvac_modes:
thermostat_mode = API_THERMOSTAT_MODES.get(mode)
if thermostat_mode:
supported_modes.append(thermostat_mode)
preset_modes = self.entity.attributes.get(climate.ATTR_PRESET_MODES)
if preset_modes:
for mode in preset_modes:
thermostat_mode = API_THERMOSTAT_PRESETS.get(mode)
if thermostat_mode:
supported_modes.append(thermostat_mode)
# Return False for supportsScheduling until supported with event listener in handler.
configuration = {"supportsScheduling": False}
if supported_modes:
configuration["supportedModes"] = supported_modes
return configuration
class AlexaPowerLevelController(AlexaCapability):
"""Implements Alexa.PowerLevelController.
https://developer.amazon.com/docs/device-apis/alexa-powerlevelcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PowerLevelController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "powerLevel"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "powerLevel":
raise UnsupportedProperty(name)
if self.entity.domain == fan.DOMAIN:
speed = self.entity.attributes.get(fan.ATTR_SPEED)
return PERCENTAGE_FAN_MAP.get(speed)
return None
class AlexaSecurityPanelController(AlexaCapability):
"""Implements Alexa.SecurityPanelController.
https://developer.amazon.com/docs/device-apis/alexa-securitypanelcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"es-US",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
"pt-BR",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.SecurityPanelController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "armState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "armState":
raise UnsupportedProperty(name)
arm_state = self.entity.state
if arm_state == STATE_ALARM_ARMED_HOME:
return "ARMED_STAY"
if arm_state == STATE_ALARM_ARMED_AWAY:
return "ARMED_AWAY"
if arm_state == STATE_ALARM_ARMED_NIGHT:
return "ARMED_NIGHT"
if arm_state == STATE_ALARM_ARMED_CUSTOM_BYPASS:
return "ARMED_STAY"
return "DISARMED"
def configuration(self):
"""Return configuration object with supported authorization types."""
code_format = self.entity.attributes.get(ATTR_CODE_FORMAT)
supported = self.entity.attributes[ATTR_SUPPORTED_FEATURES]
configuration = {}
supported_arm_states = [{"value": "DISARMED"}]
if supported & SUPPORT_ALARM_ARM_AWAY:
supported_arm_states.append({"value": "ARMED_AWAY"})
if supported & SUPPORT_ALARM_ARM_HOME:
supported_arm_states.append({"value": "ARMED_STAY"})
if supported & SUPPORT_ALARM_ARM_NIGHT:
supported_arm_states.append({"value": "ARMED_NIGHT"})
configuration["supportedArmStates"] = supported_arm_states
if code_format == FORMAT_NUMBER:
configuration["supportedAuthorizationTypes"] = [{"type": "FOUR_DIGIT_PIN"}]
return configuration
class AlexaModeController(AlexaCapability):
"""Implements Alexa.ModeController.
The instance property must be unique across the ModeController, RangeController, and ToggleController interfaces within the same device.
The instance property should be the device domain and a single word joined by a period, e.g. fan.speed or fan.direction.
The instance property must not contain words from other instance property strings within the same device;
e.g. using both cover.position and cover.tilt_position on one device will cause the Alexa.Discovery directive to fail.
An instance property string value may be reused across different devices.
https://developer.amazon.com/docs/device-apis/alexa-modecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ModeController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "mode"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "mode":
raise UnsupportedProperty(name)
# Fan Direction
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
mode = self.entity.attributes.get(fan.ATTR_DIRECTION, None)
if mode in (fan.DIRECTION_FORWARD, fan.DIRECTION_REVERSE, STATE_UNKNOWN):
return f"{fan.ATTR_DIRECTION}.{mode}"
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
# Return state instead of position when using ModeController.
mode = self.entity.state
if mode in (
cover.STATE_OPEN,
cover.STATE_OPENING,
cover.STATE_CLOSED,
cover.STATE_CLOSING,
STATE_UNKNOWN,
):
return f"{cover.ATTR_POSITION}.{mode}"
return None
def configuration(self):
"""Return configuration with modeResources."""
if isinstance(self._resource, AlexaCapabilityResource):
return self._resource.serialize_configuration()
return None
def capability_resources(self):
"""Return capabilityResources object."""
# Fan Direction Resource
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
self._resource = AlexaModeResource(
[AlexaGlobalCatalog.SETTING_DIRECTION], False
)
self._resource.add_mode(
f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_FORWARD}", [fan.DIRECTION_FORWARD]
)
self._resource.add_mode(
f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_REVERSE}", [fan.DIRECTION_REVERSE]
)
return self._resource.serialize_capability_resources()
# Cover Position Resources
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
self._resource = AlexaModeResource(
["Position", AlexaGlobalCatalog.SETTING_OPENING], False
)
self._resource.add_mode(
f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}",
[AlexaGlobalCatalog.VALUE_OPEN],
)
self._resource.add_mode(
f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}",
[AlexaGlobalCatalog.VALUE_CLOSE],
)
self._resource.add_mode(
f"{cover.ATTR_POSITION}.custom",
["Custom", AlexaGlobalCatalog.SETTING_PRESET],
)
return self._resource.serialize_capability_resources()
return None
def semantics(self):
"""Build and return semantics object."""
supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
lower_labels = [AlexaSemantics.ACTION_LOWER]
raise_labels = [AlexaSemantics.ACTION_RAISE]
self._semantics = AlexaSemantics()
# Add open/close semantics if tilt is not supported.
if not supported & cover.SUPPORT_SET_TILT_POSITION:
lower_labels.append(AlexaSemantics.ACTION_CLOSE)
raise_labels.append(AlexaSemantics.ACTION_OPEN)
self._semantics.add_states_to_value(
[AlexaSemantics.STATES_CLOSED],
f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}",
)
self._semantics.add_states_to_value(
[AlexaSemantics.STATES_OPEN],
f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}",
)
self._semantics.add_action_to_directive(
lower_labels,
"SetMode",
{"mode": f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}"},
)
self._semantics.add_action_to_directive(
raise_labels,
"SetMode",
{"mode": f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}"},
)
return self._semantics.serialize_semantics()
return None
class AlexaRangeController(AlexaCapability):
"""Implements Alexa.RangeController.
The instance property must be unique across ModeController, RangeController, ToggleController within the same device.
The instance property should be a concatenated string of the device domain, a period, and a single word.
e.g. fan.speed & fan.direction.
The instance property must not contain words from other instance property strings within the same device.
e.g. Instance property cover.position & cover.tilt_position will cause the Alexa.Discovery directive to fail.
An instance property string value may be reused for different devices.
https://developer.amazon.com/docs/device-apis/alexa-rangecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.RangeController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "rangeValue"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "rangeValue":
raise UnsupportedProperty(name)
# Return None for unavailable and unknown states.
# Allows the Alexa.EndpointHealth Interface to handle the unavailable state in a stateReport.
if self.entity.state in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
return None
# Fan Speed
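# e.g. a fan whose speed_list is ["off", "low", "medium", "high"] and whose
# current speed is "medium" reports rangeValue 2 (its index in speed_list).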
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
speed_list = self.entity.attributes.get(fan.ATTR_SPEED_LIST)
speed = self.entity.attributes.get(fan.ATTR_SPEED)
if speed_list is not None and speed is not None:
speed_index = next(
(i for i, v in enumerate(speed_list) if v == speed), None
)
return speed_index
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION)
# Cover Tilt
if self.instance == f"{cover.DOMAIN}.tilt":
return self.entity.attributes.get(cover.ATTR_CURRENT_TILT_POSITION)
# Input Number Value
if self.instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
return float(self.entity.state)
# Vacuum Fan Speed
if self.instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
speed_list = self.entity.attributes.get(vacuum.ATTR_FAN_SPEED_LIST)
speed = self.entity.attributes.get(vacuum.ATTR_FAN_SPEED)
if speed_list is not None and speed is not None:
speed_index = next(
(i for i, v in enumerate(speed_list) if v == speed), None
)
return speed_index
return None
def configuration(self):
"""Return configuration with presetResources."""
if isinstance(self._resource, AlexaCapabilityResource):
return self._resource.serialize_configuration()
return None
def capability_resources(self):
"""Return capabilityResources object."""
# Fan Speed Resources
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
speed_list = self.entity.attributes[fan.ATTR_SPEED_LIST]
max_value = len(speed_list) - 1
self._resource = AlexaPresetResource(
labels=[AlexaGlobalCatalog.SETTING_FAN_SPEED],
min_value=0,
max_value=max_value,
precision=1,
)
for index, speed in enumerate(speed_list):
labels = []
if isinstance(speed, str):
labels.append(speed.replace("_", " "))
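# Index 0 is conventionally the "off" entry of speed_list, so index 1 is
# labeled as the minimum selectable speed and the last index as the maximum.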
if index == 1:
labels.append(AlexaGlobalCatalog.VALUE_MINIMUM)
if index == max_value:
labels.append(AlexaGlobalCatalog.VALUE_MAXIMUM)
if len(labels) > 0:
self._resource.add_preset(value=index, labels=labels)
return self._resource.serialize_capability_resources()
# Cover Position Resources
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
self._resource = AlexaPresetResource(
["Position", AlexaGlobalCatalog.SETTING_OPENING],
min_value=0,
max_value=100,
precision=1,
unit=AlexaGlobalCatalog.UNIT_PERCENT,
)
return self._resource.serialize_capability_resources()
# Cover Tilt Resources
if self.instance == f"{cover.DOMAIN}.tilt":
self._resource = AlexaPresetResource(
["Tilt", "Angle", AlexaGlobalCatalog.SETTING_DIRECTION],
min_value=0,
max_value=100,
precision=1,
unit=AlexaGlobalCatalog.UNIT_PERCENT,
)
return self._resource.serialize_capability_resources()
# Input Number Value
if self.instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
min_value = float(self.entity.attributes[input_number.ATTR_MIN])
max_value = float(self.entity.attributes[input_number.ATTR_MAX])
precision = float(self.entity.attributes.get(input_number.ATTR_STEP, 1))
unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
self._resource = AlexaPresetResource(
["Value", AlexaGlobalCatalog.SETTING_PRESET],
min_value=min_value,
max_value=max_value,
precision=precision,
unit=unit,
)
self._resource.add_preset(
value=min_value, labels=[AlexaGlobalCatalog.VALUE_MINIMUM]
)
self._resource.add_preset(
value=max_value, labels=[AlexaGlobalCatalog.VALUE_MAXIMUM]
)
return self._resource.serialize_capability_resources()
# Vacuum Fan Speed Resources
if self.instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
speed_list = self.entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
max_value = len(speed_list) - 1
self._resource = AlexaPresetResource(
labels=[AlexaGlobalCatalog.SETTING_FAN_SPEED],
min_value=0,
max_value=max_value,
precision=1,
)
for index, speed in enumerate(speed_list):
labels = [speed.replace("_", " ")]
if index == 1:
labels.append(AlexaGlobalCatalog.VALUE_MINIMUM)
if index == max_value:
labels.append(AlexaGlobalCatalog.VALUE_MAXIMUM)
self._resource.add_preset(value=index, labels=labels)
return self._resource.serialize_capability_resources()
return None
def semantics(self):
"""Build and return semantics object."""
supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
lower_labels = [AlexaSemantics.ACTION_LOWER]
raise_labels = [AlexaSemantics.ACTION_RAISE]
self._semantics = AlexaSemantics()
# Add open/close semantics if tilt is not supported.
if not supported & cover.SUPPORT_SET_TILT_POSITION:
lower_labels.append(AlexaSemantics.ACTION_CLOSE)
raise_labels.append(AlexaSemantics.ACTION_OPEN)
self._semantics.add_states_to_value(
[AlexaSemantics.STATES_CLOSED], value=0
)
self._semantics.add_states_to_range(
[AlexaSemantics.STATES_OPEN], min_value=1, max_value=100
)
self._semantics.add_action_to_directive(
lower_labels, "SetRangeValue", {"rangeValue": 0}
)
self._semantics.add_action_to_directive(
raise_labels, "SetRangeValue", {"rangeValue": 100}
)
return self._semantics.serialize_semantics()
# Cover Tilt
if self.instance == f"{cover.DOMAIN}.tilt":
self._semantics = AlexaSemantics()
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_CLOSE], "SetRangeValue", {"rangeValue": 0}
)
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_OPEN], "SetRangeValue", {"rangeValue": 100}
)
self._semantics.add_states_to_value([AlexaSemantics.STATES_CLOSED], value=0)
self._semantics.add_states_to_range(
[AlexaSemantics.STATES_OPEN], min_value=1, max_value=100
)
return self._semantics.serialize_semantics()
return None
class AlexaToggleController(AlexaCapability):
"""Implements Alexa.ToggleController.
The instance property must be unique across ModeController, RangeController, ToggleController within the same device.
The instance property should be a concatenated string of the device domain, a period, and a single word.
e.g. fan.speed & fan.direction.
The instance property must not contain words from other instance property strings within the same device.
e.g. Instance property cover.position & cover.tilt_position will cause the Alexa.Discovery directive to fail.
An instance property string value may be reused for different devices.
https://developer.amazon.com/docs/device-apis/alexa-togglecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
"pt-BR",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ToggleController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "toggleState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "toggleState":
raise UnsupportedProperty(name)
# Fan Oscillating
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
is_on = bool(self.entity.attributes.get(fan.ATTR_OSCILLATING))
return "ON" if is_on else "OFF"
return None
def capability_resources(self):
"""Return capabilityResources object."""
# Fan Oscillating Resource
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
self._resource = AlexaCapabilityResource(
[AlexaGlobalCatalog.SETTING_OSCILLATE, "Rotate", "Rotation"]
)
return self._resource.serialize_capability_resources()
return None
class AlexaChannelController(AlexaCapability):
"""Implements Alexa.ChannelController.
https://developer.amazon.com/docs/device-apis/alexa-channelcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ChannelController"
class AlexaDoorbellEventSource(AlexaCapability):
"""Implements Alexa.DoorbellEventSource.
https://developer.amazon.com/docs/device-apis/alexa-doorbelleventsource.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"es-US",
"fr-CA",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.DoorbellEventSource"
def capability_proactively_reported(self):
"""Return True for proactively reported capability."""
return True
class AlexaPlaybackStateReporter(AlexaCapability):
"""Implements Alexa.PlaybackStateReporter.
https://developer.amazon.com/docs/device-apis/alexa-playbackstatereporter.html
"""
supported_locales = {"de-DE", "en-GB", "en-US", "es-MX", "fr-FR"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PlaybackStateReporter"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "playbackState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "playbackState":
raise UnsupportedProperty(name)
playback_state = self.entity.state
if playback_state == STATE_PLAYING:
return {"state": "PLAYING"}
if playback_state == STATE_PAUSED:
return {"state": "PAUSED"}
return {"state": "STOPPED"}
class AlexaSeekController(AlexaCapability):
"""Implements Alexa.SeekController.
https://developer.amazon.com/docs/device-apis/alexa-seekcontroller.html
"""
supported_locales = {"de-DE", "en-GB", "en-US", "es-MX"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.SeekController"
class AlexaEventDetectionSensor(AlexaCapability):
"""Implements Alexa.EventDetectionSensor.
https://developer.amazon.com/docs/device-apis/alexa-eventdetectionsensor.html
"""
supported_locales = {"en-US"}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.EventDetectionSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "humanPresenceDetectionState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "humanPresenceDetectionState":
raise UnsupportedProperty(name)
human_presence = "NOT_DETECTED"
state = self.entity.state
# Return None for unavailable and unknown states.
# Allows the Alexa.EndpointHealth Interface to handle the unavailable state in a stateReport.
if state in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
return None
if self.entity.domain == image_processing.DOMAIN:
if int(state):
human_presence = "DETECTED"
elif state == STATE_ON:
human_presence = "DETECTED"
return {"value": human_presence}
def configuration(self):
"""Return supported detection types."""
return {
"detectionMethods": ["AUDIO", "VIDEO"],
"detectionModes": {
"humanPresence": {
"featureAvailability": "ENABLED",
"supportsNotDetected": True,
}
},
}
class AlexaEqualizerController(AlexaCapability):
"""Implements Alexa.EqualizerController.
https://developer.amazon.com/en-US/docs/alexa/device-apis/alexa-equalizercontroller.html
"""
supported_locales = {"de-DE", "en-IN", "en-US", "es-ES", "it-IT", "ja-JP", "pt-BR"}
VALID_SOUND_MODES = {
"MOVIE",
"MUSIC",
"NIGHT",
"SPORT",
"TV",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.EqualizerController"
def properties_supported(self):
"""Return what properties this entity supports.
Either bands, mode or both can be specified. Only mode is supported at this time.
"""
return [{"name": "mode"}]
def get_property(self, name):
"""Read and return a property."""
if name != "mode":
raise UnsupportedProperty(name)
sound_mode = self.entity.attributes.get(media_player.ATTR_SOUND_MODE)
if sound_mode and sound_mode.upper() in self.VALID_SOUND_MODES:
return sound_mode.upper()
return None
def configurations(self):
"""Return the sound modes supported in the configurations object."""
configurations = None
supported_sound_modes = self.get_valid_inputs(
self.entity.attributes.get(media_player.ATTR_SOUND_MODE_LIST, [])
)
if supported_sound_modes:
configurations = {"modes": {"supported": supported_sound_modes}}
return configurations
@classmethod
def get_valid_inputs(cls, sound_mode_list):
"""Return list of supported inputs."""
input_list = []
for sound_mode in sound_mode_list:
sound_mode = sound_mode.upper()
if sound_mode in cls.VALID_SOUND_MODES:
input_list.append({"name": sound_mode})
return input_list
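# Illustrative example for AlexaEqualizerController: a media_player exposing
# sound_mode_list=["movie", "rock", "night"] yields
# get_valid_inputs() == [{"name": "MOVIE"}, {"name": "NIGHT"}], so
# configurations() returns
# {"modes": {"supported": [{"name": "MOVIE"}, {"name": "NIGHT"}]}}.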
class AlexaTimeHoldController(AlexaCapability):
"""Implements Alexa.TimeHoldController.
https://developer.amazon.com/docs/device-apis/alexa-timeholdcontroller.html
"""
supported_locales = {"en-US"}
def __init__(self, entity, allow_remote_resume=False):
"""Initialize the entity."""
super().__init__(entity)
self._allow_remote_resume = allow_remote_resume
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.TimeHoldController"
def configuration(self):
"""Return configuration object.
Set allowRemoteResume to True if Alexa can restart the operation on the device.
When false, Alexa does not send the Resume directive.
"""
return {"allowRemoteResume": self._allow_remote_resume}
class AlexaCameraStreamController(AlexaCapability):
"""Implements Alexa.CameraStreamController.
https://developer.amazon.com/docs/device-apis/alexa-camerastreamcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"hi-IN",
"it-IT",
"ja-JP",
"pt-BR",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.CameraStreamController"
def camera_stream_configurations(self):
"""Return cameraStreamConfigurations object."""
return [
{
"protocols": ["HLS"],
"resolutions": [{"width": 1280, "height": 720}],
"authorizationTypes": ["NONE"],
"videoCodecs": ["H264"],
"audioCodecs": ["AAC"],
}
]
|
|
import collections
import itertools
import numpy as np
import sklearn.cluster
import sklearn.neighbors
import networkx as nx
import scrapely.htmlpage as hp
import _kernel as _ker
import dtw
def to_rows(d):
"""Make a square matrix with rows equal to 'd'.
>>> print to_rows(np.array([1,2,3,4]))
[[1 2 3 4]
[1 2 3 4]
[1 2 3 4]
[1 2 3 4]]
"""
return np.tile(d, (len(d), 1))
def to_cols(d):
"""Make a square matrix with columns equal to 'd'.
>>> print to_cols(np.array([1,2,3,4]))
[[1 1 1 1]
[2 2 2 2]
[3 3 3 3]
[4 4 4 4]]
"""
return np.tile(d.reshape(len(d), -1), (1, len(d)))
def normalize_kernel(K):
"""New kernel with unit diagonal.
K'[i, j] = K[i, j]/sqrt(K[i,i]*K[j,j])
"""
d = np.diag(K).copy()
d[d == 0] = 1.0
return K/np.sqrt(to_rows(d)*to_cols(d))
def kernel_to_distance(K):
"""Build a distance matrix.
From the dot product:
|u - v|^2 = (u - v)(u - v) = u^2 + v^2 - 2uv
"""
d = np.diag(K)
D = to_rows(d) + to_cols(d) - 2*K
D[D < 0] = 0.0 # numerical error can make D go a little below 0
return np.sqrt(D)
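# Illustrative sketch (helper not used by the rest of this module): turning a
# tiny kernel into a distance matrix. The diagonal of K is already 1, so
# normalize_kernel() is a no-op here and the two items end up at distance 1.
def _example_kernel_to_distance():
    K = np.array([[1.0, 0.5], [0.5, 1.0]])
    return kernel_to_distance(normalize_kernel(K))  # array([[0., 1.], [1., 0.]])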
def tree_size_distance(page_tree):
"""Build a distance matrix comparing subtree sizes.
If T1 and T2 are trees and N1 and N2 the number of nodes within:
|T1 - T2| = |N1 - N2|/(N1 + N2)
Since:
N1 >= 1
N2 >= 1
Then:
0 <= |T1 - T2| < 1
"""
s = page_tree.tree_size()
a = to_cols(s).astype(float)
b = to_rows(s).astype(float)
return np.abs(a - b)/(a + b)
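# e.g. subtrees with 1 and 3 nodes are at distance |1 - 3| / (1 + 3) = 0.5.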
def must_separate(nodes, page_tree):
"""Given a sequence of nodes and a PageTree return a list of pairs
of nodes such that one is the ascendant/descendant of the other"""
separate = []
for src in nodes:
m = page_tree.match[src]
if m >= 0:
for tgt in range(src+1, m):
if tgt in nodes:
separate.append((src, tgt))
return separate
def cut_descendants(D, nodes, page_tree):
"""Given the distance matrix D, a set of nodes and a PageTree
perform a multicut of the complete graph of nodes separating
the nodes that are descendant/ascendants of each other according to the
PageTree"""
index = {node: i for i, node in enumerate(nodes)}
separate = [(index[i], index[j])
for i, j in must_separate(nodes, page_tree)]
if separate:
D = D[nodes, :][:, nodes].copy()
for i, j in separate:
D[i, j] = D[j, i] = np.inf
E = _ker.min_dist_complete(D)
eps = min(E[i,j] for i, j in separate)
components = nx.connected_components(
nx.Graph((nodes[i], nodes[j])
for (i, j) in zip(*np.nonzero(E < eps))))
else:
components = [nodes]
return components
def labels_to_clusters(labels):
"""Given a an assignment of cluster label to each item return the a list
of sets, where each set is a cluster"""
return [np.flatnonzero(labels==label) for label in range(np.max(labels)+1)]
def clusters_to_labels(clusters, n_samples):
"""Given a list with clusters label each item"""
labels = np.repeat(-1, n_samples)
for i, c in enumerate(clusters):
for j in c:
labels[j] = i
return labels
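# e.g. labels_to_clusters(np.array([0, 1, 0, -1])) == [array([0, 2]), array([1])];
# outliers labeled -1 belong to no cluster, and clusters_to_labels() inverts
# the mapping for the nodes that did get a cluster.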
def boost(d, k=2):
"""Given a distance between 0 and 1 make it more nonlinear"""
return 1 - (1 - d)**k
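# e.g. boost(0.5) == 0.75 and boost(0.9) == 0.99: mid-range distances are
# stretched toward 1 while the endpoints 0 and 1 stay fixed.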
class TreeClustering(object):
def __init__(self, page_tree):
self.page_tree = page_tree
def fit_predict(self, X, min_cluster_size=6, d1=1.0, d2=0.1, eps=1.0,
separate_descendants=True):
"""Fit the data X and label each sample.
X is a kernel of size (n_samples, n_samples). From this kernel the
distance matrix is computed and averaged with the tree size distance,
and DBSCAN applied to the result. Finally, we enforce the constraint
that a node cannot be inside the same cluster of any of its ascendants.
Parameters
----------
X : np.array
Kernel matrix
min_cluster_size : int
Parameter to DBSCAN
eps : float
Parameter to DBSCAN
d1 : float
Weight of distance computed from X
d2 : float
Weight of distance computed from tree size
separate_descendants : bool
True to enforce the cannot-link constraints
Returns
-------
np.array
A label for each sample
"""
Y = boost(tree_size_distance(self.page_tree), 2)
D = d1*X + d2*Y
clt = sklearn.cluster.DBSCAN(
eps=eps, min_samples=min_cluster_size, metric='precomputed')
self.clusters = []
for c in labels_to_clusters(clt.fit_predict(D)):
if len(c) >= min_cluster_size:
if separate_descendants:
self.clusters += filter(lambda x: len(x) >= min_cluster_size,
cut_descendants(D, c, self.page_tree))
else:
self.clusters.append(c)
self.labels = clusters_to_labels(self.clusters, D.shape[0])
return self.labels
def cluster(page_tree, K, eps=1.2, d1=1.0, d2=0.1, separate_descendants=True):
"""Asign to each node in the tree a cluster label.
Returns
-------
np.array
For each node a label id. Label ID -1 means that the node
is an outlier (it isn't part of any cluster).
"""
return TreeClustering(page_tree).fit_predict(
kernel_to_distance(normalize_kernel(K)),
eps=eps, d1=d1, d2=d2,
separate_descendants=separate_descendants)
def clusters_tournament(ptree, labels):
"""A cluster 'wins' if some node inside the cluster is the ascendant
of another node in the other cluster"""
L = np.max(labels) + 1
T = np.zeros((L, L), dtype=int)
for i, m in enumerate(ptree.match):
li = labels[i]
if li != -1:
for j in range(max(i + 1, m)):
lj = labels[j]
if lj != -1:
T[li, lj] += 1
return T
def _make_acyclic(T, labels):
"""See https://en.wikipedia.org/wiki/Feedback_arc_set"""
n = T.shape[0]
if n == 0:
return []
i = np.random.randint(0, n)
L = []
R = []
for j in range(n):
if j != i:
if T[i, j] > T[j, i]:
R.append(j)
else:
L.append(j)
return (make_acyclic(T[L, :][:, L], labels[L]) +
[labels[i]] +
make_acyclic(T[R, :][:, R], labels[R]))
def make_acyclic(T, labels=None):
"""Tiven a tournament T, try to rank the clusters in a consisten
way"""
if labels is None:
labels = np.arange(T.shape[0])
return _make_acyclic(T, labels)
def separate_clusters(ptree, labels):
"""Make sure no tree node is contained in two different clusters"""
ranking = make_acyclic(clusters_tournament(ptree, labels))
clusters = labels_to_clusters(labels)
labels = labels.copy()
for i in ranking:
for node in clusters[i]:
labels[node+1:max(node+1, ptree.match[node])] = -1
return labels
def score_cluster(ptree, cluster, k=4):
"""Given a cluster assign a score. The higher the score the more probable
that the cluster truly represents a repeating item"""
if len(cluster) <= 1:
return 0.0
D = sklearn.neighbors.kneighbors_graph(
ptree.distance[cluster, :][:, cluster], min(len(cluster) - 1, k),
metric='precomputed', mode='distance')
score = 0.0
for i, j in zip(*D.nonzero()):
a = cluster[i]
b = cluster[j]
si = max(a+1, ptree.match[a]) - a
sj = max(b+1, ptree.match[b]) - b
score += min(si, sj)/D[i, j]**2
return score
def some_root_has_label(labels, item, label):
for root in item:
if labels[root] == label:
return True
return False
def extract_items_with_label(ptree, labels, label_to_extract):
"""Extract all items inside the labeled PageTree that are marked or have
a sibling that is marked with label_to_extract.
Returns
-------
List[tuple]
Where each tuple is the roots of the extracted subtrees.
"""
items = []
i = 0
while i < len(labels):
children = ptree.children(i)
if np.any(labels[children] == label_to_extract):
first = None
item = []
for c in children:
m = labels[c]
if m != -1:
if first is None:
first = m
elif m == first:
if item:
items.append(tuple(item))
item = []
# Only append tags as item roots
if isinstance(ptree.page.parsed_body[ptree.index[c]], hp.HtmlTag):
item.append(c)
if item:
items.append(tuple(item))
i = ptree.match[i]
else:
i += 1
return filter(lambda item: some_root_has_label(labels, item, label_to_extract),
items)
def vote(sequence):
"""Return the most frequent item in sequence"""
return max(collections.Counter(sequence).iteritems(),
key=lambda kv: kv[1])[0]
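# e.g. vote([2, 3, 3, 5]) == 3; ties are broken arbitrarily by Counter order.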
def regularize_item_length(ptree, labels, item_locations, max_items_cut_per=0.33):
"""Make sure all item locations have the same number of roots"""
if not item_locations:
return item_locations
min_item_length = vote(len(item_location) for item_location in item_locations)
cut_items = sum(len(item_location) > min_item_length
for item_location in item_locations)
if cut_items > max_items_cut_per*len(item_locations):
return []
item_locations = filter(lambda x: len(x) >= min_item_length,
item_locations)
if cut_items > 0:
label_count = collections.Counter(
labels[root] for item_location in item_locations
for root in item_location)
new_item_locations = []
for item_location in item_locations:
if len(item_location) > min_item_length:
scored = sorted(
((label_count[labels[root]], root) for root in item_location),
reverse=True)
keep = set(x[1] for x in scored[:min_item_length])
new_item_location = tuple(
root
for root in item_location
if root in keep)
else:
new_item_location = item_location
new_item_locations.append(new_item_location)
else:
new_item_locations = item_locations
return new_item_locations
def extract_items(ptree, labels, min_n_items=6):
"""Extract the repeating items.
The algorithm to extract the repeating items goes as follows:
1. Determine the label that covers most children on the page
2. If a node with that label has siblings, extract the siblings too,
even if they have other labels.
The output is a list of lists of items
"""
labels = separate_clusters(ptree, labels)
scores = sorted(
enumerate(score_cluster(ptree, cluster)
for cluster in labels_to_clusters(labels)),
key=lambda kv: kv[1], reverse=True)
items = []
for label, score in scores:
cluster = extract_items_with_label(ptree, labels, label)
if len(cluster) < min_n_items:
continue
t = regularize_item_length(ptree, labels, cluster)
if len(t) >= min_n_items:
items.append(t)
return items
def path_distance(path_1, path_2):
"""Compute the prefix distance between the two paths.
>>> p1 = [1, 0, 3, 4, 5, 6]
>>> p2 = [1, 0, 2, 2, 2, 2, 2, 2]
>>> print path_distance(p1, p2)
6
"""
d = max(len(path_1), len(path_2))
for a, b in zip(path_1, path_2):
if a != b:
break
d -= 1
return d
def pairwise_path_distance(path_seq_1, path_seq_2):
"""Compute all pairwise distances between paths in path_seq_1 and
path_seq_2"""
N1 = len(path_seq_1)
N2 = len(path_seq_2)
D = np.zeros((N1, N2))
for i in range(N1):
q1 = path_seq_1[i]
for j in range(N2):
D[i, j] = path_distance(q1, path_seq_2[j])
return D
def extract_path_seq_1(ptree, item):
paths = []
for root in item:
for path in ptree.prefixes_at(root):
paths.append((path[0], path))
return paths
def extract_path_seq(ptree, items):
all_paths = []
for item in items:
paths = extract_path_seq_1(ptree, item)
all_paths.append(paths)
return all_paths
def map_paths_1(func, paths):
return [(leaf, [func(node) for node in path])
for leaf, path in paths]
def map_paths(func, paths):
return [map_paths_1(func, path_set) for path_set in paths]
def find_cliques(G, min_size):
"""Find all cliques in G above a given size.
If a node is part of a larger clique, it is deleted from the smaller ones.
Returns
-------
dict
Mapping nodes to clique ID
"""
cliques = []
for K in nx.find_cliques(G):
if len(K) >= min_size:
cliques.append(set(K))
cliques.sort(reverse=True, key=lambda x: len(x))
L = set()
for K in cliques:
K -= L
L |= K
cliques = [J for J in cliques if len(J) >= min_size]
node_to_clique = {}
for i, K in enumerate(cliques):
for node in K:
if node not in node_to_clique:
node_to_clique[node] = i
return node_to_clique
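# Illustrative sketch (helper not used by the rest of this module): with edges
# forming the triangle {1, 2, 3} plus the extra edge (3, 4), only the triangle
# reaches min_size, so nodes 1, 2 and 3 map to clique 0 and node 4 is left out.
def _example_find_cliques():
    G = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])
    return find_cliques(G, min_size=3)  # {1: 0, 2: 0, 3: 0}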
def match_graph(all_paths):
"""Build a graph where n1 and n2 share an edge if they have
been matched using DTW"""
G = nx.Graph()
for path_set_1, path_set_2 in itertools.combinations(all_paths, 2):
n1, p1 = zip(*path_set_1)
n2, p2 = zip(*path_set_2)
D = pairwise_path_distance(p1, p2)
DTW = dtw.from_distance(D)
a1, a2 = dtw.path(DTW)
m = dtw.match(a1, a2, D)
for i, j in enumerate(m):
if j != -1:
G.add_edge(n1[i], n2[j])
return G
def align_items(ptree, items, node_to_clique):
n_cols = max(node_to_clique.values()) + 1
table = np.zeros((len(items), n_cols), dtype=int) - 1
for i, item in enumerate(items):
for root in item:
for c in range(root, max(root + 1, ptree.match[root])):
try:
table[i, node_to_clique[c]] = c
except KeyError:
pass
return table
def extract_item_table(ptree, items, labels):
return align_items(
ptree,
items,
find_cliques(
match_graph(map_paths(
lambda x: labels[x], extract_path_seq(ptree, items))),
0.5*len(items))
)
ItemTable = collections.namedtuple('ItemTable', ['items', 'cells'])
class ItemExtract(object):
def __init__(self, page_tree, k_max_depth=2, k_decay=0.5,
c_eps=1.2, c_d1=1.0, c_d2=1.0, separate_descendants=True):
"""Perform all extraction operations in sequence.
Parameters
----------
k_max_depth : int
Parameter to kernel computation
k_decay : float
Parameter to kernel computation
c_eps : float
Parameter to clustering
c_d1 : float
Parameter to clustering
c_d2 : float
Parameter to clustering
separate_descendants : bool
Parameter to clustering
"""
self.page_tree = page_tree
self.kernel = _ker.kernel(page_tree, max_depth=k_max_depth, decay=k_decay)
self.labels = cluster(
page_tree, self.kernel, eps=c_eps, d1=c_d1, d2=c_d2,
separate_descendants=separate_descendants)
self.items = extract_items(page_tree, self.labels)
self.tables = [ItemTable(items, extract_item_table(page_tree, items, self.labels))
for items in self.items]
self.table_fragments = [
ItemTable([page_tree.fragment_index(np.array(root)) for root in item],
page_tree.fragment_index(fields))
for item, fields in self.tables]
|
|
"""Schevo-specific exceptions."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
# ======================================================================
# Runtime errors
class Restricted(RuntimeError):
"""The attempted operation was restricted."""
class BackendConflictError(RuntimeError):
"""Transaction could not be executed; too many backend conflict errors."""
class DatabaseAlreadyExists(RuntimeError):
"""The database already exists."""
def __init__(self, url):
message = 'Schevo database already at %r.' % url
RuntimeError.__init__(self, message)
self.url = url
class DatabaseDoesNotExist(RuntimeError):
"""The database does not exist."""
def __init__(self, url):
message = 'Schevo database not found at %r.' % url
RuntimeError.__init__(self, message)
self.url = url
class DatabaseExecutingTransaction(RuntimeError):
"""The operation cannot be completed while the database is
executing a transaction."""
class DatabaseFileLocked(RuntimeError):
"""The database file is in use by another process."""
def __init__(self):
message = 'The database file is being used by another process.'
RuntimeError.__init__(self, message)
class DatabaseFormatMismatch(RuntimeError):
"""The internal structure of the database is not in the correct
format."""
def __init__(self, current_format, required_format):
message = (
'Source database must be in format %i; currently in format %i.'
% (required_format, current_format)
)
RuntimeError.__init__(self, message)
self.current_format = current_format
self.required_format = required_format
class DatabaseMismatch(RuntimeError):
"""A value from one database was used incorrectly in another."""
def __init__(self, field_name, field_value):
message = (
'%r field of %r cannot be resolved to the current database.'
% (field_name, field_value)
)
RuntimeError.__init__(self, message)
self.field_name = field_name
self.field_value = field_value
class DatabaseVersionMismatch(RuntimeError):
"""The schema version being evolved to is not the version
subsequent to the current database schema."""
def __init__(self, current_version, expected_version, requested_version):
message = (
'Current version is %i; expected: %i; requested: %i.'
% (current_version, expected_version, requested_version)
)
RuntimeError.__init__(self, message)
self.current_version = current_version
self.expected_version = expected_version
self.requested_version = requested_version
class DeleteRestricted(Restricted):
"""Delete attempted on an instance with foreign references."""
def __init__(self, entity=None, referring_entity=None,
referring_field_name=None):
message = 'Cannot delete; referenced by one or more other entities.'
Restricted.__init__(self, message)
self.restrictions = set()
if (entity is not None
and referring_entity is not None
and referring_field_name is not None
):
self.add(entity, referring_entity, referring_field_name)
def add(self, entity, referring_entity, referring_field_name):
self.restrictions.add((
entity,
referring_entity,
referring_field_name,
))
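# Usage sketch (names here are hypothetical, not part of Schevo's API): a
# backend that discovers several referring entities can accumulate them on a
# single exception before raising it:
#
#     error = DeleteRestricted()
#     for referring_entity, field_name in referrers:
#         error.add(entity, referring_entity, field_name)
#     raise error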
class ExtentExists(KeyError):
"""An extent already exists."""
def __init__(self, extent_name):
message = 'Extent %r already exists.' % extent_name
KeyError.__init__(self, message)
self.extent_name = extent_name
class ExtentDoesNotExist(KeyError):
"""An extent does not exist."""
def __init__(self, extent_name):
message = 'Extent %r does not exist.' % extent_name
KeyError.__init__(self, message)
self.extent_name = extent_name
class EntityExists(KeyError):
"""An entity already exists."""
def __init__(self, extent_name, oid):
message = (
'Entity OID %i already exists in extent %r.'
% (oid, extent_name)
)
KeyError.__init__(self, message)
self.extent_name = extent_name
self.oid = oid
class EntityDoesNotExist(KeyError):
"""An entity does not exist."""
def __init__(self, extent_name, field_name=None, oid=None):
if field_name is not None:
message = (
'Entity referenced in field %r does not exist in extent %r.'
% (field_name, extent_name)
)
elif oid is not None:
message = (
'OID %i does not exist in extent %r.'
% (oid, extent_name)
)
else:
message = 'Entity does not exist in extent %r.' % extent_name
KeyError.__init__(self, message)
self.extent_name = extent_name
self.field_name = field_name
self.oid = oid
class FieldDoesNotExist(KeyError):
"""A field does not exist."""
def __init__(self, object_or_name, field_name, new_field_name=None):
message = (
'Field %r does not exist in %r'
% (field_name, object_or_name)
)
if new_field_name is not None:
message += (
' while attempting to rename field to %r'
% new_field_name
)
message += '.'
KeyError.__init__(self, message)
self.object_or_name = object_or_name
self.field_name = field_name
self.new_field_name = new_field_name
class FieldReadonly(AttributeError):
"""Cannot set values of readonly fields."""
def __init__(self, message, field, instance):
AttributeError.__init__(self, message)
self.field = field
self.instance = instance
class FieldRequired(AttributeError):
"""Must set values of required fields."""
def __init__(self, message, field, instance):
AttributeError.__init__(self, message)
self.field = field
self.instance = instance
class FindoneFoundMoreThanOne(Exception):
"""Findone found more than one match."""
def __init__(self, extent_name, criteria):
message = (
'Found more than one match in extent %r for criteria %r.'
% (extent_name, criteria)
)
Exception.__init__(self, message)
self.extent_name = extent_name
self.criteria = criteria[:]
class IndexDoesNotExist(Exception):
"""An index does not exist."""
def __init__(self, extent_name, index_spec):
message = (
'Index %r not found in extent %r.'
% (index_spec, extent_name)
)
Exception.__init__(self, message)
self.extent_name = extent_name
self.index_spec = index_spec
class KeyCollision(KeyError):
"""An entity with the given keys already exists."""
def __init__(self, extent_name, key_spec, field_values):
message = (
'Duplicate values %r for key %r in extent %r.'
% (field_values, key_spec, extent_name)
)
KeyError.__init__(self, message)
self.extent_name = extent_name
self.key_spec = key_spec
self.field_values = field_values
class SchemaFileIOError(IOError):
"""The schema file could not be read."""
class TransactionAlreadyExecuted(RuntimeError):
"""A transaction was already executed and cannot be re-executed."""
def __init__(self, transaction):
message = 'Transaction %r already executed.' % transaction
RuntimeError.__init__(self, message)
self.transaction = transaction
class TransactionExpired(RuntimeError):
"""Something changed in the database that caused this transaction to
expire."""
def __init__(self, transaction, original_rev, current_rev):
message = (
'Transaction %r expired; original entity revision was %i, now %i.'
% (transaction, original_rev, current_rev)
)
RuntimeError.__init__(self, message)
self.transaction = transaction
self.original_rev = original_rev
self.current_rev = current_rev
class TransactionFieldsNotChanged(RuntimeError):
"""No transaction field values were changed."""
def __init__(self, transaction):
message = (
'Transaction %r requires at least one field changed.'
% transaction
)
RuntimeError.__init__(self, message)
self.transaction = transaction
class TransactionNotExecuted(RuntimeError):
"""A transaction was not yet executed."""
def __init__(self, transaction):
message = (
'Transaction %r must be executed to get its changes '
'or undo transaction.'
% transaction
)
RuntimeError.__init__(self, message)
self.transaction = transaction
class TransactionRuleViolation(RuntimeError):
"""A transaction rule was violated."""
def __init__(self, message, **kwargs):
RuntimeError.__init__(self, message)
self.__dict__.update(kwargs)
# ======================================================================
# Schema errors
class SchemaError(SyntaxError):
"""An error was found in the schema."""
class AmbiguousFieldDefinition(SchemaError):
"""A field defition's attributes were ambiguous."""
def __init__(self, reason):
message = 'Ambiguous field definition: %r' % reason
SchemaError.__init__(self, message)
self.reason = reason
## def __init__(self, reason, class_name, field_name):
## message = (
## 'Ambiguous field definition for %r in class %r: %r'
## % (field_name, class_name, reason)
## )
## SchemaError.__init__(self, message)
## self.reason = reason
## self.class_name = class_name
## self.field_name = field_name
class KeyIndexOverlap(SchemaError):
"""Key specs and index specs must not overlap."""
def __init__(self, class_name, overlapping_specs):
message = (
'Cannot use same spec for both key and index in entity class %r.'
% class_name
)
SchemaError.__init__(self, message)
self.class_name = class_name
self.overlapping_specs = overlapping_specs
class TransactionExecuteRedefinitionRestricted(SchemaError):
"""Overriding `__init__` or `_execute` is not allowed in this class."""
def __init__(self, class_name, base_classes):
message = (
'Transaction subclass %r, with bases %r, '
'tried to override __init__ or _execute, '
'but that is not allowed with those bases.'
% (class_name, base_classes)
)
SchemaError.__init__(self, message)
self.class_name = class_name
self.base_classes = base_classes
class UnsupportedFieldType(SchemaError):
"""The field type is not supported by the database engine in use."""
def __init__(self, reason):
message = 'Unsupported field type: %s' % reason
SchemaError.__init__(self, message)
self.reason = reason
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.execute("create index canvas_comment_id_and_visibility_and_parent_comment_id_and_ugq on canvas_comment (id, visibility, parent_comment_id, ugq);")
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.APIApp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'attribution_copy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'attribution_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Category']"}),
'created_on_iphone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Comment']"}),
'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'star_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'ugq': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['auth.User']"})
},
u'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': u"orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
u'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': u"orm['canvas.Comment']"}),
'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'canvas.commentstickerlog': {
'Meta': {'object_name': 'CommentStickerLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': u"orm['canvas.Content']"}),
'stroke_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"})
},
u'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['canvas.FacebookUser']", 'symmetrical': 'False', 'blank': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': u"orm['canvas.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': u"orm['auth.User']"})
},
u'canvas.friendjoinednotificationreceipt': {
'Meta': {'unique_together': "(('actor', 'recipient'),)", 'object_name': 'FriendJoinedNotificationReceipt'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
},
u'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']", 'null': 'True'}),
'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'follower_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']", 'null': 'True'}),
'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': u"orm['auth.User']"})
},
u'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': u"orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
u'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
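# The ``models`` dict above is South's frozen ORM snapshot: each 'app.modelname'
# entry records the field definitions as they existed when this migration was
# generated, so the migration can be replayed against that historical schema, and
# ``complete_apps`` lists the apps whose models are fully frozen here.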
|
|
# Copyright (c) 2014 Alex Meade
# Copyright (c) 2015 Yogesh Kshirsagar
# Copyright (c) 2015 Michael Price
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import json
import mock
from simplejson import scanner
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit.volume.drivers.netapp.eseries import fakes as \
eseries_fake
from jacket.storage.volume.drivers.netapp.eseries import exception as es_exception
from jacket.storage.volume.drivers.netapp.eseries import client
from jacket.storage.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetAppEseriesClientDriverTestCase(test.TestCase):
"""Test case for NetApp e-series client."""
def setUp(self):
super(NetAppEseriesClientDriverTestCase, self).setUp()
self.mock_log = mock.Mock()
self.mock_object(client, 'LOG', self.mock_log)
self.fake_password = 'mysecret'
self.my_client = client.RestClient('http', 'host', '80', '/test',
'user', self.fake_password,
system_id='fake_sys_id')
self.my_client._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP
fake_response = mock.Mock()
fake_response.status_code = 200
self.my_client.invoke_service = mock.Mock(return_value=fake_response)
self.my_client.api_version = '01.52.9000.1'
@ddt.data(200, 201, 203, 204)
def test_eval_response_success(self, status_code):
fake_resp = mock.Mock()
fake_resp.status_code = status_code
self.assertIsNone(self.my_client._eval_response(fake_resp))
@ddt.data(300, 400, 404, 500)
def test_eval_response_failure(self, status_code):
fake_resp = mock.Mock()
fake_resp.status_code = status_code
expected_msg = "Response error code - %s." % status_code
with self.assertRaisesRegex(es_exception.WebServiceException,
expected_msg) as exc:
self.my_client._eval_response(fake_resp)
        self.assertEqual(status_code, exc.exception.status_code)
@ddt.data(('30', 'storage array password.*?incorrect'),
('authFailPassword', 'storage array password.*?incorrect'),
('unknown', None))
@ddt.unpack
def test_eval_response_422(self, ret_code, exc_regex):
status_code = 422
fake_resp = mock.Mock()
fake_resp.text = "fakeError"
fake_resp.json = mock.Mock(return_value={'retcode': ret_code})
fake_resp.status_code = status_code
exc_regex = exc_regex if exc_regex is not None else fake_resp.text
        with self.assertRaisesRegex(es_exception.WebServiceException,
                                    exc_regex) as exc:
            self.my_client._eval_response(fake_resp)
        self.assertEqual(status_code, exc.exception.status_code)
def test_eval_response_424(self):
status_code = 424
fake_resp = mock.Mock()
fake_resp.status_code = status_code
fake_resp.text = "Fake Error Message"
with self.assertRaisesRegex(es_exception.WebServiceException,
"The storage-system is offline") as exc:
self.my_client._eval_response(fake_resp)
        self.assertEqual(status_code, exc.exception.status_code)
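    # Taken together, the _eval_response tests above pin down the client contract
    # exercised here: 2xx responses return None, a 422 maps a recognised retcode to
    # a more specific password error, a 424 maps to "storage-system is offline",
    # and any other non-2xx status raises WebServiceException carrying the original
    # status code.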
def test_register_storage_system_does_not_log_password(self):
self.my_client._eval_response = mock.Mock()
self.my_client.register_storage_system([], password=self.fake_password)
for call in self.mock_log.debug.mock_calls:
__, args, __ = call
self.assertNotIn(self.fake_password, args[0])
def test_update_stored_system_password_does_not_log_password(self):
self.my_client._eval_response = mock.Mock()
self.my_client.update_stored_system_password(
password=self.fake_password)
for call in self.mock_log.debug.mock_calls:
__, args, __ = call
self.assertNotIn(self.fake_password, args[0])
def test_list_target_wwpns(self):
fake_hardware_inventory = copy.deepcopy(
eseries_fake.HARDWARE_INVENTORY)
mock_hardware_inventory = mock.Mock(
return_value=fake_hardware_inventory)
self.mock_object(self.my_client, 'list_hardware_inventory',
mock_hardware_inventory)
expected_wwpns = [eseries_fake.WWPN, eseries_fake.WWPN_2]
actual_wwpns = self.my_client.list_target_wwpns()
self.assertEqual(expected_wwpns, actual_wwpns)
def test_list_target_wwpns_single_wwpn(self):
fake_hardware_inventory = copy.deepcopy(
eseries_fake.HARDWARE_INVENTORY)
fake_hardware_inventory['fibrePorts'] = [
fake_hardware_inventory['fibrePorts'][0]
]
mock_hardware_inventory = mock.Mock(
return_value=fake_hardware_inventory)
self.mock_object(self.my_client, 'list_hardware_inventory',
mock_hardware_inventory)
expected_wwpns = [eseries_fake.WWPN]
actual_wwpns = self.my_client.list_target_wwpns()
self.assertEqual(expected_wwpns, actual_wwpns)
def test_list_target_wwpns_no_wwpn(self):
fake_hardware_inventory = copy.deepcopy(
eseries_fake.HARDWARE_INVENTORY)
fake_hardware_inventory['fibrePorts'] = []
mock_hardware_inventory = mock.Mock(
return_value=fake_hardware_inventory)
self.mock_object(self.my_client, 'list_hardware_inventory',
mock_hardware_inventory)
expected_wwpns = []
actual_wwpns = self.my_client.list_target_wwpns()
self.assertEqual(expected_wwpns, actual_wwpns)
def test_get_host_group_by_name(self):
groups = copy.deepcopy(eseries_fake.HOST_GROUPS)
group = groups[0]
self.mock_object(self.my_client, 'list_host_groups',
new_attr=mock.Mock(return_value=groups))
result = self.my_client.get_host_group_by_name(group['label'])
self.assertEqual(group, result)
def test_move_volume_mapping_via_symbol(self):
invoke = self.mock_object(self.my_client, '_invoke',
mock.Mock(return_value='ok'))
host_ref = 'host'
cluster_ref = 'cluster'
lun_id = 10
expected_data = {'lunMappingRef': host_ref, 'lun': lun_id,
'mapRef': cluster_ref}
result = self.my_client.move_volume_mapping_via_symbol(host_ref,
cluster_ref,
lun_id)
invoke.assert_called_once_with('POST', '/storage-systems/{system-id}/'
'symbol/moveLUNMapping',
expected_data)
self.assertEqual({'lun': lun_id}, result)
def test_move_volume_mapping_via_symbol_fail(self):
self.mock_object(self.my_client, '_invoke',
mock.Mock(return_value='failure'))
self.assertRaises(
exception.NetAppDriverException,
self.my_client.move_volume_mapping_via_symbol, '1', '2', 10)
def test_create_host_from_ports_fc(self):
label = 'fake_host'
host_type = 'linux'
port_type = 'fc'
port_ids = [eseries_fake.WWPN, eseries_fake.WWPN_2]
expected_ports = [
{'type': port_type, 'port': eseries_fake.WWPN, 'label': mock.ANY},
{'type': port_type, 'port': eseries_fake.WWPN_2,
'label': mock.ANY}]
mock_create_host = self.mock_object(self.my_client, 'create_host')
self.my_client.create_host_with_ports(label, host_type, port_ids,
port_type)
mock_create_host.assert_called_once_with(label, host_type,
expected_ports, None)
def test_host_from_ports_with_no_ports_provided_fc(self):
label = 'fake_host'
host_type = 'linux'
port_type = 'fc'
port_ids = []
expected_ports = []
mock_create_host = self.mock_object(self.my_client, 'create_host')
self.my_client.create_host_with_ports(label, host_type, port_ids,
port_type)
mock_create_host.assert_called_once_with(label, host_type,
expected_ports, None)
def test_create_host_from_ports_iscsi(self):
label = 'fake_host'
host_type = 'linux'
port_type = 'iscsi'
port_ids = [eseries_fake.INITIATOR_NAME,
eseries_fake.INITIATOR_NAME_2]
expected_ports = [
{'type': port_type, 'port': eseries_fake.INITIATOR_NAME,
'label': mock.ANY},
{'type': port_type, 'port': eseries_fake.INITIATOR_NAME_2,
'label': mock.ANY}]
mock_create_host = self.mock_object(self.my_client, 'create_host')
self.my_client.create_host_with_ports(label, host_type, port_ids,
port_type)
mock_create_host.assert_called_once_with(label, host_type,
expected_ports, None)
def test_get_volume_mappings_for_volume(self):
volume_mapping_1 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2['volumeRef'] = '2'
self.mock_object(self.my_client, 'get_volume_mappings',
mock.Mock(return_value=[volume_mapping_1,
volume_mapping_2]))
mappings = self.my_client.get_volume_mappings_for_volume(
eseries_fake.VOLUME)
self.assertEqual([volume_mapping_1], mappings)
def test_get_volume_mappings_for_host(self):
volume_mapping_1 = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2['volumeRef'] = '2'
volume_mapping_2['mapRef'] = 'hostRef'
self.mock_object(self.my_client, 'get_volume_mappings',
mock.Mock(return_value=[volume_mapping_1,
volume_mapping_2]))
mappings = self.my_client.get_volume_mappings_for_host(
'hostRef')
self.assertEqual([volume_mapping_2], mappings)
def test_get_volume_mappings_for_hostgroup(self):
volume_mapping_1 = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2['volumeRef'] = '2'
volume_mapping_2['mapRef'] = 'hostGroupRef'
self.mock_object(self.my_client, 'get_volume_mappings',
mock.Mock(return_value=[volume_mapping_1,
volume_mapping_2]))
mappings = self.my_client.get_volume_mappings_for_host_group(
'hostGroupRef')
self.assertEqual([volume_mapping_2], mappings)
def test_to_pretty_dict_string(self):
        sample_dict = {
'foo': 'bar',
'fu': {
'nested': 'boo'
}
}
expected_dict_string = ("""{
"foo": "bar",
"fu": {
"nested": "boo"
}
}""")
        dict_string = self.my_client._to_pretty_dict_string(sample_dict)
self.assertEqual(expected_dict_string, dict_string)
def test_log_http_request(self):
mock_log = self.mock_object(client, 'LOG')
verb = "POST"
url = "/v2/test/me"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = {}
body_string = "{}"
self.my_client._log_http_request(verb, url, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(verb, final_msg)
self.assertIn(url, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_log_http_request_no_body(self):
mock_log = self.mock_object(client, 'LOG')
verb = "POST"
url = "/v2/test/me"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = None
body_string = ""
self.my_client._log_http_request(verb, url, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(verb, final_msg)
self.assertIn(url, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_log_http_response(self):
mock_log = self.mock_object(client, 'LOG')
status = "200"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = {}
body_string = "{}"
self.my_client._log_http_response(status, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(status, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_log_http_response_no_body(self):
mock_log = self.mock_object(client, 'LOG')
status = "200"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = None
body_string = ""
self.my_client._log_http_response(status, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(status, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_add_autosupport_data(self):
self.mock_object(
client.RestClient, 'get_eseries_api_info',
mock.Mock(return_value=(
eseries_fake.FAKE_ASUP_DATA['operating-mode'],
eseries_fake.FAKE_ABOUT_RESPONSE['version'])))
self.mock_object(
self.my_client, 'get_asup_info',
mock.Mock(return_value=eseries_fake.GET_ASUP_RETURN))
self.mock_object(
self.my_client, 'set_counter',
mock.Mock(return_value={'value': 1}))
mock_invoke = self.mock_object(
self.my_client, '_invoke',
mock.Mock(return_value=eseries_fake.FAKE_ASUP_DATA))
client.RestClient.add_autosupport_data(
self.my_client,
eseries_fake.FAKE_KEY,
eseries_fake.FAKE_ASUP_DATA
)
mock_invoke.assert_called_with(*eseries_fake.FAKE_POST_INVOKE_DATA)
@ddt.data((eseries_fake.FAKE_SERIAL_NUMBERS,
eseries_fake.HARDWARE_INVENTORY),
(eseries_fake.FAKE_DEFAULT_SERIAL_NUMBER, {}),
(eseries_fake.FAKE_SERIAL_NUMBER,
eseries_fake.HARDWARE_INVENTORY_SINGLE_CONTROLLER))
@ddt.unpack
def test_get_asup_info_serial_numbers(self, expected_serial_numbers,
controllers):
self.mock_object(
client.RestClient, 'list_hardware_inventory',
mock.Mock(return_value=controllers))
self.mock_object(
client.RestClient, 'list_storage_system',
mock.Mock(return_value={}))
sn = client.RestClient.get_asup_info(self.my_client)['serial_numbers']
self.assertEqual(expected_serial_numbers, sn)
def test_get_asup_info_model_name(self):
self.mock_object(
client.RestClient, 'list_hardware_inventory',
mock.Mock(return_value=eseries_fake.HARDWARE_INVENTORY))
self.mock_object(
client.RestClient, 'list_storage_system',
mock.Mock(return_value=eseries_fake.STORAGE_SYSTEM))
model_name = client.RestClient.get_asup_info(self.my_client)['model']
self.assertEqual(eseries_fake.HARDWARE_INVENTORY['controllers'][0]
['modelName'], model_name)
def test_get_asup_info_model_name_empty_controllers_list(self):
self.mock_object(
client.RestClient, 'list_hardware_inventory',
mock.Mock(return_value={}))
self.mock_object(
client.RestClient, 'list_storage_system',
mock.Mock(return_value={}))
model_name = client.RestClient.get_asup_info(self.my_client)['model']
self.assertEqual(eseries_fake.FAKE_DEFAULT_MODEL, model_name)
def test_get_eseries_api_info(self):
fake_invoke_service = mock.Mock()
fake_invoke_service.json = mock.Mock(
return_value=eseries_fake.FAKE_ABOUT_RESPONSE)
self.mock_object(
client.RestClient, '_get_resource_url',
mock.Mock(return_value=eseries_fake.FAKE_RESOURCE_URL))
self.mock_object(
self.my_client, 'invoke_service',
mock.Mock(return_value=fake_invoke_service))
eseries_info = client.RestClient.get_eseries_api_info(
self.my_client, verify=False)
self.assertEqual((eseries_fake.FAKE_ASUP_DATA['operating-mode'],
eseries_fake.FAKE_ABOUT_RESPONSE['version']),
eseries_info)
def test_list_ssc_storage_pools(self):
self.my_client.features = mock.Mock()
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.SSC_POOLS)
pools = client.RestClient.list_ssc_storage_pools(self.my_client)
self.assertEqual(eseries_fake.SSC_POOLS, pools)
def test_get_ssc_storage_pool(self):
fake_pool = eseries_fake.SSC_POOLS[0]
self.my_client.features = mock.Mock()
self.my_client._invoke = mock.Mock(
return_value=fake_pool)
pool = client.RestClient.get_ssc_storage_pool(self.my_client,
fake_pool['poolId'])
self.assertEqual(fake_pool, pool)
@ddt.data(('volumes', True), ('volumes', False),
('volume', True), ('volume', False))
@ddt.unpack
def test_get_volume_api_path(self, path_key, ssc_available):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_available)
expected_key = 'ssc_' + path_key if ssc_available else path_key
expected = self.my_client.RESOURCE_PATHS.get(expected_key)
actual = self.my_client._get_volume_api_path(path_key)
self.assertEqual(expected, actual)
@ddt.data(True, False)
def test_get_volume_api_path_invalid(self, ssc_available):
key = 'invalidKey'
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_available)
self.assertRaises(KeyError, self.my_client._get_volume_api_path, key)
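    # The path-selection tests above capture the convention used throughout this
    # class: when the SSC (v2) API is available the client resolves resource keys
    # prefixed with 'ssc_', otherwise it falls back to the plain v1 resource paths.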
def test_list_volumes(self):
url = client.RestClient.RESOURCE_PATHS['ssc_volumes']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUMES)
volumes = client.RestClient.list_volumes(self.my_client)
self.assertEqual(eseries_fake.VOLUMES, volumes)
self.my_client._invoke.assert_called_once_with('GET', url)
@ddt.data(client.RestClient.ID, client.RestClient.WWN,
client.RestClient.NAME)
def test_list_volume_v1(self, uid_field_name):
url = client.RestClient.RESOURCE_PATHS['volumes']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUMES)
volume = client.RestClient.list_volume(self.my_client,
fake_volume[uid_field_name])
self.my_client._invoke.assert_called_once_with('GET', url)
self.assertEqual(fake_volume, volume)
def test_list_volume_v1_not_found(self):
url = client.RestClient.RESOURCE_PATHS['volumes']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUMES)
self.assertRaises(exception.VolumeNotFound,
client.RestClient.list_volume,
self.my_client, 'fakeId')
self.my_client._invoke.assert_called_once_with('GET', url)
def test_list_volume_v2(self):
url = client.RestClient.RESOURCE_PATHS['ssc_volume']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client._invoke = mock.Mock(return_value=fake_volume)
volume = client.RestClient.list_volume(self.my_client,
fake_volume['id'])
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
mock.ANY})
self.assertEqual(fake_volume, volume)
def test_list_volume_v2_not_found(self):
status_code = 404
url = client.RestClient.RESOURCE_PATHS['ssc_volume']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
msg = "Response error code - %s." % status_code
self.my_client._invoke = mock.Mock(
side_effect=es_exception.WebServiceException(message=msg,
status_code=
status_code))
self.assertRaises(exception.VolumeNotFound,
client.RestClient.list_volume,
self.my_client, 'fakeId')
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
mock.ANY})
def test_list_volume_v2_failure(self):
status_code = 422
url = client.RestClient.RESOURCE_PATHS['ssc_volume']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
msg = "Response error code - %s." % status_code
self.my_client._invoke = mock.Mock(
side_effect=es_exception.WebServiceException(message=msg,
status_code=
status_code))
self.assertRaises(es_exception.WebServiceException,
client.RestClient.list_volume, self.my_client,
'fakeId')
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
mock.ANY})
def test_create_volume_V1(self):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
create_volume = self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUME)
volume = client.RestClient.create_volume(self.my_client,
'fakePool', '1', 1)
args, kwargs = create_volume.call_args
verb, url, body = args
# Ensure the correct API was used
self.assertEqual('/storage-systems/{system-id}/volumes', url)
self.assertEqual(eseries_fake.VOLUME, volume)
def test_create_volume_V2(self):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
create_volume = self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUME)
volume = client.RestClient.create_volume(self.my_client,
'fakePool', '1', 1)
args, kwargs = create_volume.call_args
verb, url, body = args
# Ensure the correct API was used
self.assertIn('/storage-systems/{system-id}/ssc/volumes', url,
'The legacy API was used!')
self.assertEqual(eseries_fake.VOLUME, volume)
def test_create_volume_unsupported_specs(self):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
self.my_client.api_version = '01.52.9000.1'
self.assertRaises(exception.NetAppDriverException,
client.RestClient.create_volume, self.my_client,
'1', 'label', 1, read_cache=True)
@ddt.data(True, False)
def test_update_volume(self, ssc_api_enabled):
label = 'updatedName'
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
expected_volume = copy.deepcopy(fake_volume)
expected_volume['name'] = label
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_api_enabled)
self.my_client._invoke = mock.Mock(return_value=expected_volume)
updated_volume = self.my_client.update_volume(fake_volume['id'],
label)
if ssc_api_enabled:
url = self.my_client.RESOURCE_PATHS.get('ssc_volume')
else:
url = self.my_client.RESOURCE_PATHS.get('volume')
self.my_client._invoke.assert_called_once_with('POST', url,
{'name': label},
**{'object-id':
fake_volume['id']}
)
self.assertDictMatch(expected_volume, updated_volume)
def test_get_pool_operation_progress(self):
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
fake_response = copy.deepcopy(eseries_fake.FAKE_POOL_ACTION_PROGRESS)
self.my_client._invoke = mock.Mock(return_value=fake_response)
response = self.my_client.get_pool_operation_progress(fake_pool['id'])
url = self.my_client.RESOURCE_PATHS.get('pool_operation_progress')
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
fake_pool['id']})
self.assertEqual(fake_response, response)
def test_extend_volume(self):
new_capacity = 10
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
self.my_client._invoke = mock.Mock(return_value=fake_volume)
expanded_volume = self.my_client.expand_volume(fake_volume['id'],
new_capacity, False)
url = self.my_client.RESOURCE_PATHS.get('volume_expand')
body = {'expansionSize': new_capacity, 'sizeUnit': 'gb'}
self.my_client._invoke.assert_called_once_with('POST', url, body,
**{'object-id':
fake_volume['id']})
self.assertEqual(fake_volume, expanded_volume)
def test_extend_volume_thin(self):
new_capacity = 10
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
self.my_client._invoke = mock.Mock(return_value=fake_volume)
expanded_volume = self.my_client.expand_volume(fake_volume['id'],
new_capacity, True)
url = self.my_client.RESOURCE_PATHS.get('thin_volume_expand')
body = {'newVirtualSize': new_capacity, 'sizeUnit': 'gb',
'newRepositorySize': new_capacity}
self.my_client._invoke.assert_called_once_with('POST', url, body,
**{'object-id':
fake_volume['id']})
self.assertEqual(fake_volume, expanded_volume)
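    # Note how the two expand tests differ: a standard volume posts to
    # 'volume_expand' with {'expansionSize': ..., 'sizeUnit': 'gb'}, whereas a thin
    # volume posts to 'thin_volume_expand' and must also grow the repository via
    # 'newRepositorySize' (a summary of the request bodies asserted above).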
@ddt.data(True, False)
def test_delete_volume(self, ssc_api_enabled):
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_api_enabled)
self.my_client._invoke = mock.Mock()
self.my_client.delete_volume(fake_volume['id'])
if ssc_api_enabled:
url = self.my_client.RESOURCE_PATHS.get('ssc_volume')
else:
url = self.my_client.RESOURCE_PATHS.get('volume')
self.my_client._invoke.assert_called_once_with('DELETE', url,
**{'object-id':
fake_volume['id']})
def test_list_snapshot_group(self):
grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=grp))
fake_ref = 'fake'
result = self.my_client.list_snapshot_group(fake_ref)
self.assertEqual(grp, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_group'],
**{'object-id': fake_ref})
def test_list_snapshot_groups(self):
grps = [copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)]
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=grps))
result = self.my_client.list_snapshot_groups()
self.assertEqual(grps, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_groups'])
def test_delete_snapshot_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
fake_ref = 'fake'
self.my_client.delete_snapshot_group(fake_ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['snapshot_group'],
**{'object-id': fake_ref})
@ddt.data((None, None, None, None, None), ('1', 50, 75, 32, 'purgepit'))
@ddt.unpack
def test_create_snapshot_group(self, pool_id, repo, warn, limit, policy):
vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vol))
snap_grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
result = self.my_client.create_snapshot_group(
snap_grp['label'], snap_grp['id'], pool_id, repo, warn, limit,
policy)
self.assertEqual(vol, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_groups'],
{'baseMappableObjectId': snap_grp['id'], 'name': snap_grp['label'],
'storagePoolId': pool_id, 'repositoryPercentage': repo,
'warningThreshold': warn, 'autoDeleteLimit': limit,
'fullPolicy': policy})
def test_list_snapshot_volumes(self):
vols = [copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)]
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vols))
result = self.my_client.list_snapshot_volumes()
self.assertEqual(vols, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_volumes'])
def test_delete_snapshot_volume(self):
invoke = self.mock_object(self.my_client, '_invoke')
fake_ref = 'fake'
self.my_client.delete_snapshot_volume(fake_ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['snapshot_volume'],
**{'object-id': fake_ref})
@ddt.data((None, None, None, None), ('1', 50, 75, 'readWrite'))
@ddt.unpack
def test_create_snapshot_volume(self, pool_id, repo, warn, mode):
vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vol))
result = self.my_client.create_snapshot_volume(
vol['basePIT'], vol['label'], vol['id'], pool_id,
repo, warn, mode)
self.assertEqual(vol, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_volumes'],
mock.ANY)
def test_update_snapshot_volume(self):
snap_id = '1'
label = 'name'
pct = 99
vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vol))
result = self.my_client.update_snapshot_volume(snap_id, label, pct)
self.assertEqual(vol, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_volume'],
{'name': label, 'fullThreshold': pct}, **{'object-id': snap_id})
def test_create_snapshot_image(self):
img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=img))
grp_id = '1'
result = self.my_client.create_snapshot_image(grp_id)
self.assertEqual(img, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_images'],
{'groupId': grp_id})
def test_list_snapshot_image(self):
img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=img))
fake_ref = 'fake'
result = self.my_client.list_snapshot_image(fake_ref)
self.assertEqual(img, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_image'],
**{'object-id': fake_ref})
def test_list_snapshot_images(self):
imgs = [copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)]
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=imgs))
result = self.my_client.list_snapshot_images()
self.assertEqual(imgs, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_images'])
def test_delete_snapshot_image(self):
invoke = self.mock_object(self.my_client, '_invoke')
fake_ref = 'fake'
self.my_client.delete_snapshot_image(fake_ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['snapshot_image'],
**{'object-id': fake_ref})
def test_create_consistency_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
name = 'fake'
self.my_client.create_consistency_group(name)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['cgroups'], mock.ANY)
def test_list_consistency_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
ref = 'fake'
self.my_client.get_consistency_group(ref)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['cgroup'],
**{'object-id': ref})
def test_list_consistency_groups(self):
invoke = self.mock_object(self.my_client, '_invoke')
self.my_client.list_consistency_groups()
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['cgroups'])
def test_delete_consistency_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
ref = 'fake'
self.my_client.delete_consistency_group(ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['cgroup'],
**{'object-id': ref})
def test_add_consistency_group_member(self):
invoke = self.mock_object(self.my_client, '_invoke')
vol_id = eseries_fake.VOLUME['id']
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.add_consistency_group_member(vol_id, cg_id)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['cgroup_members'],
mock.ANY, **{'object-id': cg_id})
def test_remove_consistency_group_member(self):
invoke = self.mock_object(self.my_client, '_invoke')
vol_id = eseries_fake.VOLUME['id']
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.remove_consistency_group_member(vol_id, cg_id)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['cgroup_member'],
**{'object-id': cg_id, 'vol-id': vol_id})
def test_create_consistency_group_snapshot(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.create_consistency_group_snapshot(cg_id)
invoke.assert_called_once_with('POST', path, **{'object-id': cg_id})
@ddt.data(0, 32)
def test_delete_consistency_group_snapshot(self, seq_num):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.delete_consistency_group_snapshot(cg_id, seq_num)
invoke.assert_called_once_with(
'DELETE', path, **{'object-id': cg_id, 'seq-num': seq_num})
def test_get_consistency_group_snapshots(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.get_consistency_group_snapshots(cg_id)
invoke.assert_called_once_with(
'GET', path, **{'object-id': cg_id})
def test_create_cg_snapshot_view(self):
cg_snap_view = copy.deepcopy(
eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME)
view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=cg_snap_view))
list_views = self.mock_object(
self.my_client, 'list_cg_snapshot_views',
mock.Mock(return_value=[view]))
name = view['name']
snap_id = view['basePIT']
path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.create_cg_snapshot_view(cg_id, name, snap_id)
invoke.assert_called_once_with(
'POST', path, mock.ANY, **{'object-id': cg_id})
list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef'])
def test_create_cg_snapshot_view_not_found(self):
cg_snap_view = copy.deepcopy(
eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME)
view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=cg_snap_view))
list_views = self.mock_object(
self.my_client, 'list_cg_snapshot_views',
mock.Mock(return_value=[view]))
del_view = self.mock_object(self.my_client, 'delete_cg_snapshot_view')
name = view['name']
# Ensure we don't get a match on the retrieved views
snap_id = None
path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.assertRaises(
exception.NetAppDriverException,
self.my_client.create_cg_snapshot_view, cg_id, name, snap_id)
invoke.assert_called_once_with(
'POST', path, mock.ANY, **{'object-id': cg_id})
list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef'])
del_view.assert_called_once_with(cg_id, cg_snap_view['id'])
def test_list_cg_snapshot_views(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot_views')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
view_id = 'id'
self.my_client.list_cg_snapshot_views(cg_id, view_id)
invoke.assert_called_once_with(
'GET', path, **{'object-id': cg_id, 'view-id': view_id})
def test_delete_cg_snapshot_view(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snap_view')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
view_id = 'id'
self.my_client.delete_cg_snapshot_view(cg_id, view_id)
invoke.assert_called_once_with(
'DELETE', path, **{'object-id': cg_id, 'view-id': view_id})
@ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3',
'01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4')
def test_api_version_not_support_asup(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.AUTOSUPPORT.supported)
@ddt.data('01.52.9000.3', '01.52.9000.4', '01.52.8999.2',
'01.52.8999.3', '01.53.8999.3', '01.53.9000.2',
'02.51.9000.3', '02.52.8999.3', '02.51.8999.2')
def test_api_version_supports_asup(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.AUTOSUPPORT.supported)
@ddt.data('00.00.00.00', '01.52.9000.1', '01.52.9001.2', '00.53.9001.3',
'01.53.9090.1', '1.53.9010.14', '0.53.9011.15')
def test_api_version_not_support_ssc_api(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.SSC_API_V2.supported)
@ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1',
'01.53.9010.20', '01.53.9010.17', '01.54.9000.1',
'02.51.9000.3', '02.52.8999.3', '02.51.8999.2')
def test_api_version_supports_ssc_api(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.SSC_API_V2.supported)
@ddt.data('00.00.00.00', '01.52.9000.5', '01.52.9001.2', '00.53.9001.3',
'01.52.9090.1', '1.52.9010.7', '0.53.9011.7')
def test_api_version_not_support_1_3(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.REST_1_3_RELEASE.supported)
@ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1',
'01.54.9010.20', '01.54.9000.1', '02.51.9000.3',
'02.52.8999.3', '02.51.8999.2')
def test_api_version_1_3(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.REST_1_3_RELEASE.supported)
def test_invoke_bad_content_type(self):
"""Tests the invoke behavior with a non-JSON response"""
fake_response = mock.Mock()
fake_response.json = mock.Mock(side_effect=scanner.JSONDecodeError(
'', '{}', 1))
fake_response.status_code = 424
fake_response.text = "Fake Response"
self.mock_object(self.my_client, 'invoke_service',
mock.Mock(return_value=fake_response))
self.assertRaises(es_exception.WebServiceException,
self.my_client._invoke, 'GET',
eseries_fake.FAKE_ENDPOINT_HTTP)
def test_list_backend_store(self):
path = self.my_client.RESOURCE_PATHS.get('persistent-store')
fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE)
invoke = self.mock_object(
self.my_client, '_invoke', mock.Mock(
return_value=fake_store))
expected = json.loads(fake_store.get('value'))
result = self.my_client.list_backend_store('key')
self.assertEqual(expected, result)
invoke.assert_called_once_with('GET', path, key='key')
def test_save_backend_store(self):
path = self.my_client.RESOURCE_PATHS.get('persistent-stores')
fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE)
key = 'key'
invoke = self.mock_object(
self.my_client, '_invoke',
mock.Mock())
self.my_client.save_backend_store(key, fake_store)
invoke.assert_called_once_with('POST', path, mock.ANY)
@ddt.ddt
class TestWebserviceClientTestCase(test.TestCase):
def setUp(self):
"""sets up the mock tests"""
super(TestWebserviceClientTestCase, self).setUp()
self.mock_log = mock.Mock()
self.mock_object(client, 'LOG', self.mock_log)
self.webclient = client.WebserviceClient('http', 'host', '80',
'/test', 'user', '****')
@ddt.data({'params': {'host': None, 'scheme': 'https', 'port': '80'}},
{'params': {'host': 'host', 'scheme': None, 'port': '80'}},
{'params': {'host': 'host', 'scheme': 'http', 'port': None}})
@ddt.unpack
def test__validate_params_value_error(self, params):
"""Tests various scenarios for ValueError in validate method"""
self.assertRaises(exception.InvalidInput,
self.webclient._validate_params, **params)
def test_invoke_service_no_endpoint_error(self):
"""Tests Exception and Log error if no endpoint is provided"""
self.webclient._endpoint = None
log_error = 'Unexpected error while invoking web service'
self.assertRaises(exception.NetAppDriverException,
self.webclient.invoke_service)
        self.assertTrue(self.mock_log.exception.called)
def test_invoke_service(self):
"""Tests if invoke_service evaluates the right response"""
self.webclient._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP
self.mock_object(self.webclient.conn, 'request',
mock.Mock(return_value=eseries_fake.FAKE_INVOC_MSG))
result = self.webclient.invoke_service()
self.assertIsNotNone(result)
|
|
import sys
import os
import traceback
from collections import deque
try:
import simplejson as json
except ImportError:
import json
json_encode = lambda x: json.dumps(x)
json_decode = lambda x: json.loads(x)
#reads lines and reconstructs newlines appropriately
def readMsg():
msg = ""
while True:
line = sys.stdin.readline()[0:-1]
if line == "end":
break
msg = msg + line + "\n"
return json_decode(msg[0:-1])
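# Illustrative exchange (assumed multilang wire format, not defined in this file):
# the parent process writes a JSON payload followed by a lone "end" line, e.g.
#
#   {"command": "next"}
#   end
#
# readMsg() strips each trailing newline, rejoins the lines and JSON-decodes the
# result, so payloads spanning several lines are reassembled before decoding.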
MODE = None
ANCHOR_TUPLE = None
#queue up commands we read while trying to read taskids
pending_commands = deque()
def readTaskIds():
if pending_taskids:
return pending_taskids.popleft()
else:
msg = readMsg()
while type(msg) is not list:
pending_commands.append(msg)
msg = readMsg()
return msg
#queue up taskids we read while trying to read commands/tuples
pending_taskids = deque()
def readCommand():
if pending_commands:
return pending_commands.popleft()
else:
msg = readMsg()
while type(msg) is list:
pending_taskids.append(msg)
msg = readMsg()
return msg
def readTuple():
cmd = readCommand()
return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"], cmd["tuple"])
def sendMsgToParent(msg):
print json_encode(msg)
print "end"
sys.stdout.flush()
def sync():
sendMsgToParent({'command':'sync'})
def sendpid(heartbeatdir):
pid = os.getpid()
sendMsgToParent({'pid':pid})
open(heartbeatdir + "/" + str(pid), "w").close()
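# The empty file named after the pid appears to act as a heartbeat marker that the
# supervising process can check or delete to confirm this subprocess is alive;
# that behaviour is assumed from the protocol rather than defined in this file.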
def emit(*args, **kwargs):
__emit(*args, **kwargs)
return readTaskIds()
def emitDirect(task, *args, **kwargs):
    kwargs["directTask"] = task
__emit(*args, **kwargs)
def __emit(*args, **kwargs):
global MODE
if MODE == Bolt:
emitBolt(*args, **kwargs)
elif MODE == Spout:
emitSpout(*args, **kwargs)
def emitBolt(tup, stream=None, anchors = [], directTask=None):
global ANCHOR_TUPLE
if ANCHOR_TUPLE is not None:
anchors = [ANCHOR_TUPLE]
m = {"command": "emit"}
if stream is not None:
m["stream"] = stream
m["anchors"] = map(lambda a: a.id, anchors)
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def emitSpout(tup, stream=None, id=None, directTask=None):
m = {"command": "emit"}
if id is not None:
m["id"] = id
if stream is not None:
m["stream"] = stream
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def ack(tup):
sendMsgToParent({"command": "ack", "id": tup.id})
def fail(tup):
sendMsgToParent({"command": "fail", "id": tup.id})
def log(msg):
sendMsgToParent({"command": "log", "msg": msg})
def initComponent():
setupInfo = readMsg()
sendpid(setupInfo['pidDir'])
return [setupInfo['conf'], setupInfo['context']]
class Tuple:
def __init__(self, id, component, stream, task, values):
self.id = id
self.component = component
self.stream = stream
self.task = task
self.values = values
def __repr__(self):
return '<%s%s>' % (
self.__class__.__name__,
''.join(' %s=%r' % (k, self.__dict__[k]) for k in sorted(self.__dict__.keys())))
class Bolt:
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global MODE
MODE = Bolt
conf, context = initComponent()
self.initialize(conf, context)
try:
while True:
tup = readTuple()
self.process(tup)
except Exception, e:
log(traceback.format_exc(e))
class BasicBolt:
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global MODE
MODE = Bolt
global ANCHOR_TUPLE
conf, context = initComponent()
self.initialize(conf, context)
try:
while True:
tup = readTuple()
ANCHOR_TUPLE = tup
self.process(tup)
ack(tup)
except Exception, e:
log(traceback.format_exc(e))
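# A minimal usage sketch (hypothetical bolt; the class and field names below are
# illustrative only):
#
#   class SplitSentenceBolt(BasicBolt):
#       def process(self, tup):
#           for word in tup.values[0].split(" "):
#               emit([word])
#
#   # SplitSentenceBolt().run()  -- started by Storm via the multilang protocol;
#   # BasicBolt anchors and acks each input tuple automatically around process().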
class Spout:
def initialize(self, conf, context):
pass
def ack(self, id):
pass
def fail(self, id):
pass
def nextTuple(self):
pass
def run(self):
global MODE
MODE = Spout
conf, context = initComponent()
self.initialize(conf, context)
try:
while True:
msg = readCommand()
if msg["command"] == "next":
self.nextTuple()
if msg["command"] == "ack":
self.ack(msg["id"])
if msg["command"] == "fail":
self.fail(msg["id"])
sync()
except Exception, e:
log(traceback.format_exc(e))
|
|
"""Fourier Series"""
from __future__ import print_function, division
from sympy import pi, oo
from sympy.core.expr import Expr
from sympy.core.add import Add
from sympy.core.sympify import sympify
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Symbol
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.functions.elementary.trigonometric import sin, cos
from sympy.sets.sets import Interval
from sympy.series.series_class import SeriesBase
from sympy.series.sequences import SeqFormula
def fourier_cos_seq(func, limits, n):
"""Returns the cos sequence in a fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
cos_term = cos(2*n*pi*x / L)
formula = 2 * cos_term * integrate(func * cos_term, limits) / L
a0 = formula.subs(n, S.Zero) / 2
return a0, SeqFormula(2 * cos_term * integrate(func * cos_term, limits)
/ L, (n, 1, oo))
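# The expression above is the standard cosine coefficient for a period-L expansion,
# a_n = (2/L) * Integral(f(x)*cos(2*pi*n*x/L), limits), with the constant term
# returned separately as a0 = a_n evaluated at n=0, divided by 2.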
def fourier_sin_seq(func, limits, n):
"""Returns the sin sequence in a fourier series"""
from sympy.integrals import integrate
x, L = limits[0], limits[2] - limits[1]
sin_term = sin(2*n*pi*x / L)
return SeqFormula(2 * sin_term * integrate(func * sin_term, limits)
/ L, (n, 1, oo))
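# Likewise, this is the standard sine coefficient
# b_n = (2/L) * Integral(f(x)*sin(2*pi*n*x/L), limits), packaged as a SeqFormula
# over n >= 1.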
def _process_limits(func, limits):
"""
    limits should be of the form (x, start, stop).
    x should be a symbol. Both start and stop should be bounded.
    * If x is not given, x is determined from func.
    * If limits is None, limits of the form (x, -pi, pi) are returned.
Examples
========
>>> from sympy import pi
>>> from sympy.series.fourier import _process_limits as pari
>>> from sympy.abc import x
>>> pari(x**2, (x, -2, 2))
(x, -2, 2)
>>> pari(x**2, (-2, 2))
(x, -2, 2)
>>> pari(x**2, None)
(x, -pi, pi)
"""
def _find_x(func):
free = func.free_symbols
        if len(free) == 1:
            return free.pop()
        elif len(free) == 0:
            return Dummy('k')
        else:
            raise ValueError(
                "Specify dummy variables for %s. If the function contains"
" more than one free symbol, a dummy variable should be"
" supplied explicitly e.g. FourierSeries(m*n**2, (n, -pi, pi))"
% func)
x, start, stop = None, None, None
if limits is None:
x, start, stop = _find_x(func), -pi, pi
if is_sequence(limits, Tuple):
if len(limits) == 3:
x, start, stop = limits
elif len(limits) == 2:
x = _find_x(func)
start, stop = limits
if not isinstance(x, Symbol) or start is None or stop is None:
raise ValueError('Invalid limits given: %s' % str(limits))
unbounded = [S.NegativeInfinity, S.Infinity]
if start in unbounded or stop in unbounded:
raise ValueError("Both the start and end value should be bounded")
return sympify((x, start, stop))
class FourierSeries(SeriesBase):
r"""Represents fourier sine/cosine series
This class only represents a fourier series.
No computation is performed.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
See Also
========
sympy.series.fourier.fourier_series
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1][0]
@property
def period(self):
return (self.args[1][1], self.args[1][2])
@property
def a0(self):
return self.args[2][0]
@property
def an(self):
return self.args[2][1]
@property
def bn(self):
return self.args[2][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def truncate(self, n=3):
""""returns the first n terms(non-zero) of the series
if n is none returns an iterator"""
if n is None:
return iter(self)
terms = []
for t in self:
if len(terms) == n:
break
if t is not S.Zero:
terms.append(t)
return Add(*terms)
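    # For example (illustrative, following the doctests used elsewhere in this
    # module; the exact printed ordering may differ):
    #   >>> fourier_series(x**2, (x, -pi, pi)).truncate(4)
    #   -4*cos(x) + cos(2*x) - 4*cos(3*x)/9 + pi**2/3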
def shift(self, s):
"""
Shift the function by a
term independent of x
f(x) -> f(x) + s
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
a0 = self.a0 + s
sfunc = self.function + s
return self.func(sfunc, self.args[1], (a0, self.an, self.bn))
def shiftx(self, s):
"""
Shift x by a
term independent of x
f(x) -> f(x + s)
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x + s)
bn = self.bn.subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def scale(self, s):
"""
Scale function by a
term independent of x
f(x) -> s * f(x)
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.coeff_mul(s)
bn = self.bn.coeff_mul(s)
a0 = self.a0 * s
sfunc = self.args[0] * s
return self.func(sfunc, self.args[1], (a0, an, bn))
def scalex(self, s):
"""
Scale x by a
term independent of x
f(x) -> f(s*x)
        This is fast if the Fourier series of f(x) is already
        computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x * s)
bn = self.bn.subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def _eval_as_leading_term(self, x):
for t in self:
if t is not S.Zero:
return t
def _eval_term(self, pt):
if pt == 0:
return self.a0
return self.an.coeff(pt) + self.bn.coeff(pt)
def __neg__(self):
return self.scale(-1)
def __add__(self, other):
if isinstance(other, FourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
an = self.an + other.an
bn = self.bn + other.bn
a0 = self.a0 + other.a0
return self.func(function, self.args[1], (a0, an, bn))
return Add(self, other)
def __sub__(self, other):
return self.__add__(-other)
def fourier_series(f, limits=None):
"""Computes fourier sine/cosine series expansion
returns a ``FourierSeries`` object
Examples
========
>>> from sympy import fourier_series, pi, cos
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.truncate(n=3)
-4*cos(x) + cos(2*x) + pi**2/3
Shifting
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
Scaling
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
Notes
=====
    Computing a Fourier series can be slow
    due to the integration required in computing
    an, bn.
    It is faster to compute the Fourier series of a function
    by using shifting and scaling on an already
    computed Fourier series rather than computing it
    again.
    e.g. if the Fourier series of x**2 is known,
    the Fourier series of x**2 - 1 can be found by shifting by -1.
See Also
========
sympy.series.fourier.FourierSeries
References
==========
.. [1] mathworld.wolfram.com/FourierSeries.html
"""
f = sympify(f)
limits = _process_limits(f, limits)
x = limits[0]
if x not in f.free_symbols:
return f
n = Dummy('n')
neg_f = f.subs(x, -x)
if f == neg_f:
a0, an = fourier_cos_seq(f, limits, n)
bn = SeqFormula(0, (1, oo))
elif f == -neg_f:
a0 = S.Zero
an = SeqFormula(0, (1, oo))
bn = fourier_sin_seq(f, limits, n)
else:
a0, an = fourier_cos_seq(f, limits, n)
bn = fourier_sin_seq(f, limits, n)
return FourierSeries(f, limits, (a0, an, bn))
|
|
#!/usr/bin/env python
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
import multiprocessing
import os
import subprocess
import sys
import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
os.makedirs('reports')
start_port_server.start_port_server()
def fnize(s):
out = ''
for c in s:
if c in '<>, /':
if len(out) and out[-1] == '_': continue
out += '_'
else:
out += c
return out
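# For instance, fnize('BM_StreamingPingPong<TCP, NoOpMutator>/0/0') collapses the
# '<', '>', ',', ' ' and '/' characters into single underscores, yielding a name
# that is safe to reuse as a report filename (hypothetical benchmark name, shown
# only to illustrate the mapping).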
# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""
def heading(name):
global index_html
index_html += "<h1>%s</h1>\n" % name
def link(txt, tgt):
global index_html
index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
cgi.escape(tgt, quote=True), cgi.escape(txt))
def text(txt):
global index_html
index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
def collect_latency(bm_name, args):
"""generate latency profiles"""
benchmarks = []
profile_analysis = []
cleanup = []
heading('Latency Profiles: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
'--benchmark_list_tests']).splitlines():
link(line, '%s.txt' % fnize(line))
benchmarks.append(
jobset.JobSpec(['bins/basicprof/%s' % bm_name,
'--benchmark_filter=^%s$' % line,
'--benchmark_min_time=0.05'],
environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
profile_analysis.append(
jobset.JobSpec([sys.executable,
'tools/profiling/latency_profile/profile_analyzer.py',
'--source', '%s.trace' % fnize(line), '--fmt', 'simple',
'--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
# periodically flush out the list of jobs: profile_analysis jobs at least
# consume upwards of five gigabytes of ram in some cases, and so analysing
# hundreds of them at once is impractical -- but we want at least some
# concurrency or the work takes too long
if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
profile_analysis = []
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_perf(bm_name, args):
"""generate flamegraphs"""
heading('Flamegraphs: %s' % bm_name)
subprocess.check_call(
['make', bm_name,
'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
benchmarks = []
profile_analysis = []
cleanup = []
for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
'--benchmark_list_tests']).splitlines():
link(line, '%s.svg' % fnize(line))
benchmarks.append(
jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
'-g', '-F', '997',
'bins/mutrace/%s' % bm_name,
'--benchmark_filter=^%s$' % line,
'--benchmark_min_time=10']))
profile_analysis.append(
jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
environ = {
'PERF_BASE_NAME': fnize(line),
'OUTPUT_DIR': 'reports',
'OUTPUT_FILENAME': fnize(line),
}))
cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
# periodically flush out the list of jobs: temporary space required for this
# processing is large
if len(benchmarks) >= 20:
      # run benchmarks one at a time: 'perf record' needs the machine to itself
      # for representative profiles, so concurrency is deliberately capped at 1
jobset.run(benchmarks, maxjobs=1)
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
profile_analysis = []
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
jobset.run(benchmarks, maxjobs=1)
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def run_summary(bm_name, cfg, base_json_name):
subprocess.check_call(
['make', bm_name,
'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
cmd = ['bins/%s/%s' % (cfg, bm_name),
'--benchmark_out=%s.%s.json' % (base_json_name, cfg),
'--benchmark_out_format=json']
if args.summary_time is not None:
cmd += ['--benchmark_min_time=%d' % args.summary_time]
return subprocess.check_output(cmd)
def collect_summary(bm_name, args):
heading('Summary: %s [no counters]' % bm_name)
text(run_summary(bm_name, 'opt', bm_name))
heading('Summary: %s [with counters]' % bm_name)
text(run_summary(bm_name, 'counters', bm_name))
if args.bigquery_upload:
with open('%s.csv' % bm_name, 'w') as f:
f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py',
'%s.counters.json' % bm_name,
'%s.opt.json' % bm_name]))
subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name])
collectors = {
'latency': collect_latency,
'perf': collect_perf,
'summary': collect_summary,
}
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c', '--collect',
choices=sorted(collectors.keys()),
nargs='*',
default=sorted(collectors.keys()),
help='Which collectors should be run against each benchmark')
argp.add_argument('-b', '--benchmarks',
default=['bm_fullstack_unary_ping_pong',
'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump',
'bm_closure',
'bm_cq',
'bm_call_create',
'bm_error',
'bm_chttp2_hpack',
'bm_metadata',
'bm_fullstack_trickle',
],
nargs='+',
type=str,
help='Which microbenchmarks should be run')
argp.add_argument('--diff_perf',
default=None,
type=str,
help='Diff microbenchmarks against this git revision')
argp.add_argument('--bigquery_upload',
default=False,
action='store_const',
const=True,
help='Upload results from summary collection to bigquery')
argp.add_argument('--summary_time',
default=None,
type=int,
help='Minimum time to run benchmarks for the summary collection')
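# Illustrative invocation (the script name is assumed here, not taken from this
# file): collect only the summary data for one benchmark with a 2 second
# minimum run time:
#   python run_microbenchmark.py -c summary -b bm_error --summary_time 2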
args = argp.parse_args()
try:
for collect in args.collect:
for bm_name in args.benchmarks:
collectors[collect](bm_name, args)
if args.diff_perf:
if 'summary' not in args.collect:
for bm_name in args.benchmarks:
run_summary(bm_name, 'opt', bm_name)
run_summary(bm_name, 'counters', bm_name)
where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', args.diff_perf])
comparables = []
subprocess.check_call(['make', 'clean'])
try:
for bm_name in args.benchmarks:
try:
run_summary(bm_name, 'opt', '%s.old' % bm_name)
run_summary(bm_name, 'counters', '%s.old' % bm_name)
comparables.append(bm_name)
except subprocess.CalledProcessError:
pass
finally:
subprocess.check_call(['git', 'checkout', where_am_i])
for bm_name in comparables:
diff = subprocess.check_output(['tools/profiling/microbenchmarks/bm_diff.py',
'%s.counters.json' % bm_name,
'%s.opt.json' % bm_name,
'%s.old.counters.json' % bm_name,
'%s.old.opt.json' % bm_name]).strip()
if diff:
heading('Performance diff: %s' % bm_name)
text(diff)
finally:
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
f.write(index_html)
|
|
"""Config flow for Universal Devices ISY994 integration."""
import logging
from urllib.parse import urlparse
from pyisy.configuration import Configuration
from pyisy.connection import Connection
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.components import ssdp
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from .const import (
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DEFAULT_IGNORE_STRING,
DEFAULT_RESTORE_LIGHT_STATE,
DEFAULT_SENSOR_STRING,
DEFAULT_TLS_VERSION,
DEFAULT_VAR_SENSOR_STRING,
ISY_URL_POSTFIX,
UDN_UUID_PREFIX,
)
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
def _data_schema(schema_input):
"""Generate schema with defaults."""
return vol.Schema(
{
vol.Required(CONF_HOST, default=schema_input.get(CONF_HOST, "")): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_TLS_VER, default=DEFAULT_TLS_VERSION): vol.In([1.1, 1.2]),
},
extra=vol.ALLOW_EXTRA,
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
user = data[CONF_USERNAME]
password = data[CONF_PASSWORD]
host = urlparse(data[CONF_HOST])
tls_version = data.get(CONF_TLS_VER)
if host.scheme == "http":
https = False
port = host.port or 80
elif host.scheme == "https":
https = True
port = host.port or 443
else:
_LOGGER.error("isy994 host value in configuration is invalid")
raise InvalidHost
# Connect to ISY controller.
isy_conf = await hass.async_add_executor_job(
_fetch_isy_configuration,
host.hostname,
port,
user,
password,
https,
tls_version,
host.path,
)
if not isy_conf or "name" not in isy_conf or not isy_conf["name"]:
raise CannotConnect
# Return info that you want to store in the config entry.
return {"title": f"{isy_conf['name']} ({host.hostname})", "uuid": isy_conf["uuid"]}
def _fetch_isy_configuration(
address, port, username, password, use_https, tls_ver, webroot
):
"""Validate and fetch the configuration from the ISY."""
try:
isy_conn = Connection(
address,
port,
username,
password,
use_https,
tls_ver,
webroot=webroot,
)
except ValueError as err:
raise InvalidAuth(err.args[0]) from err
return Configuration(xml=isy_conn.get_config())
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Universal Devices ISY994."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the isy994 config flow."""
self.discovered_conf = {}
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
info = None
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidHost:
errors["base"] = "invalid_host"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if not errors:
await self.async_set_unique_id(info["uuid"], raise_on_progress=False)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user",
data_schema=_data_schema(self.discovered_conf),
errors=errors,
)
async def async_step_import(self, user_input):
"""Handle import."""
return await self.async_step_user(user_input)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered isy994."""
friendly_name = discovery_info[ssdp.ATTR_UPNP_FRIENDLY_NAME]
url = discovery_info[ssdp.ATTR_SSDP_LOCATION]
mac = discovery_info[ssdp.ATTR_UPNP_UDN]
if mac.startswith(UDN_UUID_PREFIX):
mac = mac[len(UDN_UUID_PREFIX) :]
if url.endswith(ISY_URL_POSTFIX):
url = url[: -len(ISY_URL_POSTFIX)]
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
self.discovered_conf = {
CONF_NAME: friendly_name,
CONF_HOST: url,
}
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["title_placeholders"] = self.discovered_conf
return await self.async_step_user()
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for isy994."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = self.config_entry.options
restore_light_state = options.get(
CONF_RESTORE_LIGHT_STATE, DEFAULT_RESTORE_LIGHT_STATE
)
ignore_string = options.get(CONF_IGNORE_STRING, DEFAULT_IGNORE_STRING)
sensor_string = options.get(CONF_SENSOR_STRING, DEFAULT_SENSOR_STRING)
var_sensor_string = options.get(
CONF_VAR_SENSOR_STRING, DEFAULT_VAR_SENSOR_STRING
)
options_schema = vol.Schema(
{
vol.Optional(CONF_IGNORE_STRING, default=ignore_string): str,
vol.Optional(CONF_SENSOR_STRING, default=sensor_string): str,
vol.Optional(CONF_VAR_SENSOR_STRING, default=var_sensor_string): str,
vol.Required(
CONF_RESTORE_LIGHT_STATE, default=restore_light_state
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=options_schema)
class InvalidHost(exceptions.HomeAssistantError):
"""Error to indicate the host value is invalid."""
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities that match patterns in a tf.Graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
class Pattern(object):
"""The parent class of all patterns (e.g. OpTypePattern and OneofPattern)."""
@abc.abstractmethod
def match(self, op, tensor):
"""Returns the result of matching op/tensor against this pattern."""
raise NotImplementedError('Method "match" not implemented.')
class OpTypePattern(Pattern):
"""A tree pattern that matches TF expressions with certain op types."""
def __init__(self, op_type, name=None, inputs=None, ordered_inputs=True):
"""Initializes an OpTypePattern.
Args:
op_type: string that specifies the allowed types of the root. It can be
(1) an op type, e.g. 'Conv2D',
(2) '*', i.e. wildcard, or
(3) multiple op types separated by '|', e.g., 'Relu|Relu6'.
We could use regex strings, which might be worthwhile when we have many
similar TF op types.
name: Optional string. The name of the pattern that can be looked up in
MatchResult.
inputs: Optional list of `Pattern`s or strings that specify the
patterns for the inputs of a matching op. If None, this pattern accepts
any inputs of a matching op.
ordered_inputs: Defaults to True. If False, will match any op that
matches a permutation of the inputs.
Raises:
ValueError: if too many inputs are provided when ordered_inputs is False.
"""
self._op_type = op_type
self._name = name
if inputs is None:
inputs = []
if len(inputs) > 8:
raise ValueError(
'At most 8 inputs are allowed when ordered_inputs is False.')
self._inputs = [
input_pattern
if isinstance(input_pattern, Pattern) else OpTypePattern(input_pattern)
for input_pattern in inputs
]
self._ordered_inputs = ordered_inputs
@property
def name(self):
return self._name
def match(self, op, tensor):
if self._op_type != '*':
if op.type not in self._op_type.split('|'):
return None
match_result = MatchResult()
match_result.add(self, op, tensor)
if not self._inputs:
# If pattern.inputs is empty, skips the rest and accepts all the inputs.
return match_result
if len(op.inputs) != len(self._inputs):
return None
input_patterns_list = [self._inputs]
# If order doesn't matter for the inputs, then make sure we match at least
# one permutation of the inputs.
if not self._ordered_inputs:
input_patterns_list = list(itertools.permutations(self._inputs))
for input_patterns in input_patterns_list:
match_failed = False
for input_tensor, input_pattern in zip(op.inputs, input_patterns):
input_match_result = input_pattern.match(input_tensor.op, input_tensor)
if input_match_result is None:
match_failed = True
break
match_result.merge_from(input_match_result)
if not match_failed:
return match_result
return None
class OneofPattern(Pattern):
"""Matches one of the given sub-patterns."""
def __init__(self, sub_patterns):
self._sub_patterns = sub_patterns
def match(self, op, tensor):
for sub_pattern in self._sub_patterns:
match_result = sub_pattern.match(op, tensor)
if match_result is not None:
return match_result
return None
class MatchResult(object):
r"""Encapsulates the result of a match done by GraphMatcher.
MatchResult contains a map from Pattern to the matching op and tensor.
When the matching op has multiple output tensors, the matching tensor is the
output tensor used by the matching op of the parent pattern. E.g., when we
match graph
    -         +
   / \y0   y1/ \
  x    split    z
         |
         y (nodes are ops; edges are going up)
against add_pattern defined as
  y1_pattern = OpTypePattern('*')
  z_pattern = OpTypePattern('*')
  add_pattern = OpTypePattern('+', inputs=[y1_pattern, z_pattern])
the matching op of `y1_pattern` is `split`, and the matching tensor of
`y1_pattern` is `y1` not `y0`.
"""
def __init__(self):
self._pattern_to_op_tensor = {}
self._name_to_pattern = {}
def add(self, pattern, op, tensor):
self._pattern_to_op_tensor[pattern] = op, tensor
if pattern.name is not None:
if pattern.name in self._name_to_pattern:
raise ValueError(
'Name %s is already bound to another pattern' % pattern.name)
self._name_to_pattern[pattern.name] = pattern
def _to_pattern(self, pattern_or_name):
if isinstance(pattern_or_name, Pattern):
return pattern_or_name
if isinstance(pattern_or_name, str):
if pattern_or_name not in self._name_to_pattern:
return None
return self._name_to_pattern[pattern_or_name]
raise ValueError('pattern_or_name has type %s. Expect Pattern or str.' %
type(pattern_or_name))
def _get_op_tensor(self, pattern_or_name):
pattern = self._to_pattern(pattern_or_name)
if pattern is None:
return None
if pattern not in self._pattern_to_op_tensor:
return None
return self._pattern_to_op_tensor[pattern]
def get_op(self, pattern_or_name):
op_tensor = self._get_op_tensor(pattern_or_name)
return op_tensor[0] if op_tensor else None
def get_tensor(self, pattern_or_name):
op_tensor = self._get_op_tensor(pattern_or_name)
return op_tensor[1] if op_tensor else None
def merge_from(self, other_match_result):
# pylint: disable=protected-access
self._pattern_to_op_tensor.update(other_match_result._pattern_to_op_tensor)
self._name_to_pattern.update(other_match_result._name_to_pattern)
# pylint: enable=protected-access
class GraphMatcher(object):
"""Checks if a particular subgraph matches a given pattern."""
def __init__(self, pattern):
"""Initializes a GraphMatcher.
Args:
pattern: The `Pattern` against which `GraphMatcher` matches
subgraphs.
"""
self._pattern = pattern
def _match_pattern(self, pattern, op, tensor):
"""Returns whether an TF expression rooted at `op` matches `pattern`.
If there is a match, adds to `self._match_result` the matching op and tensor
with key `pattern`.
Args:
pattern: A `Pattern`.
op: A `tf.Operation` to match against the pattern.
tensor: the output `tf.Tensor` of `op` that is used by the matching op of
`pattern`'s parent. Can be None if `pattern` is already the root of the
pattern tree.
Returns:
True if a TF expression rooted at `op` matches `pattern`.
"""
match_result = pattern.match(op, tensor)
if match_result is None:
return False
self._match_result.merge_from(match_result)
return True
def match_op(self, op):
"""Matches `op` against `self._pattern`.
Args:
op: `tf.Operation` to match against the pattern.
Returns:
Returns a `MatchResult` if `op` matches the pattern; otherwise, returns
None.
"""
self._match_result = MatchResult()
if not self._match_pattern(self._pattern, op, tensor=None):
return None
return self._match_result
def match_ops(self, ops):
"""Matches each operation in `ops` against `self._pattern`.
Args:
ops: collection of `tf.Operation` to match against the pattern.
Yields:
`MatchResult` for each `tf.Operation` that matches the pattern.
"""
for op in ops:
match_result = self.match_op(op)
if match_result:
yield match_result
def match_graph(self, graph):
"""Matches each operation in `graph` against `self._pattern`.
Args:
graph: `tf.Graph` containing operations to match.
Yields:
`MatchResult` for each `tf.Operation` in `graph` that matches the pattern.
"""
# Python 3.3.2+ implements `yield from`, but for now:
for match_result in self.match_ops(graph.get_operations()):
yield match_result
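# A minimal usage sketch (assumptions: tensorflow imported as tf, TF 1.x, and a
# default graph containing a Conv2D feeding a Relu/Relu6; pattern names are
# illustrative, not part of this module):
#
#   conv_pattern = OpTypePattern('Conv2D', name='conv', inputs=['*', '*'])
#   act_pattern = OpTypePattern('Relu|Relu6', name='act', inputs=[conv_pattern])
#   matcher = GraphMatcher(act_pattern)
#   for match in matcher.match_graph(tf.get_default_graph()):
#     act_op = match.get_op('act')        # the matched Relu/Relu6 operation
#     conv_out = match.get_tensor('conv') # Conv2D output consumed by the Relu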
|
|
"""Default variable filters."""
from __future__ import unicode_literals
import re
import random as random_module
import unicodedata
from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP
from functools import wraps
from pprint import pformat
from django.template.base import Variable, Library, VariableDoesNotExist
from django.conf import settings
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (conditional_escape, escapejs, fix_ampersands,
escape, urlize as urlize_impl, linebreaks, strip_tags)
from django.utils.http import urlquote
from django.utils.text import Truncator, wrap, phone2numeric
from django.utils.safestring import mark_safe, SafeData, mark_for_escaping
from django.utils import six
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from django.utils.text import normalize_newlines
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
import warnings
warnings.warn("Setting the %s attribute of a template filter "
"function is deprecated; use @register.filter(%s=%s) "
"instead" % (attr, attr, getattr(func, attr)),
DeprecationWarning)
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
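# Illustrative use of @stringfilter (reverse_chars is a hypothetical filter,
# not part of this module): the decorator coerces the first argument to text
# before the filter body runs.
#
#   @register.filter(is_safe=True)
#   @stringfilter
#   def reverse_chars(value):
#       return value[::-1]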
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
@register.filter("fix_ampersands", is_safe=True)
@stringfilter
def fix_ampersands_filter(value):
"""Replaces ampersands with ``&`` entities."""
return fix_ampersands(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_text(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
from django.utils.text import slugify
return slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting.
"""
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
return mark_safe(urlize_impl(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(urlize_impl(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
Marks the value as a string that should be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
from django.utils.html import remove_tags
return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return ''
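# Illustrative template usage (``people`` is a hypothetical context variable, a
# list of dicts that each contain a "name" key):
#
#   {{ people|dictsort:"name" }}
#
# The argument is resolved with Variable(), so dotted lookups such as
# "address.city" also work.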
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=True)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
And it is converted to:
['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = '\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_text(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
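# Worked example (values are illustrative): {{ 12345|get_digit:"2" }} renders
# "4" -- the second digit counted from the right -- while an out-of-range
# argument such as 9 renders "0".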
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if not value:
return ''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError,ValueError,UnicodeDecodeError):
return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
KB = 1<<10
MB = 1<<20
GB = 1<<30
TB = 1<<40
PB = 1<<50
if bytes < KB:
return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
if bytes < MB:
return ugettext("%s KB") % filesize_number_format(bytes / KB)
if bytes < GB:
return ugettext("%s MB") % filesize_number_format(bytes / MB)
if bytes < TB:
return ugettext("%s GB") % filesize_number_format(bytes / GB)
if bytes < PB:
return ugettext("%s TB") % filesize_number_format(bytes / TB)
return ugettext("%s PB") % filesize_number_format(bytes / PB)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s" % force_text(e, errors="replace")
|
|
import copy
import numpy as np
from numpy.testing import (
assert_,
assert_equal,
assert_allclose,
assert_array_equal
)
import pytest
from pytest import raises, warns
from scipy.signal._peak_finding import (
argrelmax,
argrelmin,
peak_prominences,
peak_widths,
_unpack_condition_args,
find_peaks,
find_peaks_cwt,
_identify_ridge_lines
)
from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
def _gen_gaussians(center_locs, sigmas, total_length):
xdata = np.arange(0, total_length).astype(float)
out_data = np.zeros(total_length, dtype=float)
for ind, sigma in enumerate(sigmas):
tmp = (xdata - center_locs[ind]) / sigma
out_data += np.exp(-(tmp**2))
return out_data
def _gen_gaussians_even(sigmas, total_length):
num_peaks = len(sigmas)
delta = total_length / (num_peaks + 1)
center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
out_data = _gen_gaussians(center_locs, sigmas, total_length)
return out_data, center_locs
def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
"""
Generate coordinates for a ridge line.
Will be a series of coordinates, starting at start_loc (length 2).
The maximum distance between any adjacent columns will be
`max_distance`, the max distance between adjacent rows
will be `max_gap`.
`max_locs` should be the size of the intended matrix. The
ending coordinates are guaranteed to be less than `max_locs`,
although they may not approach `max_locs` at all.
"""
def keep_bounds(num, max_val):
out = max(num, 0)
out = min(out, max_val)
return out
gaps = copy.deepcopy(gaps)
distances = copy.deepcopy(distances)
locs = np.zeros([length, 2], dtype=int)
locs[0, :] = start_locs
total_length = max_locs[0] - start_locs[0] - sum(gaps)
if total_length < length:
raise ValueError('Cannot generate ridge line according to constraints')
dist_int = length / len(distances) - 1
gap_int = length / len(gaps) - 1
for ind in range(1, length):
nextcol = locs[ind - 1, 1]
nextrow = locs[ind - 1, 0] + 1
if (ind % dist_int == 0) and (len(distances) > 0):
nextcol += ((-1)**ind)*distances.pop()
if (ind % gap_int == 0) and (len(gaps) > 0):
nextrow += gaps.pop()
nextrow = keep_bounds(nextrow, max_locs[0])
nextcol = keep_bounds(nextcol, max_locs[1])
locs[ind, :] = [nextrow, nextcol]
return [locs[:, 0], locs[:, 1]]
class TestLocalMaxima1d(object):
def test_empty(self):
"""Test with empty signal."""
x = np.array([], dtype=np.float64)
for array in _local_maxima_1d(x):
assert_equal(array, np.array([]))
assert_(array.base is None)
def test_linear(self):
"""Test with linear signal."""
x = np.linspace(0, 100)
for array in _local_maxima_1d(x):
assert_equal(array, np.array([]))
assert_(array.base is None)
def test_simple(self):
"""Test with simple signal."""
x = np.linspace(-10, 10, 50)
x[2::3] += 1
expected = np.arange(2, 50, 3)
for array in _local_maxima_1d(x):
# For plateaus of size 1, the edges are identical with the
# midpoints
assert_equal(array, expected)
assert_(array.base is None)
def test_flat_maxima(self):
"""Test if flat maxima are detected correctly."""
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
-5, -5, -5, -5, -5, -10])
midpoints, left_edges, right_edges = _local_maxima_1d(x)
assert_equal(midpoints, np.array([2, 4, 8, 12, 18]))
assert_equal(left_edges, np.array([2, 4, 7, 11, 16]))
assert_equal(right_edges, np.array([2, 5, 9, 14, 20]))
@pytest.mark.parametrize('x', [
np.array([1., 0, 2]),
np.array([3., 3, 0, 4, 4]),
np.array([5., 5, 5, 0, 6, 6, 6]),
])
def test_signal_edges(self, x):
"""Test if behavior on signal edges is correct."""
for array in _local_maxima_1d(x):
assert_equal(array, np.array([]))
assert_(array.base is None)
def test_exceptions(self):
"""Test input validation and raised exceptions."""
with raises(ValueError, match="wrong number of dimensions"):
_local_maxima_1d(np.ones((1, 1)))
with raises(ValueError, match="expected 'float64_t'"):
_local_maxima_1d(np.ones(1, dtype=int))
with raises(TypeError, match="list"):
_local_maxima_1d([1., 2.])
with raises(TypeError, match="'x' must not be None"):
_local_maxima_1d(None)
class TestRidgeLines(object):
def test_empty(self):
test_matr = np.zeros([20, 100])
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
assert_(len(lines) == 0)
def test_minimal(self):
test_matr = np.zeros([20, 100])
test_matr[0, 10] = 1
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
assert_(len(lines) == 1)
test_matr = np.zeros([20, 100])
test_matr[0:2, 10] = 1
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
assert_(len(lines) == 1)
def test_single_pass(self):
distances = [0, 1, 2, 5]
gaps = [0, 1, 2, 0, 1]
test_matr = np.zeros([20, 50]) + 1e-12
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_distances = np.full(20, max(distances))
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
assert_array_equal(identified_lines, [line])
def test_single_bigdist(self):
distances = [0, 1, 2, 5]
gaps = [0, 1, 2, 4]
test_matr = np.zeros([20, 50])
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 3
max_distances = np.full(20, max_dist)
#This should get 2 lines, since the distance is too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
assert_(len(identified_lines) == 2)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
def test_single_biggap(self):
distances = [0, 1, 2, 5]
max_gap = 3
gaps = [0, 4, 2, 1]
test_matr = np.zeros([20, 50])
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 6
max_distances = np.full(20, max_dist)
#This should get 2 lines, since the gap is too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
assert_(len(identified_lines) == 2)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
def test_single_biggaps(self):
distances = [0]
max_gap = 1
gaps = [3, 6]
test_matr = np.zeros([50, 50])
length = 30
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 1
max_distances = np.full(50, max_dist)
#This should get 3 lines, since the gaps are too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
assert_(len(identified_lines) == 3)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
class TestArgrel(object):
def test_empty(self):
# Regression test for gh-2832.
# When there are no relative extrema, make sure that
# the number of empty arrays returned matches the
# dimension of the input.
empty_array = np.array([], dtype=int)
z1 = np.zeros(5)
i = argrelmin(z1)
assert_equal(len(i), 1)
assert_array_equal(i[0], empty_array)
z2 = np.zeros((3,5))
row, col = argrelmin(z2, axis=0)
assert_array_equal(row, empty_array)
assert_array_equal(col, empty_array)
row, col = argrelmin(z2, axis=1)
assert_array_equal(row, empty_array)
assert_array_equal(col, empty_array)
def test_basic(self):
# Note: the docstrings for the argrel{min,max,extrema} functions
# do not give a guarantee of the order of the indices, so we'll
# sort them before testing.
x = np.array([[1, 2, 2, 3, 2],
[2, 1, 2, 2, 3],
[3, 2, 1, 2, 2],
[2, 3, 2, 1, 2],
[1, 2, 3, 2, 1]])
row, col = argrelmax(x, axis=0)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [4, 0, 1])
row, col = argrelmax(x, axis=1)
order = np.argsort(row)
assert_equal(row[order], [0, 3, 4])
assert_equal(col[order], [3, 1, 2])
row, col = argrelmin(x, axis=0)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [1, 2, 3])
row, col = argrelmin(x, axis=1)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [1, 2, 3])
def test_highorder(self):
order = 2
sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
test_data, act_locs = _gen_gaussians_even(sigmas, 500)
test_data[act_locs + order] = test_data[act_locs]*0.99999
test_data[act_locs - order] = test_data[act_locs]*0.99999
rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
assert_(len(rel_max_locs) == len(act_locs))
assert_((rel_max_locs == act_locs).all())
def test_2d_gaussians(self):
sigmas = [1.0, 2.0, 10.0]
test_data, act_locs = _gen_gaussians_even(sigmas, 100)
rot_factor = 20
rot_range = np.arange(0, len(test_data)) - rot_factor
test_data_2 = np.vstack([test_data, test_data[rot_range]])
rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
for rw in range(0, test_data_2.shape[0]):
inds = (rel_max_rows == rw)
assert_(len(rel_max_cols[inds]) == len(act_locs))
assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all())
class TestPeakProminences(object):
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
out = peak_prominences([1, 2, 3], [])
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
assert_(arr.size == 0)
assert_(arr.dtype == dtype)
out = peak_prominences([], [])
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
assert_(arr.size == 0)
assert_(arr.dtype == dtype)
def test_basic(self):
"""
Test if height of prominences is correctly calculated in signal with
rising baseline (peak widths are 1 sample).
"""
# Prepare basic signal
x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
peaks = np.array([1, 2, 4, 6])
lbases = np.array([0, 0, 0, 5])
rbases = np.array([3, 3, 5, 7])
proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
# Test if calculation matches handcrafted result
out = peak_prominences(x, peaks)
assert_equal(out[0], proms)
assert_equal(out[1], lbases)
assert_equal(out[2], rbases)
def test_edge_cases(self):
"""
Test edge cases.
"""
# Peaks have same height, prominence and bases
x = [0, 2, 1, 2, 1, 2, 0]
peaks = [1, 3, 5]
proms, lbases, rbases = peak_prominences(x, peaks)
assert_equal(proms, [2, 2, 2])
assert_equal(lbases, [0, 0, 0])
assert_equal(rbases, [6, 6, 6])
# Peaks have same height & prominence but different bases
x = [0, 1, 0, 1, 0, 1, 0]
peaks = np.array([1, 3, 5])
proms, lbases, rbases = peak_prominences(x, peaks)
assert_equal(proms, [1, 1, 1])
assert_equal(lbases, peaks - 1)
assert_equal(rbases, peaks + 1)
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
peaks = np.repeat([1, 2, 4], 2)
proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
assert_equal(proms, [9, 9, 2])
assert_equal(lbases, [0, 0, 3])
assert_equal(rbases, [3, 3, 5])
def test_wlen(self):
"""
Test if wlen actually shrinks the evaluation range correctly.
"""
x = [0, 1, 2, 3, 1, 0, -1]
peak = [3]
# Test rounding behavior of wlen
assert_equal(peak_prominences(x, peak), [3., 0, 6])
for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i])
def test_exceptions(self):
"""
Verify that exceptions and warnings are raised.
"""
# x with dimension > 1
with raises(ValueError, match='1-D array'):
peak_prominences([[0, 1, 1, 0]], [1, 2])
# peaks with dimension > 1
with raises(ValueError, match='1-D array'):
peak_prominences([0, 1, 1, 0], [[1, 2]])
# x with dimension < 1
with raises(ValueError, match='1-D array'):
peak_prominences(3, [0,])
# empty x with supplied
with raises(ValueError, match='not a valid index'):
peak_prominences([], [0])
# invalid indices with non-empty x
for p in [-100, -1, 3, 1000]:
with raises(ValueError, match='not a valid index'):
peak_prominences([1, 0, 2], [p])
# peaks is not cast-able to np.intp
with raises(TypeError, match='cannot safely cast'):
peak_prominences([0, 1, 1, 0], [1.1, 2.3])
# wlen < 3
with raises(ValueError, match='wlen'):
peak_prominences(np.arange(10), [3, 5], wlen=1)
def test_warnings(self):
"""
Verify that appropriate warnings are raised.
"""
msg = "some peaks have a prominence of 0"
for p in [0, 1, 2]:
with warns(PeakPropertyWarning, match=msg):
peak_prominences([1, 0, 2], [p,])
with warns(PeakPropertyWarning, match=msg):
peak_prominences([0, 1, 1, 1, 0], [2], wlen=2)
class TestPeakWidths(object):
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
widths = peak_widths([], [])[0]
assert_(isinstance(widths, np.ndarray))
assert_equal(widths.size, 0)
widths = peak_widths([1, 2, 3], [])[0]
assert_(isinstance(widths, np.ndarray))
assert_equal(widths.size, 0)
out = peak_widths([], [])
for arr in out:
assert_(isinstance(arr, np.ndarray))
assert_equal(arr.size, 0)
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
def test_basic(self):
"""
Test a simple use case with easy to verify results at different relative
heights.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1])
prominence = 2
for rel_height, width_true, lip_true, rip_true in [
(0., 0., 3., 3.), # raises warning
(0.25, 1., 2.5, 3.5),
(0.5, 2., 2., 4.),
(0.75, 3., 1.5, 4.5),
(1., 4., 1., 5.),
(2., 5., 1., 6.),
(3., 5., 1., 6.)
]:
width_calc, height, lip_calc, rip_calc = peak_widths(
x, [3], rel_height)
assert_allclose(width_calc, width_true)
assert_allclose(height, 2 - rel_height * prominence)
assert_allclose(lip_calc, lip_true)
assert_allclose(rip_calc, rip_true)
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([0, 100, 50], 4)
peaks = np.repeat([1], 3)
result = peak_widths(x[::4], peaks[::3])
assert_equal(result, [0.75, 75, 0.75, 1.5])
def test_exceptions(self):
"""
Verify that argument validation works as intended.
"""
with raises(ValueError, match='1-D array'):
# x with dimension > 1
peak_widths(np.zeros((3, 4)), np.ones(3))
with raises(ValueError, match='1-D array'):
# x with dimension < 1
peak_widths(3, [0])
with raises(ValueError, match='1-D array'):
# peaks with dimension > 1
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
with raises(ValueError, match='1-D array'):
# peaks with dimension < 1
peak_widths(np.arange(10), 3)
with raises(ValueError, match='not a valid index'):
# peak pos exceeds x.size
peak_widths(np.arange(10), [8, 11])
with raises(ValueError, match='not a valid index'):
# empty x with peaks supplied
peak_widths([], [1, 2])
with raises(TypeError, match='cannot safely cast'):
# peak cannot be safely casted to intp
peak_widths(np.arange(10), [1.1, 2.3])
with raises(ValueError, match='rel_height'):
# rel_height is < 0
peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
with raises(TypeError, match='None'):
# prominence data contains None
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
def test_warnings(self):
"""
Verify that appropriate warnings are raised.
"""
msg = "some peaks have a width of 0"
with warns(PeakPropertyWarning, match=msg):
# Case: rel_height is 0
peak_widths([0, 1, 0], [1], rel_height=0)
with warns(PeakPropertyWarning, match=msg):
# Case: prominence is 0 and bases are identical
peak_widths(
[0, 1, 1, 1, 0], [2],
prominence_data=(np.array([0.], np.float64),
np.array([2], np.intp),
np.array([2], np.intp))
)
def test_mismatching_prominence_data(self):
"""Test with mismatching peak and / or prominence data."""
x = [0, 1, 0]
peak = [1]
for i, (prominences, left_bases, right_bases) in enumerate([
((1.,), (-1,), (2,)), # left base not in x
((1.,), (0,), (3,)), # right base not in x
((1.,), (2,), (0,)), # swapped bases same as peak
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
((1., 1.), (0,), (2,)), # arrays with different shapes
((1.,), (0, 0), (2,)), # arrays with different shapes
((1.,), (0,), (2, 2)) # arrays with different shapes
]):
# Make sure input matches the output of signal.peak_prominences
prominence_data = (np.array(prominences, dtype=np.float64),
np.array(left_bases, dtype=np.intp),
np.array(right_bases, dtype=np.intp))
# Test for correct exception
if i < 3:
match = "prominence data is invalid for peak"
else:
match = "arrays in `prominence_data` must have the same shape"
with raises(ValueError, match=match):
peak_widths(x, peak, prominence_data=prominence_data)
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
def test_intersection_rules(self):
"""Test if x == eval_height counts as an intersection."""
# Flat peak with two possible intersection points if evaluated at 1
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
# relative height is 0 -> width is 0 as well, raises warning
assert_allclose(peak_widths(x, peaks=[5], rel_height=0),
[(0.,), (3.,), (5.,), (5.,)])
# width_height == x counts as intersection -> nearest 1 is chosen
assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3),
[(4.,), (1.,), (3.,), (7.,)])
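# A minimal usage sketch (illustrative only) of the API exercised above: with
# the default rel_height of 0.5 the width is measured halfway down the peak's
# prominence, e.g.
#
#     widths, width_heights, left_ips, right_ips = peak_widths([0, 1, 0], [1])
#     # widths -> array([1.]), width_heights -> array([0.5]),
#     # left_ips -> array([0.5]), right_ips -> array([1.5])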
def test_unpack_condition_args():
"""
Verify parsing of condition arguments for the `scipy.signal.find_peaks` function.
"""
x = np.arange(10)
amin_true = x
amax_true = amin_true + 10
peaks = amin_true[1::2]
# Test unpacking with None or interval
assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
assert_((1, None) == _unpack_condition_args(1, x, peaks))
assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))
# Test if borders are correctly reduced with `peaks`
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
assert_equal(amin_calc, amin_true[peaks])
assert_equal(amax_calc, amax_true[peaks])
# Test raises if array borders don't match x
with raises(ValueError, match="array size of lower"):
_unpack_condition_args(amin_true, np.arange(11), peaks)
with raises(ValueError, match="array size of upper"):
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
class TestFindPeaks(object):
# Keys of optionally returned properties
property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
'prominences', 'left_bases', 'right_bases', 'widths',
'width_heights', 'left_ips', 'right_ips'}
def test_constant(self):
"""
Test behavior for signal without local maxima.
"""
open_interval = (None, None)
peaks, props = find_peaks(np.ones(10),
height=open_interval, threshold=open_interval,
prominence=open_interval, width=open_interval)
assert_(peaks.size == 0)
for key in self.property_keys:
assert_(props[key].size == 0)
def test_plateau_size(self):
"""
Test plateau size condition for peaks.
"""
# Prepare signal with peaks with peak_height == plateau_size
plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111])
x = np.zeros(plateau_sizes.size * 2 + 1)
x[1::2] = plateau_sizes
repeats = np.ones(x.size, dtype=int)
repeats[1::2] = x[1::2]
x = np.repeat(x, repeats)
# Test full output
peaks, props = find_peaks(x, plateau_size=(None, None))
assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100])
assert_equal(props["plateau_sizes"], plateau_sizes)
assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2)
assert_equal(props["right_edges"], peaks + plateau_sizes // 2)
# Test conditions
assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100])
assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7])
assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33])
def test_height_condition(self):
"""
Test height condition for peaks.
"""
x = (0., 1/3, 0., 2.5, 0, 4., 0)
peaks, props = find_peaks(x, height=(None, None))
assert_equal(peaks, np.array([1, 3, 5]))
assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]))
assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]))
assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]))
assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]))
def test_threshold_condition(self):
"""
Test threshold condition for peaks.
"""
x = (0, 2, 1, 4, -1)
peaks, props = find_peaks(x, threshold=(None, None))
assert_equal(peaks, np.array([1, 3]))
assert_equal(props['left_thresholds'], np.array([2, 3]))
assert_equal(props['right_thresholds'], np.array([1, 5]))
assert_equal(find_peaks(x, threshold=2)[0], np.array([3]))
assert_equal(find_peaks(x, threshold=3.5)[0], np.array([]))
assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]))
assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]))
assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([]))
def test_distance_condition(self):
"""
Test distance condition for peaks.
"""
# Peaks of different height with constant distance 3
peaks_all = np.arange(1, 21, 3)
x = np.zeros(21)
x[peaks_all] += np.linspace(1, 2, peaks_all.size)
# Test if peaks with "minimal" distance are still selected (distance = 3)
assert_equal(find_peaks(x, distance=3)[0], peaks_all)
# Select every second peak (distance > 3)
peaks_subset = find_peaks(x, distance=3.0001)[0]
# Test if peaks_subset is subset of peaks_all
assert_(
np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
)
# Test if every second peak was removed
assert_equal(np.diff(peaks_subset), 6)
# Test priority of peak removal
x = [-2, 1, -1, 0, -3]
peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size
assert_(peaks_subset.size == 1 and peaks_subset[0] == 1)
def test_prominence_condition(self):
"""
Test prominence condition for peaks.
"""
x = np.linspace(0, 10, 100)
peaks_true = np.arange(1, 99, 2)
offset = np.linspace(1, 10, peaks_true.size)
x[peaks_true] += offset
prominences = x[peaks_true] - x[peaks_true + 1]
interval = (3, 9)
keep = np.nonzero(
(interval[0] <= prominences) & (prominences <= interval[1]))
peaks_calc, properties = find_peaks(x, prominence=interval)
assert_equal(peaks_calc, peaks_true[keep])
assert_equal(properties['prominences'], prominences[keep])
assert_equal(properties['left_bases'], 0)
assert_equal(properties['right_bases'], peaks_true[keep] + 1)
def test_width_condition(self):
"""
Test width condition for peaks.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
assert_equal(peaks.size, 1)
assert_equal(peaks, 7)
assert_allclose(props['widths'], 1.35)
assert_allclose(props['width_heights'], 1.)
assert_allclose(props['left_ips'], 6.4)
assert_allclose(props['right_ips'], 7.75)
def test_properties(self):
"""
Test returned properties.
"""
open_interval = (None, None)
x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
peaks, props = find_peaks(x,
height=open_interval, threshold=open_interval,
prominence=open_interval, width=open_interval)
assert_(len(props) == len(self.property_keys))
for key in self.property_keys:
assert_(peaks.size == props[key].size)
def test_raises(self):
"""
Test exceptions raised by function.
"""
with raises(ValueError, match="1-D array"):
find_peaks(np.array(1))
with raises(ValueError, match="1-D array"):
find_peaks(np.ones((2, 2)))
with raises(ValueError, match="distance"):
find_peaks(np.arange(10), distance=-1)
@pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0",
"ignore:some peaks have a width of 0")
def test_wlen_smaller_plateau(self):
"""
Test behavior of prominence and width calculation if the given window
length is smaller than a peak's plateau size.
Regression test for gh-9110.
"""
peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
width=(None, None), wlen=2)
assert_equal(peaks, 2)
assert_equal(props["prominences"], 0)
assert_equal(props["widths"], 0)
assert_equal(props["width_heights"], 1)
for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
assert_equal(props[key], peaks)
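# A minimal usage sketch (illustrative only) of the interface tested above:
#
#     x = np.array([0, 1, 0, 2, 0, 3, 0])
#     peaks, props = find_peaks(x, height=1.5)
#     # peaks -> array([3, 5]), props['peak_heights'] -> array([2., 3.])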
class TestFindPeaksCwt(object):
def test_find_peaks_exact(self):
"""
Generate a series of gaussians and attempt to find the peak locations.
"""
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
num_points = 500
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas))
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
min_length=None)
np.testing.assert_array_equal(found_locs, act_locs,
"Found maximum locations did not equal those expected")
def test_find_peaks_withnoise(self):
"""
Verify that peak locations are (approximately) found
for a series of gaussians with added noise.
"""
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
num_points = 500
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas))
noise_amp = 0.07
np.random.seed(18181911)
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
found_locs = find_peaks_cwt(test_data, widths, min_length=15,
gap_thresh=1, min_snr=noise_amp / 5)
np.testing.assert_equal(len(found_locs), len(act_locs),
'Different number of peaks found than expected')
diffs = np.abs(found_locs - act_locs)
max_diffs = np.array(sigmas) / 5
np.testing.assert_array_less(diffs, max_diffs,
'Maximum location differed by more than %s' % (max_diffs,))
def test_find_peaks_nopeak(self):
"""
Verify that no peak is found in
data that's just noise.
"""
noise_amp = 1.0
num_points = 100
np.random.seed(181819141)
test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)
widths = np.arange(10, 50)
found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
np.testing.assert_equal(len(found_locs), 0)
def test_find_peaks_window_size(self):
"""
Verify that window_size is passed correctly to private function and
affects the result.
"""
sigmas = [2.0, 2.0]
num_points = 1000
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas), 0.2)
noise_amp = 0.05
np.random.seed(18181911)
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
# Possibly contrived negative region to throw off peak finding
# when window_size is too large
test_data[250:320] -= 1
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
min_length=None, window_size=None)
with pytest.raises(AssertionError):
assert found_locs.size == act_locs.size
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
min_length=None, window_size=20)
assert found_locs.size == act_locs.size
|
|
from __future__ import absolute_import, unicode_literals
import warnings
from collections import OrderedDict
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.http import Http404
from rest_framework import status
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from wagtail.api import APIField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.utils import resolve_model_string
from wagtail.wagtaildocs.models import get_document_model
from wagtail.wagtailimages import get_image_model
from .filters import ChildOfFilter, DescendantOfFilter, FieldsFilter, OrderingFilter, SearchFilter
from .pagination import WagtailPagination
from .serializers import (
BaseSerializer, DocumentSerializer, ImageSerializer, PageSerializer, get_serializer_class)
from .utils import BadRequestError
class BaseAPIEndpoint(GenericViewSet):
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
pagination_class = WagtailPagination
base_serializer_class = BaseSerializer
filter_backends = []
model = None # Set on subclass
known_query_parameters = frozenset([
'limit',
'offset',
'fields',
'order',
'search',
# Used by jQuery for cache-busting. See #1671
'_',
# Required by BrowsableAPIRenderer
'format',
])
extra_api_fields = []
name = None # Set on subclass.
def get_queryset(self):
return self.model.objects.all().order_by('id')
def listing_view(self, request):
queryset = self.get_queryset()
self.check_query_parameters(queryset)
queryset = self.filter_queryset(queryset)
queryset = self.paginate_queryset(queryset)
serializer = self.get_serializer(queryset, many=True)
return self.get_paginated_response(serializer.data)
def detail_view(self, request, pk):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
def handle_exception(self, exc):
if isinstance(exc, Http404):
data = {'message': str(exc)}
return Response(data, status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, BadRequestError):
data = {'message': str(exc)}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return super(BaseAPIEndpoint, self).handle_exception(exc)
def get_api_fields(self, model):
"""
This returns a list of field names that are allowed to
be used in the API (excluding the id field).
"""
api_fields = self.extra_api_fields[:]
if hasattr(model, 'api_fields'):
api_fields.extend(model.api_fields)
# Remove any new-style API field configs (only supported in v2)
def convert_api_fields(fields):
for field in fields:
if isinstance(field, APIField):
warnings.warn(
"class-based api_fields are not supported by the v1 API module. "
"Please update the .api_fields attribute of {}.{} or update to the "
"v2 API.".format(model._meta.app_label, model.__name__)
)
# Ignore fields with custom serializers
if field.serializer is None:
yield field.name
else:
yield field
return list(convert_api_fields(api_fields))
def check_query_parameters(self, queryset):
"""
Ensure that only valid query parameters are included in the URL.
"""
query_parameters = set(self.request.GET.keys())
# All query parameters must be either a field or an operation
allowed_parameters = set(self.get_api_fields(queryset.model))
allowed_parameters = allowed_parameters.union(self.known_query_parameters)
allowed_parameters.add('id')
unknown_parameters = query_parameters - allowed_parameters
if unknown_parameters:
raise BadRequestError(
"query parameter is not an operation or a recognised field: %s"
% ', '.join(sorted(unknown_parameters))
)
def get_serializer_class(self):
request = self.request
# Get model
if self.action == 'listing_view':
model = self.get_queryset().model
else:
model = type(self.get_object())
# Get all available fields
all_fields = self.get_api_fields(model)
# Removes any duplicates in case the developer put "title" in api_fields
all_fields = list(OrderedDict.fromkeys(all_fields))
if self.action == 'listing_view':
# Listing views just show the title field and any other allowed field the user specified
if 'fields' in request.GET:
fields = set(request.GET['fields'].split(','))
else:
fields = {'title'}
unknown_fields = fields - set(all_fields)
if unknown_fields:
raise BadRequestError("unknown fields: %s" % ', '.join(sorted(unknown_fields)))
# Reorder fields so it matches the order of all_fields
fields = [field for field in all_fields if field in fields]
else:
# Detail views show all fields all the time
fields = all_fields
# Always show id and meta first
fields = ['id', 'meta'] + fields
# If showing details, add the parent field
if isinstance(self, PagesAPIEndpoint) and self.get_serializer_context().get('show_details', False):
fields.insert(2, 'parent')
return get_serializer_class(model, fields, base=self.base_serializer_class)
def get_serializer_context(self):
"""
The serialization context differs between listing and detail views.
"""
context = {
'request': self.request,
'view': self,
'router': self.request.wagtailapi_router
}
if self.action == 'detail_view':
context['show_details'] = True
return context
def get_renderer_context(self):
context = super(BaseAPIEndpoint, self).get_renderer_context()
context['indent'] = 4
return context
@classmethod
def get_urlpatterns(cls):
"""
This returns a list of URL patterns for the endpoint
"""
return [
url(r'^$', cls.as_view({'get': 'listing_view'}), name='listing'),
url(r'^(?P<pk>\d+)/$', cls.as_view({'get': 'detail_view'}), name='detail'),
]
@classmethod
def get_object_detail_urlpath(cls, model, pk, namespace=''):
if namespace:
url_name = namespace + ':detail'
else:
url_name = 'detail'
return reverse(url_name, args=(pk, ))
class PagesAPIEndpoint(BaseAPIEndpoint):
base_serializer_class = PageSerializer
filter_backends = [
FieldsFilter,
ChildOfFilter,
DescendantOfFilter,
OrderingFilter,
SearchFilter
]
known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([
'type',
'child_of',
'descendant_of',
])
extra_api_fields = ['title']
name = 'pages'
model = Page
def get_queryset(self):
request = self.request
# Allow pages to be filtered to a specific type
if 'type' not in request.GET:
model = Page
else:
model_name = request.GET['type']
try:
model = resolve_model_string(model_name)
except LookupError:
raise BadRequestError("type doesn't exist")
if not issubclass(model, Page):
raise BadRequestError("type doesn't exist")
# Get live pages that are not in a private section
queryset = model.objects.public().live()
# Filter by site
queryset = queryset.descendant_of(request.site.root_page, inclusive=True)
return queryset
def get_object(self):
base = super(PagesAPIEndpoint, self).get_object()
return base.specific
class ImagesAPIEndpoint(BaseAPIEndpoint):
base_serializer_class = ImageSerializer
filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]
extra_api_fields = ['title', 'tags', 'width', 'height']
name = 'images'
model = get_image_model()
class DocumentsAPIEndpoint(BaseAPIEndpoint):
base_serializer_class = DocumentSerializer
filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]
extra_api_fields = ['title', 'tags']
name = 'documents'
model = get_document_model()
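# A minimal sketch of how a project-specific endpoint could be declared,
# following the pattern of the classes above. ``EventPage`` is a hypothetical
# page model used purely for illustration.
#
#     class EventsAPIEndpoint(BaseAPIEndpoint):
#         base_serializer_class = BaseSerializer
#         filter_backends = [FieldsFilter, OrderingFilter]
#         extra_api_fields = ['title', 'date']
#         name = 'events'
#         model = EventPage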
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3beta1",
manifest={
"AudioEncoding",
"SpeechModelVariant",
"SsmlVoiceGender",
"OutputAudioEncoding",
"SpeechWordInfo",
"InputAudioConfig",
"VoiceSelectionParams",
"SynthesizeSpeechConfig",
"OutputAudioConfig",
},
)
class AudioEncoding(proto.Enum):
r"""Audio encoding of the audio content sent in the conversational query
request. Refer to the `Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics>`__
for more details.
"""
AUDIO_ENCODING_UNSPECIFIED = 0
AUDIO_ENCODING_LINEAR_16 = 1
AUDIO_ENCODING_FLAC = 2
AUDIO_ENCODING_MULAW = 3
AUDIO_ENCODING_AMR = 4
AUDIO_ENCODING_AMR_WB = 5
AUDIO_ENCODING_OGG_OPUS = 6
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7
class SpeechModelVariant(proto.Enum):
r"""Variant of the specified [Speech
model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to
use.
See the `Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/enhanced-models>`__
for which models have different variants. For example, the
"phone_call" model has both a standard and an enhanced variant. When
you use an enhanced model, you will generally receive higher quality
results than for a standard model.
"""
SPEECH_MODEL_VARIANT_UNSPECIFIED = 0
USE_BEST_AVAILABLE = 1
USE_STANDARD = 2
USE_ENHANCED = 3
class SsmlVoiceGender(proto.Enum):
r"""Gender of the voice as described in `SSML voice
element <https://www.w3.org/TR/speech-synthesis11/#edef_voice>`__.
"""
SSML_VOICE_GENDER_UNSPECIFIED = 0
SSML_VOICE_GENDER_MALE = 1
SSML_VOICE_GENDER_FEMALE = 2
SSML_VOICE_GENDER_NEUTRAL = 3
class OutputAudioEncoding(proto.Enum):
r"""Audio encoding of the output audio format in Text-To-Speech."""
OUTPUT_AUDIO_ENCODING_UNSPECIFIED = 0
OUTPUT_AUDIO_ENCODING_LINEAR_16 = 1
OUTPUT_AUDIO_ENCODING_MP3 = 2
OUTPUT_AUDIO_ENCODING_MP3_64_KBPS = 4
OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3
OUTPUT_AUDIO_ENCODING_MULAW = 5
class SpeechWordInfo(proto.Message):
r"""Information for a word recognized by the speech recognizer.
Attributes:
word (str):
The word this info is for.
start_offset (google.protobuf.duration_pb2.Duration):
Time offset relative to the beginning of the
audio that corresponds to the start of the
spoken word. This is an experimental feature and
the accuracy of the time offset can vary.
end_offset (google.protobuf.duration_pb2.Duration):
Time offset relative to the beginning of the
audio that corresponds to the end of the spoken
word. This is an experimental feature and the
accuracy of the time offset can vary.
confidence (float):
The Speech confidence between 0.0 and 1.0 for
this word. A higher number indicates an
estimated greater likelihood that the recognized
word is correct. The default of 0.0 is a
sentinel value indicating that confidence was
not set.
This field is not guaranteed to be fully stable
over time for the same audio input. Users should
also not rely on it to always be provided.
"""
word = proto.Field(proto.STRING, number=3,)
start_offset = proto.Field(proto.MESSAGE, number=1, message=duration_pb2.Duration,)
end_offset = proto.Field(proto.MESSAGE, number=2, message=duration_pb2.Duration,)
confidence = proto.Field(proto.FLOAT, number=4,)
class InputAudioConfig(proto.Message):
r"""Instructs the speech recognizer on how to process the audio
content.
Attributes:
audio_encoding (google.cloud.dialogflowcx_v3beta1.types.AudioEncoding):
Required. Audio encoding of the audio content
to process.
sample_rate_hertz (int):
Sample rate (in Hertz) of the audio content sent in the
query. Refer to `Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics>`__
for more details.
enable_word_info (bool):
Optional. If ``true``, Dialogflow returns
[SpeechWordInfo][google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo]
in
[StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
with information about the recognized speech words, e.g.
start and end time offsets. If false or unspecified, Speech
doesn't return any word-level information.
phrase_hints (Sequence[str]):
Optional. A list of strings containing words and phrases
that the speech recognizer should recognize with higher
likelihood.
See `the Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/basics#phrase-hints>`__
for more details.
model (str):
Optional. Which Speech model to select for the given
request. Select the model best suited to your domain to get
best results. If a model is not explicitly specified, then
we auto-select a model based on the parameters in the
InputAudioConfig. If enhanced speech model is enabled for
the agent and an enhanced version of the specified model for
the language does not exist, then the speech is recognized
using the standard version of the specified model. Refer to
`Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics#select-model>`__
for more details.
model_variant (google.cloud.dialogflowcx_v3beta1.types.SpeechModelVariant):
Optional. Which variant of the [Speech
model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model]
to use.
single_utterance (bool):
Optional. If ``false`` (default), recognition does not cease
until the client closes the stream. If ``true``, the
recognizer will detect a single spoken utterance in input
audio. Recognition ceases when it detects the audio's voice
has stopped or paused. In this case, once a detected intent
is received, the client should close the stream and start a
new request with a new stream as needed. Note: This setting
is relevant only for streaming methods.
"""
audio_encoding = proto.Field(proto.ENUM, number=1, enum="AudioEncoding",)
sample_rate_hertz = proto.Field(proto.INT32, number=2,)
enable_word_info = proto.Field(proto.BOOL, number=13,)
phrase_hints = proto.RepeatedField(proto.STRING, number=4,)
model = proto.Field(proto.STRING, number=7,)
model_variant = proto.Field(proto.ENUM, number=10, enum="SpeechModelVariant",)
single_utterance = proto.Field(proto.BOOL, number=8,)
class VoiceSelectionParams(proto.Message):
r"""Description of which voice to use for speech synthesis.
Attributes:
name (str):
Optional. The name of the voice. If not set, the service
will choose a voice based on the other parameters such as
language_code and
[ssml_gender][google.cloud.dialogflow.cx.v3beta1.VoiceSelectionParams.ssml_gender].
For the list of available voices, please refer to `Supported
voices and
languages <https://cloud.google.com/text-to-speech/docs/voices>`__.
ssml_gender (google.cloud.dialogflowcx_v3beta1.types.SsmlVoiceGender):
Optional. The preferred gender of the voice. If not set, the
service will choose a voice based on the other parameters
such as language_code and
[name][google.cloud.dialogflow.cx.v3beta1.VoiceSelectionParams.name].
Note that this is only a preference, not a requirement. If a
voice of the appropriate gender is not available, the
synthesizer should substitute a voice with a different
gender rather than failing the request.
"""
name = proto.Field(proto.STRING, number=1,)
ssml_gender = proto.Field(proto.ENUM, number=2, enum="SsmlVoiceGender",)
class SynthesizeSpeechConfig(proto.Message):
r"""Configuration of how speech should be synthesized.
Attributes:
speaking_rate (float):
Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0
is the normal native speed supported by the specific voice.
2.0 is twice as fast, and 0.5 is half as fast. If
unset(0.0), defaults to the native 1.0 speed. Any other
values < 0.25 or > 4.0 will return an error.
pitch (float):
Optional. Speaking pitch, in the range [-20.0, 20.0]. 20
means increase 20 semitones from the original pitch. -20
means decrease 20 semitones from the original pitch.
volume_gain_db (float):
Optional. Volume gain (in dB) of the normal native volume
supported by the specific voice, in the range [-96.0, 16.0].
If unset, or set to a value of 0.0 (dB), will play at normal
native signal amplitude. A value of -6.0 (dB) will play at
approximately half the amplitude of the normal native signal
amplitude. A value of +6.0 (dB) will play at approximately
twice the amplitude of the normal native signal amplitude.
We strongly recommend not to exceed +10 (dB) as there's
usually no effective increase in loudness for any value
greater than that.
effects_profile_id (Sequence[str]):
Optional. An identifier which selects 'audio
effects' profiles that are applied on (post
synthesized) text to speech. Effects are applied
on top of each other in the order they are
given.
voice (google.cloud.dialogflowcx_v3beta1.types.VoiceSelectionParams):
Optional. The desired voice of the
synthesized audio.
"""
speaking_rate = proto.Field(proto.DOUBLE, number=1,)
pitch = proto.Field(proto.DOUBLE, number=2,)
volume_gain_db = proto.Field(proto.DOUBLE, number=3,)
effects_profile_id = proto.RepeatedField(proto.STRING, number=5,)
voice = proto.Field(proto.MESSAGE, number=4, message="VoiceSelectionParams",)
class OutputAudioConfig(proto.Message):
r"""Instructs the speech synthesizer how to generate the output
audio content.
Attributes:
audio_encoding (google.cloud.dialogflowcx_v3beta1.types.OutputAudioEncoding):
Required. Audio encoding of the synthesized
audio content.
sample_rate_hertz (int):
Optional. The synthesis sample rate (in
hertz) for this audio. If not provided, then the
synthesizer will use the default sample rate
based on the audio encoding. If this is
different from the voice's natural sample rate,
then the synthesizer will honor this request by
converting to the desired sample rate (which
might result in worse audio quality).
synthesize_speech_config (google.cloud.dialogflowcx_v3beta1.types.SynthesizeSpeechConfig):
Optional. Configuration of how speech should
be synthesized.
"""
audio_encoding = proto.Field(proto.ENUM, number=1, enum="OutputAudioEncoding",)
sample_rate_hertz = proto.Field(proto.INT32, number=2,)
synthesize_speech_config = proto.Field(
proto.MESSAGE, number=3, message="SynthesizeSpeechConfig",
)
__all__ = tuple(sorted(__protobuf__.manifest))
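# A minimal usage sketch (illustrative only; the values are placeholders):
# the proto-plus messages defined above can be constructed with keyword
# arguments, and the enums are referenced as class attributes, e.g.
#
#     input_config = InputAudioConfig(
#         audio_encoding=AudioEncoding.AUDIO_ENCODING_LINEAR_16,
#         sample_rate_hertz=16000,
#         single_utterance=True,
#     )
#     output_config = OutputAudioConfig(
#         audio_encoding=OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_OGG_OPUS,
#     )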
|
|
## Module histogram.py
##
## Copyright (c) 2014 Antonio Valente <[email protected]>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import collections
import random
import threading
import abc
import time
import operator
import math
from . import statistics, exceptions, py3comp
DEFAULT_UNIFORM_RESERVOIR_SIZE = 1028
DEFAULT_TIME_WINDOW_SIZE = 60
DEFAULT_EXPONENTIAL_DECAY_FACTOR = 0.015
def search_greater(values, target):
"""
Return the first index for which target is greater or equal to the first
item of the tuple found in values
"""
first = 0
last = len(values)
while first < last:
middle = (first + last) // 2
if values[middle][0] < target:
first = middle + 1
else:
last = middle
return first
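# A minimal usage sketch (hypothetical data): values are (key, payload) tuples
# sorted by key, and the function returns the first index whose key is >= target.
#
#     search_greater([(1.0, 'a'), (2.0, 'b'), (4.0, 'c')], 2.5)  # -> 2
#     search_greater([(1.0, 'a'), (2.0, 'b'), (4.0, 'c')], 0.5)  # -> 0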
class ReservoirBase(object):
"""
Base class for reservoirs. Subclass and override _do_add, _get_values and
_same_parameters.
"""
__metaclass__ = abc.ABCMeta
def add(self, value):
"""
Add a value to the reservoir
The value will be cast to a float, so a TypeError or a ValueError
may be raised.
"""
if not isinstance(value, float):
value = float(value)
return self._do_add(value)
@property
def values(self):
"""
Return the stored values
"""
return self._get_values()
@property
def sorted_values(self):
"""
Sort and return the current sample values
"""
return sorted(self.values)
def same_kind(self, other):
"""
Return True if "other" is an object of the same type and it was
instantiated with the same parameters
"""
return type(self) is type(other) and self._same_parameters(other)
@abc.abstractmethod
def _do_add(self, value):
"""
Add the floating-point value to the reservoir. Override in subclasses
"""
@abc.abstractmethod
def _get_values(self):
"""
Get the current reservoir's content. Override in subclasses
"""
@abc.abstractmethod
def _same_parameters(self, other):
"""
Return True if this object has been instantiated with the same
parameters as "other".
Override in subclasses
"""
class UniformReservoir(ReservoirBase):
"""
A random sampling reservoir of floating-point values. Uses Vitter's
Algorithm R to produce a statistically representative sample
(http://www.cs.umd.edu/~samir/498/vitter.pdf)
"""
def __init__(self, size=DEFAULT_UNIFORM_RESERVOIR_SIZE):
self.size = size
self._values = [0] * size
self.count = 0
self.lock = threading.Lock()
def _do_add(self, value):
changed = False
with self.lock:
if self.count < self.size:
self._values[self.count] = value
changed = True
else:
# not randint() because it yields different values on Python 3,
# which would make this a nightmare to test.
k = int(random.uniform(0, self.count))
if k < self.size:
self._values[k] = value
changed = True
self.count += 1
return changed
def _get_values(self):
return self._values[:min(self.count, self.size)]
def _same_parameters(self, other):
return self.size == other.size
def __repr__(self):
return "{}({})".format(type(self).__name__, self.size)
class SlidingWindowReservoir(ReservoirBase):
"""
A simple sliding-window reservoir that keeps the last N values
"""
def __init__(self, size=DEFAULT_UNIFORM_RESERVOIR_SIZE):
self.size = size
self.deque = collections.deque(maxlen=self.size)
def _do_add(self, value):
# No need for explicit lock - deques should be thread-safe:
# http://docs.python.org/2/library/collections.html#collections.deque
self.deque.append(value)
def _get_values(self):
return list(self.deque)
def _same_parameters(self, other):
return self.size == other.size
def __repr__(self):
return "{}({})".format(type(self).__name__, self.size)
class SlidingTimeWindowReservoir(ReservoirBase):
"""
A time-sliced reservoir that keeps the values added in the last N seconds
"""
def __init__(self, window_size=DEFAULT_TIME_WINDOW_SIZE):
"""
Build a new sliding time-window reservoir
window_size is the time window size in seconds
"""
self.window_size = window_size
self.lock = threading.Lock()
self.key = operator.itemgetter(0)
self._values = []
def _do_add(self, value):
now = time.time()
with self.lock:
self.tick(now)
self._values.append((now, value))
def tick(self, now):
target = now - self.window_size
# the values are sorted by the first element (timestamp), so let's
# perform a dichotomic search
idx = search_greater(self._values, target)
# older values found, discard them
if idx:
self._values = self._values[idx:]
def _get_values(self):
now = time.time()
with self.lock:
self.tick(now)
return [y for x, y in self._values]
def _same_parameters(self, other):
return self.window_size == other.window_size
def __repr__(self):
return "{}({})".format(type(self).__name__, self.window_size)
class ExponentialDecayingReservoir(ReservoirBase):
"""
An exponential-weighted reservoir which exponentially decays older values
in order to give greater significance to newer ones.
See http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf
"""
# TODO: replace the sort()s with a proper data structure (btree/skiplist).
# However, since the list is kept sorted (and it should be very small),
# the sort() shouldn't dramatically slow down insertions, and it keeps
# the lookup at O(log n)
RESCALE_THRESHOLD = 3600
EPSILON = 1e-12
def __init__(self, size=DEFAULT_UNIFORM_RESERVOIR_SIZE,
alpha=DEFAULT_EXPONENTIAL_DECAY_FACTOR):
self.size = size
self.alpha = alpha
self.start_time = time.time()
self.lock = threading.Lock()
self.count = 0
self.next_scale_time = self.start_time + self.RESCALE_THRESHOLD
self.key = operator.itemgetter(0)
self._values = []
def _lookup(self, timestamp):
"""
Return the index of the value associated with "timestamp" if any, else
None. Since the timestamps are floating-point values, they are
considered equal if their absolute difference is smaller than
self.EPSILON
"""
idx = search_greater(self._values, timestamp)
if (idx < len(self._values)
and math.fabs(self._values[idx][0] - timestamp) < self.EPSILON):
return idx
return None
def _put(self, timestamp, value):
"""Replace the value associated with "timestamp" or add the new value"""
idx = self._lookup(timestamp)
if idx is not None:
self._values[idx] = (timestamp, value)
else:
self._values.append((timestamp, value))
def _do_add(self, value):
now = time.time()
self.rescale(now)
rnd = random.random()
weighted_time = self.weight(now - self.start_time) / rnd
changed = False
with self.lock:
if self.count < self.size:
self._put(weighted_time, value)
self._values.sort(key=self.key)
changed = True
else:
first = self._values[0][0]
if first < weighted_time:
idx = self._lookup(weighted_time)
if idx is None:
self._values[0] = (weighted_time, value)
self._values.sort(key=self.key)
changed = True
self.count += 1
return changed
def weight(self, t):
return math.exp(self.alpha * t)
def rescale(self, now):
with self.lock:
if now > self.next_scale_time:
original_values = self._values[:]
self._values = []
for i, (k, v) in enumerate(original_values):
k *= math.exp(-self.alpha * (now - self.start_time))
self._put(k, v)
self.count = len(self._values)
self.start_time = now
self.next_scale_time = self.start_time + self.RESCALE_THRESHOLD
def _get_values(self):
return [y for x, y in self._values[:max(self.count, self.size)]]
def _same_parameters(self, other):
return self.size == other.size and self.alpha == other.alpha
def __repr__(self):
return "{}({}, {})".format(type(self).__name__, self.size, self.alpha)
class Histogram(object):
"""A metric which calculates some statistics over the distribution of some
values"""
def __init__(self, reservoir):
self.reservoir = reservoir
def notify(self, value):
"""Add a new value to the metric"""
return self.reservoir.add(value)
def raw_data(self):
"""Return the raw underlying data"""
return self.reservoir.values
def get(self):
"""Return the computed statistics over the gathered data"""
values = self.reservoir.sorted_values
def safe(f, *args):
try:
return f(values, *args)
except exceptions.StatisticsError:
return 0.0
plevels = [50, 75, 90, 95, 99, 99.9]
percentiles = [safe(statistics.percentile, p) for p in plevels]
try:
histogram = statistics.get_histogram(values)
except exceptions.StatisticsError:
histogram = [(0, 0)]
res = dict(
kind="histogram",
min=values[0] if values else 0,
max=values[-1] if values else 0,
arithmetic_mean=safe(statistics.mean),
geometric_mean=safe(statistics.geometric_mean),
harmonic_mean=safe(statistics.harmonic_mean),
median=safe(statistics.median),
variance=safe(statistics.variance),
standard_deviation=safe(statistics.stdev),
skewness=safe(statistics.skewness),
kurtosis=safe(statistics.kurtosis),
percentile=py3comp.zip(plevels, percentiles),
histogram=histogram,
n=len(values))
return res
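# A minimal usage sketch (illustrative only): a Histogram wraps any reservoir
# defined above and reports statistics over whatever values the reservoir kept.
#
#     histogram = Histogram(ExponentialDecayingReservoir())
#     for v in (1.0, 2.0, 2.5, 9.0):
#         histogram.notify(v)
#     stats = histogram.get()  # dict with min/max/means/percentiles/histogram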
|
|
#Code by Chris Tralie, Parit Burintrathikul, Justin Wang, Lydia Xu, Billy Wan, and Jay Wang
import sys
import numpy as np
from scipy import sparse
import scipy.io as sio
from scipy.linalg import norm
from scipy.sparse.linalg import lsqr
def saveOffFileExternal(filename, VPos, VColors, ITris):
#Save off file given buffers, not necessarily in the PolyMesh object
nV = VPos.shape[0]
nF = ITris.shape[0]
fout = open(filename, "w")
if VColors.size == 0:
fout.write("OFF\n%i %i %i\n"%(nV, nF, 0))
else:
fout.write("COFF\n%i %i %i\n"%(nV, nF, 0))
for i in range(nV):
fout.write("%g %g %g"%tuple(VPos[i, :]))
if VColors.size > 0:
fout.write(" %g %g %g"%tuple(VColors[i, :]))
fout.write("\n")
for i in range(nF):
fout.write("3 %i %i %i\n"%tuple(ITris[i, :]))
fout.close()
#Return VPos, VColors, and ITris without creating any structure
#(Assumes triangle mesh)
def loadOffFileExternal(filename):
fin = open(filename, 'r')
nVertices = 0
nFaces = 0
lineCount = 0
face = 0
vertex = 0
divideColor = False
VPos = np.zeros((0, 3))
VColors = np.zeros((0, 3))
ITris = np.zeros((0, 3))
for line in fin:
lineCount = lineCount+1
fields = line.split() #Splits whitespace by default
if len(fields) == 0: #Blank line
continue
if fields[0][0] in ['#', '\0', ' '] or len(fields[0]) == 0:
continue
#Check section
if nVertices == 0:
if fields[0] == "OFF" or fields[0] == "COFF":
if len(fields) > 2:
fields[1:4] = [int(field) for field in fields[1:4]]
[nVertices, nFaces, nEdges] = fields[1:4]
print "nVertices = %i, nFaces = %i"%(nVertices, nFaces)
#Pre-allocate vertex arrays
VPos = np.zeros((nVertices, 3))
VColors = np.zeros((nVertices, 3))
ITris = np.zeros((nFaces, 3))
if fields[0] == "COFF":
divideColor = True
else:
fields[0:3] = [int(field) for field in fields]
[nVertices, nFaces, nEdges] = fields[0:3]
VPos = np.zeros((nVertices, 3))
VColors = np.zeros((nVertices, 3))
ITris = np.zeros((nFaces, 3))
elif vertex < nVertices:
fields = [float(i) for i in fields]
P = [fields[0],fields[1], fields[2]]
color = np.array([0.5, 0.5, 0.5]) #Gray by default
if len(fields) >= 6:
#There is color information
if divideColor:
color = [float(c)/255.0 for c in fields[3:6]]
else:
color = [float(c) for c in fields[3:6]]
VPos[vertex, :] = P
VColors[vertex, :] = color
vertex = vertex+1
elif face < nFaces:
#Assume the vertices are specified in CCW order
fields = [int(i) for i in fields]
ITris[face, :] = fields[1:fields[0]+1]
face = face+1
fin.close()
VPos = np.array(VPos, np.float64)
VColors = np.array(VColors, np.float64)
ITris = np.array(ITris, np.int32)
return (VPos, VColors, ITris)
def loadBaselKeypointMesh():
(VPos, VColors, ITris) = loadOffFileExternal("BUMesh.off")
return (VPos, ITris)
def getBaselBUKeypointsIdx():
idx = sio.loadmat("BaselBUKeypointsIdx")['idx']-1
idx = idx.flatten()
return idx
class VideoMesh(object):
def __init__(self):
self.Frames = np.array([])
self.ITris = np.array([])
#Initialize the basel video with the first (neutral) frame filled in
#and the rest blank
def initBaselVideo(self, filename, NFrames):
(a, b, self.ITris) = loadOffFileExternal("BUMesh.off")
#Grab the keypoints of the chosen basel model
shape = sio.loadmat(filename)['shape']
shape = np.reshape(shape, [len(shape)/3, 3])
idx = getBaselBUKeypointsIdx()
shape = shape[idx, :]
self.Frames = np.zeros((NFrames, shape.shape[0], shape.shape[1]))
self.Frames[0, :, :] = shape
#Load in a bunch of bnd files, assuming the first one is a neutral
#expression
def initBUVideo(self, paths):
(a, b, self.ITris) = loadOffFileExternal("BUMesh.off")
X1 = np.loadtxt(paths[0])
X1 = X1[:, 1::]
NFrames = len(paths)
self.Frames = np.zeros((NFrames, X1.shape[0], X1.shape[1]))
self.Frames[0, :, :] = X1
for i in range(1, NFrames):
X = np.loadtxt(paths[i])
X = X[:, 1::]
self.Frames[i, :, :] = X
def saveFramesOff(self, prefix):
for i in range(self.Frames.shape[0]):
VPos = self.Frames[i, :, :]
fout = open("%s%i.off"%(prefix, i), "w")
fout.write("OFF\n%i %i 0\n"%(VPos.shape[0], self.ITris.shape[0]))
for i in range(VPos.shape[0]):
fout.write("%g %g %g\n"%(VPos[i, 0], VPos[i, 1], VPos[i, 2]))
for i in range(self.ITris.shape[0]):
fout.write("3 %g %g %g\n"%(self.ITris[i, 0], self.ITris[i, 1], self.ITris[i, 2]))
fout.close()
def getLaplacianMatrixCotangent(VPos, ITris, anchorsIdx, anchorWeights = 1):
N = VPos.shape[0]
M = ITris.shape[0]
#Allocate space for the sparse array storage, with 2 entries for every
#edge of every triangle (6 entries per triangle); one entry each for the
#directed edges ij and ji. Note that edges shared by two triangles will
#get two entries per directed edge, but the sparse array will sum them
I = np.zeros(M*6)
J = np.zeros(M*6)
V = np.zeros(M*6)
#Keep track of areas of incident triangles and the number of incident triangles
IA = np.zeros(M*3)
VA = np.zeros(M*3) #Incident areas
VC = 1.0*np.ones(M*3) #Number of incident triangles
#Step 1: Compute cotangent weights
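#For an interior edge (i, j) the cotangent weight is cot(alpha) + cot(beta),
#where alpha and beta are the angles opposite the edge in its two incident
#triangles; each triangle contributes one cot term in the loop below, and the
#duplicate (i, j) entries are summed when the sparse matrix is assembled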
for shift in range(3):
#For all 3 shifts of the roles of triangle vertices
#to compute different cotangent weights
[i, j, k] = [shift, (shift+1)%3, (shift+2)%3]
dV1 = VPos[ITris[:, i], :] - VPos[ITris[:, k], :]
dV2 = VPos[ITris[:, j], :] - VPos[ITris[:, k], :]
Normal = np.cross(dV1, dV2)
#Cotangent is dot product / mag cross product
NMag = np.sqrt(np.sum(Normal**2, 1))
cotAlpha = np.sum(dV1*dV2, 1)/NMag
I[shift*M*2:shift*M*2+M] = ITris[:, i]
J[shift*M*2:shift*M*2+M] = ITris[:, j]
V[shift*M*2:shift*M*2+M] = cotAlpha
I[shift*M*2+M:shift*M*2+2*M] = ITris[:, j]
J[shift*M*2+M:shift*M*2+2*M] = ITris[:, i]
V[shift*M*2+M:shift*M*2+2*M] = cotAlpha
if shift == 0:
#Compute contribution of this triangle to each of the vertices
for k in range(3):
IA[k*M:(k+1)*M] = ITris[:, k]
VA[k*M:(k+1)*M] = 0.5*NMag
#Step 2: Create laplacian matrix
L = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
#Create the diagonal by summing the rows and subtracting off the nondiagonal entries
L = sparse.dia_matrix((L.sum(1).flatten(), 0), L.shape) - L
#Step 3: Add anchors
L = L.tocoo()
I = L.row.tolist()
J = L.col.tolist()
V = L.data.tolist()
I = I + range(N, N+len(anchorsIdx))
J = J + anchorsIdx.tolist()
V = V + [anchorWeights]*len(anchorsIdx)
L = sparse.coo_matrix((V, (I, J)), shape=(N+len(anchorsIdx), N)).tocsr()
return L
def solveLaplacianMesh(VPos, ITris, anchorsIdx, anchors):
N = VPos.shape[0]
L = getLaplacianMatrixCotangent(VPos, ITris, anchorsIdx)
delta = L.dot(VPos)
delta[N:, :] = anchors
sio.savemat("System.mat", {"L":L, "delta":delta})
VPosNew = np.zeros((N, 3))
for k in range(3):
print "Solving Laplacian mesh coordinates %i of %i..."%(k+1, 3)
VPosNew[:, k] = lsqr(L, delta[:, k])[0]
return VPosNew
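#A minimal usage sketch (illustrative only, indices are hypothetical): pin a
#few anchor vertices at new positions and solve for the remaining vertices in
#the least-squares sense.
#
#    anchorsIdx = np.array([0, 10, 42])
#    anchors = VPos[anchorsIdx, :] + np.array([0.0, 0.0, 1.0])
#    VPosNew = solveLaplacianMesh(VPos, ITris, anchorsIdx, anchors)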
class DeformationTransferer:
def __init__(self, origVideo, warpedVideo):
self.origVideo = origVideo
self.warpedVideo = warpedVideo
self.origFrames = self.origVideo.Frames
self.warpedFrames = self.warpedVideo.Frames
self.NFrames = self.origFrames.shape[0]
self.ITris = self.origVideo.ITris
self.NFaces = self.ITris.shape[0]
self.count = 0
self.NVertices = self.origFrames.shape[1]
self.NVertices4 = self.NVertices + self.NFaces #original vertices plus 1 new vertex (4th vector) for each face
# Tris4 is Tris plus 4th col indexing 4th vector (which should be mapped to the N to N+F-1 index of VPos4)
self.Tris4 = np.hstack((self.ITris,
np.reshape(np.arange(self.NVertices, self.NVertices4), (self.NFaces, 1))))
print "#####debug info: initial values#########"
print "origFrame shape (NFrames x NVertices x 3):", self.origFrames.shape
print "warpedFrame shape (NFrames x NVertices x 3): ", self.warpedFrames.shape
print "ITris shape:", self.ITris.shape
print "#####end: initial values#########"
def beginDeformationTransfer(self):
resultFrames = np.empty([self.NFrames, self.NVertices, 3]) # this is the result array to fill in
resultFrames[0, :, :] = self.warpedFrames[0, :, :]
origOldVPos4 = self.getVPos4(self.origFrames[0, :, :], self.ITris) # old VPos with extra NFaces vectors
warpedOldVPos4 = self.getVPos4(self.warpedFrames[0, :, :], self.ITris)
for i in range(1, self.NFrames):
# 1 orig: get newVPos4
origNewVPos4 = self.getVPos4(self.origFrames[i, :, :], self.ITris)
# 2 orig: use old and new VPos4 to get S-matrix which shape is 3 x 3NFaces
S = self.getSMatrix(origOldVPos4, origNewVPos4, self.Tris4)
# 3 warped: use old VPos4 to get A (coefficient) sparse matrix which shape is 3NFaces x NVertices
A = self.getAMatrix(warpedOldVPos4, self.Tris4)
origOldVPos4 = origNewVPos4
warpedOldVPos4[:, 0] = lsqr(A, S[0, :])[0]
warpedOldVPos4[:, 1] = lsqr(A, S[1, :])[0]
warpedOldVPos4[:, 2] = lsqr(A, S[2, :])[0]
# print "new VPos4 shape:", warpedOldVPos4[np.arange(self.NVertices), :].shape
resultFrames[i, :, :] = warpedOldVPos4[np.arange(self.NVertices), :]
self.warpedVideo.Frames = resultFrames
#get VPos4 (each face has 4 vertices) from VPos3 (each face 3 vertices) with mesh topology given
def getVPos4(self, VPos3, ITris3):
V4 = self.get4thVertex(VPos3, ITris3)
VPos4 = np.vstack((VPos3, V4))
return VPos4
# get4thVertex for each face, aka face normal scaled by reciprocal of sqrt of its length
# (3 vertices's index are stored in every row in ITris)
def get4thVertex(self, VPos3, ITris3):
V1 = VPos3[ITris3[:, 1], :] - VPos3[ITris3[:, 0], :]
V2 = VPos3[ITris3[:, 2], :] - VPos3[ITris3[:, 0], :]
FNormals = np.cross(V1, V2)
FNormalsSqrtLength = np.sqrt(np.sum(FNormals**2, 1))[:, None]
F = FNormals/FNormalsSqrtLength
Vertex4 = VPos3[ITris3[:, 0], :] + F
return Vertex4
def getSMatrix(self, oldVPos4, newVPos4, Tris4):
v2subv1 = oldVPos4[Tris4[:, 1], :] - oldVPos4[Tris4[:, 0], :]
v3subv1 = oldVPos4[Tris4[:, 2], :] - oldVPos4[Tris4[:, 0], :]
v4subv1 = oldVPos4[Tris4[:, 3], :] - oldVPos4[Tris4[:, 0], :]
tildev2subv1 = newVPos4[Tris4[:, 1], :] - newVPos4[Tris4[:, 0], :]
tildev3subv1 = newVPos4[Tris4[:, 2], :] - newVPos4[Tris4[:, 0], :]
tildev4subv1 = newVPos4[Tris4[:, 3], :] - newVPos4[Tris4[:, 0], :]
assert self.NFaces == Tris4.shape[0]
S = np.zeros((3, 0))
for i in range(0, self.NFaces):
vInv = np.linalg.inv((np.vstack((v2subv1[i, :], v3subv1[i, :], v4subv1[i, :]))).T)
tildev = (np.vstack((tildev2subv1[i, :], tildev3subv1[i, :], tildev4subv1[i, :]))).T
S = np.hstack((S, np.dot(tildev, vInv)))
return S
def getAMatrix(self, VPos4, Tris4):
# I, J, and V are parallel lists holding the rows, columns, and values of the nonzero elements
I = []
J = []
V = []
v2subv1 = VPos4[Tris4[:, 1], :] - VPos4[Tris4[:, 0], :]
v3subv1 = VPos4[Tris4[:, 2], :] - VPos4[Tris4[:, 0], :]
v4subv1 = VPos4[Tris4[:, 3], :] - VPos4[Tris4[:, 0], :]
assert self.NFaces == Tris4.shape[0]
for i in range(0, self.NFaces):
idxRow = i * 3
vInv = np.linalg.inv((np.vstack((v2subv1[i, :], v3subv1[i, :], v4subv1[i, :]))).T) # 3x3
sumOfNegativevInv = np.sum(-1 * vInv, axis = 0) # shape is (3,)
################### ######
# -A-D-G, A, D, G # # x1 #
# -B-E-H, B, E, H # X # x2 #
# -C-F-I, C, F, I # # x3 #
################### # x4 #
######
# sumOfNegativevInv current looks like this, take care when fill in I, J, V
##########################
# -A-D-G, -B-E-H, -C-F-I #
##########################
for j in range(0, 3):
I.append(idxRow + j)
J.append(Tris4[i, 0])
V.append(sumOfNegativevInv[j])
# vInv current looks like this. Same, be careful.
###########
# A, B, C #
# D, E, F #
# G, H, I #
###########
for j in range(0, 3):
for k in range(0, 3):
I.append(idxRow + k)
J.append(Tris4[i, j + 1])
V.append(vInv[j, k])
A = sparse.coo_matrix((V, (I, J)), shape = (3 * self.NFaces, self.NVertices4)).tocsr()
return A
if __name__ == '__main__':
#Load in BU bnd files
buVideo = VideoMesh()
buVideo.initBUVideo(["bu3/F0012/F0012_AN01WH_F3D.bnd", "bu3/F0012/F0012_HA04WH_F3D.bnd"])
NFrames = buVideo.Frames.shape[0]
#Load in basel mesh
baselVertsFile = "BaselVerts.mat"
ITris = sio.loadmat("BaselTris.mat")['ITris']
VPos = sio.loadmat(baselVertsFile)['shape']
VPos = np.reshape(VPos, [len(VPos)/3, 3])
#Create basel video placeholder
baselVideo = VideoMesh()
baselVideo.initBaselVideo(baselVertsFile, NFrames)
#Do coarse deformation transfer
T = DeformationTransferer(buVideo, baselVideo)
T.beginDeformationTransfer()
#Save coarse frames to hard drive
baselVideo.saveFramesOff("Basel")
#Do fine deformation transfer with Laplacian mesh using coarse
#vertices as anchors
idx = getBaselBUKeypointsIdx()
L = getLaplacianMatrixCotangent(VPos, ITris, idx)
VPosNew = solveLaplacianMesh(VPos, ITris, idx, baselVideo.Frames[1, :, :])
saveOffFileExternal("BaselTransfer.off", VPosNew, np.array([]), ITris)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from itertools import izip
from .basecase import (BaseTestCase, cqlshlog, dedent, at_a_time, cqlsh,
TEST_HOST, TEST_PORT)
from .cassconnect import (get_keyspace, testrun_cqlsh, testcall_cqlsh,
cassandra_cursor, split_cql_commands, quote_name)
from .ansi_colors import (ColoredText, lookup_colorcode, lookup_colorname,
lookup_colorletter, ansi_seq)
import unittest
import sys
CONTROL_C = '\x03'
CONTROL_D = '\x04'
class TestCqlshOutput(BaseTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertNoHasColors(self, text, msg=None):
self.assertNotRegexpMatches(text, ansi_seq, msg='ANSI CSI sequence found in %r' % text)
def assertHasColors(self, text, msg=None):
self.assertRegexpMatches(text, ansi_seq, msg=msg)
def assertColored(self, coloredtext, colorname):
wanted_colorcode = lookup_colorcode(colorname)
for num, c in enumerate(coloredtext):
if not c.isspace():
ccolor = c.colorcode()
self.assertEqual(ccolor, wanted_colorcode,
msg='Output text %r (char #%d) is colored %s, not %s'
% (coloredtext, num, lookup_colorname(ccolor), colorname))
def assertColorFromTags(self, coloredtext, tags):
for (char, tag) in izip(coloredtext, tags):
if char.isspace():
continue
if tag.isspace():
tag = 'n' # neutral
self.assertEqual(char.colorcode(), lookup_colorletter(tag),
msg='Coloring mismatch.\nExpected coloring: %s\n'
'Actually got: %s\ncolor code: %s'
% (tags, coloredtext.colored_version(), coloredtext.colortags()))
def assertQueriesGiveColoredOutput(self, queries_and_expected_outputs, **kwargs):
"""
Allow queries and expected output to be specified in structured tuples,
along with expected color information.
"""
with testrun_cqlsh(tty=True, **kwargs) as c:
for query, expected in queries_and_expected_outputs:
cqlshlog.debug('Testing %r' % (query,))
output = c.cmd_and_response(query).lstrip("\r\n")
c_output = ColoredText(output)
pairs = at_a_time(dedent(expected).split('\n'), 2)
outlines = c_output.splitlines()
for (plain, colorcodes), outputline in zip(pairs, outlines):
self.assertEqual(outputline.plain().rstrip(), plain)
self.assertColorFromTags(outputline, colorcodes)
def test_no_color_output(self):
for termname in ('', 'dumb', 'vt100'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname},
win_force_colors=False) as c:
c.send('select * from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertNoHasColors(c.read_to_next_prompt())
def test_no_prompt_or_colors_output(self):
for termname in ('', 'dumb', 'vt100', 'xterm'):
cqlshlog.debug('TERM=%r' % termname)
query = 'select * from has_all_types limit 1;'
output, result = testcall_cqlsh(prompt=None, env={'TERM': termname},
tty=False, input=query + '\n')
output = output.splitlines()
for line in output:
self.assertNoHasColors(line)
self.assertNotRegexpMatches(line, r'^cqlsh\S*>')
self.assertEqual(len(output), 6,
msg='output: %r' % '\n'.join(output))
self.assertEqual(output[0], '')
self.assertNicelyFormattedTableHeader(output[1])
self.assertNicelyFormattedTableRule(output[2])
self.assertNicelyFormattedTableData(output[3])
self.assertEqual(output[4].strip(), '')
self.assertEqual(output[5].strip(), '(1 rows)')
def test_color_output(self):
for termname in ('xterm', 'unknown-garbage'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertHasColors(c.read_to_next_prompt())
def test_count_output(self):
self.assertQueriesGiveColoredOutput((
('select count(*) from has_all_types;', """
count
MMMMM
-------
5
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM empty_table;', """
count
MMMMM
-------
0
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM empty_composite_table;', """
count
MMMMM
-------
0
G
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM twenty_rows_table limit 10;', """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
('select COUNT(*) FROM twenty_rows_table limit 1000000;', """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
))
q = 'select COUNT(*) FROM twenty_rows_composite_table limit 1000000;'
self.assertQueriesGiveColoredOutput((
(q, """
count
MMMMM
-------
20
GG
(1 rows)
nnnnnnnn
"""),
))
def test_static_cf_output(self):
self.assertQueriesGiveColoredOutput((
("select a, b from twenty_rows_table where a in ('1', '13', '2');", """
a | b
RR MM
----+----
1 | 1
YY YY
13 | 13
YY YY
2 | 2
YY YY
(3 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
('select * from dynamic_columns;', """
somekey | column1 | value
RRRRRRR CCCCCCC MMMMM
---------+---------+-------------------------
1 | 1.2 | one point two
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
2 | 2.3 | two point three
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | -0.0001 | negative ten thousandth
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 3.46 | three point four six
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 99 | ninety-nine point oh
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
(5 rows)
nnnnnnnn
"""),
))
def test_empty_cf_output(self):
# we print the header after CASSANDRA-6910
self.assertQueriesGiveColoredOutput((
('select * from empty_table;', """
lonelykey | lonelycol
RRRRRRRRR MMMMMMMMM
-----------+-----------
(0 rows)
"""),
))
q = 'select * from has_all_types where num = 999;'
# same query should show up as empty in cql 3
self.assertQueriesGiveColoredOutput((
(q, """
num | asciicol | bigintcol | blobcol | booleancol | decimalcol | doublecol | floatcol | intcol | smallintcol | textcol | timestampcol | tinyintcol | uuidcol | varcharcol | varintcol
RRR MMMMMMMM MMMMMMMMM MMMMMMM MMMMMMMMMM MMMMMMMMMM MMMMMMMMM MMMMMMMM MMMMMM MMMMMMMMMMM MMMMMMM MMMMMMMMMMMM MMMMMMMMMM MMMMMMM MMMMMMMMMM MMMMMMMMM
-----+----------+-----------+---------+------------+------------+-----------+----------+--------+-------------+---------+--------------+------------+---------+------------+-----------
(0 rows)
"""),
))
def test_columnless_key_output(self):
q = "select a from twenty_rows_table where a in ('1', '2', '-9192');"
self.assertQueriesGiveColoredOutput((
(q, """
a
R
---
1
Y
2
Y
(2 rows)
nnnnnnnn
"""),
))
def test_numeric_output(self):
self.assertQueriesGiveColoredOutput((
('''select intcol, bigintcol, varintcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
intcol | bigintcol | varintcol
MMMMMM MMMMMMMMM MMMMMMMMM
-------------+----------------------+-----------------------------
-12 | 1234567890123456789 | 10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
2147483647 | 9223372036854775807 | 9
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
0 | 0 | 0
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
-2147483648 | -9223372036854775808 | -10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
| |
nnnnnnnnnnn nnnnnnnnnnnnnnnnnnnn nnnnnnnnnnnnnnnnnnnnnnnnnnn
(5 rows)
nnnnnnnn
"""),
('''select decimalcol, doublecol, floatcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
decimalcol | doublecol | floatcol
MMMMMMMMMM MMMMMMMMM MMMMMMMM
------------------+-----------+----------
19952.11882 | 1 | -2.1
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
1E-14 | 1e+07 | 1e+05
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
0.0 | 0 | 0
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
10.0000000000000 | -1004.1 | 1e+08
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
| |
nnnnnnnnnnnnnnnn nnnnnnn nnnnn
(5 rows)
nnnnnnnn
"""),
))
def test_timestamp_output(self):
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
---------------------------------
2012-05-14 12:53:20.000000+0000
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
), env={'TZ': 'Etc/UTC'})
try:
import pytz # test only if pytz is available on PYTHONPATH
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
---------------------------------
2012-05-14 09:53:20.000000-0300
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
), env={'TZ': 'America/Sao_Paulo'})
except ImportError:
pass
def test_boolean_output(self):
self.assertQueriesGiveColoredOutput((
('select num, booleancol from has_all_types where num in (0, 1, 2, 3);', """
num | booleancol
RRR MMMMMMMMMM
-----+------------
0 | True
G GGGGG
1 | True
G GGGGG
2 | False
G GGGGG
3 | False
G GGGGG
(4 rows)
nnnnnnnn
"""),
))
def test_null_output(self):
# column with metainfo but no values
self.assertQueriesGiveColoredOutput((
("select k, c, notthere from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
R M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
(2 rows)
nnnnnnnn
"""),
))
# all-columns, including a metainfo column has no values (cql3)
self.assertQueriesGiveColoredOutput((
("select * from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
R M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
(2 rows)
nnnnnnnn
"""),
))
def test_string_output_ascii(self):
self.assertQueriesGiveColoredOutput((
("select * from ascii_with_special_chars where k in (0, 1, 2, 3);", r"""
k | val
R MMM
---+-----------------------------------------------
0 | newline:\n
G YYYYYYYYmm
1 | return\rand null\x00!
G YYYYYYmmYYYYYYYYmmmmY
2 | \x00\x01\x02\x03\x04\x05control chars\x06\x07
G mmmmmmmmmmmmmmmmmmmmmmmmYYYYYYYYYYYYYmmmmmmmm
3 | fake special chars\x00\n
G YYYYYYYYYYYYYYYYYYYYYYYY
(4 rows)
nnnnnnnn
"""),
))
def test_string_output_utf8(self):
# many of these won't line up visually here, to keep the source code
# here ascii-only. note that some of the special Unicode characters
# here will render as double-width or zero-width in unicode-aware
# terminals, but the color-checking machinery here will still treat
# it as one character, so those won't seem to line up visually either.
self.assertQueriesGiveColoredOutput((
("select * from utf8_with_special_chars where k in (0, 1, 2, 3, 4, 5, 6);", u"""
k | val
R MMM
---+-------------------------------
0 | Normal string
G YYYYYYYYYYYYY
1 | Text with\\nnewlines\\n
G YYYYYYYYYmmYYYYYYYYmm
2 | Text with embedded \\x01 char
G YYYYYYYYYYYYYYYYYYYmmmmYYYYY
3 | \u24c8\u24c5\u24ba\u24b8\u24be\u24b6\u24c1\u2008\u249e\u24a3\u249c\u24ad\u24ae and normal ones
G YYYYYYYYYYYYYYYYYYYYYYYYYYYYY
4 | double wides: \u2f91\u2fa4\u2f9a
G YYYYYYYYYYYYYYYYY
5 | zero width\u200bspace
G YYYYYYYYYYYYYYYY
6 | fake special chars\\x00\\n
G YYYYYYYYYYYYYYYYYYYYYYYY
(7 rows)
nnnnnnnn
""".encode('utf-8')),
), env={'LANG': 'en_US.UTF-8'})
def test_blob_output(self):
self.assertQueriesGiveColoredOutput((
("select num, blobcol from has_all_types where num in (0, 1, 2, 3);", r"""
num | blobcol
RRR MMMMMMM
-----+----------------------
0 | 0x000102030405fffefd
G mmmmmmmmmmmmmmmmmmmm
1 | 0xffffffffffffffffff
G mmmmmmmmmmmmmmmmmmmm
2 | 0x
G mmmmmmmmmmmmmmmmmmmm
3 | 0x80
G mmmmmmmmmmmmmmmmmmmm
(4 rows)
nnnnnnnn
"""),
))
def test_prompt(self):
with testrun_cqlsh(tty=True, keyspace=None) as c:
self.assertTrue(c.output_header.splitlines()[-1].endswith('cqlsh> '))
c.send('\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh> '))
cmd = "USE \"%s\";\n" % get_keyspace().replace('"', '""')
c.send(cmd)
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh:%s> ' % (get_keyspace())))
c.send('use system;\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertTrue(output.endswith('cqlsh:system> '))
c.send('use NONEXISTENTKEYSPACE;\n')
outputlines = c.read_to_next_prompt().splitlines()
start_index = 0
if c.realtty:
self.assertEqual(outputlines[start_index], 'use NONEXISTENTKEYSPACE;')
start_index = 1
self.assertTrue(outputlines[start_index+1].endswith('cqlsh:system> '))
midline = ColoredText(outputlines[start_index])
self.assertEqual(midline.plain(),
'InvalidRequest: Error from server: code=2200 [Invalid query] message="Keyspace \'nonexistentkeyspace\' does not exist"')
self.assertColorFromTags(midline,
"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR")
def test_describe_keyspace_output(self):
with testrun_cqlsh(tty=True) as c:
ks = get_keyspace()
qks = quote_name(ks)
for cmd in ('describe keyspace', 'desc keyspace'):
for givename in ('system', '', qks):
for semicolon in ('', ';'):
fullcmd = cmd + (' ' if givename else '') + givename + semicolon
desc = c.cmd_and_response(fullcmd)
self.check_describe_keyspace_output(desc, givename or qks)
# try to actually execute that last keyspace description, with a
# new keyspace name
new_ks_name = 'COPY_OF_' + ks
copy_desc = desc.replace(ks, new_ks_name)
statements = split_cql_commands(copy_desc)
do_drop = True
with cassandra_cursor() as curs:
try:
for stmt in statements:
cqlshlog.debug('TEST EXEC: %s' % stmt)
curs.execute(stmt)
finally:
curs.execute('use system')
if do_drop:
curs.execute('drop keyspace %s' % quote_name(new_ks_name))
def check_describe_keyspace_output(self, output, qksname):
expected_bits = [r'(?im)^CREATE KEYSPACE %s WITH\b' % re.escape(qksname),
r';\s*$',
r'\breplication = {\'class\':']
for expr in expected_bits:
self.assertRegexpMatches(output, expr)
def test_describe_columnfamily_output(self):
# we can change these to regular expressions if/when it makes sense
# to do so; these will likely be subject to lots of adjustments.
# note columns are now comparator-ordered instead of original-order.
table_desc3 = dedent("""
CREATE TABLE %s.has_all_types (
num int PRIMARY KEY,
asciicol ascii,
bigintcol bigint,
blobcol blob,
booleancol boolean,
decimalcol decimal,
doublecol double,
floatcol float,
intcol int,
smallintcol smallint,
textcol text,
timestampcol timestamp,
tinyintcol tinyint,
uuidcol uuid,
varcharcol text,
varintcol varint
) WITH bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND cdc = false
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND crc_check_chance = 1.0
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = '99PERCENTILE';
""" % quote_name(get_keyspace()))
with testrun_cqlsh(tty=True) as c:
for cmdword in ('describe table', 'desc columnfamily'):
for semicolon in (';', ''):
output = c.cmd_and_response('%s has_all_types%s' % (cmdword, semicolon))
self.assertNoHasColors(output)
self.assertSequenceEqual(output.split('\n'), table_desc3.split('\n'))
def test_describe_columnfamilies_output(self):
output_re = r'''
\n
Keyspace [ ] (?P<ksname> \S+ ) \n
-----------* \n
(?P<cfnames> .*? )
\n
'''
ks = get_keyspace()
with testrun_cqlsh(tty=True, keyspace=None) as c:
# when not in a keyspace
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
ksnames = []
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, '(?xs) ^ ( %s )+ $' % output_re)
for section in re.finditer('(?xs)' + output_re, output):
ksname = section.group('ksname')
ksnames.append(ksname)
cfnames = section.group('cfnames')
self.assertNotIn('\n\n', cfnames)
if ksname == ks:
self.assertIn('ascii_with_special_chars', cfnames)
self.assertIn('system', ksnames)
self.assertIn(quote_name(ks), ksnames)
# when in a keyspace
c.send('USE %s;\n' % quote_name(ks))
c.read_to_next_prompt()
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertEqual(output[0], '\n')
self.assertEqual(output[-1], '\n')
self.assertNotIn('Keyspace %s' % quote_name(ks), output)
self.assertIn('undefined_values_table', output)
def test_describe_cluster_output(self):
output_re = r'''(?x)
^
\n
Cluster: [ ] (?P<clustername> .* ) \n
Partitioner: [ ] (?P<partitionername> .* ) \n
\n
'''
ringinfo_re = r'''
Range[ ]ownership: \n
(
[ ] .*? [ ][ ] \[ ( \d+ \. ){3} \d+ \] \n
)+
\n
'''
with testrun_cqlsh(tty=True, keyspace=None) as c:
# not in a keyspace
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, output_re + '$')
c.send('USE %s;\n' % quote_name(get_keyspace()))
c.read_to_next_prompt()
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, output_re + ringinfo_re + '$')
def test_describe_schema_output(self):
with testrun_cqlsh(tty=True) as c:
for semicolon in ('', ';'):
output = c.cmd_and_response('desc full schema' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, '^\nCREATE KEYSPACE')
self.assertIn("\nCREATE KEYSPACE system WITH replication = {'class': 'LocalStrategy'} AND durable_writes = true;\n",
output)
self.assertRegexpMatches(output, ';\s*$')
def test_show_output(self):
with testrun_cqlsh(tty=True) as c:
output = c.cmd_and_response('show version;')
self.assertRegexpMatches(output,
'^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Native protocol \S+\]$')
output = c.cmd_and_response('show host;')
self.assertHasColors(output)
self.assertRegexpMatches(output, '^Connected to .* at %s:%d\.$'
% (re.escape(TEST_HOST), TEST_PORT))
@unittest.skipIf(sys.platform == "win32", 'EOF signaling not supported on Windows')
def test_eof_prints_newline(self):
with testrun_cqlsh(tty=True) as c:
c.send(CONTROL_D)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, '\n')
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_exit_prints_no_newline(self):
for semicolon in ('', ';'):
with testrun_cqlsh(tty=True) as c:
cmd = 'exit%s\n' % semicolon
c.send(cmd)
if c.realtty:
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, cmd)
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_help_types(self):
with testrun_cqlsh(tty=True) as c:
c.cmd_and_response('help types')
def test_help(self):
pass
def test_printing_parse_error(self):
pass
def test_printing_lex_error(self):
pass
def test_multiline_statements(self):
pass
def test_cancel_statement(self):
pass
def test_printing_integrity_error(self):
pass
def test_printing_cql_error(self):
pass
def test_empty_line(self):
pass
def test_user_types_output(self):
self.assertQueriesGiveColoredOutput((
("select addresses from users;", r"""
addresses
MMMMMMMMM
--------------------------------------------------------------------------------------------------------------------------------------------
{{city: 'Chelyabinsk', address: '3rd street', zip: null}, {city: 'Chigirinsk', address: null, zip: '676722'}}
BBYYYYBBYYYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYBBYYYBBRRRRBBBBYYYYBBYYYYYYYYYYYYBBYYYYYYYBBRRRRBBYYYBBYYYYYYYYBB
{{city: 'Austin', address: '902 East 5th St. #202', zip: '78702'}, {city: 'Sunnyvale', address: '292 Gibraltar Drive #107', zip: '94089'}}
BBYYYYBBYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBBBBYYYYBBYYYYYYYYYYYBBYYYYYYYBBYYYYYYYYYYYYYYYYYYYYYYYYYYBBYYYBBYYYYYYYBB
(2 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
("select phone_numbers from users;", r"""
phone_numbers
MMMMMMMMMMMMM
-------------------------------------------------------------------------------------
{{country: null, number: '03'}, {country: '+7', number: null}}
BBYYYYYYYBBRRRRBBYYYYYYBBYYYYBBBBYYYYYYYBBYYYYBBYYYYYYBBRRRRBB
{{country: '+1', number: '512-537-7809'}, {country: '+44', number: '208 622 3021'}}
BBYYYYYYYBBYYYYBBYYYYYYBBYYYYYYYYYYYYYYBBBBYYYYYYYBBYYYYYBBYYYYYYBBYYYYYYYYYYYYYYBB
(2 rows)
nnnnnnnn
"""),
))
def test_user_types_with_collections(self):
self.assertQueriesGiveColoredOutput((
("select info from songs;", r"""
info
MMMM
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
{founded: 188694000, members: {'Adrian Smith', 'Bruce Dickinson', 'Dave Murray', 'Janick Gers', 'Nicko McBrain', 'Steve Harris'}, description: 'Pure evil metal'}
BYYYYYYYBBGGGGGGGGGBBYYYYYYYBBBYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYBBYYYYYYYYYYYYYYYBBYYYYYYYYYYYYYYBBBYYYYYYYYYYYBBYYYYYYYYYYYYYYYYYB
(1 rows)
nnnnnnnn
"""),
))
self.assertQueriesGiveColoredOutput((
("select tags from songs;", r"""
tags
MMMM
-------------------------------------------------
{tags: {'genre': 'metal', 'origin': 'england'}}
BYYYYBBBYYYYYYYBBYYYYYYYBBYYYYYYYYBBYYYYYYYYYBB
(1 rows)
nnnnnnnn
"""),
))
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import sys
import pwd
import errno
from os.path import join, dirname, normpath, exists, isdir
from optparse import OptionParser
from ConfigParser import ConfigParser
from carbon import log, state
from carbon.database import TimeSeriesDatabase
from carbon.routers import DatapointRouter
from carbon.exceptions import CarbonConfigException
from twisted.python import usage
defaults = dict(
USER="",
MAX_CACHE_SIZE=float('inf'),
MAX_UPDATES_PER_SECOND=500,
MAX_CREATES_PER_MINUTE=float('inf'),
MIN_TIMESTAMP_RESOLUTION=0,
LINE_RECEIVER_INTERFACE='0.0.0.0',
LINE_RECEIVER_PORT=2003,
ENABLE_UDP_LISTENER=False,
UDP_RECEIVER_INTERFACE='0.0.0.0',
UDP_RECEIVER_PORT=2003,
PICKLE_RECEIVER_INTERFACE='0.0.0.0',
PICKLE_RECEIVER_PORT=2004,
MAX_RECEIVER_CONNECTIONS=float('inf'),
CACHE_QUERY_INTERFACE='0.0.0.0',
CACHE_QUERY_PORT=7002,
LOG_UPDATES=True,
LOG_CREATES=True,
LOG_CACHE_HITS=True,
LOG_CACHE_QUEUE_SORTS=True,
DATABASE='whisper',
WHISPER_AUTOFLUSH=False,
WHISPER_SPARSE_CREATE=False,
WHISPER_FALLOCATE_CREATE=False,
WHISPER_LOCK_WRITES=False,
WHISPER_FADVISE_RANDOM=False,
CERES_MAX_SLICE_GAP=80,
CERES_NODE_CACHING_BEHAVIOR='all',
CERES_SLICE_CACHING_BEHAVIOR='latest',
CERES_LOCK_WRITES=False,
MAX_DATAPOINTS_PER_MESSAGE=500,
MAX_AGGREGATION_INTERVALS=5,
FORWARD_ALL=True,
MAX_QUEUE_SIZE=1000,
QUEUE_LOW_WATERMARK_PCT=0.8,
TIME_TO_DEFER_SENDING=0.0001,
ENABLE_AMQP=False,
AMQP_METRIC_NAME_IN_BODY=False,
AMQP_VERBOSE=False,
AMQP_SPEC=None,
BIND_PATTERNS=['#'],
ENABLE_MANHOLE=False,
MANHOLE_INTERFACE='127.0.0.1',
MANHOLE_PORT=7222,
MANHOLE_USER="",
MANHOLE_PUBLIC_KEY="",
RELAY_METHOD='rules',
REPLICATION_FACTOR=1,
DIVERSE_REPLICAS=True,
DESTINATIONS=[],
DESTINATION_PROTOCOL="pickle",
USE_FLOW_CONTROL=True,
USE_INSECURE_UNPICKLER=False,
USE_WHITELIST=False,
CARBON_METRIC_PREFIX='carbon',
CARBON_METRIC_INTERVAL=60,
CACHE_WRITE_STRATEGY='sorted',
WRITE_BACK_FREQUENCY=None,
MIN_RESET_STAT_FLOW=1000,
MIN_RESET_RATIO=0.9,
MIN_RESET_INTERVAL=121,
USE_RATIO_RESET=False,
LOG_LISTENER_CONN_SUCCESS=True,
LOG_AGGREGATOR_MISSES=True,
AGGREGATION_RULES='aggregation-rules.conf',
REWRITE_RULES='rewrite-rules.conf',
RELAY_RULES='relay-rules.conf',
ENABLE_LOGROTATION=True,
METRIC_CLIENT_IDLE_TIMEOUT=None,
)
def _process_alive(pid):
if exists("/proc"):
return exists("/proc/%d" % pid)
else:
try:
os.kill(int(pid), 0)
return True
except OSError, err:
return err.errno == errno.EPERM
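# Note on the fallback above: os.kill(pid, 0) delivers no signal, it only checks
# that the pid exists. EPERM means the process exists but belongs to another
# user; ESRCH (or any other errno) is treated as "not running".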
class OrderedConfigParser(ConfigParser):
"""Hacky workaround to ensure sections are always returned in the order
they are defined in. Note that this does *not* make any guarantees about
the order of options within a section or the order in which sections get
written back to disk on write()."""
_ordered_sections = []
def read(self, path):
# Verifies a file exists *and* is readable
if not os.access(path, os.R_OK):
raise CarbonConfigException("Error: Missing config file or wrong perms on %s" % path)
result = ConfigParser.read(self, path)
sections = []
with open(path) as f:
for line in f:
line = line.strip()
if line.startswith('[') and line.endswith(']'):
sections.append(line[1:-1])
self._ordered_sections = sections
return result
def sections(self):
return list(self._ordered_sections) # return a copy for safety
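# Minimal usage sketch (hypothetical file): order-sensitive rule files such as
# relay-rules.conf rely on sections() reflecting on-disk order rather than the
# underlying dict's ordering.
#
#   parser = OrderedConfigParser()
#   parser.read('relay-rules.conf')
#   for section in parser.sections():  # sections in the order they were defined
#       ...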
class Settings(dict):
__getattr__ = dict.__getitem__
def __init__(self):
dict.__init__(self)
self.update(defaults)
def readFrom(self, path, section):
parser = ConfigParser()
if not parser.read(path):
raise CarbonConfigException("Failed to read config file %s" % path)
if not parser.has_section(section):
return
for key, value in parser.items(section):
key = key.upper()
# Detect type from defaults dict
if key in defaults:
valueType = type(defaults[key])
else:
valueType = str
if valueType is list:
value = [v.strip() for v in value.split(',')]
elif valueType is bool:
value = parser.getboolean(section, key)
else:
# Attempt to figure out numeric types automatically
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
self[key] = value
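# Coercion sketch for readFrom(): the type of each entry in `defaults` decides
# how the raw string from ConfigParser is interpreted. For example (hypothetical
# config values):
#   MAX_UPDATES_PER_SECOND = "800"       -> 800               (int default)
#   LOG_UPDATES            = "False"     -> False             (bool via getboolean)
#   BIND_PATTERNS          = "a.#, b.#"  -> ['a.#', 'b.#']    (list, split on commas)
# Keys without a default fall through to the int/float auto-detection above.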
settings = Settings()
settings.update(defaults)
class CarbonCacheOptions(usage.Options):
optFlags = [
["debug", "", "Run in debug mode."],
]
optParameters = [
["config", "c", None, "Use the given config file."],
["instance", "", "a", "Manage a specific carbon instance."],
["logdir", "", None, "Write logs to the given directory."],
["whitelist", "", None, "List of metric patterns to allow."],
["blacklist", "", None, "List of metric patterns to disallow."],
]
def postOptions(self):
global settings
program = self.parent.subCommand
# Use provided pidfile (if any) as default for configuration. If it's
# set to 'twistd.pid', that means no value was provided and the default
# was used.
pidfile = self.parent["pidfile"]
if pidfile.endswith("twistd.pid"):
pidfile = None
self["pidfile"] = pidfile
# Enforce a default umask of '022' if none was set.
if not self.parent.has_key("umask") or self.parent["umask"] is None:
self.parent["umask"] = 022
# Read extra settings from the configuration file.
program_settings = read_config(program, self)
settings.update(program_settings)
settings["program"] = program
# Normalize and expand paths
settings["STORAGE_DIR"] = os.path.normpath(os.path.expanduser(settings["STORAGE_DIR"]))
settings["LOCAL_DATA_DIR"] = os.path.normpath(os.path.expanduser(settings["LOCAL_DATA_DIR"]))
settings["WHITELISTS_DIR"] = os.path.normpath(os.path.expanduser(settings["WHITELISTS_DIR"]))
settings["PID_DIR"] = os.path.normpath(os.path.expanduser(settings["PID_DIR"]))
settings["LOG_DIR"] = os.path.normpath(os.path.expanduser(settings["LOG_DIR"]))
settings["pidfile"] = os.path.normpath(os.path.expanduser(settings["pidfile"]))
# Set process uid/gid by changing the parent config, if a user was
# provided in the configuration file.
if settings.USER:
self.parent["uid"], self.parent["gid"] = (
pwd.getpwnam(settings.USER)[2:4])
# Set the pidfile in parent config to the value that was computed by
# C{read_config}.
self.parent["pidfile"] = settings["pidfile"]
storage_schemas = join(settings["CONF_DIR"], "storage-schemas.conf")
if not exists(storage_schemas):
print "Error: missing required config %s" % storage_schemas
sys.exit(1)
if settings.CACHE_WRITE_STRATEGY not in ('timesorted', 'sorted', 'max', 'naive'):
            log.err("%s is not a valid value for CACHE_WRITE_STRATEGY, defaulting to %s" %
                    (settings.CACHE_WRITE_STRATEGY, defaults['CACHE_WRITE_STRATEGY']))
            settings.CACHE_WRITE_STRATEGY = defaults['CACHE_WRITE_STRATEGY']
else:
log.msg("Using %s write strategy for cache" % settings.CACHE_WRITE_STRATEGY)
# Database-specific settings
database = settings.DATABASE
if database not in TimeSeriesDatabase.plugins:
print "No database plugin implemented for '%s'" % database
raise SystemExit(1)
database_class = TimeSeriesDatabase.plugins[database]
state.database = database_class(settings)
settings.CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95
if not "action" in self:
self["action"] = "start"
self.handleAction()
# If we are not running in debug mode or non-daemon mode, then log to a
# directory, otherwise log output will go to stdout. If parent options
# are set to log to syslog, then use that instead.
if not self["debug"]:
if self.parent.get("syslog", None):
prefix = "%s-%s[%d]" % (program, self["instance"], os.getpid())
log.logToSyslog(prefix)
elif not self.parent["nodaemon"]:
logdir = settings.LOG_DIR
if not isdir(logdir):
os.makedirs(logdir)
if settings.USER:
# We have not yet switched to the specified user,
# but that user must be able to create files in this
# directory.
os.chown(logdir, self.parent["uid"], self.parent["gid"])
log.logToDir(logdir)
if self["whitelist"] is None:
self["whitelist"] = join(settings["CONF_DIR"], "whitelist.conf")
settings["whitelist"] = self["whitelist"]
if self["blacklist"] is None:
self["blacklist"] = join(settings["CONF_DIR"], "blacklist.conf")
settings["blacklist"] = self["blacklist"]
def parseArgs(self, *action):
"""If an action was provided, store it for further processing."""
if len(action) == 1:
self["action"] = action[0]
def handleAction(self):
"""Handle extra argument for backwards-compatibility.
* C{start} will simply do minimal pid checking and otherwise let twistd
take over.
* C{stop} will kill an existing running process if it matches the
C{pidfile} contents.
* C{status} will simply report if the process is up or not.
"""
action = self["action"]
pidfile = self.parent["pidfile"]
program = settings["program"]
instance = self["instance"]
if action == "stop":
if not exists(pidfile):
print "Pidfile %s does not exist" % pidfile
raise SystemExit(0)
pf = open(pidfile, 'r')
try:
pid = int(pf.read().strip())
pf.close()
except ValueError:
print "Failed to parse pid from pidfile %s" % pidfile
raise SystemExit(1)
except IOError:
print "Could not read pidfile %s" % pidfile
raise SystemExit(1)
print "Sending kill signal to pid %d" % pid
try:
os.kill(pid, 15)
except OSError, e:
if e.errno == errno.ESRCH:
print "No process with pid %d running" % pid
else:
raise
raise SystemExit(0)
elif action == "status":
if not exists(pidfile):
print "%s (instance %s) is not running" % (program, instance)
raise SystemExit(1)
pf = open(pidfile, "r")
try:
pid = int(pf.read().strip())
pf.close()
except ValueError:
print "Failed to parse pid from pidfile %s" % pidfile
raise SystemExit(1)
except IOError:
print "Failed to read pid from %s" % pidfile
raise SystemExit(1)
if _process_alive(pid):
print ("%s (instance %s) is running with pid %d" %
(program, instance, pid))
raise SystemExit(0)
else:
print "%s (instance %s) is not running" % (program, instance)
raise SystemExit(1)
elif action == "start":
if exists(pidfile):
pf = open(pidfile, 'r')
try:
pid = int(pf.read().strip())
pf.close()
except ValueError:
print "Failed to parse pid from pidfile %s" % pidfile
                    raise SystemExit(1)
except IOError:
print "Could not read pidfile %s" % pidfile
raise SystemExit(1)
if _process_alive(pid):
print ("%s (instance %s) is already running with pid %d" %
(program, instance, pid))
raise SystemExit(1)
else:
print "Removing stale pidfile %s" % pidfile
try:
os.unlink(pidfile)
except IOError:
print "Could not remove pidfile %s" % pidfile
# Try to create the PID directory
else:
if not os.path.exists(settings["PID_DIR"]):
try:
os.makedirs(settings["PID_DIR"])
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(settings["PID_DIR"]):
pass
else:
raise
print "Starting %s (instance %s)" % (program, instance)
else:
print "Invalid action '%s'" % action
print "Valid actions: start stop status"
raise SystemExit(1)
class CarbonAggregatorOptions(CarbonCacheOptions):
optParameters = [
["rules", "", None, "Use the given aggregation rules file."],
["rewrite-rules", "", None, "Use the given rewrite rules file."],
] + CarbonCacheOptions.optParameters
def postOptions(self):
CarbonCacheOptions.postOptions(self)
if self["rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['AGGREGATION_RULES'])
settings["aggregation-rules"] = self["rules"]
if self["rewrite-rules"] is None:
self["rewrite-rules"] = join(settings["CONF_DIR"],
settings['REWRITE_RULES'])
settings["rewrite-rules"] = self["rewrite-rules"]
class CarbonRelayOptions(CarbonCacheOptions):
optParameters = [
["rules", "", None, "Use the given relay rules file."],
["aggregation-rules", "", None, "Use the given aggregation rules file."],
] + CarbonCacheOptions.optParameters
def postOptions(self):
CarbonCacheOptions.postOptions(self)
if self["rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['RELAY_RULES'])
settings["relay-rules"] = self["rules"]
if self["aggregation-rules"] is None:
self["aggregation-rules"] = join(settings["CONF_DIR"], settings['AGGREGATION_RULES'])
settings["aggregation-rules"] = self["aggregation-rules"]
router = settings["RELAY_METHOD"]
if router not in DatapointRouter.plugins:
print ("In carbon.conf, RELAY_METHOD must be one of %s. "
"Invalid value: '%s'" % (', '.join(DatapointRouter.plugins), router))
raise SystemExit(1)
def get_default_parser(usage="%prog [options] <start|stop|status>"):
"""Create a parser for command line options."""
parser = OptionParser(usage=usage)
parser.add_option(
"--debug", action="store_true",
help="Run in the foreground, log to stdout")
parser.add_option(
"--syslog", action="store_true",
help="Write logs to syslog")
parser.add_option(
"--nodaemon", action="store_true",
help="Run in the foreground")
parser.add_option(
"--profile",
help="Record performance profile data to the given file")
parser.add_option(
"--profiler",
help="Specify the profiler to use")
parser.add_option(
"--pidfile", default=None,
help="Write pid to the given file")
parser.add_option(
"--umask", default=None,
help="Use the given umask when creating files")
parser.add_option(
"--config",
default=None,
help="Use the given config file")
parser.add_option(
"--whitelist",
default=None,
help="Use the given whitelist file")
parser.add_option(
"--blacklist",
default=None,
help="Use the given blacklist file")
parser.add_option(
"--logdir",
default=None,
help="Write logs in the given directory")
parser.add_option(
"--instance",
default='a',
help="Manage a specific carbon instance")
return parser
def get_parser(name):
parser = get_default_parser()
if name == "carbon-aggregator":
parser.add_option(
"--rules",
default=None,
help="Use the given aggregation rules file.")
parser.add_option(
"--rewrite-rules",
default=None,
help="Use the given rewrite rules file.")
elif name == "carbon-relay":
parser.add_option(
"--rules",
default=None,
help="Use the given relay rules file.")
return parser
def parse_options(parser, args):
"""
Parse command line options and print usage message if no arguments were
provided for the command.
"""
(options, args) = parser.parse_args(args)
if not args:
parser.print_usage()
raise SystemExit(1)
if args[0] not in ("start", "stop", "status"):
parser.print_usage()
raise SystemExit(1)
return options, args
def read_config(program, options, **kwargs):
"""
Read settings for 'program' from configuration file specified by
'options["config"]', with missing values provided by 'defaults'.
"""
settings = Settings()
settings.update(defaults)
# Initialize default values if not set yet.
for name, value in kwargs.items():
settings.setdefault(name, value)
graphite_root = kwargs.get("ROOT_DIR")
if graphite_root is None:
graphite_root = os.environ.get('GRAPHITE_ROOT')
if graphite_root is None:
raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT "
"needs to be provided.")
    # Default config directory to root-relative, unless overridden by the
# 'GRAPHITE_CONF_DIR' environment variable.
settings.setdefault("CONF_DIR",
os.environ.get("GRAPHITE_CONF_DIR",
join(graphite_root, "conf")))
if options["config"] is None:
options["config"] = join(settings["CONF_DIR"], "carbon.conf")
else:
# Set 'CONF_DIR' to the parent directory of the 'carbon.conf' config
# file.
settings["CONF_DIR"] = dirname(normpath(options["config"]))
    # Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR'
# environment variable. It defaults to a path relative to GRAPHITE_ROOT
# for backwards compatibility though.
settings.setdefault("STORAGE_DIR",
os.environ.get("GRAPHITE_STORAGE_DIR",
join(graphite_root, "storage")))
def update_STORAGE_DIR_deps():
# By default, everything is written to subdirectories of the storage dir.
settings.setdefault(
"PID_DIR", settings["STORAGE_DIR"])
settings.setdefault(
"LOG_DIR", join(settings["STORAGE_DIR"], "log", program))
settings.setdefault(
"LOCAL_DATA_DIR", join(settings["STORAGE_DIR"], "whisper"))
settings.setdefault(
"WHITELISTS_DIR", join(settings["STORAGE_DIR"], "lists"))
# Read configuration options from program-specific section.
section = program[len("carbon-"):]
config = options["config"]
if not exists(config):
raise CarbonConfigException("Error: missing required config %r" % config)
settings.readFrom(config, section)
settings.setdefault("instance", options["instance"])
update_STORAGE_DIR_deps()
# If a specific instance of the program is specified, augment the settings
# with the instance-specific settings and provide sane defaults for
# optional settings.
if options["instance"]:
settings.readFrom(config,
"%s:%s" % (section, options["instance"]))
settings["pidfile"] = (
options["pidfile"] or
join(settings["PID_DIR"], "%s-%s.pid" %
(program, options["instance"])))
settings["LOG_DIR"] = (options["logdir"] or
join(settings["LOG_DIR"],
"%s-%s" % (program, options["instance"])))
else:
settings["pidfile"] = (
options["pidfile"] or
join(settings["PID_DIR"], '%s.pid' % program))
settings["LOG_DIR"] = (options["logdir"] or settings["LOG_DIR"])
update_STORAGE_DIR_deps()
return settings
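# Resolution order implemented by read_config(), illustrated with hypothetical
# inputs (GRAPHITE_ROOT=/opt/graphite, program='carbon-cache', instance 'b'):
#   1. built-in defaults
#   2. the [cache] section of carbon.conf
#   3. the [cache:b] instance section, if an instance was given
# after which pidfile and LOG_DIR are recomputed per instance, e.g.
# /opt/graphite/storage/carbon-cache-b.pid and
# /opt/graphite/storage/log/carbon-cache/carbon-cache-b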
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo import messaging
from oslo.messaging.notify import dispatcher as oslo_dispatcher
from oslo.messaging import target
from sqlalchemy import desc
from murano.common import config
from murano.common.helpers import token_sanitizer
from murano.db import models
from murano.db.services import environments
from murano.db.services import instances
from murano.db.services import sessions
from murano.db import session
from murano.openstack.common.gettextutils import _ # noqa
from murano.openstack.common import log as logging
from murano.openstack.common import timeutils
RPC_SERVICE = None
NOTIFICATION_SERVICE = None
LOG = logging.getLogger(__name__)
class ResultEndpoint(object):
@staticmethod
def process_result(context, result, environment_id):
secure_result = token_sanitizer.TokenSanitizer().sanitize(result)
LOG.debug('Got result from orchestration '
'engine:\n{0}'.format(secure_result))
unit = session.get_session()
environment = unit.query(models.Environment).get(environment_id)
if not environment:
LOG.warning(_('Environment result could not be handled, specified '
'environment was not found in database'))
return
if result['Objects'] is None and result.get('ObjectsCopy', {}) is None:
environments.EnvironmentServices.remove(environment_id)
return
environment.description = result
if environment.description['Objects'] is not None:
environment.description['Objects']['services'] = \
environment.description['Objects'].pop('applications', [])
# environment.networking = result.get('networking', {})
action_name = 'Deployment'
deleted = False
else:
action_name = 'Deletion'
deleted = True
environment.version += 1
environment.save(unit)
#close deployment
deployment = get_last_deployment(unit, environment.id)
deployment.finished = timeutils.utcnow()
num_errors = unit.query(models.Status)\
.filter_by(level='error', task_id=deployment.id).count()
num_warnings = unit.query(models.Status)\
.filter_by(level='warning', task_id=deployment.id).count()
final_status_text = action_name + ' finished'
if num_errors:
final_status_text += " with errors"
elif num_warnings:
final_status_text += " with warnings"
status = models.Status()
status.task_id = deployment.id
status.text = final_status_text
status.level = 'info'
deployment.statuses.append(status)
deployment.save(unit)
#close session
conf_session = unit.query(models.Session).filter_by(
**{'environment_id': environment.id,
'state': 'deploying' if not deleted else 'deleting'}).first()
if num_errors > 0:
conf_session.state = \
sessions.SessionState.DELETE_FAILURE if deleted else \
sessions.SessionState.DEPLOY_FAILURE
else:
conf_session.state = sessions.SessionState.DEPLOYED
conf_session.save(unit)
def notification_endpoint_wrapper(priority='info'):
def wrapper(func):
class NotificationEndpoint(object):
def __init__(self):
setattr(self, priority, self._handler)
def _handler(self, ctxt, publisher_id, event_type,
payload, metadata):
if event_type == ('murano.%s' % func.__name__):
func(payload)
def __call__(self, payload):
return func(payload)
return NotificationEndpoint()
return wrapper
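# What the wrapper above produces: decorating a function f with
# @notification_endpoint_wrapper('info') returns a NotificationEndpoint
# *instance* whose 'info' method is bound to _handler, so the notification
# dispatcher can route messages of that priority to it; _handler forwards the
# payload to f only when the event type is 'murano.<f.__name__>'.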
@notification_endpoint_wrapper()
def track_instance(payload):
LOG.debug('Got track instance request from orchestration '
'engine:\n{0}'.format(payload))
instance_id = payload['instance']
instance_type = payload.get('instance_type', 0)
environment_id = payload['environment']
unit_count = payload.get('unit_count')
type_name = payload['type_name']
type_title = payload.get('type_title')
instances.InstanceStatsServices.track_instance(
instance_id, environment_id, instance_type,
type_name, type_title, unit_count)
@notification_endpoint_wrapper()
def untrack_instance(payload):
LOG.debug('Got untrack instance request from orchestration '
'engine:\n{0}'.format(payload))
instance_id = payload['instance']
environment_id = payload['environment']
instances.InstanceStatsServices.destroy_instance(
instance_id, environment_id)
@notification_endpoint_wrapper()
def report_notification(report):
LOG.debug('Got report from orchestration '
'engine:\n{0}'.format(report))
report['entity_id'] = report['id']
del report['id']
status = models.Status()
status.update(report)
unit = session.get_session()
#connect with deployment
with unit.begin():
running_deployment = get_last_deployment(unit,
status.environment_id)
status.task_id = running_deployment.id
unit.add(status)
def get_last_deployment(unit, env_id):
query = unit.query(models.Task) \
.filter_by(environment_id=env_id) \
.order_by(desc(models.Task.started))
return query.first()
def _prepare_rpc_service(server_id):
endpoints = [ResultEndpoint()]
transport = messaging.get_transport(config.CONF)
s_target = target.Target('murano', 'results', server=server_id)
return messaging.get_rpc_server(transport, s_target, endpoints, 'eventlet')
def _prepare_notification_service(server_id):
endpoints = [report_notification, track_instance, untrack_instance]
transport = messaging.get_transport(config.CONF)
s_target = target.Target(topic='murano', server=server_id)
dispatcher = oslo_dispatcher.NotificationDispatcher(
[s_target], endpoints, None, True)
return messaging.MessageHandlingServer(transport, dispatcher, 'eventlet')
def get_rpc_service():
global RPC_SERVICE
if RPC_SERVICE is None:
RPC_SERVICE = _prepare_rpc_service(str(uuid.uuid4()))
return RPC_SERVICE
def get_notification_service():
global NOTIFICATION_SERVICE
if NOTIFICATION_SERVICE is None:
NOTIFICATION_SERVICE = _prepare_notification_service(str(uuid.uuid4()))
return NOTIFICATION_SERVICE
""" Tests for Processors """
import pandas as pd
from pandas.testing import (
assert_frame_equal, assert_series_equal, assert_index_equal
)
from ftpvl.processors import *
from ftpvl.evaluation import Evaluation
class TestProcessor:
"""
Testing by partition.
MinusOne()
StandardizeTypes()
CleanDuplicates()
AddNormalizedColumn()
    ExpandColumn()
Reindex()
SortIndex()
NormalizeAround()
    Normalize()
    RelativeDiff()
FilterByIndex()
RemoveByIndex()
Aggregate()
GeomeanAggregate()
CompareToFirst()
"""
def test_minusone(self):
""" Test whether all values are correctly changed """
df = pd.DataFrame({"a": [1, 2, 3, 4, 5]})
eval1 = Evaluation(df, eval_id=10)
result = eval1.get_df()["a"]
expected = pd.Series([1, 2, 3, 4, 5], name="a")
assert_series_equal(result, expected)
pipeline = [MinusOne()]
result_processed = eval1.process(pipeline)
result_df = result_processed.get_df()["a"]
expected_df = pd.Series([0, 1, 2, 3, 4], name="a")
assert_series_equal(result_df, expected_df)
assert result_processed.get_eval_id() == 10
def test_standardizetypes(self):
""" Test whether types are standardized """
types = {"a": float}
df = pd.DataFrame({"a": [1, 2, 3, 4, 5]})
eval1 = Evaluation(df, eval_id=10)
assert eval1.get_df().dtypes["a"] == int
pipeline = [StandardizeTypes(types)]
result = eval1.process(pipeline)
assert result.get_df().dtypes["a"] == float
assert result.get_eval_id() == 10
def test_cleanduplicates_no_duplicates(self):
""" Test for evaluation that has no duplicates in specified column """
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": 5},
{"a": 1, "b": 2, "c": 4},
{"a": 3, "b": 3, "c": 3},
{"a": 4, "b": 4, "c": 2},
{"a": 5, "b": 5, "c": 1},
]
)
eval1 = Evaluation(df, eval_id=10)
# test no duplicates
pipeline = [CleanDuplicates(["b"])]
result = eval1.process(pipeline)
assert_frame_equal(result.get_df(), df, check_like=True)
assert result.get_eval_id() == 10
def test_cleanduplicates_one_col(self):
""" Test for evaluation that has duplicate in one column """
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": 5},
{"a": 1, "b": 2, "c": 4},
{"a": 3, "b": 3, "c": 3},
{"a": 4, "b": 4, "c": 2},
{"a": 5, "b": 5, "c": 1},
]
)
eval1 = Evaluation(df)
pipeline = [CleanDuplicates(["a"])]
result = eval1.process(pipeline).get_df()
expected = df.drop(1)
assert_frame_equal(result, expected, check_like=True)
def test_cleanduplicates_multi_col(self):
"""
Test for evaluation that doesn't have duplicates when comparing
more than one column
"""
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": 5},
{"a": 1, "b": 2, "c": 4},
{"a": 3, "b": 3, "c": 3},
{"a": 4, "b": 4, "c": 2},
{"a": 5, "b": 5, "c": 1},
]
)
eval1 = Evaluation(df)
pipeline = [CleanDuplicates(["a", "b"])]
result2 = eval1.process(pipeline).get_df()
assert_frame_equal(result2, df, check_like=True)
def test_cleanduplicates_sorting(self):
"""
Test by sorting before removing duplicate.
"""
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": 5},
{"a": 1, "b": 2, "c": 4},
{"a": 3, "b": 3, "c": 3},
{"a": 4, "b": 4, "c": 2},
{"a": 5, "b": 5, "c": 1},
]
)
eval1 = Evaluation(df)
pipeline = [CleanDuplicates(["a"], ["c"])]
result = eval1.process(pipeline).get_df() # will remove idx 1
expected = df.drop(1)
assert_frame_equal(result, expected, check_like=True)
pipeline = [CleanDuplicates(["a"], ["c"], reverse_sort=True)]
result = eval1.process(pipeline).get_df() # will remove idx 0
expected = df.drop(0).sort_index(level=0, ascending=False)
assert_frame_equal(result, expected, check_like=True)
def test_addnormalizedcolumn(self):
""" Test whether normalized column is added """
df = pd.DataFrame(
[
{"group": "a", "value": 10},
{"group": "a", "value": 5},
{"group": "a", "value": 3},
{"group": "b", "value": 100},
{"group": "b", "value": 31},
]
)
eval1 = Evaluation(df, eval_id=10)
pipeline = [AddNormalizedColumn("group", "value", "normalized")]
result = eval1.process(pipeline)
expected = pd.DataFrame(
[
{"group": "a", "value": 10, "normalized": 1.0},
{"group": "a", "value": 5, "normalized": 0.5},
{"group": "a", "value": 3, "normalized": 0.3},
{"group": "b", "value": 100, "normalized": 1.0},
{"group": "b", "value": 31, "normalized": 0.31},
]
)
assert_frame_equal(result.get_df(), expected)
assert result.get_eval_id() == 10
def test_addnormalizedcolumn_direction(self):
""" Test whether normalized column direction parameter works """
df = pd.DataFrame(
[
{"group": "a", "value": 10},
{"group": "a", "value": 5},
{"group": "a", "value": 3},
{"group": "b", "value": 100},
{"group": "b", "value": 31},
]
)
eval1 = Evaluation(df, eval_id=10)
# direction is -1 => normalize around minimum
pipeline = [AddNormalizedColumn("group", "value", "normalized", Direction.MINIMIZE)]
result = eval1.process(pipeline)
expected = pd.DataFrame(
[
{"group": "a", "value": 10, "normalized": 10/3},
{"group": "a", "value": 5, "normalized": 5/3},
{"group": "a", "value": 3, "normalized": 1.0},
{"group": "b", "value": 100, "normalized": 100/31},
{"group": "b", "value": 31, "normalized": 1.0},
]
)
assert_frame_equal(result.get_df(), expected)
assert result.get_eval_id() == 10
def test_expandcolumn(self):
""" Test whether the column is expanded """
df = pd.DataFrame(
[
{"group": "a", "value": 10},
{"group": "a", "value": 5},
{"group": "a", "value": 3},
{"group": "b", "value": 100},
{"group": "b", "value": 31},
]
)
eval1 = Evaluation(df, eval_id=10)
mapping = {
"a": ("a", "x"),
"b": ("b", "y"),
}
pipeline = [ExpandColumn("group", ["group1", "group2"], mapping)]
result = eval1.process(pipeline)
expected = pd.DataFrame(
[
{"group": "a", "group1": "a", "group2": "x", "value": 10},
{"group": "a", "group1": "a", "group2": "x", "value": 5},
{"group": "a", "group1": "a", "group2": "x", "value": 3},
{"group": "b", "group1": "b", "group2": "y", "value": 100},
{"group": "b", "group1": "b", "group2": "y", "value": 31},
]
)
assert_frame_equal(result.get_df(), expected, check_like=True)
assert result.get_eval_id() == 10
def test_reindex(self):
""" Test whether the dataframe was reindexed """
df = pd.DataFrame(
[
{"group": "a", "key": "a", "value": 10},
{"group": "a", "key": "b", "value": 5},
{"group": "a", "key": "c", "value": 3},
{"group": "b", "key": "d", "value": 100},
{"group": "b", "key": "e", "value": 31},
]
)
eval1 = Evaluation(df, eval_id=10)
pipeline = [Reindex(["value"])]
result = eval1.process(pipeline)
expected_index = pd.Index([10, 5, 3, 100, 31], name="value")
assert_index_equal(result.get_df().index, expected_index)
assert result.get_eval_id() == 10
pipeline = [Reindex(["group", "key"])]
result = eval1.process(pipeline)
arrays = [["a", "a", "a", "b", "b"], ["a", "b", "c", "d", "e"]]
expected_index = pd.MultiIndex.from_arrays(arrays, names=("group", "key"))
assert_index_equal(result.get_df().index, expected_index)
assert result.get_eval_id() == 10
def test_sortindex(self):
""" Test whether the dataframe is sorted by index """
df = pd.DataFrame(
data = [
{"group": "a", "value": 10},
{"group": "a", "value": 5},
{"group": "a", "value": 3},
{"group": "b", "value": 100},
{"group": "b", "value": 31},
],
index = pd.Index([
5,
4,
3,
2,
1,
], name="idx")
)
eval1 = Evaluation(df, eval_id=10)
pipeline = [SortIndex(["idx"])]
result = eval1.process(pipeline)
expected = pd.DataFrame(
data = [
{"group": "b", "value": 31},
{"group": "b", "value": 100},
{"group": "a", "value": 3},
{"group": "a", "value": 5},
{"group": "a", "value": 10},
],
index = pd.Index([
1,
2,
3,
4,
5,
], name="idx")
)
assert_frame_equal(result.get_df(), expected)
assert result.get_eval_id() == 10
def test_normalizearound(self):
"""
Test whether all values are normalized around a certain (set of) rows
"""
arrays = [
["blinky", "blinky", "blinky", "ibex", "ibex"],
["yosys", "yosys", "vivado", "yosys", "vivado"],
]
index = pd.MultiIndex.from_arrays(arrays, names=("project", "synthesis_tool"))
df = pd.DataFrame(
data=[
{"group": "b", "value": 0},
{"group": "b", "value": 50},
{"group": "b", "value": 100},
{"group": "a", "value": 0},
{"group": "a", "value": 10},
],
index=index
)
eval1 = Evaluation(df, eval_id=10)
normalize_direction = {
"value": Direction.MINIMIZE
}
pipeline = [NormalizeAround(
normalize_direction,
group_by="project",
idx_name="synthesis_tool",
idx_value="vivado"
)]
result = eval1.process(pipeline)
expected = pd.DataFrame(
data=[
{"group": "b", "value": 0},
{"group": "b", "value": 0.25},
{"group": "b", "value": 0.5},
{"group": "a", "value": 0},
{"group": "a", "value": 0.5},
],
index=index
)
assert_frame_equal(result.get_df(), expected)
assert result.get_eval_id() == 10
def test_normalizearound_negated(self):
"""
Test whether all values are normalized in the correct direction based
on the negation.
"""
arrays = [
["blinky", "blinky", "blinky", "ibex", "ibex"],
["yosys", "yosys", "vivado", "yosys", "vivado"],
]
index = pd.MultiIndex.from_arrays(arrays, names=("project", "synthesis_tool"))
df = pd.DataFrame(
data=[
{"group": "b", "value": 0},
{"group": "b", "value": 50},
{"group": "b", "value": 100},
{"group": "a", "value": 0},
{"group": "a", "value": 10},
],
index=index
)
eval1 = Evaluation(df, eval_id=10)
normalize_direction = {
"value": Direction.MAXIMIZE
}
pipeline = [NormalizeAround(
normalize_direction,
group_by="project",
idx_name="synthesis_tool",
idx_value="vivado"
)]
result = eval1.process(pipeline)
expected = pd.DataFrame(
data=[
{"group": "b", "value": 1},
{"group": "b", "value": 0.75},
{"group": "b", "value": 0.5},
{"group": "a", "value": 1},
{"group": "a", "value": 0.5},
],
index=index
)
assert_frame_equal(result.get_df(), expected)
assert result.get_eval_id() == 10
def test_normalize(self):
"""
Test whether all values are normalized
"""
df = pd.DataFrame(
data=[
{"group": "b", "value": -50},
{"group": "b", "value": 50},
{"group": "b", "value": 100},
{"group": "a", "value": 0},
{"group": "a", "value": 10},
]
)
eval1 = Evaluation(df, eval_id=10)
normalize_direction = {
"value": Direction.MINIMIZE
}
pipeline = [Normalize(normalize_direction)]
result = eval1.process(pipeline)
expected = pd.DataFrame(
data=[
{"group": "b", "value": 0.25},
{"group": "b", "value": 0.75},
{"group": "b", "value": 1.0},
{"group": "a", "value": 0.5},
{"group": "a", "value": 0.55},
],
)
assert_frame_equal(result.get_df(), expected)
assert result.get_eval_id() == 10
def test_normalize_negated(self):
"""
Test whether all values are normalized
"""
df = pd.DataFrame(
data=[
{"group": "b", "value": -50},
{"group": "b", "value": 50},
{"group": "b", "value": 100},
{"group": "a", "value": 0},
{"group": "a", "value": 10},
]
)
eval1 = Evaluation(df)
normalize_direction = {
"value": -1
}
pipeline = [Normalize(normalize_direction)]
result = eval1.process(pipeline).get_df()
expected = pd.DataFrame(
data=[
{"group": "b", "value": 0.75},
{"group": "b", "value": 0.25},
{"group": "b", "value": 0},
{"group": "a", "value": 0.5},
{"group": "a", "value": 0.45},
],
)
assert_frame_equal(result, expected)
def test_relativediff(self):
"""
Test if difference is correct
"""
a = pd.DataFrame(
data=[
{"a": 2, "b": 2},
{"a": 5, "b": 10},
]
)
b = pd.DataFrame(
data=[
{"a": 4, "b": 1},
{"a": 20, "b": 1},
]
)
a_eval = Evaluation(a)
b_eval = Evaluation(b)
diff = b_eval.process([RelativeDiff(a_eval)])
result = diff.get_df()
expected = pd.DataFrame(
data=[
{"a": 1.0, "b": -0.5},
{"a": 3.0, "b": -0.9},
]
)
assert_frame_equal(expected, result)
def test_filterbyindex_multindex(self):
""" tests if filtering by index works for multi-index dataframe """
# test dataframe
# {"group": "a", "key": "a", "value": 10},
# {"group": "a", "key": "b", "value": 5},
# {"group": "a", "key": "c", "value": 3},
# {"group": "b", "key": "d", "value": 100},
# {"group": "b", "key": "e", "value": 31}
idx_arrays = [["a", "a", "a", "b", "b"], ["a", "b", "c", "d", "e"]]
index = pd.MultiIndex.from_arrays(idx_arrays, names=("group", "key"))
df = pd.DataFrame({"value": [10, 5, 3, 100, 31]}, index=index)
eval1 = Evaluation(df, eval_id=10)
# filter by first index
pipeline = [FilterByIndex("group", "a")]
result = eval1.process(pipeline)
expected_index = pd.Index(["a", "b", "c"], name="key")
expected_df = pd.DataFrame({"value": [10, 5, 3]}, index=expected_index)
assert_frame_equal(result.get_df(), expected_df)
assert result.get_eval_id() == 10
def test_filterbyindex_singleindex(self):
""" tests if filtering by index works for single-index dataframe """
# test dataframe
# {"group": "a", "key": "a", "value": 10},
# {"group": "a", "key": "b", "value": 5},
# {"group": "a", "key": "c", "value": 3},
# {"group": "b", "key": "d", "value": 100},
# {"group": "b", "key": "e", "value": 31}
idx_array = ["a", "a", "a", "b", "b"]
index = pd.Index(idx_array, name="key")
df = pd.DataFrame({"value": [10, 5, 3, 100, 31]}, index=index)
eval1 = Evaluation(df, eval_id=10)
# filter by first index
pipeline = [FilterByIndex("key", "a")]
result = eval1.process(pipeline)
expected_index = pd.Index(["a", "a", "a"], name="key")
expected_df = pd.DataFrame({"value": [10, 5, 3]}, index=expected_index)
assert_frame_equal(result.get_df(), expected_df)
assert result.get_eval_id() == 10
""" tests if filtering by index works for single-index dataframe """
def test_removebyindex_multindex(self):
""" tests if removing by index works for multi-index dataframe """
# test dataframe
# {"group": "a", "key": "a", "value": 10},
# {"group": "a", "key": "b", "value": 5},
# {"group": "a", "key": "c", "value": 3},
# {"group": "b", "key": "d", "value": 100},
# {"group": "b", "key": "e", "value": 31}
idx_arrays = [["a", "a", "a", "b", "b"], ["a", "b", "c", "d", "e"]]
index = pd.MultiIndex.from_arrays(idx_arrays, names=("group", "key"))
df = pd.DataFrame({"value": [10, 5, 3, 100, 31]}, index=index)
eval1 = Evaluation(df, eval_id=10)
expected_idx_arrays = [["b", "b"], ["d", "e"]]
expected_index = pd.MultiIndex.from_arrays(expected_idx_arrays, names=("group", "key"))
expected_df = pd.DataFrame({"value": [100, 31]}, index=expected_index)
# filter by first index
pipeline = [RemoveByIndex("group", ["a"])]
result_group = eval1.process(pipeline)
# filter by first index
pipeline = [RemoveByIndex("key", ["a", "b", "c"])]
result_key = eval1.process(pipeline)
assert_frame_equal(result_group.get_df(), expected_df)
assert_frame_equal(result_key.get_df(), expected_df)
assert result_group.get_eval_id() == 10
assert result_key.get_eval_id() == 10
def test_removebyindex_singleindex(self):
""" tests if removing by index works for single-index dataframe """
# test dataframe
# {"key": "a", "value": 10},
# {"key": "a", "value": 5},
# {"key": "a", "value": 3},
# {"key": "b", "value": 100},
# {"key": "b", "value": 31}
idx_array = ["a", "a", "a", "b", "b"]
index = pd.Index(idx_array, name="key")
df = pd.DataFrame({"value": [10, 5, 3, 100, 31]}, index=index)
eval1 = Evaluation(df, eval_id=10)
expected_idx_array = ["b", "b"]
expected_index = pd.Index(expected_idx_array, name="key")
expected_df = pd.DataFrame({"value": [100, 31]}, index=expected_index)
# filter by first index
pipeline = [RemoveByIndex("key", ["a"])]
result = eval1.process(pipeline)
assert_frame_equal(result.get_df(), expected_df)
assert result.get_eval_id() == 10
def test_aggregate(self):
""" Test aggregate processor with custom aggregator functions """
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": 5},
{"a": 1, "b": 2, "c": 4},
{"a": 3, "b": 3, "c": 3},
{"a": 4, "b": 4, "c": 2},
{"a": 5, "b": 5, "c": 1},
]
)
eval1 = Evaluation(df, eval_id=20)
pipeline = [Aggregate(lambda x: x.sum())]
result = eval1.process(pipeline)
expected_df = pd.DataFrame(
[
{"a": 14, "b": 15, "c": 15}
]
)
assert_frame_equal(result.get_df(), expected_df)
assert eval1.get_eval_id() == 20
pipeline2 = [Aggregate(lambda x: x.product())]
result2 = eval1.process(pipeline2)
expected_df2 = pd.DataFrame(
[
{"a": 60, "b": 120, "c": 120}
]
)
assert_frame_equal(result2.get_df(), expected_df2)
assert result2.get_eval_id() == 20
def test_aggregate_exclude_nonnumeric(self):
""" Check if aggregate processor excludes fields that are non-numeric """
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": "a"},
{"a": 1, "b": 2, "c": "b"},
{"a": 3, "b": 3, "c": "c"},
{"a": 4, "b": 4, "c": "d"},
{"a": 5, "b": 5, "c": "e"},
]
)
eval1 = Evaluation(df, eval_id=20)
pipeline = [Aggregate(lambda x: x.sum())]
result = eval1.process(pipeline)
expected_df = pd.DataFrame(
[
{"a": 14, "b": 15}
]
)
assert_frame_equal(result.get_df(), expected_df)
assert eval1.get_eval_id() == 20
def test_geomean_aggregate(self):
""" Test built-in geomean aggregator """
df = pd.DataFrame(
[
{"a": 1, "b": 1, "c": 5},
{"a": 1, "b": 2, "c": 4},
{"a": 3, "b": 3, "c": 3},
{"a": 4, "b": 4, "c": 2},
{"a": 5, "b": 5, "c": 1},
]
)
eval1 = Evaluation(df, eval_id=20)
pipeline = [GeomeanAggregate()]
eval1 = eval1.process(pipeline)
expected_a = (1 * 1 * 3 * 4 * 5) ** (1/5)
expected_b = expected_c = (1 * 2 * 3 * 4 * 5) ** (1/5)
expected_df = pd.DataFrame(
[
{"a": expected_a, "b": expected_b, "c": expected_c}
]
)
assert_frame_equal(eval1.get_df(), expected_df)
assert eval1.get_eval_id() == 20
def test_comparetofirst(self):
""" Test if CompareToFirst works with default params """
df = pd.DataFrame(
[
{"a": 1, "b": 5},
{"a": 2, "b": 4},
{"a": 3, "b": 3},
{"a": 4, "b": 2},
{"a": 5, "b": 1},
]
)
eval1 = Evaluation(df, eval_id=20)
direction = {
"a": Direction.MAXIMIZE,
"b": Direction.MAXIMIZE
}
pipeline = [CompareToFirst(direction)]
eval1 = eval1.process(pipeline)
expected_df = pd.DataFrame(
[
{"a": 1, "a.relative": 1.0/1, "b": 5, "b.relative": 5.0/5},
{"a": 2, "a.relative": 2.0/1, "b": 4, "b.relative": 4.0/5},
{"a": 3, "a.relative": 3.0/1, "b": 3, "b.relative": 3.0/5},
{"a": 4, "a.relative": 4.0/1, "b": 2, "b.relative": 2.0/5},
{"a": 5, "a.relative": 5.0/1, "b": 1, "b.relative": 1.0/5},
]
)
assert_frame_equal(eval1.get_df(), expected_df)
assert eval1.get_eval_id() == 20
def test_comparetofirst_dir_subset(self):
""" Test if CompareToFirst works with different direction and subset"""
df = pd.DataFrame(
[
{"a": 1, "b": 5},
{"a": 2, "b": 4},
{"a": 3, "b": 3},
{"a": 4, "b": 2},
{"a": 5, "b": 1},
]
)
eval1 = Evaluation(df, eval_id=20)
direction = {
"a": Direction.MINIMIZE
}
pipeline = [CompareToFirst(direction)]
eval1 = eval1.process(pipeline)
expected_df = pd.DataFrame(
[
{"a": 1, "a.relative": 1.0/1},
{"a": 2, "a.relative": 1.0/2},
{"a": 3, "a.relative": 1.0/3},
{"a": 4, "a.relative": 1.0/4},
{"a": 5, "a.relative": 1.0/5},
]
)
assert_frame_equal(eval1.get_df(), expected_df)
assert eval1.get_eval_id() == 20
def test_comparetofirst_suffix(self):
""" Test if CompareToFirst works with different suffix """
df = pd.DataFrame(
[
{"a": 1, "b": 5},
{"a": 2, "b": 4},
{"a": 3, "b": 3},
{"a": 4, "b": 2},
{"a": 5, "b": 1},
]
)
eval1 = Evaluation(df, eval_id=20)
direction = {
"a": Direction.MAXIMIZE,
"b": Direction.MAXIMIZE
}
pipeline = [CompareToFirst(direction, suffix=".diff")]
eval1 = eval1.process(pipeline)
expected_df = pd.DataFrame(
[
{"a": 1, "a.diff": 1.0/1, "b": 5, "b.diff": 5.0/5},
{"a": 2, "a.diff": 2.0/1, "b": 4, "b.diff": 4.0/5},
{"a": 3, "a.diff": 3.0/1, "b": 3, "b.diff": 3.0/5},
{"a": 4, "a.diff": 4.0/1, "b": 2, "b.diff": 2.0/5},
{"a": 5, "a.diff": 5.0/1, "b": 1, "b.diff": 1.0/5},
]
)
assert_frame_equal(eval1.get_df(), expected_df)
assert eval1.get_eval_id() == 20
#!/usr/bin/env python
'''
Ansible module for zabbix items
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix item ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because these modules all look very similar to one another.
# They intentionally duplicate code because their behavior is very similar
# but differs slightly for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
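# Illustrative values (a sketch, not part of the original module):
#   exists({'result': [{'itemid': '1'}]})  -> True
#   exists({'result': []})                 -> False  # key present but empty
#   exists({'error': {}})                  -> False  # default key 'result' missing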
def get_value_type(value_type):
'''
Possible values:
0 - numeric float;
1 - character;
2 - log;
3 - numeric unsigned;
4 - text
'''
vtype = 0
if 'int' in value_type:
vtype = 3
elif 'char' in value_type:
vtype = 1
elif 'str' in value_type:
vtype = 4
return vtype
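# Illustrative mapping (sketch only):
#   get_value_type('int')   -> 3  # numeric unsigned
#   get_value_type('char')  -> 1  # character
#   get_value_type('str')   -> 4  # text
#   get_value_type('float') -> 0  # falls through to the default, numeric float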
def get_app_ids(application_names, app_name_ids):
''' get application ids from names
'''
applications = []
if application_names:
for app in application_names:
applications.append(app_name_ids[app])
return applications
def get_template_id(zapi, template_name):
'''
get related templates
'''
template_ids = []
app_ids = {}
# Fetch templates by name
content = zapi.get_content('template',
'get',
{'search': {'host': template_name},
'selectApplications': ['applicationid', 'name']})
if content.has_key('result'):
template_ids.append(content['result'][0]['templateid'])
for app in content['result'][0]['applications']:
app_ids[app['name']] = app['applicationid']
return template_ids, app_ids
def get_multiplier(inval):
''' Determine the multiplier
'''
    if inval is None or inval == '':
return None, 0
rval = None
try:
rval = int(inval)
except ValueError:
pass
if rval:
return rval, 1
return rval, 0
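# Illustrative values (sketch only): get_multiplier parses the optional
# multiplier string into (formula, use_multiplier):
#   get_multiplier('8')   -> (8, 1)     # numeric string: apply a multiplier of 8
#   get_multiplier('')    -> (None, 0)  # unset: no multiplier
#   get_multiplier('abc') -> (None, 0)  # non-numeric input is ignored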
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
'''
ansible zabbix module for zbx_item
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
template_name=dict(default=None, type='str'),
zabbix_type=dict(default=2, type='int'),
value_type=dict(default='int', type='str'),
multiplier=dict(default=None, type='str'),
description=dict(default=None, type='str'),
units=dict(default=None, type='str'),
applications=dict(default=None, type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'item'
state = module.params['state']
templateid, app_name_ids = get_template_id(zapi, module.params['template_name'])
# Fail if a template was not found matching the name
if not templateid:
module.exit_json(failed=True,
changed=False,
                         results='Error: Could not find template with name %s for item.' % module.params['template_name'],
                         state="Unknown")
content = zapi.get_content(zbx_class_name,
'get',
{'search': {'key_': module.params['key']},
'selectApplications': 'applicationid',
'templateids': templateid,
})
#******#
# GET
#******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
#******#
# DELETE
#******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
module.exit_json(changed=True, results=content['result'], state="absent")
# Create and Update
if state == 'present':
formula, use_multiplier = get_multiplier(module.params['multiplier'])
params = {'name': module.params.get('name', module.params['key']),
'key_': module.params['key'],
'hostid': templateid[0],
'type': module.params['zabbix_type'],
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(module.params['applications'], app_name_ids),
'formula': formula,
'multiplier': use_multiplier,
'description': module.params['description'],
'units': module.params['units'],
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
#******#
# CREATE
#******#
if not exists(content):
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('error'):
module.exit_json(failed=True, changed=True, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state='present')
########
# UPDATE
########
_ = params.pop('hostid', None)
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if key == 'applications':
app_ids = [item['applicationid'] for item in zab_results[key]]
if set(app_ids) != set(value):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences['itemid'] = zab_results['itemid']
content = zapi.get_content(zbx_class_name, 'update', differences)
if content.has_key('error'):
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
|
from __builtin__ import enumerate
import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
from kid_readout.analysis.resonator import fit_best_resonator
mmw_source_frequency = np.nan
source_on_freq_scale = 0.993 # nominally 1 if low-ish power
ri = baseband.RoachBaseband()
f0s = np.load('/home/gjones/readout/kid_readout/apps/sc5x4_0813f12.npy')
f0s.sort()
f0s = f0s[[0,1,2,3,4,5,6,7,8,9,10,13,14,15,16,17]] # remove close packed resonators to enable reading out all simultaneously
suffix = "dark"
mmw_source_modulation_freq = np.nan
mmw_atten_turns = (np.nan,np.nan)
def source_on():
ri.set_modulation_output(rate='low')
def source_off():
ri.set_modulation_output(rate='high')
def source_modulate(rate=7):
return ri.set_modulation_output(rate=rate)
nf = len(f0s)
atonce = 16
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
nsamp = 2**18
step = 1
nstep = 80
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
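# Note (assuming frequencies are in MHz, as elsewhere in kid_readout): with
# nsamp = 2**18 the tone grid spacing is 512.0/nsamp ~= 1.95e-3 MHz, so
# f0binned rounds each requested tone onto that grid, and the extra +/-20e-3
# end points pad the sweep by roughly 20 kHz on either side.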
print f0s
print offsets*1e6
print len(f0s)
if False:
from kid_readout.equipment.parse_srs import get_all_temperature_data
while True:
temp = get_all_temperature_data()[1][-1]
print "mk stage at", temp
if temp > 0.348:
break
time.sleep(300)
time.sleep(600)
start = time.time()
max_fit_error = 0.5
use_fmin = False
attenlist = [45,43,41,39,37,35,33,31,29]
while True:
print "*"*40
print "Hit enter to take a data set"
mmw_atten_str = raw_input("start: ")
if mmw_atten_str == 'exit':
break
nsamp = 2**18
step = 1
nstep = 80
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
source_off()
print "setting attenuator to",attenlist[0]
ri.set_dac_attenuator(attenlist[0])
f0binned = np.round(f0s*source_on_freq_scale*nsamp/512.0)*512.0/nsamp
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded waveforms in", (time.time()-start),"seconds"
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=1)
orig_sweep_data = sweep_data
meas_cfs = []
idxs = []
delays = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]*source_on_freq_scale
res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
delay = res.delay
delays.append(delay)
s21 = s21*np.exp(2j*np.pi*res.delay*fr)
res = fit_best_resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > max_fit_error:
if abs(fmin - thiscf) > max_fit_error:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
delay = np.median(delays)
print "median delay is ",delay
nsamp = 2**22
step = 1
offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])
offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
offsets = offset_bins*512.0/nsamp
meas_cfs = np.array(meas_cfs)
f0binned_meas = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
f0s = f0binned_meas
measured_freqs = sweeps.prepare_sweep(ri,f0binned_meas,offsets,nsamp=nsamp)
print "loaded updated waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
df = data_file.DataFile(suffix=suffix)
df.nc.mmw_atten_turns=mmw_atten_turns
for k,atten in enumerate(attenlist):
ri.set_dac_attenuator(atten)
print "measuring at attenuation", atten
df.log_hw_state(ri)
if k != 0:
orig_sweep_data = None
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=1, sweep_data=orig_sweep_data)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]*source_on_freq_scale
s21 = s21*np.exp(2j*np.pi*delay*fr)
res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if k != 0 or use_fmin:
print "using fmin"
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > max_fit_error:
if abs(fmin - thiscf) > max_fit_error:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
if k == 0:
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
else:
best_bank = (np.abs((ri.tone_bins[:,0]*ri.fs/ri.tone_nsamp)-meas_cfs[0]).argmin())
print "using bank",best_bank
print "offsets:", ((ri.tone_bins[best_bank,:]*ri.fs/ri.tone_nsamp)-meas_cfs)
ri.select_bank(best_bank)
ri._sync()
time.sleep(0.5)
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.4)
t0 = time.time()
dmod,addr = ri.get_data_seconds(30)
x = np.nan
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg, mmw_source_freq=mmw_source_frequency,
mmw_source_modulation_freq=mmw_source_modulation_freq,
zbd_voltage=x)
df.sync()
print "done with sweep"
print "completed in",((time.time()-start)/60.0),"minutes"
|
|
"""
Unittests for iterative_repair module.
"""
import json
import unittest
import networkx as nx
from networkx.readwrite import json_graph
from stitcher import iterative_repair
def _sample_data():
cont = nx.DiGraph()
cont.add_node('1', **{'type': 'a', 'group': 'foo', 'rank': 1.0})
cont.add_node('2', **{'type': 'b', 'group': 'foo', 'rank': 1.0})
cont.add_node('3', **{'type': 'b', 'group': 'bar', 'rank': 2.0})
cont.add_node('4', **{'type': 'a', 'group': 'bar', 'rank': 2.0})
cont.add_edge('1', '2')
cont.add_edge('2', '3')
cont.add_edge('4', '3')
req = nx.DiGraph()
req.add_node('a', **{'type': 'x'})
req.add_node('b', **{'type': 'y'})
req.add_edge('a', 'b')
return cont, req
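# The sample container above is a small directed graph '1'->'2'->'3'<-'4' whose
# nodes carry 'type', 'group' and 'rank' attributes; the request is a minimal
# 'a'->'b' graph that the stitcher has to embed into the container.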
class IterativeRepairStitcherTest(unittest.TestCase):
"""
Test for class IterativeRepairStitcher.
"""
def setUp(self) -> None:
container_tmp = json.load(open('data/container.json'))
self.container = json_graph.node_link_graph(container_tmp,
directed=True)
request_tmp = json.load(open('data/request.json'))
self.request = json_graph.node_link_graph(request_tmp,
directed=True)
rels = json.load(open('data/stitch.json'))
self.cut = iterative_repair.IterativeRepairStitcher(rels)
# Test for success.
def test_stitch_for_success(self):
"""
        Test for success.
"""
self.cut.stitch(self.container, self.request)
def test_find_conflicts_for_success(self):
"""
Test for success.
"""
cont, req = _sample_data()
condy = {'attributes': [('eq', ('a', ('foo', 'bar')))]}
self.cut.find_conflicts(cont, req, condy, {'a': '1'})
def test_next_conflict_for_success(self):
"""
Test for success.
"""
self.cut.next_conflict([('foo', 'bar'), ('bar', 'foo')])
def test_fix_for_success(self):
"""
Test for success.
"""
self.cut.fix_conflict(('k', ('eq', ('rank', 5))),
self.container,
self.request,
{'k': 'A'})
# Test for failure.
def test_stitch_for_failure(self):
"""
Test for failure.
"""
cont = nx.DiGraph()
cont.add_node('foo', **{'type': 'a'})
req = nx.DiGraph()
req.add_node('bar', **{'type': 'y'}) # no matching type in container.
self.assertRaises(Exception, self.cut.stitch, cont, req)
# test with unsolvable case.
cont, req = _sample_data()
res = self.cut.stitch(cont, req, {
'attributes':
[('eq', ('a', ('buuha', 'asdf')))]
})
self.assertTrue(len(res) == 0)
# Test for sanity.
def test_stitch_for_sanity(self):
"""
Test for sanity.
"""
condy = {
'attributes': [('eq', ('k', ('rank', 5)))]
}
res = self.cut.stitch(self.container, self.request, conditions=condy)
# TODO: test with multigraph request!
self.assertIsInstance(res, list)
self.assertIsInstance(res[0], nx.DiGraph)
def test_find_conflicts_for_sanity(self):
"""
Test for sanity.
"""
cont, req = _sample_data()
# a doesn't have foo attr.
condy = {'a': [('eq', ('foo', 'bar'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a is in group foo
condy = {'a': [('neq', ('group', 'foo'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's rank is 1.0
condy = {'a': [('lt', ('rank', 0.5))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's rank is 1.0
condy = {'a': [('gt', ('rank', 2.0))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's group name is a word
condy = {'a': [('regex', ('group', '\\d'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not on same node...
condy = {'a': [('same', 'b')], 'b': [('same', 'a')]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '2'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not on same node...
condy = {'a': [('diff', 'b')], 'b': [('diff', 'a')]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not in same group
condy = {'a': [('share', ('group', ['b']))],
'b': [('share', ('group', ['a']))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '3'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b in same group
condy = {'a': [('nshare', ('group', ['b']))],
'b': [('nshare', ('group', ['a']))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '2'})
self.assertEqual(condy['a'][0], res[0][1])
def test_next_conflict_for_sanity(self):
"""
Test for sanity.
"""
res = self.cut.next_conflict(['foo', 'bar'])
self.assertIsNotNone(res)
def test_fix_for_sanity(self):
"""
Test for sanity.
"""
cont, req = _sample_data()
mapping = {'a': '1'}
self.cut.fix_conflict(('a', ('eq', ('foo', 'bar'))), cont, req,
mapping)
self.assertIn('a', mapping)
class TestConvertConditions(unittest.TestCase):
"""
Test the condition converter.
"""
def setUp(self) -> None:
self.cond = {
'attributes': [('eq', ('a', ('foo', 'y'))),
('neq', ('a', ('foo', 5))),
('lt', ('a', ('foo', 4))),
('lg', ('a', ('foo', 7))),
('regex', ('a', ('foo', '^a')))],
'compositions': [('same', ('1', '2')),
('diff', ('3', '4')),
('diff', ('3', '1')),
('share', ('group', ['x', 'y'])),
('nshare', ('group', ['a', 'b']))]
}
# Test for success.
def test_convert_for_success(self):
"""
Test for success.
"""
iterative_repair.convert_conditions(self.cond)
# Test for failure
# N/A
# Test for sanity.
def test_convert_for_sanity(self):
"""
Test for sanity.
"""
res = iterative_repair.convert_conditions(self.cond)
self.assertIn('a', res)
self.assertIn('b', res)
self.assertIn('x', res)
self.assertIn('y', res)
self.assertIn('1', res)
self.assertIn('2', res)
self.assertIn('3', res)
self.assertIn('4', res)
self.assertTrue(len(res['a']) == 6) # eq, neq, lt, lg, regex, nshare
self.assertTrue(len(res['b']) == 1) # nshare
self.assertTrue(len(res['x']) == 1) # share
self.assertTrue(len(res['y']) == 1) # share
self.assertTrue(len(res['1']) == 2) # same, diff
self.assertTrue(len(res['2']) == 1) # same
self.assertTrue(len(res['3']) == 2) # 2x diff
self.assertTrue(len(res['4']) == 1) # diff
|
|
from __future__ import absolute_import
from __future__ import print_function
import ujson
from django.http import HttpResponse
from mock import patch
from typing import Any, Dict, List, Text, Union
from zerver.lib.actions import (
do_change_is_admin,
do_set_realm_property,
do_deactivate_realm,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.models import get_realm, Realm, UserProfile
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, user_profile, new_realm_name):
# type: (UserProfile, Text) -> None
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
self.example_user('hamlet')
realm = get_realm('zulip')
new_name = u'Zed You Elle Eye Pea'
do_set_realm_property(realm, 'name', new_name)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name)
def test_update_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip')
new_name = u'Puliz'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'name', new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='name',
value=new_name,
))
def test_update_realm_description_events(self):
# type: () -> None
realm = get_realm('zulip')
new_description = u'zulip dev group'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'description', new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_update_realm_description(self):
# type: () -> None
email = self.example_email("iago")
self.login(email)
realm = get_realm('zulip')
new_description = u'zulip dev group'
data = dict(description=ujson.dumps(new_description))
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/realm', data)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.description, new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_realm_description_length(self):
# type: () -> None
new_description = u'A' * 1001
data = dict(description=ujson.dumps(new_description))
# create an admin user
email = self.example_email("iago")
self.login(email)
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Realm description is too long.')
realm = get_realm('zulip')
self.assertNotEqual(realm.description, new_description)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
user_profile = self.example_user('othello')
email = user_profile.email
self.login(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_unauthorized_name_change(self):
# type: () -> None
data = {'full_name': 'Sir Hamlet'}
user_profile = self.example_user('hamlet')
email = user_profile.email
self.login(email)
do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
url = '/json/settings/change'
result = self.client_post(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
def test_do_deactivate_realm_clears_user_realm_cache(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
self.example_user('hamlet')
realm = get_realm('zulip')
do_deactivate_realm(realm)
user = self.example_user('hamlet')
self.assertTrue(user.realm.deactivated)
def test_do_deactivate_realm_on_deactived_realm(self):
# type: () -> None
"""Ensure early exit is working in realm deactivation"""
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
def test_change_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = self.example_email("iago")
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.default_language, new_lang)
        # Test to make sure that when an invalid language is passed
        # as the default realm language, the correct validation error is
        # raised and the invalid language is not saved in the db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, invalid_lang)
class RealmAPITest(ZulipTestCase):
def setUp(self):
# type: () -> None
user_profile = self.example_user('cordelia')
email = user_profile.email
self.login(email)
do_change_is_admin(user_profile, True)
def set_up_db(self, attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip')
setattr(realm, attr, value)
realm.save()
def update_with_api(self, name, value):
# type: (str, Union[Text, int, bool]) -> Realm
result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
self.assert_json_success(result)
return get_realm('zulip') # refresh data
def do_test_realm_update_api(self, name):
# type: (str) -> None
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests = [False, True] # type: List[bool]
test_values = dict(
add_emoji_by_admins_only=bool_tests,
create_stream_by_admins_only=bool_tests,
default_language=[u'de', u'en'],
description=[u'Realm description', u'New description'],
email_changes_disabled=bool_tests,
invite_required=bool_tests,
invite_by_admins_only=bool_tests,
inline_image_preview=bool_tests,
inline_url_embed_preview=bool_tests,
message_retention_days=[10, 20],
name=[u'Zulip', u'New Name'],
name_changes_disabled=bool_tests,
restricted_to_domain=bool_tests,
waiting_period_threshold=[10, 20],
) # type: Dict[str, Any]
vals = test_values.get(name)
if vals is None:
raise AssertionError('No test created for %s' % (name))
self.set_up_db(name, vals[0])
realm = self.update_with_api(name, vals[1])
self.assertEqual(getattr(realm, name), vals[1])
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
def test_update_realm_properties(self):
# type: () -> None
for prop in Realm.property_types:
self.do_test_realm_update_api(prop)
def test_update_realm_allow_message_editing(self):
# type: () -> None
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db('allow_message_editing', False)
self.set_up_db('message_content_edit_limit_seconds', 0)
realm = self.update_with_api('allow_message_editing', True)
realm = self.update_with_api('message_content_edit_limit_seconds', 100)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = self.update_with_api('allow_message_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = self.update_with_api('message_content_edit_limit_seconds', 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
|
|
"""
Test suite for socketserver.
"""
import contextlib
import imp
import os
import select
import signal
import socket
import errno
import tempfile
import unittest
import socketserver
import test.support
from test.support import reap_children, reap_threads, verbose
try:
import threading
except ImportError:
threading = None
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = test.support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=20):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError("timed out on %r" % (sock,))
if HAVE_UNIX_SOCKETS:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
pid = os.fork()
if pid == 0:
# Don't throw an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except os.error:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
if os.name == 'os2':
dir = '\socket'
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
if os.name == 'os2':
# AF_UNIX socket names on OS/2 require a specific prefix
# which can't include a drive letter and must also use
# backslashes as directory separators
if fn[1] == ':':
fn = fn[2:]
if fn[0] in (os.sep, os.altsep):
fn = fn[1:]
if os.sep == '/':
fn = fn.replace(os.sep, os.altsep)
else:
fn = fn.replace(os.altsep, os.sep)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print("creating server")
server = MyServer(addr, MyHandler)
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print("ADDR =", addr)
print("CLASS =", svrcls)
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
for i in range(3):
if verbose: print("test client", i)
testfunc(svrcls.address_family, addr)
if verbose: print("waiting for server")
server.shutdown()
t.join()
server.server_close()
if verbose: print("done")
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def test_TCPServer(self):
self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(socketserver.ThreadingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
if HAVE_UNIX_SOCKETS:
def test_UnixStreamServer(self):
self.run_server(socketserver.UnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingUnixStreamServer(self):
self.run_server(socketserver.ThreadingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(socketserver.UDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(socketserver.ThreadingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
if HAVE_FORKING:
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@contextlib.contextmanager
def mocked_select_module(self):
"""Mocks the select.select() call to raise EINTR for first call"""
old_select = select.select
class MockSelect:
def __init__(self):
self.called = 0
def __call__(self, *args):
self.called += 1
if self.called == 1:
# raise the exception on first call
raise OSError(errno.EINTR, os.strerror(errno.EINTR))
else:
# Return real select value for consecutive calls
return old_select(*args)
select.select = MockSelect()
try:
yield select.select
finally:
select.select = old_select
def test_InterruptServerSelectCall(self):
with self.mocked_select_module() as mock_select:
pid = self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
# Make sure select was called again:
self.assertGreater(mock_select.called, 1)
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
# if HAVE_UNIX_SOCKETS:
# def test_UnixDatagramServer(self):
# self.run_server(socketserver.UnixDatagramServer,
# socketserver.DatagramRequestHandler,
# self.dgram_examine)
#
# def test_ThreadingUnixDatagramServer(self):
# self.run_server(socketserver.ThreadingUnixDatagramServer,
# socketserver.DatagramRequestHandler,
# self.dgram_examine)
#
# if HAVE_FORKING:
# def test_ForkingUnixDatagramServer(self):
# self.run_server(socketserver.ForkingUnixDatagramServer,
# socketserver.DatagramRequestHandler,
# self.dgram_examine)
@reap_threads
def test_shutdown(self):
        # Issue #2302: shutdown() should always succeed in making another
        # thread leave serve_forever().
class MyServer(socketserver.TCPServer):
pass
class MyHandler(socketserver.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
s.server_close()
def test_main():
if imp.lock_held():
# If the import lock is held, the threads will hang
raise unittest.SkipTest("can't run when import lock is held")
test.support.run_unittest(SocketServerTest)
if __name__ == "__main__":
test_main()
|
|
import StringIO
import json
import logging
import random
import urllib
import urllib2
import os
# from urllib.parse import urlparse
from time import gmtime, strftime
from datetime import datetime
from firebase import firebase
# for sending images
# from PIL import Image
# import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# SQListBot Token
# TOKEN = '134200866:AAGSqcPJVNtMruJBGpFX-1PEGBwA6KYxfKs'
# Quantum Token
TOKEN = '279379002:AAGRWKf3V3mUtTt9Lg-t9OSSu7kp2mGdESE'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
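# Usage sketch (requires the App Engine datastore): setEnabled(chat_id, True)
# persists an EnableStatus entity keyed by str(chat_id); getEnabled(chat_id)
# reads it back and defaults to False for chats that were never enabled.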
def listToString(invitees):
listString = ""
for invitee in invitees:
listString=listString+"\n"+invitee+" has been invited"
return listString
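# Illustrative value: listToString(['Alice', 'Bob']) returns
# "\nAlice has been invited\nBob has been invited"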
def createEvent():
db = firebase.FirebaseApplication('https://telegram-list-bot.firebaseio.com', None)
db.put('/events', 'TESTEVENT', {'key1': 'value1'}, {'key2': 'value2'})
# ================================
#Variables for list functionality
invitees=[]
isEventCreated=False
eventName="Test Event"
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(30)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(30)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(30)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(30)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg=None, img=None):
if msg:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
'chat_id': str(chat_id),
'text': msg.encode('utf-8'),
'disable_web_page_preview': 'true',
'reply_to_message_id': str(message_id),
})).read()
elif img:
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('reply_to_message_id', str(message_id)),
], [
('photo', 'image.jpg', img),
])
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
text = text.lower()
if text.startswith('/'):
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
# elif text == '/image':
# img = Image.new('RGB', (512, 512))
# base = random.randint(0, 16777216)
# pixels = [base+i*j for i in range(512) for j in range(512)]
# img.putdata(pixels)
# output = StringIO.StringIO()
# img.save(output, 'JPEG')
# reply(img=output.getvalue())
elif text == '/version':
reply('Version 3.0: Last updated 25.01.17')
elif text == '/createevent':
createEvent()
reply('Event Created!')
elif text == 'shiquan':
reply('DAS ME!')
elif '/generatelist' in text:
reply('Please set the event name:'
+'\nType /rsvp to respond to this event.'
+'\nType /viewresponses to view current event status.'
+'\nType /destroylist to terminate the existing event.')
isEventCreated=True
elif '/rsvp' in text:
if isEventCreated:
invitee = str(fr.get('first_name'))
invitees.append(invitee)
reply(invitee+' is going!')
else:
reply('There is no active event to rsvp to!')
elif '/viewresponses' in text:
if isEventCreated:
reply(listToString(invitees))
else:
reply('There is no active event to view!')
elif '/destroylist' in text:
if isEventCreated:
isEventCreated=False
reply(eventName+' terminated.')
else:
reply("There is no existing event to terminate!")
else:
reply('What command?')
elif 'who are you' in text:
reply('I am QUANTUM, created by Master LIM Shi Quan.')
elif 'what time' in text:
now = datetime.now()
reply("It is "+str((now.hour+8)%24)+":"+str(now.minute))
else:
if getEnabled(chat_id):
try:
resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8'))))
back = resp1.get('res').get('msg')
except urllib2.HTTPError, err:
logging.error(err)
back = str(err)
if not back:
reply('okay...')
elif 'I HAVE NO RESPONSE' in back:
reply('you said something with no meaning')
else:
reply(back)
# reply("No Meaning")
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
|
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
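# Illustrative substitution (sketch only): with
# config = {'TITANIUM_SDK': '/Library/TitaniumSDK'},
# replace_vars(config, '$(TITANIUM_SDK)/iphone') -> '/Library/TitaniumSDK/iphone';
# unknown $(KEY) references are left untouched.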
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','co.coolelephant.tandemscroll.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','CoCoolelephantTandemscrollModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:29376")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:29376")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Memetic address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Memetic address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
		except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import uuid
from azure import (
_update_request_uri_query,
WindowsAzureError,
WindowsAzureBatchOperationError,
url_unquote,
DEFAULT_HTTP_TIMEOUT,
_ERROR_CANNOT_FIND_PARTITION_KEY,
_ERROR_CANNOT_FIND_ROW_KEY,
_ERROR_INCORRECT_TABLE_IN_BATCH,
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
_ERROR_BATCH_COMMIT_FAIL,
ETree,
_get_etree_text,
_etree_entity_feed_namespaces,
)
from azure.http import HTTPError, HTTPRequest, HTTPResponse
from azure.http.httpclient import _HTTPClient
from azure.storage import (
_update_storage_table_header,
METADATA_NS,
_sign_storage_table_request,
)
_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
if sys.version_info < (3,):
def _new_boundary():
return str(uuid.uuid1())
else:
def _new_boundary():
return str(uuid.uuid1()).encode('utf-8')
class _BatchClient(_HTTPClient):
'''
This is the class that is used for batch operation for storage table
service. It only supports one changeset.
'''
def __init__(self, service_instance, account_key, account_name,
protocol='http', timeout=DEFAULT_HTTP_TIMEOUT):
_HTTPClient.__init__(self, service_instance, account_name=account_name,
account_key=account_key, protocol=protocol,
timeout=timeout)
self.is_batch = False
self.batch_requests = []
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
def get_request_table(self, request):
'''
Extracts the table name from request.path. The path has either
the "/mytable(...)" or the "/mytable" format.
request:
the request to insert, update or delete entity
'''
if '(' in request.path:
pos = request.path.find('(')
return request.path[1:pos]
else:
return request.path[1:]
def get_request_partition_key(self, request):
'''
Extracts PartitionKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operations use
POST requests, and for those the PartitionKey is in the request body.
request:
the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = ETree.fromstring(request.body)
part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
if part_key is None:
raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return _get_etree_text(part_key)
else:
uri = url_unquote(request.path)
pos1 = uri.find('PartitionKey=\'')
pos2 = uri.find('\',', pos1)
if pos1 == -1 or pos2 == -1:
raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return uri[pos1 + len('PartitionKey=\''):pos2]
def get_request_row_key(self, request):
'''
Extracts RowKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operations use
POST requests, and for those the RowKey is in the request body.
request:
the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = ETree.fromstring(request.body)
row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
if row_key is None:
raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
return _get_etree_text(row_key)
else:
uri = url_unquote(request.path)
pos1 = uri.find('RowKey=\'')
pos2 = uri.find('\')', pos1)
if pos1 == -1 or pos2 == -1:
raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
row_key = uri[pos1 + len('RowKey=\''):pos2]
return row_key
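# For reference (illustrative values): a non-POST (update/merge/delete)
# request path looks like "/mytable(PartitionKey='pk1',RowKey='rk1')", from
# which the two helpers above slice out 'pk1' and 'rk1'. Insert requests are
# POSTs and carry both keys inside the Atom entry body instead.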
def validate_request_table(self, request):
'''
Validates that all requests have the same table name. Set the table
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
'''
if self.batch_table:
if self.get_request_table(request) != self.batch_table:
raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)
else:
self.batch_table = self.get_request_table(request)
def validate_request_partition_key(self, request):
'''
Validates that all requests have the same PartitionKey. Set the
PartitionKey if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
'''
if self.batch_partition_key:
if self.get_request_partition_key(request) != \
self.batch_partition_key:
raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
else:
self.batch_partition_key = self.get_request_partition_key(request)
def validate_request_row_key(self, request):
'''
Validates that each request has a different RowKey and adds the RowKey
to the existing RowKey list.
request:
the request to insert, update or delete entity
'''
if self.batch_row_keys:
if self.get_request_row_key(request) in self.batch_row_keys:
raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
else:
self.batch_row_keys.append(self.get_request_row_key(request))
def begin_batch(self):
'''
Starts the batch operation. Initializes the batch variables:
is_batch:
batch operation flag.
batch_table:
the table name of the batch operation
batch_partition_key:
the PartitionKey of the batch requests.
batch_row_keys:
the list of RowKeys of the requests added so far.
batch_requests:
the list of requests in the batch.
'''
self.is_batch = True
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
self.batch_requests = []
def insert_request_to_batch(self, request):
'''
Adds request to batch operation.
request:
the request to insert, update or delete entity
'''
self.validate_request_table(request)
self.validate_request_partition_key(request)
self.validate_request_row_key(request)
self.batch_requests.append(request)
def commit_batch(self):
''' Resets batch flag and commits the batch requests. '''
if self.is_batch:
self.is_batch = False
self.commit_batch_requests()
def commit_batch_requests(self):
''' Commits the batch requests. '''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
# Commit the batch only if the requests list is not empty.
if self.batch_requests:
request = HTTPRequest()
request.method = 'POST'
request.host = self.batch_requests[0].host
request.path = '/$batch'
request.headers = [
('Content-Type', 'multipart/mixed; boundary=' + \
batch_boundary.decode('utf-8')),
('Accept', 'application/atom+xml,application/xml'),
('Accept-Charset', 'UTF-8')]
request.body = b'--' + batch_boundary + b'\n'
request.body += b'Content-Type: multipart/mixed; boundary='
request.body += changeset_boundary + b'\n\n'
content_id = 1
# Adds each request body to the POST data.
for batch_request in self.batch_requests:
request.body += b'--' + changeset_boundary + b'\n'
request.body += b'Content-Type: application/http\n'
request.body += b'Content-Transfer-Encoding: binary\n\n'
request.body += batch_request.method.encode('utf-8')
request.body += b' http://'
request.body += batch_request.host.encode('utf-8')
request.body += batch_request.path.encode('utf-8')
request.body += b' HTTP/1.1\n'
request.body += b'Content-ID: '
request.body += str(content_id).encode('utf-8') + b'\n'
content_id += 1
# Add different headers for different type requests.
if not batch_request.method == 'DELETE':
request.body += \
b'Content-Type: application/atom+xml;type=entry\n'
for name, value in batch_request.headers:
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n'
break
request.body += b'Content-Length: '
request.body += str(len(batch_request.body)).encode('utf-8')
request.body += b'\n\n'
request.body += batch_request.body + b'\n'
else:
for name, value in batch_request.headers:
# If-Match should be already included in
# batch_request.headers, but in case it is missing,
# just add it.
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n\n'
break
else:
request.body += b'If-Match: *\n\n'
request.body += b'--' + changeset_boundary + b'--' + b'\n'
request.body += b'--' + batch_boundary + b'--'
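# Illustrative sketch of the assembled multipart body (boundary values and
# the host name are placeholders, not literal output):
#
#   --batch_<uuid>
#   Content-Type: multipart/mixed; boundary=changeset_<uuid>
#
#   --changeset_<uuid>
#   Content-Type: application/http
#   Content-Transfer-Encoding: binary
#
#   POST http://<account>.table.core.windows.net/<table> HTTP/1.1
#   Content-ID: 1
#   Content-Type: application/atom+xml;type=entry
#   Content-Length: <n>
#
#   <atom entry XML for the entity>
#   --changeset_<uuid>--
#   --batch_<uuid>--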
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_storage_table_header(request)
auth = _sign_storage_table_request(request,
self.account_name,
self.account_key)
request.headers.append(('Authorization', auth))
# Submit the whole request as batch request.
response = self.perform_request(request)
if response.status >= 300:
raise HTTPError(response.status,
_ERROR_BATCH_COMMIT_FAIL,
self.respheader,
response.body)
# http://www.odata.org/documentation/odata-version-2-0/batch-processing/
# The body of a ChangeSet response is either a response for all the
# successfully processed change request within the ChangeSet,
# formatted exactly as it would have appeared outside of a batch,
# or a single response indicating a failure of the entire ChangeSet.
responses = self._parse_batch_response(response.body)
if responses and responses[0].status >= 300:
self._report_batch_error(responses[0])
def cancel_batch(self):
''' Resets the batch flag. '''
self.is_batch = False
def _parse_batch_response(self, body):
parts = body.split(b'--changesetresponse_')
responses = []
for part in parts:
httpLocation = part.find(b'HTTP/')
if httpLocation > 0:
response = self._parse_batch_response_part(part[httpLocation:])
responses.append(response)
return responses
def _parse_batch_response_part(self, part):
lines = part.splitlines()
# First line is the HTTP status/reason
status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
# Followed by headers and body
headers = []
body = b''
isBody = False
for line in lines[1:]:
if line == b'' and not isBody:
isBody = True
elif isBody:
body += line
else:
headerName, _, headerVal = line.partition(b':')
headers.append((headerName.lower(), headerVal))
return HTTPResponse(int(status), reason.strip(), headers, body)
def _report_batch_error(self, response):
doc = ETree.fromstring(response.body)
code_element = doc.find('./m:code', _etree_entity_feed_namespaces)
code = _get_etree_text(code_element) if code_element is not None else ''
message_element = doc.find('./m:message', _etree_entity_feed_namespaces)
message = _get_etree_text(message_element) if message_element is not None else ''
raise WindowsAzureBatchOperationError(message, code)
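# --- Illustrative usage sketch (not part of the original SDK module) ---
# Shows the intended begin/insert/commit life-cycle of _BatchClient. The
# `batch_client` and `requests` arguments are assumptions standing in for the
# table service wrapper and the HTTPRequest objects it prepares per entity.
def _example_batch_lifecycle(batch_client, requests):
    batch_client.begin_batch()
    try:
        for request in requests:
            # Each request is validated (same table, same PartitionKey,
            # unique RowKey) before being queued.
            batch_client.insert_request_to_batch(request)
        # Builds the single multipart POST to /$batch and submits it.
        batch_client.commit_batch()
    except WindowsAzureError:
        # Leave batch mode so later requests go out individually again.
        batch_client.cancel_batch()
        raise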
|
|
import collections
import csv
import random
from operator import itemgetter
from optparse import make_option
from os.path import basename, splitext
from django.core.management.base import BaseCommand, CommandError
from django.core.validators import URLValidator, ValidationError
from django.db.transaction import atomic
from django.utils import translation
from mpconstants.mozilla_languages import LANGUAGES
from mkt.constants.base import STATUS_PUBLIC
from mkt.constants.categories import CATEGORY_CHOICES_DICT
from mkt.constants.regions import REGIONS_CHOICES_ID_DICT, REGIONS_DICT
from mkt.tags.models import Tag
from mkt.translations.utils import to_language
from mkt.webapps.models import Installs, Webapp
from mkt.websites.models import Website, WebsitePopularity
from mkt.websites.tasks import fetch_icon
class ParsingError(Exception):
pass
class Command(BaseCommand):
"""
Usage:
python manage.py import_websites_from_csv <file>
"""
help = u'Import Websites from a CSV file'
args = u'<file> [--overwrite] [--limit] [--set-popularity]'
subcommand = splitext(basename(__file__))[0]
option_list = BaseCommand.option_list + (
make_option(
'--overwrite',
action='store_true',
dest='overwrite',
default=False,
help='Overwrite existing Website with the same Unique Moz ID. '
'Otherwise, any row with an existing Unique Moz ID in the '
'database will be skipped.',
),
make_option(
'--limit',
action='store',
type=int,
dest='limit',
default=None,
help='Maximum number of sites to import. Skipped websites do not '
'count towards the limit',
),
make_option(
'--set-popularity',
action='store_true',
dest='set_popularity',
default=False,
help='Set a (fake) initial popularity and last updated date '
'using the Rank and Unique Moz ID columns in the CSV.',
),
)
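# Illustrative invocations (the CSV filename is hypothetical); note that
# --limit cannot be combined with --set-popularity:
#   python manage.py import_websites_from_csv sites.csv
#   python manage.py import_websites_from_csv sites.csv --overwrite --set-popularity
#   python manage.py import_websites_from_csv sites.csv --limit 100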
def clean_string(self, s):
return s.strip().decode('utf-8')
def validate_url(self, url):
if url:
URLValidator()(url)
def set_automatic_properties(self, instance, row):
"""
Set properties automatically from the included mapping. Since it sets
translated fields, it's important to set default_locale on the instance
first; it is then used by this method to set the correct locale for the
fields.
"""
mapping = {
# property on Website : field name in csv.
'short_name': 'Short Name (enter text up to 12 characters)',
'name': 'Display Title',
'title': 'Long Name to Use (type in if no)',
'description': 'Description from site',
}
with translation.override(instance.default_locale):
for prop, column in mapping.items():
setattr(instance, prop, self.clean_string(row[column]))
def set_default_locale(self, instance, row):
lang = to_language(self.clean_string(row['Language of Meta Data']))
if not lang or lang == 'english':
# Exception because 'en-US' is set as 'English (US)'.
lang = 'en-US'
elif lang == 'chinese':
# Consider 'chinese' without more information as simplified
# chinese, zh-CN.
lang = 'zh-CN'
elif lang == 'portuguese':
# We don't support pt-PT in Marketplace, use pt-BR.
lang = 'pt-BR'
if lang not in self.languages:
lang = self.reversed_languages.get(lang)
if lang is None:
raise ParsingError(
u'Website %s has unknown language set for its metadata: %s'
% (row['Unique Moz ID'], row['Language of Meta Data']))
instance.default_locale = lang
def set_categories(self, instance, row):
cat = self.clean_string(row['Marketplace Category']).lower()
if cat == 'science & tech':
cat = 'science-tech'
elif cat == 'comics':
cat = 'books-comics'
elif cat == 'fitness':
cat = 'health-fitness'
elif cat == 'navigation':
cat = 'maps-navigation'
if cat not in self.categories:
cat = self.reversed_categories.get(cat)
if cat is None:
raise ParsingError(
u'Website %s has unknown category set: %s'
% (row['Unique Moz ID'], row['Marketplace Category']))
instance.categories = [cat]
def set_preferred_regions(self, instance, row):
# For each region, find the region object, add the id to the list,
# store it. Warn about unknown regions.
regions_slugs = self.clean_string(row['List of Countries']).split(',')
preferred_regions = []
for region in regions_slugs:
if region == 'gb':
region = 'uk'
elif region == 'wo':
region = 'restofworld'
try:
preferred_regions.append(REGIONS_DICT[region].id)
except KeyError:
raise ParsingError(
u'Website %s has unknown country: %s'
% (row['Unique Moz ID'], region))
instance.preferred_regions = preferred_regions
def set_tags(self, instance, row):
keywords = self.clean_string(row['Keywords to use'].lower())
if keywords.startswith('http'):
raise ParsingError(
u'Website %s has invalid keywords: %s'
% (row['Unique Moz ID'], keywords))
max_len = Tag._meta.get_field('tag_text').max_length
for keyword in set(keywords.split(',')):
keyword = keyword.strip()
if len(keyword) > max_len:
raise ParsingError(
u'Website %s has a keyword which is too long: %s'
% (row['Unique Moz ID'], keyword))
tag, _ = Tag.objects.get_or_create(tag_text=keyword)
instance.keywords.add(tag)
def set_url(self, instance, row):
# Ultimately, we don't care whether the website has a mobile specific
# URL, is responsive, etc.: if it has both a desktop-specific URL and a
# mobile URL set, then set both accordingly; otherwise just set
# url.
desktop_url = self.clean_string(row['Desktop URL']).lower()
mobile_url = self.clean_string(row['Mobile URL']).lower()
try:
self.validate_url(desktop_url)
except ValidationError:
raise ParsingError(
u'Website %s has invalid Desktop URL %s'
% (row['Unique Moz ID'], desktop_url))
try:
self.validate_url(mobile_url)
except ValidationError:
raise ParsingError(
u'Website %s has invalid Mobile URL %s'
% (row['Unique Moz ID'], mobile_url))
if desktop_url and mobile_url:
instance.url = desktop_url
instance.mobile_url = mobile_url
elif mobile_url:
instance.url = mobile_url
else:
raise ParsingError(
u'Website %s has no URL ?!' % row['Unique Moz ID'])
def set_icon(self, instance, row):
icon_url = self.clean_string(row['Icon url'])
try:
# Lots of rows with no icons or just 'Default' in the data, so
# ignore the issue and don't report it.
if icon_url:
self.validate_url(icon_url)
# Use original_apply_async instead of using the
# post_request_task mechanism. See comment below at the end of
# the file for an explanation.
fetch_icon.original_apply_async(args=(instance, icon_url))
else:
raise ValidationError('Empty Icon URL')
except ValidationError:
instance.icon_type = ''
def parse(self, filename):
try:
return csv.DictReader(open(filename))
except IOError as err:
raise CommandError(err)
def assign_popularity(self):
print 'Setting regional popularity values...'
for region in self.ranking_per_region.keys():
websites_len = len(self.ranking_per_region[region])
print u'Setting regional popularity for %d site(s) in %s' % (
websites_len, unicode(REGIONS_CHOICES_ID_DICT[region].name))
# Sort sites by rank in that region.
websites = sorted(self.ranking_per_region[region],
key=itemgetter(1), reverse=True)
# Take the same number of popularity values for apps in that
# region.
apps_popularity = (Installs.objects.filter(region=region)
.values_list('value', flat=True)
.order_by('-value')[:websites_len])
for i, app_popularity_value in enumerate(apps_popularity):
# Steal the app's popularity value, minus one, to get a chance to end
# up with a more stable ordering (no equal values).
pk = websites[i][0]
popularity, created = WebsitePopularity.objects.get_or_create(
website_id=pk, region=region)
popularity.update(value=app_popularity_value - 1)
print 'Setting global popularity values...'
GLOBAL_REGION = 0
for pk in self.websites:
values = list(WebsitePopularity.objects
.filter(website=pk)
.exclude(region=GLOBAL_REGION)
.values_list('value', flat=True))
popularity, created = WebsitePopularity.objects.get_or_create(
website_id=pk, region=GLOBAL_REGION)
popularity.update(value=sum(values))
def assign_last_updated(self):
print 'Setting last updated dates...'
# To make new and popular different, assign a random value for
# last_updated stolen from the last x apps, where x is twice the number
# of websites.
desired_len = len(self.websites) * 2
last_updated_dates = list(
Webapp.objects
.exclude(last_updated=None)
.values_list('last_updated', flat=True)
.order_by('-last_updated')[:desired_len])
if len(last_updated_dates) < desired_len:
raise CommandError('Not enough apps with a last_updated set in the'
' database to continue!')
random.shuffle(last_updated_dates)
for pk in self.websites:
(Website.objects.filter(pk=pk)
.update(last_updated=last_updated_dates.pop()))
def remember_website_ranking(self, instance, rank):
for region in instance.preferred_regions:
self.ranking_per_region[region].append((instance.pk, rank))
self.websites.append(instance.pk)
def create_instances(self, data):
created_count = 0
for i, row in enumerate(data):
if (i + 1) % 100 == 0:
print 'Processing row %d... (%d websites created)' % (
i + 1, created_count)
if self.limit and created_count >= self.limit:
print 'Limit (%d) was hit, stopping the import' % self.limit
break
id_ = int(self.clean_string(row['Unique Moz ID']))
rank = int(self.clean_string(row['Rank']))
try:
website = Website.objects.get(moz_id=id_)
if self.overwrite:
# Existing website and we were asked to overwrite: delete
# it!
website.delete()
else:
# Existing website and we were not asked to overwrite: skip
# it, storing its ranking first to set popularity later.
if self.set_popularity:
self.remember_website_ranking(website, rank)
continue
except Website.DoesNotExist:
pass
with atomic():
try:
website = Website(moz_id=id_, status=STATUS_PUBLIC)
self.set_default_locale(website, row)
self.set_automatic_properties(website, row)
self.set_categories(website, row)
self.set_preferred_regions(website, row)
self.set_url(website, row)
website.save()
if self.set_popularity:
# Remember ranking to set popularity later.
self.remember_website_ranking(website, rank)
# Keywords use a M2M, so do that once the website is saved.
self.set_tags(website, row)
# Launch task to fetch icon once we know everything is OK.
self.set_icon(website, row)
created_count += 1
except ParsingError as e:
print e.message
return created_count
def handle(self, *args, **kwargs):
if len(args) != 1:
self.print_help('manage.py', self.subcommand)
return
filename = args[0]
self.overwrite = kwargs.get('overwrite', False)
self.limit = kwargs.get('limit', None)
self.set_popularity = kwargs.get('set_popularity', False)
if self.set_popularity:
if self.limit:
raise CommandError(
'Cannot use --set-popularity with --limit, the full data '
'set is needed to set popularity, aborting.')
self.websites = []
self.ranking_per_region = collections.defaultdict(list)
with translation.override('en-US'):
self.languages = dict(LANGUAGES).keys()
self.reversed_languages = {v['English'].lower(): k for k, v
in LANGUAGES.items()}
self.categories = CATEGORY_CHOICES_DICT.keys()
self.reversed_categories = {unicode(v).lower(): k for k, v
in CATEGORY_CHOICES_DICT.items()}
data = self.parse(filename)
created_count = self.create_instances(data)
print 'Import phase done, created %d websites.' % created_count
if self.set_popularity:
self.assign_popularity()
self.assign_last_updated()
# No need to manually call _send_tasks() even though we are in a
# management command. The only tasks we are using are fetch_icon(),
# for which we use original_apply_async() directly, and the indexation
# task, which would be useless to fire since the fetch icon task will
# trigger a save and a re-index anyway. Plus, we won't have many sites
# so it's probably simpler to trigger a full reindex.
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains tests that simulate peer failures.
When a peer fails during MultiWorkerMirroredStrategy training, all workers
should get an Unavailable error.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.distribute import collective_all_reduce_strategy as mwms_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import test
COORDINATION_SERVICE = None
RPC_PROTOCOL = "grpc"
# Put it in top level so it executes in the child processes as well.
mwms_lib.CollectiveAllReduceExtended._enable_check_health = True
mwms_lib.CollectiveAllReduceExtended._check_health_interval = 3
mwms_lib.CollectiveAllReduceExtended._check_health_initial_timeout = 0
# This is needed for OSS, which issues all RPCs with fail_fast=false by default.
mwms_lib.CollectiveAllReduceExtended._check_health_timeout = 1
def get_attempt(strategy, attempts):
task_type = strategy.cluster_resolver.task_type
task_id = strategy.cluster_resolver.task_id
attempts[(task_type, task_id)] = attempts.get((task_type, task_id), 0) + 1
return task_id, attempts[(task_type, task_id)]
quick_exit = os._exit # pylint: disable=protected-access
class PeerFailureTest(test.TestCase):
# Note that all the tests use auto_restart=True. Currently we rely on the
# assumption that an external system restarts failed tasks. If the assumption
# is not true, the remaining tasks may still hang instead of fail.
#
# In these tests we leverage the auto restart feature of MultiProcessRunner.
# Failed workers are restarted automatically. In reality there needs to be
# some job management system that does the restart, e.g. Kubernetes.
#
# Worker failures may cause problems if there is more than one collective, and
# the failure happens after the first collective. In this case the recovered
# worker will be running a different collective with the rest, which causes a
# deadlock. Note that collectives are common, e.g. when creating variables the
# initial values are broadcasted from the first worker.
#
# We use a multiprocessing.Manager().dict() object to track the attempts of
# each worker. We take different actions in different attempts to simulate the
# events in the real world. E.g. some tests make a worker fail on the first
# attempt only, and assert that it should recover.
def test_creating_variable(self):
# This test simulates the case when a worker fails before or during creating
# a variable. Creating variables involves broadcasting the initial value from
# the first replica to all replicas.
def worker_fn():
context.context().enable_coordination_service(COORDINATION_SERVICE)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
with strategy.scope():
tf.Variable(1.)
# worker-1 dies here.
if strategy.cluster_resolver.task_id == 1:
quick_exit(1)
v = tf.Variable(tf.random.uniform(()))
return v.read_value().numpy()
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
mpr = multi_process_runner.MultiProcessRunner(
worker_fn, cluster_spec, rpc_layer=RPC_PROTOCOL)
mpr.start()
# TODO(b/151232436): Always raise UnavailableError when a peer fails.
with self.assertRaises(
(tf.errors.UnavailableError, tf.errors.DeadlineExceededError)):
mpr.join(timeout=30)
def test_reduce_small_tensor(self):
# This test simulates the case when a worker fails before or during reducing
# a small tensor, e.g. reading a metric.
#
# Note that this is written for a specific corner case that used to happen
# only when all of the following conditions are met:
# - There're two workers.
# - They're reducing a small tensor. The definition of small varies
# per platform.
# - They're reducing a single tensor. Batched all-reduce are not affected.
# - It must be worker-1 that fails.
# In this case, the all-reduce is effectively two send/recv operations,
# the first one from worker-0 to worker-1, and the second one vice versa.
# The first one blocks the second one. In send/recv, the sending party is
# not aware of the failures of the receiving party.
def worker_fn():
context.context().enable_coordination_service(COORDINATION_SERVICE)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
value = tf.identity([1.])
strategy.reduce("sum", value, axis=None)
# worker-1 dies here.
if strategy.cluster_resolver.task_id == 1:
quick_exit(1)
strategy.reduce("sum", value, axis=None)
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
mpr = multi_process_runner.MultiProcessRunner(
worker_fn, cluster_spec, rpc_layer=RPC_PROTOCOL)
mpr.start()
# TODO(b/151232436): Always raise UnavailableError when a peer fails.
with self.assertRaises(
(tf.errors.UnavailableError, tf.errors.DeadlineExceededError)):
mpr.join(timeout=30)
class PeerFailureRecoverTest(test.TestCase):
# Similar to PeerFailureTest but simulates the situation where there's some
# external system that automatically restarts failed workers.
def test_creating_variable(self):
# See PeerFailureTest.test_creating_variable
def worker_fn(attempts):
context.context().enable_coordination_service(COORDINATION_SERVICE)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
task_id, attempt = get_attempt(strategy, attempts)
with strategy.scope():
tf.Variable(1.)
# worker-1 dies here.
if attempt == 1 and task_id == 1:
quick_exit(1)
v = tf.Variable(tf.random.uniform(()))
return v.read_value().numpy()
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
attempts = multi_process_runner.manager().dict()
mpr = multi_process_runner.MultiProcessRunner(
worker_fn,
cluster_spec,
rpc_layer=RPC_PROTOCOL,
args=(attempts,),
auto_restart=True)
mpr.start()
results = mpr.join(timeout=90).return_value
self.assertEqual(results[0], results[1])
def test_reduce_small_tensor(self):
# See PeerFailureTest.test_reduce_small_tensor
def worker_fn(attempts):
context.context().enable_coordination_service(COORDINATION_SERVICE)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
task_id, attempt = get_attempt(strategy, attempts)
value = tf.identity([1.])
strategy.reduce("sum", value, axis=None)
# worker-1 dies here.
if attempt == 1 and task_id == 1:
quick_exit(1)
return strategy.reduce("sum", value, axis=None).numpy()
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
attempts = multi_process_runner.manager().dict()
mpr = multi_process_runner.MultiProcessRunner(
worker_fn,
cluster_spec,
rpc_layer=RPC_PROTOCOL,
args=(attempts,),
auto_restart=True)
mpr.start()
results = mpr.join(timeout=90).return_value
self.assertAllEqual(results, [[2.], [2.]])
def test_quick_recover(self):
# This test simulates the case when a worker fails but recovers quickly
# before the next collective.
#
# It's not guaranteed that the cluster only restarts once when one worker
# fails. The external job management system is expected to keep restarting
# failed workers.
def worker_fn(attempts):
# Set a long check alive interval to better simulate the case when a
# worker fails and recovers during a check alive interval.
mwms_lib.CollectiveAllReduceExtended._check_alive_interval = 30
mwms_lib.CollectiveAllReduceExtended._check_alive_initial_timeout = 30
context.context().enable_coordination_service(COORDINATION_SERVICE)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
task_id, attempt = get_attempt(strategy, attempts)
@tf.function
def replica_fn():
ctx = tf.distribute.get_replica_context()
# Use a large tensor because a small tensor may hang regardless of whether
# the worker recovers.
value = tf.ones((64, 64))
ctx.all_reduce(tf.distribute.ReduceOp.SUM, [value, value])
strategy.run(replica_fn)
# worker-1 dies here.
if attempt == 1 and task_id == 1:
quick_exit(1)
strategy.run(replica_fn)
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
attempts = multi_process_runner.manager().dict()
mpr = multi_process_runner.MultiProcessRunner(
worker_fn,
cluster_spec,
rpc_layer=RPC_PROTOCOL,
args=(attempts,),
auto_restart=True)
mpr.start()
mpr.join(timeout=90)
if __name__ == "__main__":
test_util.main()
|
|
"""
Satchmo shipping module using python-fedex
"""
# Note, make sure you use decimal math everywhere!
from decimal import Decimal
from django.utils.translation import ugettext as _
from shipping.modules.base import BaseShipper
from fedex.base_service import FedexBaseServiceException
from fedex.services.rate_service import FedexRateServiceRequest
import traceback
import logging
log = logging.getLogger('fedex_web_services.shipper')
transit_days = {
'FEDEX_1_DAY_FREIGHT': '1 day',
'FEDEX_2_DAY': '2 days',
'FEDEX_2_DAY_FREIGHT': '2 days',
'FEDEX_3_DAY_FREIGHT': '3 days',
'FEDEX_EXPRESS_SAVER': '3 days',
'FEDEX_GROUND': '1 to 7 days',
'FIRST_OVERNIGHT': '1 day',
'GROUND_HOME_DELIVERY': '1 to 7 days',
'PRIORITY_OVERNIGHT': '1 day',
'SMART_POST': '2 to 8 days',
'STANDARD_OVERNIGHT': '1 day',
}
WEIGHT_UNITS = {
'KG': 1.0,
'G': 0.001,
'LB': 0.45359237,
'OZ': 0.028349523125,
}
def convert_weight(value, src_unit, dst_unit):
try:
return value * (WEIGHT_UNITS[src_unit.upper()] / WEIGHT_UNITS[dst_unit.upper()])
except KeyError:
raise KeyError('Unknown weight unit')
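# Illustrative helper (not in the original module): a quick sanity check of
# convert_weight round-tripping; the 10 lb figure is an arbitrary example.
def _example_convert_weight():
    kg = convert_weight(10.0, 'LB', 'KG')  # ~4.536 kg
    lb = convert_weight(kg, 'KG', 'LB')    # back to ~10.0 lb
    return kg, lb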
class Shipper(BaseShipper):
id = "fedex_web_services"
def __init__(self,
cart=None,
contact=None,
service_type=None,
config=None,
packaging=None,
default_weight=None,
default_weight_units=None,
single_box=True,
verbose_log=False,
dropoff_type=None):
self._calculated = False
self.cart = cart
self.contact = contact
self.default_weight_units = default_weight_units
self.single_box = single_box
self.verbose_log = verbose_log
self.dropoff_type = dropoff_type
if service_type:
self.service_type_code = service_type[0]
self.service_type_text = service_type[1]
else:
self.service_type_code = '99'
self.service_type_text = 'Uninitialized'
self.id = u'%s' % (self.service_type_text)
self._cost = Decimal('0.00')
self._valid = None
self.CONFIG_OBJ = config
self.packaging = packaging
self.default_weight = default_weight
try:
self._expected_delivery = transit_days[self.service_type_code]
except KeyError:
self._expected_delivery = ''
def __unicode__(self):
return u"Shipping via fedex_web_services"
def description(self):
return _('Fedex - %s' % self.service_type_text)
def cost(self):
if self._calculated:
return self._cost
def method(self):
return self.service_type_text
def expectedDelivery(self):
return self._expected_delivery
def valid(self, order=None):
if self._calculated:
return self._valid
def calculate(self, cart, contact):
# These imports are here to avoid circular import errors
from satchmo_store.shop.models import Config
from shipping.utils import product_or_parent
shop_details = Config.objects.get_current()
verbose = self.verbose_log
if verbose:
log.debug('Calculating fedex with type=%s', self.service_type_code)
rate_request = FedexRateServiceRequest(self.CONFIG_OBJ)
# This is very generalized, top-level information.
# REGULAR_PICKUP, REQUEST_COURIER, DROP_BOX, BUSINESS_SERVICE_CENTER or STATION
rate_request.RequestedShipment.DropoffType = self.dropoff_type
# See page 355 in WS_ShipService.pdf for a full list. Here are the common ones:
# STANDARD_OVERNIGHT, PRIORITY_OVERNIGHT, FEDEX_GROUND, FEDEX_EXPRESS_SAVER
rate_request.RequestedShipment.ServiceType = self.service_type_code
# What kind of package this will be shipped in.
# FEDEX_BOX, FEDEX_PAK, FEDEX_TUBE, YOUR_PACKAGING
rate_request.RequestedShipment.PackagingType = self.packaging
# No idea what this is.
# INDIVIDUAL_PACKAGES, PACKAGE_GROUPS, PACKAGE_SUMMARY
rate_request.RequestedShipment.PackageDetail = 'INDIVIDUAL_PACKAGES'
# Shipper's address
rate_request.RequestedShipment.Shipper.Address.PostalCode = shop_details.postal_code
rate_request.RequestedShipment.Shipper.Address.CountryCode = shop_details.country.iso2_code
rate_request.RequestedShipment.Shipper.Address.Residential = False
# Recipient address
rate_request.RequestedShipment.Recipient.Address.PostalCode = contact.shipping_address.postal_code
rate_request.RequestedShipment.Recipient.Address.CountryCode = contact.shipping_address.country.iso2_code
# This flag is optional. When turned on, it limits flexibility in options you can select
#rate_request.RequestedShipment.Recipient.Address.Residential = True
# Who pays for the rate_request?
# RECIPIENT, SENDER or THIRD_PARTY
rate_request.RequestedShipment.ShippingChargesPayment.PaymentType = 'SENDER'
# EDT is used to determine which estimated taxes and duties are included in the response.
# For international shipments only.
rate_request.RequestedShipment.EdtRequestType = 'NONE'
seq = 1
# If we are using a single box, we add up all the weights and ship in one box.
# Otherwise, we send multiple boxes.
default_weight_units = (self.default_weight_units or "").upper()
assert default_weight_units in ('KG', 'LB'), "Valid default weight units are only KG or LB"
if self.single_box:
box_weight = 0.0
for product in cart.get_shipment_list():
item_weight = product_or_parent(product, 'weight') or 0.0
converted_item_weight = (
convert_weight(float(item_weight), product.smart_attr('weight_units')
or default_weight_units, default_weight_units))
box_weight += max(converted_item_weight, float(self.default_weight))
item = rate_request.create_wsdl_object_of_type('RequestedPackageLineItem')
item.SequenceNumber = seq
item.Weight = rate_request.create_wsdl_object_of_type('Weight')
item.Weight.Units = default_weight_units
item.Weight.Value = box_weight
item.PhysicalPackaging = 'BOX'
rate_request.add_package(item)
else: # Send separate packages for each item
for product in cart.get_shipment_list():
item_weight = product_or_parent(product, 'weight')
converted_item_weight = convert_weight(float(item_weight), product.smart_attr('weight_units') \
or default_weight_units, default_weight_units)
item_weight = max(converted_item_weight, float(self.default_weight))
item = rate_request.create_wsdl_object_of_type('RequestedPackageLineItem')
item.SequenceNumber = seq
item.Weight.Units = default_weight_units
item.Weight.Value = item_weight
item.PhysicalPackaging = 'BOX'
rate_request.add_package(item)
seq += 1
# If you'd like to see some documentation on the ship service WSDL, un-comment
# this line. (Spammy).
#print rate_request.client
# Un-comment this to see your complete, ready-to-send request as it stands
# before it is actually sent. This is useful for seeing what values you can
# change.
# print rate_request.RequestedShipment
# Fires off the request, sets the 'response' attribute on the object.
try:
rate_request.send_request()
except FedexBaseServiceException, e:
# Expected Fedex exceptions with good messages are:
# FedexFailure (for temporary server error), FedexError (for wrong request), SchemaValidationError
log.info('******************* Error in shipping: %s' % str(e))
except Exception, e:
# Unexpected exceptions need a traceback logged, but processing continues.
log.info('******************* Error in shipping:\n%s', traceback.format_exc(limit=15))
# This will show the reply to your rate_request being sent. You can access the
# attributes through the response attribute on the request object. This is
# good to un-comment to see the variables returned by the FedEx reply.
# print rate_request.response
if rate_request.response:
if rate_request.response.HighestSeverity in ['SUCCESS', 'WARNING', 'NOTE']:
# we're good
log.debug('******************good shipping: %s' % self.service_type_code)
try:
self._expected_delivery = rate_request.response.RateReplyDetails[0].TransitTime
except AttributeError: # TransitTime not included for everything
pass
cost = 0
for rate_detail in rate_request.response.RateReplyDetails[0].RatedShipmentDetails:
cost = max(cost, rate_detail.ShipmentRateDetail.TotalNetFedExCharge.Amount)
self._cost = Decimal(str(cost))
self._valid = True
else:
log.debug('*******************bad shipping: %s' % self.service_type_code)
log.debug(rate_request.response.HighestSeverity)
log.debug(rate_request.response.Notifications)
self._valid = False
else:
self._valid = False
self._calculated = True
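# --- Illustrative usage sketch (not part of the original module) ---
# How a Shipper is typically exercised once Satchmo has built the cart and
# contact; `config_obj`, `cart` and `contact` are assumptions standing in for
# objects normally supplied by the store configuration and checkout flow.
def _example_rate_lookup(config_obj, cart, contact):
    shipper = Shipper(cart=cart, contact=contact,
                      service_type=('FEDEX_GROUND', 'FedEx Ground'),
                      config=config_obj, packaging='YOUR_PACKAGING',
                      default_weight='0.5', default_weight_units='LB',
                      single_box=True, dropoff_type='REGULAR_PICKUP')
    shipper.calculate(cart, contact)
    if shipper.valid():
        return shipper.cost(), shipper.expectedDelivery()
    return None, None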
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import mock
from oslo_log import log
from oslo_utils import timeutils
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import rpc
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.Field(fields.Integer())}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.Field(fields.Integer(), default=1),
'bar': fields.Field(fields.String()),
'missing': fields.Field(fields.String()),
'readonly': fields.Field(fields.Integer(), read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.NoDBTestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.NovaObjectMetaclass)
class NewBaseClass(object):
VERSION = '1.0'
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Fake1TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake1'
class Fake1TestObj2(Fake1TestObj1):
pass
class Fake1TestObj3(Fake1TestObj1):
VERSION = '1.1'
class Fake2TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake2'
class Fake1TestObj4(Fake1TestObj3):
VERSION = '1.2'
class Fake2TestObj2(Fake2TestObj1):
VERSION = '1.1'
class Fake1TestObj5(Fake1TestObj1):
VERSION = '1.1'
# Newest versions first in the list. Duplicate versions take the
# newest object.
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
'fake2': [Fake2TestObj2, Fake2TestObj1]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Fake1TestObj1._obj_classes)
self.assertEqual(expected, Fake1TestObj2._obj_classes)
self.assertEqual(expected, Fake1TestObj3._obj_classes)
self.assertEqual(expected, Fake1TestObj4._obj_classes)
self.assertEqual(expected, Fake1TestObj5._obj_classes)
self.assertEqual(expected, Fake2TestObj1._obj_classes)
self.assertEqual(expected, Fake2TestObj2._obj_classes)
def test_field_checking(self):
def create_class(field):
class TestField(base.NovaObject):
VERSION = '1.5'
fields = {'foo': field()}
return TestField
create_class(fields.IPV4AndV6AddressField)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, fields.IPV4AndV6Address)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, int)
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('MyObj'),
}
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
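# Illustrative call (hypothetical field names): compare an Instance object
# against its DB row, mapping the object's 'metadata' field to the row's
# 'system_metadata' column and coercing the uuid field to a string first:
#
#   compare_obj(self, instance, db_inst,
#               subs={'metadata': 'system_metadata'},
#               allow_missing=['fault'],
#               comparators={'uuid': self.str_comparator})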
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
def assertRemotes(self):
self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return (base.NovaObject.obj_from_primitive(result, context=args[0])
if isinstance(result, base.NovaObject) else result)
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
self.useFixture(nova_fixtures.IndirectionAPIFixture(
conductor_rpcapi.ConductorAPI()))
# To make sure local and remote contexts match
self.stubs.Set(rpc.RequestContextSerializer,
'serialize_context',
lambda s, c: c)
self.stubs.Set(rpc.RequestContextSerializer,
'deserialize_context',
lambda s, c: c)
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
def assertRemotes(self):
self.assertNotEqual(self.remote_object_calls, [])
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': fields.Field(fields.Integer())}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
except exception.IncompatibleObjectVersion as error:
pass
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
self.assertRemotes()
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
self.assertRemotes()
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
self.assertRemotes()
def test_changed_with_sub_object(self):
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
self.assertRemotes()
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
self.assertRemotes()
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
actual = obj.obj_to_primitive()
self.assertJsonEqual(actual, expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_obj_reset_changes_recursive(self):
obj = MyObj(rel_object=MyOwnedObject(baz=123),
rel_objects=[MyOwnedObject(baz=456)])
self.assertEqual(set(['rel_object', 'rel_objects']),
obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True, fields=['foo'])
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True)
self.assertEqual(set([]), obj.rel_object.obj_what_changed())
self.assertEqual(set([]), obj.obj_what_changed())
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object',
'rel_objects', 'mutable_default'] +
base_fields)
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
def test_obj_alternate_context(self):
obj = MyObj(context=self.context)
with obj.obj_alternate_context(mock.sentinel.alt_ctx):
self.assertEqual(mock.sentinel.alt_ctx,
obj._context)
self.assertEqual(self.context, obj._context)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': fields.Field(fields.Integer())}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(exception.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_mutable_default(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.mutable_default = None
obj.mutable_default.append('s1')
self.assertEqual(obj.mutable_default, ['s1'])
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.mutable_default = None
obj1.mutable_default.append('s2')
self.assertEqual(obj1.mutable_default, ['s2'])
def test_obj_mutable_default_set_default(self):
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.obj_set_defaults('mutable_default')
self.assertEqual(obj1.mutable_default, [])
obj1.mutable_default.append('s1')
self.assertEqual(obj1.mutable_default, ['s1'])
obj2 = MyObj(context=self.context, foo=123, bar='abc')
obj2.obj_set_defaults('mutable_default')
self.assertEqual(obj2.mutable_default, [])
obj2.mutable_default.append('s2')
self.assertEqual(obj2.mutable_default, ['s2'])
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,'
'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
'rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
subobj.VERSION = '1.2'
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
orig_primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_complains_about_missing_rules(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {}
self.assertRaises(exception.ObjectActionError,
obj.obj_make_compatible, {}, '1.0')
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
class MyList(base.ObjectListBase, base.NovaObject):
VERSION = '1.2'
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList(objects=[])
class MyOwner(base.NovaObject):
VERSION = '1.2'
fields = {'mylist': fields.ObjectField('MyList')}
obj_relationships = {
'mylist': [('1.1', '1.1')],
}
myowner = MyOwner(mylist=mylist)
primitive = myowner.obj_to_primitive('1.1')
self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
def test_delattr(self):
obj = MyObj(bar='foo')
del obj.bar
# Should appear unset now
self.assertFalse(obj.obj_attr_is_set('bar'))
# Make sure post-delete, references trigger lazy loads
self.assertEqual('loaded!', getattr(obj, 'bar'))
def test_delattr_unset(self):
obj = MyObj()
self.assertRaises(AttributeError, delattr, obj, 'bar')
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(exception.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo', 'mutable_default']),
obj.obj_what_changed())
self.assertEqual(1, obj.foo)
def test_set_defaults_not_overwrite(self):
# NOTE(danms): deleted defaults to False, so verify that it does
# not get reset by obj_set_defaults()
obj = MyObj(deleted=True)
obj.obj_set_defaults()
self.assertEqual(1, obj.foo)
self.assertTrue(obj.deleted)
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.VERSION = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.VERSION = '1.7'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.VERSION = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
self.assertRemotes()
def test_compat(self):
MyObj2.VERSION = '1.1'
obj = MyObj2.query(self.context)
self.assertEqual('oldbar', obj.bar)
def test_revision_ignored(self):
MyObj2.VERSION = '1.1.456'
obj = MyObj2.query(self.context)
self.assertEqual('bar', obj.bar)
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport.called)
else:
self.assertEqual('backported', result)
ser._conductor.object_backport.assert_called_with(self.context,
primitive,
backported_to)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in six.itervalues(primitive):
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in six.itervalues(thing2):
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
def setUp(self):
super(TestArgsSerializer, self).setUp()
self.now = timeutils.utcnow()
self.str_now = timeutils.strtime(at=self.now)
@base.serialize_args
def _test_serialize_args(self, *args, **kwargs):
expected_args = ('untouched', self.str_now, self.str_now)
for index, val in enumerate(args):
self.assertEqual(expected_args[index], val)
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
'c': self.str_now}
for key, val in six.iteritems(kwargs):
self.assertEqual(expected_kwargs[key], val)
def test_serialize_args(self):
self._test_serialize_args('untouched', self.now, self.now,
a='untouched', b=self.now, c=self.now)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-cf1b002f0e50f5333e0f33588f6c2d57',
'AgentList': '1.0-3c73cea65e7c938080184ec70a4ee1f7',
'Aggregate': '1.1-7b3f04af5342ba544955d01c9c954fa5',
'AggregateList': '1.2-13a2dfb67f9cb9aee815e233bc89f34c',
'BandwidthUsage': '1.2-e7d3b3a5c3950cc67c99bc26a1075a70',
'BandwidthUsageList': '1.2-fe73c30369dd23c41619c9c19f27a562',
'BlockDeviceMapping': '1.9-c87e9c7e5cfd6a402f32727aa74aca95',
'BlockDeviceMappingList': '1.10-44b9818d5e90a7396eb807540cbe42c0',
'CellMapping': '1.0-4b1616970814c3c819e10c7ef6b9c3d5',
'ComputeNode': '1.11-5f8cd6948ad98fcc0c39b79d49acc4b6',
'ComputeNodeList': '1.11-f09b7f64339350b4296ac85c07e3a573',
'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
'DNSDomainList': '1.0-bc58364180c693203ebcf5e5d5775736',
'EC2Ids': '1.0-8e193896fa01cec598b875aea94da608',
'EC2InstanceMapping': '1.0-e9c3257badcc3aa14089b0a62f163108',
'EC2SnapshotMapping': '1.0-a545acd0d1519d4316b9b00f30e59b4d',
'EC2VolumeMapping': '1.0-15710aa212b5cbfdb155fdc81cce4ede',
'FixedIP': '1.10-4e8060f91f6c94ae73d557708ec62f56',
'FixedIPList': '1.10-724a59f2446d917d0bd13d6aa33edf8a',
'Flavor': '1.1-01ed47361fbe76bf728edf667d3f45d3',
'FlavorList': '1.1-ab3f242e0db21db87285f2ac2ddc5c72',
'FloatingIP': '1.6-24c614d2c3d4887254a679be65c11de5',
'FloatingIPList': '1.7-e61a470ab21d7422f6bb703f86d99b53',
'HVSpec': '1.0-1f9806b94af42dd91e6db369cd10f114',
'ImageMeta': '1.0-dbbf573f58eb5f874e2a10aee54394dd',
'ImageMetaProps': '1.0-5da0eea4eeca04fcd9c7f255c2a43eaa',
'Instance': '1.20-0991d6bd300ebf35ec19d7d68922e69b',
'InstanceAction': '1.1-866fb0235d45ab51cc299b8726303d9c',
'InstanceActionEvent': '1.1-538698f30974064543134784c5da6056',
'InstanceActionEventList': '1.0-3510dc5bc494bcf2468f54249366164f',
'InstanceActionList': '1.0-7f3f14a6c16fa16113c112a3b2ffffdd',
'InstanceExternalEvent': '1.0-86f86f31d561090fe1254c422b517a2b',
'InstanceFault': '1.2-090c74b3833c715845ec2cf24a686aaf',
'InstanceFaultList': '1.1-94f71c64972f25ba5675704bf2087fdb',
'InstanceGroup': '1.9-a77a59735d62790dcaa413a21acfaa73',
'InstanceGroupList': '1.6-4642a730448b2336dfbf0f410f9c0cab',
'InstanceInfoCache': '1.5-ef7394dae46cff2dd560324555cb85cf',
'InstanceList': '1.17-d453df4d1e7e1ec3b5b8b089672a870f',
'InstanceMapping': '1.0-d7cfc251f16c93df612af2b9de59e5b7',
'InstanceMappingList': '1.0-1e388f466f8a306ab3c0a0bb26479435',
'InstanceNUMACell': '1.2-5d2dfa36e9ecca9b63f24bf3bc958ea4',
'InstanceNUMATopology': '1.1-b6fab68a3f0f1dfab4c98a236d29839a',
'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f',
'InstancePCIRequests': '1.1-4825b599f000538991fdc9972a92c2c6',
'KeyPair': '1.3-2d7c9ccade5532f7cd185110a9367e6a',
'KeyPairList': '1.2-41b7c9ab5fd2a216be4bbce011a55eff',
'Migration': '1.2-0554a9f061ec0e9fefe43773bc426fcf',
'MigrationList': '1.2-e772d7d6ae0581ec72042d50c6bdf6ec',
'MyObj': '1.6-fce707f79d6fee00f0ebbac98816a380',
'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
'NUMACell': '1.2-cb9c3b08cc1c418d021492f788d04173',
'NUMAPagesTopology': '1.0-97d93f70a68625b5f29ff63a40a4f612',
'NUMATopology': '1.2-790f6bdff85bf6e5677f409f3a4f1c6a',
'NUMATopologyLimits': '1.0-201845851897940c0a300e3d14ebf04a',
'Network': '1.2-141c797b794a4f8dbe251f929dc15268',
'NetworkList': '1.2-4997048844f38a920eed0f9e685360e3',
'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc',
'NetworkRequestList': '1.1-46ff51f691dde5cf96b4c37b0953a516',
'PciDevice': '1.3-6d37f795ee934e7db75b5a6a1926def0',
'PciDeviceList': '1.1-0aedd5a49b4a9f30da37cf275cd98cf7',
'PciDevicePool': '1.1-2f352e08e128ec5bc84bc3007936cc6d',
'PciDevicePoolList': '1.1-46ff51f691dde5cf96b4c37b0953a516',
'Quotas': '1.2-615ed622082c92d938119fd49e6d84ee',
'QuotasNoOp': '1.2-164c628906b170fd946a7672e85e4935',
'S3ImageMapping': '1.0-56d23342db8131d826797c7229dc4050',
'SecurityGroup': '1.1-cd2f3c063640723b584634fa1075be77',
'SecurityGroupList': '1.0-29b93ebda887d1941ec10c8e34644356',
'SecurityGroupRule': '1.1-38290b6f9a35e416c2bcab5f18708967',
'SecurityGroupRuleList': '1.1-c98e038da57c3a9e47e62a588e5b3c23',
'Service': '1.12-1a34a387914f90aacc33c8c43d45d0b3',
'ServiceList': '1.10-653f472b965b6ed17235ebd683751be7',
'Tag': '1.0-521693d0515aa031dff2b8ae3f86c8e0',
'TagList': '1.0-698b4e8bd7d818db10b71a6d3c596760',
'TestSubclassedObject': '1.6-d0f7f126f87433003c4d2ced202d6c86',
'VirtCPUFeature': '1.0-ac0f6fa47089583a95c57131e46de052',
'VirtCPUModel': '1.0-aa6fd0df43edfd2f8cfa0f2151a06f20',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.0-d3d14066c99b8ae4d5204059fb147279',
'VirtualInterfaceList': '1.0-311365526cc6904e43ace844a794cb6b'
}
object_relationships = {
'BlockDeviceMapping': {'Instance': '1.20'},
'ComputeNode': {'HVSpec': '1.0', 'PciDevicePoolList': '1.1'},
'FixedIP': {'Instance': '1.20', 'Network': '1.2',
'VirtualInterface': '1.0',
'FloatingIPList': '1.7'},
'FloatingIP': {'FixedIP': '1.10'},
'ImageMeta': {'ImageMetaProps': '1.0'},
'Instance': {'InstanceFault': '1.2',
'InstanceInfoCache': '1.5',
'InstanceNUMATopology': '1.1',
'PciDeviceList': '1.1',
'TagList': '1.0',
'SecurityGroupList': '1.0',
'Flavor': '1.1',
'InstancePCIRequests': '1.1',
'VirtCPUModel': '1.0',
'EC2Ids': '1.0',
},
'InstanceNUMACell': {'VirtCPUTopology': '1.0'},
'InstanceNUMATopology': {'InstanceNUMACell': '1.2'},
'InstancePCIRequests': {'InstancePCIRequest': '1.1'},
'MyObj': {'MyOwnedObject': '1.0'},
'NUMACell': {'NUMAPagesTopology': '1.0'},
'NUMATopology': {'NUMACell': '1.2'},
'SecurityGroupRule': {'SecurityGroup': '1.1'},
'Service': {'ComputeNode': '1.11'},
'TestSubclassedObject': {'MyOwnedObject': '1.0'},
'VirtCPUModel': {'VirtCPUFeature': '1.0', 'VirtCPUTopology': '1.0'},
}
class TestObjectVersions(test.NoDBTestCase):
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
"""Follow a chain of remotable things down to the original function."""
if isinstance(thing, classmethod):
return self._find_remotable_method(cls, thing.__get__(None, cls))
elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
return self._find_remotable_method(cls, thing.original_fn,
parent_was_remotable=True)
elif parent_was_remotable:
# We must be the first non-remotable thing underneath a stack of
# remotable things (i.e. the actual implementation method)
return thing
else:
# This means the top-level thing never hit a remotable layer
return None
def _get_fingerprint(self, obj_name):
obj_class = base.NovaObject._obj_classes[obj_name][0]
        fields = sorted(obj_class.fields.items())
methods = []
for name in dir(obj_class):
thing = getattr(obj_class, name)
if inspect.ismethod(thing) or isinstance(thing, classmethod):
method = self._find_remotable_method(obj_class, thing)
if method:
methods.append((name, inspect.getargspec(method)))
methods.sort()
# NOTE(danms): Things that need a version bump are any fields
# and their types, or the signatures of any remotable methods.
# Of course, these are just the mechanical changes we can detect,
# but many other things may require a version bump (method behavior
# and return value changes, for example).
if hasattr(obj_class, 'child_versions'):
relevant_data = (fields, methods,
OrderedDict(
sorted(obj_class.child_versions.items())))
else:
relevant_data = (fields, methods)
fingerprint = '%s-%s' % (obj_class.VERSION,
hashlib.md5(str(relevant_data)).hexdigest())
return fingerprint
def test_versions(self):
fingerprints = {}
for obj_name in base.NovaObject._obj_classes:
fingerprints[obj_name] = self._get_fingerprint(obj_name)
if os.getenv('GENERATE_HASHES'):
            with open('object_hashes.txt', 'w') as hash_file:
                hash_file.write(pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
stored = set(object_data.items())
computed = set(fingerprints.items())
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, hash in changed:
expected[name] = object_data.get(name)
actual[name] = fingerprints.get(name)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def test_registry_matches_metaclass(self):
reference = set(object_data.keys())
actual = set(base.NovaObjectRegistry.classes)
test_objects = set(['MyObj', 'MyOwnedObject', 'TestSubclassedObject'])
# NOTE(danms): In the new registry, we don't implicitly track test
# objects, so make sure that the difference between the metaclass and
# the opt-in registry is the set of test objects.
self.assertEqual(test_objects, reference.symmetric_difference(actual))
def _get_object_field_name(self, field):
if isinstance(field._type, fields.Object):
return field._type._obj_name
if isinstance(field, fields.ListOfObjectsField):
return field._type._element_type._type._obj_name
return None
def _build_tree(self, tree, obj_class):
obj_name = obj_class.obj_name()
if obj_name in tree:
return
for name, field in obj_class.fields.items():
# Notes(yjiang5): ObjectListBase should be covered by
# child_versions test
if (issubclass(obj_class, base.ObjectListBase) and
name == 'objects'):
continue
sub_obj_name = self._get_object_field_name(field)
if sub_obj_name:
sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
self._build_tree(tree, sub_obj_class)
tree.setdefault(obj_name, {})
tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
def test_relationships(self):
tree = {}
for obj_name in base.NovaObject._obj_classes.keys():
self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
stored = set([(x, str(y)) for x, y in object_relationships.items()])
computed = set([(x, str(y)) for x, y in tree.items()])
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, deps in changed:
expected[name] = object_relationships.get(name)
actual[name] = tree.get(name)
self.assertEqual(expected, actual,
'Some objects have changed dependencies. '
'Please make sure to bump the versions of '
'parent objects and provide a rule in their '
'obj_make_compatible() routines to backlevel '
'the child object.')
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
obj_class().obj_to_primitive(target_version=test_version)
def _get_obj_to_test(self, obj_class):
obj = obj_class()
for fname, ftype in obj.fields.items():
if isinstance(ftype, fields.ObjectField):
fobjname = ftype.AUTO_TYPE._obj_name
fobjcls = base.NovaObject._obj_classes[fobjname][0]
setattr(obj, fname, self._get_obj_to_test(fobjcls))
elif isinstance(ftype, fields.ListOfObjectsField):
# FIXME(danms): This will result in no tests for this
# field type...
setattr(obj, fname, [])
return obj
def _find_version_mapping(self, my_ver, versions):
closest = None
my_ver = utils.convert_version_to_tuple(my_ver)
for _my, _child in versions:
_my = utils.convert_version_to_tuple(_my)
_child = utils.convert_version_to_tuple(_child)
if _my == my_ver:
return '%s.%s' % _child
elif _my < my_ver:
closest = _child
if closest:
return '%s.%s' % closest
else:
return None
def _validate_object_fields(self, obj_class, primitive):
for fname, ftype in obj_class.fields.items():
if isinstance(ftype, fields.ObjectField):
exp_vers = obj_class.obj_relationships[fname]
exp_ver = self._find_version_mapping(
primitive['nova_object.version'], exp_vers)
if exp_ver is None:
self.assertNotIn(fname, primitive['nova_object.data'])
else:
child_p = primitive['nova_object.data'][fname]
self.assertEqual(exp_ver,
child_p['nova_object.version'])
def test_obj_make_compatible_with_data(self):
        # Iterate all object classes and verify that we can run
        # obj_make_compatible with every older version than current, and
        # that any sub-object fields end up backlevelled (or dropped)
        # according to the obj_relationships mapping.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
if 'tests.unit' in obj_class.__module__:
# NOTE(danms): Skip test objects. When we move to
# oslo.versionedobjects, we won't have to do this
continue
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
test_object = self._get_obj_to_test(obj_class)
obj_p = test_object.obj_to_primitive(
target_version=test_version)
self._validate_object_fields(obj_class, obj_p)
def test_obj_relationships_in_order(self):
        # Iterate all object classes and verify that the version mappings in
        # each obj_relationships entry are listed in ascending order, for
        # both the parent and the child versions.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
for field, versions in obj_class.obj_relationships.items():
last_my_version = (0, 0)
last_child_version = (0, 0)
for my_version, child_version in versions:
_my_version = utils.convert_version_to_tuple(my_version)
_ch_version = utils.convert_version_to_tuple(child_version)
self.assertTrue((last_my_version < _my_version
and last_child_version <= _ch_version),
'Object %s relationship '
'%s->%s for field %s is out of order' % (
obj_name, my_version, child_version,
field))
last_my_version = _my_version
last_child_version = _ch_version
class TestObjEqualPrims(test.NoDBTestCase):
def test_object_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='goodbye')
obj2.obj_reset_changes()
obj2.bar = 'goodbye'
        # obj2 will be marked with field 'bar' updated, even though the value
        # is unchanged
        self.assertTrue(base.obj_equal_prims(obj1, obj2),
                        "Objects that differ only because one is marked "
                        "as updated should be equal")
def test_object_not_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertFalse(base.obj_equal_prims(obj1, obj2),
"Objects that differ in any field "
"should not be equal")
def test_object_ignore_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']),
"Objects that only differ in an ignored field "
"should be equal")
|
|
# -*- coding: utf8 -*-
import rlp
from bitcoin import encode_pubkey, N, encode_privkey
from rlp.sedes import big_endian_int, binary
from rlp.utils import encode_hex, str_to_bytes, ascii_chr
from secp256k1 import PublicKey, ALL_FLAGS, PrivateKey
from ethereum.exceptions import InvalidTransaction
from ethereum import bloom
from ethereum import opcodes
from ethereum import utils
from ethereum.slogging import get_logger
from ethereum.utils import TT256, mk_contract_address, zpad, int_to_32bytearray, big_endian_to_int
log = get_logger('eth.chain.tx')
# In the Yellow Paper it is specified that s should be smaller than secpk1n (eq. 205)
secpk1n = 115792089237316195423570985008687907852837564279074904382605163141518161494337
class Transaction(rlp.Serializable):
"""
A transaction is stored as:
[nonce, gasprice, startgas, to, value, data, v, r, s]
nonce is the number of transactions already sent by that account, encoded
in binary form (eg. 0 -> '', 7 -> '\x07', 1000 -> '\x03\xd8').
    (v, r, s) is the raw Electrum-style signature of the transaction (i.e. of
    the fields preceding the signature), made with the private key
    corresponding to the sending account, with 0 <= v <= 3. From an
    Electrum-style signature (65 bytes) it is possible to extract the public
    key, and thereby the address, directly.
    A valid transaction is one where:
    (i) the signature is well-formed (i.e. 0 <= v <= 3, 0 <= r < P, 0 <= s < N,
        0 <= r < P - N if v >= 2), and
(ii) the sending account has enough funds to pay the fee and the value.
"""
fields = [
('nonce', big_endian_int),
('gasprice', big_endian_int),
('startgas', big_endian_int),
('to', utils.address),
('value', big_endian_int),
('data', binary),
('v', big_endian_int),
('r', big_endian_int),
('s', big_endian_int),
]
_sender = None
def __init__(self, nonce, gasprice, startgas, to, value, data, v=0, r=0, s=0):
self.data = None
to = utils.normalize_address(to, allow_blank=True)
assert len(to) == 20 or len(to) == 0
super(Transaction, self).__init__(nonce, gasprice, startgas, to, value, data, v, r, s)
self.logs = []
if self.gasprice >= TT256 or self.startgas >= TT256 or \
self.value >= TT256 or self.nonce >= TT256:
raise InvalidTransaction("Values way too high!")
if self.startgas < self.intrinsic_gas_used:
raise InvalidTransaction("Startgas too low")
log.debug('deserialized tx', tx=encode_hex(self.hash)[:8])
@property
def sender(self):
if not self._sender:
# Determine sender
if self.v:
if self.r >= N or self.s >= N or self.v < 27 or self.v > 28 \
or self.r == 0 or self.s == 0:
raise InvalidTransaction("Invalid signature values!")
log.debug('recovering sender')
rlpdata = rlp.encode(self, UnsignedTransaction)
rawhash = utils.sha3(rlpdata)
pk = PublicKey(flags=ALL_FLAGS)
try:
pk.public_key = pk.ecdsa_recover(
rawhash,
pk.ecdsa_recoverable_deserialize(
zpad("".join(chr(c) for c in int_to_32bytearray(self.r)), 32) + zpad("".join(chr(c) for c in int_to_32bytearray(self.s)), 32),
self.v - 27
),
raw=True
)
pub = pk.serialize(compressed=False)
except Exception:
raise InvalidTransaction("Invalid signature values (x^3+7 is non-residue)")
if pub[1:] == "\x00" * 32:
raise InvalidTransaction("Invalid signature (zero privkey cannot sign)")
pub = encode_pubkey(pub, 'bin')
self._sender = utils.sha3(pub[1:])[-20:]
assert self.sender == self._sender
else:
self._sender = 0
return self._sender
@sender.setter
def sender(self, value):
self._sender = value
def sign(self, key):
"""Sign this transaction with a private key.
A potentially already existing signature would be overridden.
"""
if key in (0, '', '\x00' * 32, '0' * 64):
raise InvalidTransaction("Zero privkey cannot sign")
rawhash = utils.sha3(rlp.encode(self, UnsignedTransaction))
if len(key) == 64:
# we need a binary key
key = encode_privkey(key, 'bin')
pk = PrivateKey(key, raw=True)
signature = pk.ecdsa_recoverable_serialize(
pk.ecdsa_sign_recoverable(rawhash, raw=True)
)
signature = signature[0] + chr(signature[1])
self.v = ord(signature[64]) + 27
self.r = big_endian_to_int(signature[0:32])
self.s = big_endian_to_int(signature[32:64])
self.sender = utils.privtoaddr(key)
return self
@property
def hash(self):
return utils.sha3(rlp.encode(self))
def log_bloom(self):
"returns int"
bloomables = [x.bloomables() for x in self.logs]
return bloom.bloom_from_list(utils.flatten(bloomables))
def log_bloom_b64(self):
return bloom.b64(self.log_bloom())
def to_dict(self):
# TODO: previous version used printers
d = {}
for name, _ in self.__class__.fields:
d[name] = getattr(self, name)
d['sender'] = self.sender
d['hash'] = encode_hex(self.hash)
return d
def log_dict(self):
d = self.to_dict()
d['sender'] = encode_hex(d['sender'] or '')
d['to'] = encode_hex(d['to'])
d['data'] = encode_hex(d['data'])
return d
@property
def intrinsic_gas_used(self):
num_zero_bytes = str_to_bytes(self.data).count(ascii_chr(0))
num_non_zero_bytes = len(self.data) - num_zero_bytes
return (opcodes.GTXCOST
+ opcodes.GTXDATAZERO * num_zero_bytes
+ opcodes.GTXDATANONZERO * num_non_zero_bytes)
@property
def creates(self):
"returns the address of a contract created by this tx"
if self.to in (b'', '\0' * 20):
return mk_contract_address(self.sender, self.nonce)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.hash == other.hash
def __hash__(self):
return utils.big_endian_to_int(self.hash)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<Transaction(%s)>' % encode_hex(self.hash)[:4]
def __structlog__(self):
return encode_hex(self.hash)
    # This method should be called only for block numbers >= HOMESTEAD_FORK_BLKNUM.
    # A strict > is used instead of >= because integer division rounds N / 2 down
    # by 0.5, so s == N // 2 is still below the real N/2 and remains valid.
def check_low_s(self):
if self.s > N / 2 or self.s == 0:
raise InvalidTransaction("Invalid signature S value!")
UnsignedTransaction = Transaction.exclude(['v', 'r', 's'])
def contract(nonce, gasprice, startgas, endowment, code, v=0, r=0, s=0):
"""A contract is a special transaction without the `to` argument."""
tx = Transaction(nonce, gasprice, startgas, '', endowment, code, v, r, s)
return tx
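# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition, not part of the original
# module): builds and signs a throwaway transaction with the API defined
# above.  The private key and recipient address are made-up placeholder
# values; the block only runs when the module is executed directly.
if __name__ == '__main__':
    _priv = utils.sha3('example key')                 # placeholder 32-byte key
    _to = utils.privtoaddr(utils.sha3('recipient'))   # placeholder 20-byte address
    _tx = Transaction(nonce=0, gasprice=20 * 10 ** 9, startgas=21000,
                      to=_to, value=10 ** 18, data='').sign(_priv)
    # the sender is recovered/derived from the signing key
    assert _tx.sender == utils.privtoaddr(_priv)
    # with empty data the intrinsic gas is just the base transaction cost
    assert _tx.intrinsic_gas_used == opcodes.GTXCOST
    print('signed %r, hash %s' % (_tx, encode_hex(_tx.hash)))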
|
|
import os
import sys
import string
import argparse
from build.sdk_build_utils import *
WINPHONE10_ARCHS = ['x86', 'x64', 'ARM64']
DEFAULT_MSBUILD = "C:/Program Files (x86)/MSBuild/14.0/Bin/msbuild.exe"
def msbuild(args, dir, *cmdArgs):
return execute(args.msbuild, dir, *cmdArgs)
def nuget(args, dir, *cmdArgs):
return execute(args.nuget, dir, *cmdArgs)
def corflags(args, dir, *cmdArgs):
return execute(args.corflags, dir, *cmdArgs)
def patchVcxprojFile(baseDir, fileName, patched1=False, patched2=False):
with open(fileName, 'rb') as f:
linesIn = f.readlines()
linesOut = []
for line in linesIn:
if line.strip() == '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.targets" />' and not patched2:
with open('%s/scripts/winphone10/carto_mobile_sdk.vcxproj.patch2' % baseDir, 'rb') as f:
linesOut += f.readlines()
patched2 = True
linesOut.append(line)
if line.strip() == '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props" />' and not patched1:
with open('%s/scripts/winphone10/carto_mobile_sdk.vcxproj.patch1' % baseDir, 'rb') as f:
linesOut += f.readlines()
patched1 = True
with open(fileName, 'wb') as f:
f.writelines(linesOut)
def buildWinPhoneNativeDLL(args, arch):
version = getVersion(args.buildnumber) if args.configuration == 'Release' else 'Devel'
platformArch = 'Win32' if arch == 'x86' else arch
baseDir = getBaseDir()
buildDir = getBuildDir('winphone_native10', platformArch)
defines = ["-D%s" % define for define in args.defines.split(';') if define]
options = ["-D%s" % option for option in args.cmakeoptions.split(';') if option]
if not cmake(args, buildDir, options + [
'-G', 'Visual Studio 17 2022',
'-DCMAKE_SYSTEM_NAME=WindowsStore',
'-DCMAKE_SYSTEM_VERSION=10.0',
'-DCMAKE_GENERATOR_PLATFORM=%s' % platformArch,
'-DCMAKE_BUILD_TYPE=%s' % args.nativeconfiguration,
'-DWRAPPER_DIR=%s' % ('%s/generated/winphone-csharp/wrappers' % baseDir),
'-DSINGLE_LIBRARY:BOOL=ON',
"-DSDK_CPP_DEFINES=%s" % " ".join(defines),
"-DSDK_VERSION='%s'" % version,
"-DSDK_PLATFORM='Windows Phone 10'",
"-DSDK_WINPHONE_ARCH='%s'" % arch,
'%s/scripts/build' % baseDir
]):
return False
patchVcxprojFile(baseDir, '%s/carto_mobile_sdk.vcxproj' % buildDir)
return cmake(args, buildDir, [
'--build', '.',
'--parallel', '4',
'--config', args.nativeconfiguration
])
def buildWinPhoneManagedDLL(args, arch):
baseDir = getBaseDir()
buildDir = getBuildDir('winphone_managed10', arch)
proxyFiles = os.listdir("%s/generated/winphone-csharp/proxies" % baseDir)
proxies = "\n".join(['<Compile Include="%s\\generated\\winphone-csharp\\proxies\\%s"><Link>Proxies\%s</Link></Compile>' % (baseDir, proxyFile, proxyFile) for proxyFile in proxyFiles])
with open('%s/scripts/winphone10/CartoMobileSDK.WinPhone.csproj.template' % baseDir, 'r') as f:
csProjFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'buildDir': buildDir, 'proxies': proxies })
with open('%s/CartoMobileSDK.WinPhone.csproj' % buildDir, 'w') as f:
f.write(csProjFile)
if not nuget(args, buildDir, 'restore', '%s/CartoMobileSDK.WinPhone.csproj' % buildDir):
print("Failed to restore required nuget packages")
return False
return msbuild(args, buildDir,
'/t:Build',
'/p:Configuration=%s' % args.configuration,
'/p:ProcessorArchitecture=%s' % arch,
'%s/CartoMobileSDK.WinPhone.csproj' % buildDir
)
def buildWinPhoneVSIX(args):
baseDir = getBaseDir()
buildDir = getBuildDir('winphone_vsix10')
distDir = getDistDir('winphone10')
with open('%s/scripts/winphone10-vsix/CartoMobileSDK.WinPhone.VSIX.csproj.template' % baseDir, 'r') as f:
csProjFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'buildDir': buildDir, 'configuration': args.configuration, 'nativeConfiguration': args.nativeconfiguration })
with open('%s/CartoMobileSDK.WinPhone.VSIX.csproj' % buildDir, 'w') as f:
f.write(csProjFile)
if not msbuild(args, buildDir,
'/t:Build',
'/p:Configuration=%s' % args.configuration,
'%s/CartoMobileSDK.WinPhone.VSIX.csproj' % buildDir
):
return False
if not copyfile('%s/bin/%s/CartoMobileSDK.WinPhone.VSIX.vsix' % (buildDir, args.configuration), '%s/CartoMobileSDK.WinPhone10.VSIX.vsix' % distDir):
return False
print("VSIX output available in:\n%s" % distDir)
return True
def buildWinPhoneNuget(args):
baseDir = getBaseDir()
buildDir = getBuildDir('winphone_nuget')
distDir = getDistDir('winphone10')
version = args.buildversion
with open('%s/scripts/nuget/CartoMobileSDK.WinPhone.nuspec.template' % baseDir, 'r') as f:
nuspecFile = string.Template(f.read()).safe_substitute({
'baseDir': baseDir,
'buildDir': buildDir,
'configuration': args.configuration,
'nativeConfiguration': args.nativeconfiguration,
'version': version
})
with open('%s/CartoMobileSDK.WinPhone.nuspec' % buildDir, 'w') as f:
f.write(nuspecFile)
    # A hack to generate an architecture-independent assembly; this is a nuget peculiarity
arch = 'x86' if 'x86' in args.winphonearch else args.winphonearch[0]
if not copyfile('%s/../winphone_managed10-%s/bin/%s/CartoMobileSDK.WinPhone.dll' % (buildDir, arch, args.configuration), '%s/CartoMobileSDK.WinPhone.dll' % buildDir):
return False
if not corflags(args, buildDir,
'/32BITREQ-',
'%s/CartoMobileSDK.WinPhone.dll' % buildDir
):
return False
if not nuget(args, buildDir,
'pack',
'%s/CartoMobileSDK.WinPhone.nuspec' % buildDir,
'-BasePath', '/'
):
return False
if not copyfile('%s/CartoMobileSDK.UWP.%s.nupkg' % (buildDir, version), '%s/CartoMobileSDK.UWP.%s.nupkg' % (distDir, version)):
return False
print("Nuget output available in:\n%s" % distDir)
return True
parser = argparse.ArgumentParser()
parser.add_argument('--profile', dest='profile', default=getDefaultProfileId(), type=validProfile, help='Build profile')
parser.add_argument('--winphone-arch', dest='winphonearch', default=[], choices=WINPHONE10_ARCHS + ['all'], action='append', help='Windows phone target architectures')
parser.add_argument('--defines', dest='defines', default='', help='Defines for compilation')
parser.add_argument('--msbuild', dest='msbuild', default='auto', help='WinPhone msbuild executable')
parser.add_argument('--nuget', dest='nuget', default='nuget', help='nuget executable')
parser.add_argument('--corflags', dest='corflags', default='corflags', help='corflags executable')
parser.add_argument('--cmake', dest='cmake', default='cmake', help='CMake executable')
parser.add_argument('--cmake-options', dest='cmakeoptions', default='', help='CMake options')
parser.add_argument('--configuration', dest='configuration', default='Release', choices=['Release', 'RelWithDebInfo', 'Debug'], help='Configuration')
parser.add_argument('--build-number', dest='buildnumber', default='', help='Build sequence number, goes to version str')
parser.add_argument('--build-version', dest='buildversion', default='%s-devel' % SDK_VERSION, help='Build version, goes to distributions')
parser.add_argument('--build-vsix', dest='buildvsix', default=False, action='store_true', help='Build VSIX package')
parser.add_argument('--build-nuget', dest='buildnuget', default=False, action='store_true', help='Build Nuget package')
args = parser.parse_args()
if args.msbuild == 'auto':
args.msbuild = DEFAULT_MSBUILD
if 'all' in args.winphonearch or args.winphonearch == []:
args.winphonearch = WINPHONE10_ARCHS
args.defines += ';' + getProfile(args.profile).get('defines', '')
args.cmakeoptions += ';' + getProfile(args.profile).get('cmake-options', '')
args.nativeconfiguration = 'RelWithDebInfo' if args.configuration == 'Debug' else args.configuration
if not os.path.exists("%s/generated/winphone-csharp/proxies" % getBaseDir()):
print("Proxies/wrappers not generated yet, run swigpp script first.")
sys.exit(-1)
if not checkExecutable(args.cmake, '--help'):
print('Failed to find CMake executable. Use --cmake to specify its location')
sys.exit(-1)
if not checkExecutable(args.msbuild, '/?'):
print('Failed to find msbuild executable. Use --msbuild to specify its location')
sys.exit(-1)
if not checkExecutable(args.nuget, 'help'):
print('Failed to find nuget executable. Use --nuget to specify its location')
sys.exit(-1)
if args.buildnuget:
if not checkExecutable(args.corflags, '/?'):
print('Failed to find corflags executable. Use --corflags to specify its location')
sys.exit(-1)
if 'x86' not in args.winphonearch:
print('Nuget package requires x86 architecture')
sys.exit(-1)
for arch in args.winphonearch:
if not buildWinPhoneNativeDLL(args, arch):
sys.exit(-1)
if not buildWinPhoneManagedDLL(args, arch):
sys.exit(-1)
if args.buildvsix:
if not buildWinPhoneVSIX(args):
sys.exit(-1)
if args.buildnuget:
if not buildWinPhoneNuget(args):
sys.exit(-1)
|
|
import unittest
import os
import yaml
from medleydb import multitrack
from medleydb import AUDIO_PATH
from medleydb import MIXING_COEFFICIENTS
class TestMultitrack(unittest.TestCase):
def setUp(self):
self.mtrack = multitrack.MultiTrack("NightPanther_Fire")
self.mtrack2 = multitrack.MultiTrack("Phoenix_ScotchMorris")
self.stem = self.mtrack.stems[8]
self.raw = self.mtrack.raw_audio[8][1]
def test_dataset_version_v1(self):
actual = self.mtrack.dataset_version
expected = 'V1'
self.assertEqual(expected, actual)
def test_dataset_version_v2(self):
mtrack = multitrack.MultiTrack("FennelCartwright_DearTessie")
actual = mtrack.dataset_version
expected = 'V2'
self.assertEqual(expected, actual)
def test_dataset_version_extra(self):
mtrack = multitrack.MultiTrack("AHa_TakeOnMe")
actual = mtrack.dataset_version
expected = 'EXTRA'
self.assertEqual(expected, actual)
def test_invalid_trackid(self):
with self.assertRaises(IOError):
multitrack.MultiTrack("RickAstley_NeverGonnaGiveYouUp")
def test_audio_path(self):
actual = self.mtrack.audio_path
expected = os.path.join(AUDIO_PATH, "NightPanther_Fire")
self.assertEqual(actual, expected)
def test_artist(self):
actual = self.mtrack.artist
expected = "NightPanther"
self.assertEqual(actual, expected)
def test_title(self):
actual = self.mtrack.title
expected = "Fire"
self.assertEqual(actual, expected)
def test_trackid(self):
actual = self.mtrack.track_id
expected = "NightPanther_Fire"
self.assertEqual(actual, expected)
def test_stem_length(self):
actual = len(self.mtrack.stems)
expected = 12
self.assertEqual(actual, expected)
def test_stem_type(self):
actual = type(self.stem)
expected = multitrack.Track
self.assertEqual(actual, expected)
def test_stem_component(self):
actual = self.stem.component
expected = ''
self.assertEqual(actual, expected)
def test_stem_duration(self):
actual = self.stem.duration
expected = None
self.assertEqual(actual, expected)
def test_stem_fname(self):
actual = os.path.basename(self.stem.audio_path)
expected = "NightPanther_Fire_STEM_08.wav"
self.assertEqual(actual, expected)
def test_stem_instrument(self):
actual = self.stem.instrument
expected = ["auxiliary percussion"]
self.assertEqual(actual, expected)
def test_stem_f0_type(self):
actual = self.stem.f0_type
expected = ["u"]
self.assertEqual(actual, expected)
def test_stem_mixpath(self):
actual = os.path.basename(self.stem.mix_path)
expected = "NightPanther_Fire_MIX.wav"
self.assertEqual(actual, expected)
def test_stem_pitch_annot_none(self):
actual = self.stem.pitch_annotation
expected = None
self.assertEqual(actual, expected)
def test_stem_pitch_pyin_none(self):
actual = self.stem.pitch_estimate_pyin
expected = None
self.assertEqual(actual, expected)
def test_stem_pitch_annot_exists(self):
actual = self.mtrack.stems[7].pitch_annotation
expected_len = 18268
self.assertEqual(len(actual), expected_len)
def test_stem_pitch_pyin_exists(self):
actual = self.mtrack.stems[7].pitch_estimate_pyin
expected_len = 25175
self.assertEqual(len(actual), expected_len)
def test_stem_raw_idx(self):
actual = self.stem.raw_idx
expected = None
self.assertEqual(actual, expected)
def test_stem_stem_idx(self):
actual = self.stem.stem_idx
expected = 8
self.assertEqual(actual, expected)
def test_raw_length1(self):
actual = len(self.mtrack.raw_audio)
expected = 12
self.assertEqual(actual, expected)
def test_raw_length2(self):
actual = len(multitrack.get_dict_leaves(self.mtrack.raw_audio))
expected = 55
self.assertEqual(actual, expected)
def test_raw_type(self):
actual = type(self.raw)
expected = multitrack.Track
self.assertEqual(actual, expected)
def test_raw_component(self):
actual = self.raw.component
expected = ''
self.assertEqual(actual, expected)
def test_raw_duration(self):
actual = self.raw.duration
expected = None
self.assertEqual(actual, expected)
def test_raw_fname(self):
actual = os.path.basename(self.raw.audio_path)
expected = "NightPanther_Fire_RAW_08_01.wav"
self.assertEqual(actual, expected)
def test_raw_instrument(self):
actual = self.raw.instrument
expected = ["cymbal"]
self.assertEqual(actual, expected)
def test_raw_f0type(self):
actual = self.raw.f0_type
expected = ["u"]
self.assertEqual(actual, expected)
def test_raw_mixpath(self):
actual = os.path.basename(self.raw.mix_path)
expected = "NightPanther_Fire_MIX.wav"
self.assertEqual(actual, expected)
def test_raw_pitch_annotation(self):
actual = self.raw.pitch_annotation
expected = None
self.assertEqual(actual, expected)
def test_raw_raw_idx(self):
actual = self.raw.raw_idx
expected = 1
self.assertEqual(actual, expected)
def test_raw_stem_idx(self):
actual = self.raw.stem_idx
expected = 8
self.assertEqual(actual, expected)
def test_stem_instruments(self):
actual = self.mtrack.stem_instruments
expected = [
'auxiliary percussion',
'brass section',
'drum machine',
'drum set',
'electric bass',
'male singer',
'string section',
'synthesizer',
'synthesizer',
'synthesizer',
'vocalists',
'vocalists',
]
print(actual)
self.assertEqual(actual, expected)
def test_raw_instruments_length(self):
actual = len(self.mtrack.raw_instruments)
expected = 55
self.assertEqual(actual, expected)
def test_raw_instruments(self):
actual = self.mtrack.raw_instruments[0:5]
expected = [
'brass section',
'brass section',
'brass section',
'brass section',
'cello'
]
self.assertEqual(actual, expected)
def test_has_melody(self):
actual = self.mtrack.has_melody
expected = True
self.assertEqual(actual, expected)
def test_predominant_stem_type(self):
actual = type(self.mtrack.predominant_stem)
expected = multitrack.Track
self.assertEqual(actual, expected)
def test_predominant_stem_component(self):
actual = self.mtrack.predominant_stem.component
expected = 'melody'
self.assertEqual(actual, expected)
def test_predominant_stem_stem_idx(self):
actual = self.mtrack.predominant_stem.stem_idx
expected = 7
self.assertEqual(actual, expected)
def test_melody_annotations(self):
actual_mel1 = self.mtrack.melody1_annotation
actual_mel2 = self.mtrack.melody2_annotation
actual_mel3 = self.mtrack.melody3_annotation
self.assertEqual(len(actual_mel1), 36692)
self.assertEqual(len(actual_mel2), 36692)
self.assertEqual(len(actual_mel3), 36692)
self.assertEqual(len(actual_mel1[0]), 2)
self.assertEqual(len(actual_mel2[0]), 2)
self.assertEqual(len(actual_mel3[0]), 3)
def test_melody_tracks(self):
mel_tracks = self.mtrack.melody_stems()
self.assertEqual(len(mel_tracks), 2)
self.assertEqual(mel_tracks[0].component, 'melody')
self.assertEqual(mel_tracks[0].stem_idx, 6)
self.assertEqual(len(mel_tracks[0].pitch_annotation), 6591)
def test_bass_tracks(self):
bass_tracks = self.mtrack.bass_stems()
self.assertEqual(len(bass_tracks), 1)
self.assertEqual(bass_tracks[0].component, 'bass')
self.assertEqual(bass_tracks[0].stem_idx, 1)
def test_num_stems(self):
actual = self.mtrack.num_stems()
expected = 12
self.assertEqual(actual, expected)
def test_num_raw(self):
actual = self.mtrack.num_raw()
expected = 55
self.assertEqual(actual, expected)
def test_stem_filepaths(self):
actual = len(self.mtrack.stem_filepaths())
expected = 12
self.assertEqual(actual, expected)
def test_raw_filepaths(self):
actual = len(self.mtrack.raw_filepaths())
expected = 55
self.assertEqual(actual, expected)
def test_stem_activations(self):
actual = self.mtrack.stem_activations
self.assertEqual(type(actual), list)
def test_stem_activations_v2(self):
actual = self.mtrack.stem_activations_v2
self.assertEqual(type(actual), list)
def test_activation_conf_from_stem1(self):
actual = self.mtrack.activation_conf_from_stem(3)[0]
expected = [0.0, 0.0474]
self.assertEqual(actual, expected)
def test_activation_conf_from_stem_v2(self):
actual = self.mtrack.activation_conf_from_stem(3, version='v2')[0]
expected = [0.0, 0.0474]
self.assertEqual(actual, expected)
def test_activation_conf_from_stem2(self):
actual = self.mtrack.activation_conf_from_stem(50)
expected = None
self.assertEqual(actual, expected)
def test_get_mixing_coefficient(self):
mtrack = multitrack.MultiTrack('AClassicEducation_NightOwl')
actual = mtrack._get_mixing_coefficient(3)
expected = 0.2
self.assertEqual(actual, expected)
def test_get_mixing_coefficient2(self):
actual = self.mtrack2._get_mixing_coefficient(3)
expected = 0.585016969071061
self.assertAlmostEqual(actual, expected)
class TestTrack(unittest.TestCase):
def test_track(self):
track = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
self.assertEqual(track.instrument, ['blurbophone'])
self.assertEqual(track.audio_path, 'fake/path1')
self.assertEqual(track.component, 'melody')
self.assertEqual(track.stem_idx, 12)
self.assertEqual(track.raw_idx, None)
self.assertEqual(track.mix_path, 'fake/path2')
self.assertEqual(track.pitch_path, None)
def test_track2(self):
track = multitrack.Track(
'kazoo', 'fake/path1', 50, 'fake/path2',
raw_idx='R07'
)
self.assertEqual(track.instrument, ['kazoo'])
self.assertEqual(track.audio_path, 'fake/path1')
self.assertEqual(track.component, '')
self.assertEqual(track.stem_idx, 50)
self.assertEqual(track.raw_idx, 7)
self.assertEqual(track.mix_path, 'fake/path2')
def test_track_equality(self):
track1 = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
track2 = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
actual = track1 == track2
expected = True
self.assertEqual(expected, actual)
def test_track_inequality(self):
track1 = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
track2 = multitrack.Track(
'kazoo', 'fake/path1', 50, 'fake/path2',
raw_idx='R07'
)
actual = track1 != track2
expected = True
self.assertEqual(expected, actual)
class TestPathBasedir(unittest.TestCase):
def test_basedir(self):
path = 'this/is/a/path'
actual = multitrack._path_basedir(path)
expected = 'path'
self.assertEqual(actual, expected)
def test_basedir2(self):
path = 'this/is/a/second/path/'
actual = multitrack._path_basedir(path)
expected = 'path'
self.assertEqual(actual, expected)
def test_basedir3(self):
path = 'this/is/a/path/with/an/ending/file.txt'
actual = multitrack._path_basedir(path)
expected = 'file.txt'
self.assertEqual(actual, expected)
class TestGetDictLeaves(unittest.TestCase):
def test_get_leaves(self):
test_dict = {
'a': ['z', 'y', 'x'],
'b': ['w', 't'],
'c': ['elephant'],
'd': {'asdf': ['z']},
'e': {'borg': ['foo']}
}
actual = multitrack.get_dict_leaves(test_dict)
expected = set(['z', 'y', 'x', 'w', 't', 'elephant', 'foo'])
self.assertEqual(actual, expected)
def test_get_leaves2(self):
mtrack = multitrack.MultiTrack('NightPanther_Fire')
test_dict = {
'a': mtrack,
'b': {1: mtrack, 2: mtrack},
'c': [mtrack],
'd': {'asdf': mtrack},
'e': {'borg': [mtrack]}
}
actual = multitrack.get_dict_leaves(test_dict)
expected = set([mtrack, mtrack, mtrack, mtrack, mtrack])
self.assertEqual(actual, expected)
class TestGetDuration(unittest.TestCase):
def test_get_duration(self):
actual = multitrack.get_duration(os.path.join(
os.path.dirname(__file__), 'data/short_audio.wav'))
expected = 4.0
self.assertEqual(actual, expected)
class TestReadAnnotationFile(unittest.TestCase):
def test_readpitch(self):
actual, header = multitrack.read_annotation_file(
os.path.join(os.path.dirname(__file__), 'data/pitch.csv')
)
expected = [
[0.023219954, 189.187],
[0.029024943, 191.782],
[0.034829931, 200.344]
]
self.assertEqual(actual, expected)
self.assertEqual(header, [])
def test_readmelody(self):
actual, header = multitrack.read_annotation_file(
os.path.join(os.path.dirname(__file__), 'data/melody.csv')
)
expected = [
[0.0, 0.0],
[0.0058049886621315194, 0.0],
[0.011609977324263039, 0.0],
[0.017414965986394557, 0.0],
[0.023219954648526078, 189.18700000000001]
]
self.assertEqual(actual, expected)
self.assertEqual(header, [])
def test_invalidpath(self):
actual, header = multitrack.read_annotation_file('blurb/blork/barg')
expected = None
self.assertEqual(actual, expected)
self.assertEqual(header, expected)
class TestGetValidInstrumentLabels(unittest.TestCase):
def setUp(self):
self.labels = multitrack.get_valid_instrument_labels()
test_taxonomy_fpath = os.path.join(
os.path.dirname(__file__), 'data/test_taxonomy.yaml')
with open(test_taxonomy_fpath, 'r') as fhandle:
self.test_taxonomy = yaml.load(fhandle)
def test_inclusion(self):
self.assertTrue('female singer' in self.labels)
def test_inclusion2(self):
self.assertTrue('erhu' in self.labels)
def test_exclusion(self):
self.assertFalse('squidward' in self.labels)
def test_alternate_taxonomy(self):
actual = multitrack.get_valid_instrument_labels(
taxonomy=self.test_taxonomy
)
expected = set([
'rick',
'morty',
'beth',
'summer',
'jerry',
'mrs pancakes',
'tiny rick',
'squanchy',
'traflorkians',
'unity'
])
self.assertEqual(actual, expected)
class TestIsValidInstrument(unittest.TestCase):
def test_valid_instrument(self):
actual = multitrack.is_valid_instrument('clarinet')
expected = True
self.assertEqual(actual, expected)
def test_invalid_instrument(self):
actual = multitrack.is_valid_instrument('Clarinet')
expected = False
self.assertEqual(actual, expected)
def test_invalid_instrument2(self):
actual = multitrack.is_valid_instrument('mayonnaise')
expected = False
self.assertEqual(actual, expected)
class TestGetDatasetVersion(unittest.TestCase):
def test_version_1(self):
actual = multitrack.get_dataset_version('MusicDelta_Beethoven')
expected = 'V1'
self.assertEqual(expected, actual)
def test_version_v2(self):
actual = multitrack.get_dataset_version("FennelCartwright_DearTessie")
expected = 'V2'
self.assertEqual(expected, actual)
def test_version_extra(self):
actual = multitrack.get_dataset_version("AHa_TakeOnMe")
expected = 'EXTRA'
self.assertEqual(expected, actual)
def test_version_bach10(self):
actual = multitrack.get_dataset_version("Bach10_05DieNacht")
expected = 'BACH10'
self.assertEqual(expected, actual)
def test_version_none(self):
actual = multitrack.get_dataset_version("ManateeCommune_Blueberry")
expected = ''
self.assertEqual(expected, actual)
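# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of the test suite):
# loading a multitrack and walking its stems the same way the tests above do.
# "NightPanther_Fire" is the track id the tests already rely on; kept as
# comments so importing this module has no side effects.
#
#   mtrack = multitrack.MultiTrack("NightPanther_Fire")
#   for idx, stem in sorted(mtrack.stems.items()):
#       print(idx, stem.instrument, os.path.basename(stem.audio_path))
#   mtrack.melody1_annotation[:2]   # rows of [time, f0]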
|
|
from django.contrib.auth.views import redirect_to_login
from django.db.models.aggregates import Max
from django.http.response import HttpResponse, HttpResponseBadRequest, \
HttpResponseForbidden
from django.shortcuts import get_object_or_404, render, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.views.generic import ListView
from django.views.generic.base import View
from django.views.generic.detail import DetailView, SingleObjectMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from issues import models, forms
from issues.forms import CreateIssueForm, CreateProposalForm, EditProposalForm, \
UpdateIssueForm, EditProposalTaskForm, AddAttachmentForm, \
UpdateIssueAbstractForm, CreateProposalVoteArgumentForm
from issues.models import ProposalType, Issue, IssueStatus, ProposalVote, \
Proposal, ProposalVoteBoard, ProposalVoteValue, VoteResult, ProposalVoteArgument, ProposalVoteArgumentRanking
from meetings.models import Meeting
from oc_util.templatetags.opencommunity import minutes, board_voters_on_proposal
from ocd.base_views import AjaxFormView, json_response, CommitteeMixin
from ocd.validation import enhance_html
from ocd.base_managers import ConfidentialSearchQuerySet
from shultze_vote import send_issue_ranking
from acl.default_roles import DefaultGroups
from users.permissions import has_community_perm, has_committee_perm
from haystack.inputs import AutoQuery
import json
import mimetypes
from datetime import date
class IssueMixin(CommitteeMixin):
model = models.Issue
def get_queryset(self):
return self.model.objects.object_access_control(
user=self.request.user,
committee=self.committee).filter(committee=self.committee,
active=True)
class ProposalMixin(IssueMixin):
model = models.Proposal
def get_queryset(self):
return self.model.objects.object_access_control(
user=self.request.user,
committee=self.committee).filter(issue=self.issue,
active=True)
@property
def issue(self):
return get_object_or_404(models.Issue, committee=self.committee,
pk=self.kwargs['issue_id'])
def _can_complete_task(self):
o = self.get_object()
if self.request.user == o.assigned_to_user:
return True
return has_community_perm(self.request.user, self.committee.community,
'issues.edittask_proposal')
class IssueList(IssueMixin, ListView):
required_permission = 'viewopen_issue'
def get_queryset(self):
return super(IssueList, self).get_queryset().exclude(
status=IssueStatus.ARCHIVED).order_by('-created_at')
def get_context_data(self, **kwargs):
d = super(IssueList, self).get_context_data(**kwargs)
available_ids = set([x.id for x in self.get_queryset()])
if d['committee'].issue_ranking_enabled:
d['sorted_issues'] = super(IssueList, self).get_queryset().exclude(
status=IssueStatus.ARCHIVED).order_by('-order_by_votes')
if 'vote_ranking' in d['cperms'] and self.request.user.is_authenticated():
my_ranking = models.IssueRankingVote.objects.filter(
voted_by=self.request.user,
issue__committee_id=d['committee'].id) \
.order_by('rank')
d['my_vote'] = [i.issue for i in my_ranking if i.issue.active and \
i.issue.status != IssueStatus.ARCHIVED]
d['my_non_ranked'] = [i for i in self.get_queryset() \
if i not in d['my_vote']]
for obj in self.object_list:
obj.restricted_proposals = \
obj.proposals.object_access_control(
user=self.request.user, committee=self.committee)
for ai in obj.agenda_items.all():
ai.restricted_proposals = ai.proposals(
user=self.request.user, committee=self.committee)
ai.restricted_accepted_proposals = ai.accepted_proposals(
user=self.request.user, committee=self.committee)
return d
required_permission_for_post = 'vote_ranking'
def post(self, request, *args, **kwargs):
# TODO: check post permission for user and for each issue
send_issue_ranking(request)
return json_response({'res': 'ok', })
class IssueDetailView(IssueMixin, DetailView):
def get_required_permission(self):
o = self.get_object()
return 'viewclosed_issue' if o.is_published else 'viewopen_issue'
def get_context_data(self, **kwargs):
d = super(IssueDetailView, self).get_context_data(**kwargs)
m_id = self.request.GET.get('m_id', None)
d['form'] = forms.CreateIssueCommentForm()
d['proposal_form'] = forms.CreateProposalForm(committee=self.committee)
if m_id:
d['meeting'] = get_object_or_404(Meeting, id=m_id,
committee=self.committee)
a = d['meeting'].agenda.object_access_control(
user=self.request.user, committee=self.committee).all()
d['meeting_active_issues'] = [ai.issue for ai in a if
ai.issue.active]
else:
d['meeting'] = None
if self.request.GET.get('s', None) == '1':
d['all_issues'] = self.get_queryset().exclude(
status=IssueStatus.ARCHIVED).order_by('-created_at')
o = self.get_object()
if o.is_current and self.request.user in o.committee.upcoming_meeting_participants.all() and has_committee_perm(
self.request.user, self.committee, 'proposal_board_vote_self'):
d['can_board_vote_self'] = True
d['proposals'] = self.object.proposals.object_access_control(
user=self.request.user, committee=self.committee).open()
d['upcoming_issues'] = self.object.committee.upcoming_issues(
user=self.request.user, committee=self.committee)
d['agenda_items'] = self.object.agenda_items.all()
for ai in d['agenda_items']:
ai.accepted_proposals = ai.accepted_proposals(
user=self.request.user, committee=self.committee)
ai.rejected_proposals = ai.rejected_proposals(
user=self.request.user, committee=self.committee)
ai.proposals = ai.proposals(
user=self.request.user, committee=self.committee)
return d
required_permission_for_post = 'add_issuecomment'
def post(self, request, *args, **kwargs):
form = forms.CreateIssueCommentForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
i = self.get_object()
comment_id = request.POST.get('comment_id', None)
try:
c = i.comments.get(pk=int(comment_id))
c.content = enhance_html(form.cleaned_data['content'])
c.save()
return json_response({'comment_id': c.id})
        except (TypeError, ValueError, models.IssueComment.DoesNotExist):
c = i.comments.create(content=enhance_html(form.cleaned_data['content']),
created_by=request.user)
return json_response({'comment_id': c.id})
# if comment_id == '':
# c = i.comments.create(content=enhance_html(form.cleaned_data['content']),
# created_by=request.user)
#
# self.object = i # this makes the next line work
# context = self.get_context_data(object=i, c=c)
# return render(request, 'issues/_comment.html', context)
# else:
# c = i.comments.get(pk=int(comment_id))
# c.content=enhance_html(form.cleaned_data['content'])
# return json_response({'comment_id': c.id})
class IssueCommentMixin(CommitteeMixin):
model = models.IssueComment
def get_required_permission(self):
o = self.get_object()
return 'editopen_issuecomment' if o.issue.is_upcoming else 'editclosed_issuecomment'
def get_queryset(self):
return models.IssueComment.objects.filter(issue__committee=self.committee)
class IssueCommentDeleteView(IssueCommentMixin, DeleteView):
def post(self, request, *args, **kwargs):
o = self.get_object()
o.active = 'undelete' in request.POST
o.save()
return HttpResponse(int(o.active))
class IssueCommentEditView(IssueCommentMixin, UpdateView):
form_class = forms.EditIssueCommentForm
def form_valid(self, form):
c = self.get_object()
c.update_content(form.instance.version, self.request.user,
form.cleaned_data['content'])
context = self.get_context_data(object=c.issue, c=c)
return render(self.request, 'issues/_comment.html', context)
def form_invalid(self, form):
return HttpResponse("")
def get_form_kwargs(self):
d = super(IssueCommentEditView, self).get_form_kwargs()
d['prefix'] = 'ic%d' % self.get_object().id
return d
class IssueCreateView(AjaxFormView, IssueMixin, CreateView):
form_class = CreateIssueForm
template_name = "issues/issue_create_form.html"
reload_on_success = True
def get_required_permission(self):
return 'editagenda_community' if self.upcoming else 'add_issue'
upcoming = False
def form_valid(self, form):
form.instance.committee = self.committee
form.instance.created_by = self.request.user
form.instance.status = IssueStatus.IN_UPCOMING_MEETING if \
self.upcoming else IssueStatus.OPEN
if self.upcoming:
max_upcoming = Issue.objects.filter(
committee=self.committee).aggregate(x=Max(
'order_in_upcoming_meeting'))['x']
form.instance.order_in_upcoming_meeting = max_upcoming + 1 \
if max_upcoming else 1
return super(IssueCreateView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(IssueCreateView, self).get_form_kwargs()
kwargs.update({'committee': self.committee})
return kwargs
def get_success_url(self):
url = super(IssueCreateView, self).get_success_url()
if not self.upcoming:
url += '?s=1'
return url
class IssueEditView(AjaxFormView, IssueMixin, UpdateView):
required_permission = 'editopen_issue'
form_class = UpdateIssueForm
reload_on_success = True
def form_valid(self, form):
if self.reload_on_success:
return super(IssueEditView, self).form_valid(form)
else:
self.object = form.save()
return render(self.request, 'issues/_issue_title.html',
self.get_context_data())
def get_form_kwargs(self):
kwargs = super(IssueEditView, self).get_form_kwargs()
kwargs.update({'committee': self.committee})
return kwargs
class IssueEditAbstractView(AjaxFormView, IssueMixin, UpdateView):
required_permission = 'editopen_issue'
form_class = UpdateIssueAbstractForm
def form_valid(self, form):
self.object = form.save()
return render(self.request, 'issues/_issue-abstract.html',
self.get_context_data())
class IssueCompleteView(IssueMixin, SingleObjectMixin, View):
required_permission = 'add_meeting'
def post(self, request, *args, **kwargs):
o = self.get_object()
# TODO: verify that issue is in active meeting
if request.POST.get('complete') == '1':
o.completed = True
elif request.POST.get('undo_complete') == '1':
o.completed = False
if o.status == IssueStatus.ARCHIVED:
o.status = o.statuses.OPEN
elif request.POST.get('archive') == '1':
# TODO: check if issue can be closed
o.completed = True
o.status = IssueStatus.ARCHIVED
o.save()
return HttpResponse("-")
class IssueSetLengthView(IssueMixin, SingleObjectMixin, View):
required_permission = 'editagenda_community'
def post(self, request, *args, **kwargs):
o = self.get_object()
s = request.POST.get('length', '').strip()
if s:
try:
t = int(s)
if not 0 <= t <= 360:
raise ValueError('Illegal Value')
except ValueError:
return HttpResponseBadRequest("Bad Request")
else:
t = None
o.length_in_minutes = t
o.save()
return HttpResponse(minutes(t) or "--:--")
class IssueDeleteView(AjaxFormView, IssueMixin, DeleteView):
def get_required_permission(self):
o = self.get_object()
if o.is_published:
return 'editclosed_issue'
return 'add_issue' if o.created_by == self.request.user else 'editopen_issue'
def get_success_url(self):
return "" if self.issue.active else "-"
def delete(self, request, *args, **kwargs):
o = self.get_object()
o.active = False
o.save()
o.active_proposals().update(active=False)
return HttpResponse("-")
class AttachmentCreateView(AjaxFormView, IssueMixin, CreateView):
model = models.IssueAttachment
form_class = AddAttachmentForm
required_permission = 'editopen_issue'
reload_on_success = True
@property
def issue(self):
return get_object_or_404(models.Issue, committee=self.committee, pk=self.kwargs['pk'])
def form_valid(self, form):
form.instance.created_by = self.request.user
form.instance.issue = self.issue
return super(AttachmentCreateView, self).form_valid(form)
class AttachmentDeleteView(AjaxFormView, CommitteeMixin, DeleteView):
model = models.IssueAttachment
required_permission = 'editopen_issue'
@property
def issue(self):
return get_object_or_404(models.Issue, pk=self.kwargs['issue_id'])
def delete(self, request, *args, **kwargs):
o = self.get_object()
o.file.delete(save=False)
o.delete()
return HttpResponse("")
class AttachmentDownloadView(CommitteeMixin, SingleObjectMixin, View):
model = models.IssueAttachment
def get_required_permission(self):
o = self.get_object().issue
return 'viewclosed_issue' if o.is_published else 'viewopen_issue'
def get(self, request, *args, **kwargs):
o = self.get_object()
filename = o.file.name.split('/')[-1]
mime_type = mimetypes.guess_type(filename, True)[0] or "text/plain"
response = HttpResponse(o.file, content_type=mime_type)
response['Content-Disposition'] = 'attachment; filename=%s' % filename.encode('utf-8')
return response
class ProposalCreateView(AjaxFormView, ProposalMixin, CreateView):
reload_on_success = True
def get_required_permission(self):
return 'editclosedproposal' if self.issue.status == IssueStatus.ARCHIVED else 'add_proposal'
form_class = CreateProposalForm
def get_context_data(self, **kwargs):
context = super(ProposalCreateView, self).get_context_data(**kwargs)
context['issue'] = self.issue
return context
def form_valid(self, form):
form.instance.created_by = self.request.user
form.instance.issue = self.issue
if self.reload_on_success:
return super(ProposalCreateView, self).form_valid(form)
else:
self.object = form.save()
return render(self.request, 'issues/_proposal.html',
self.get_context_data(proposal=self.object))
def get_success_url(self):
return self.issue.get_absolute_url()
def get_form_kwargs(self):
d = super(ProposalCreateView, self).get_form_kwargs()
d['prefix'] = 'proposal'
d['committee'] = self.committee
d['initial'] = {'issue': self.issue}
return d
class ProposalDetailView(ProposalMixin, DetailView):
def get_required_permission(self):
p = self.get_object()
return 'viewclosed_proposal' if p.decided_at_meeting else 'viewopen_proposal'
def get_required_permission_for_post(self):
p = self.get_object()
return 'acceptclosed_proposal' if p.decided_at_meeting else 'acceptopen_proposal'
def board_votes_dict(self):
total_votes = 0
votes_dict = {'sums': {}, 'total': total_votes, 'per_user': {}}
pro_count = 0
con_count = 0
neut_count = 0
# Board vote permission
board_attending = self.get_object().issue.committee.meeting_participants()
for u in board_attending:
# check u has perm for board vote
vote = ProposalVoteBoard.objects.filter(proposal=self.get_object(), user=u)
if vote.exists():
votes_dict['per_user'][u] = vote[0]
if vote[0].value == 1:
pro_count += 1
total_votes += 1
elif vote[0].value == -1:
con_count += 1
total_votes += 1
elif vote[0].value == 0:
neut_count += 1
else:
votes_dict['per_user'][u] = None
neut_count += 1
votes_dict['sums']['pro_count'] = pro_count
votes_dict['sums']['con_count'] = con_count
votes_dict['sums']['neut_count'] = neut_count
votes_dict['total'] = total_votes
return votes_dict
def _init_board_votes(self, board_attending):
p = self.get_object()
for b in board_attending:
ProposalVoteBoard.objects.create(proposal=p, user=b,
voted_by_chairman=True)
def get_context_data(self, **kwargs):
"""add meeting for the latest straw voting result
add 'previous_res' var if found previous registered results for this meeting
"""
context = super(ProposalDetailView, self).get_context_data(**kwargs)
m_id = self.request.GET.get('m_id', None)
o = self.get_object()
if m_id:
context['meeting_context'] = get_object_or_404(Meeting, id=m_id,
committee=self.committee)
participants = context['meeting_context'].participants.all()
else:
context['meeting_context'] = None
participants = o.issue.committee.upcoming_meeting_participants.all()
try:
group = self.request.user.memberships.get(committee=self.issue.committee).default_group_name
        except Exception:
group = ""
board_votes = ProposalVoteBoard.objects.filter(proposal=o)
board_attending = board_voters_on_proposal(o)
is_current = o.issue.is_current
context['res'] = o.get_straw_results()
results = VoteResult.objects.filter(proposal=o) \
.order_by('-meeting__held_at')
if o.issue.is_upcoming and \
self.committee.upcoming_meeting_is_published and \
self.committee.straw_vote_ended:
context['meeting'] = self.committee.draft_meeting()
else:
if results.count():
context['meeting'] = results[0].meeting
else:
context['meeting'] = None
if not board_votes.exists():
self._init_board_votes(board_attending)
show_to_member = group == DefaultGroups.MEMBER and o.decided_at_meeting
show_to_board = (group == DefaultGroups.BOARD or \
group == DefaultGroups.SECRETARY) and \
(is_current or o.decided_at_meeting)
show_to_chairman = group == DefaultGroups.CHAIRMAN and o.decided
show_board_vote_result = o.register_board_votes and \
board_votes.exclude(
value=ProposalVoteValue.NEUTRAL).count() and \
(show_to_member or show_to_board or show_to_chairman)
context['issue_frame'] = self.request.GET.get('s', None)
context['board_attending'] = board_attending
context['user_vote'] = o.board_vote_by_member(self.request.user.id)
context['show_board_vote_result'] = show_board_vote_result
context['chairman_can_vote'] = is_current and not o.decided
context['board_votes'] = self.board_votes_dict()
context['can_board_vote_self'] = is_current and not o.decided and has_committee_perm(self.request.user,
self.committee,
'proposal_board_vote_self')\
and self.request.user in board_attending
rel_proposals = self.object.issue.proposals
context['proposals'] = rel_proposals.object_access_control(
user=self.request.user, committee=self.committee)
return context
def post(self, request, *args, **kwargs):
""" Used to change a proposal status (accept/reject)
or a proposal's property completed/not completed
"""
p = self.get_object()
v = request.POST.get('accepted', None)
if v:
v = int(v)
if v not in [
p.statuses.ACCEPTED,
p.statuses.REJECTED,
p.statuses.IN_DISCUSSION
]:
return HttpResponseBadRequest("Bad value for accepted POST parameter")
p.status = v
p.save()
return redirect(p)
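    # A minimal request sketch (illustrative only; the URL pattern is assumed,
    # not defined in this module): POSTing accepted=<Proposal.statuses.ACCEPTED>
    # to the proposal detail URL marks the proposal accepted and redirects back
    # to its page; any other integer outside ACCEPTED/REJECTED/IN_DISCUSSION
    # yields HTTP 400.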
class ProposalEditView(AjaxFormView, ProposalMixin, UpdateView):
form_class = EditProposalForm
reload_on_success = True
def get_required_permission(self):
o = self.get_object()
return 'editclosed_proposal' if o.decided_at_meeting else 'edittask_proposal'
def get_form_kwargs(self):
d = super(ProposalEditView, self).get_form_kwargs()
d['prefix'] = 'proposal'
d['committee'] = self.committee
return d
class ProposalEditTaskView(ProposalMixin, UpdateView):
form_class = EditProposalTaskForm
def get_queryset(self):
return super(ProposalEditTaskView, self).get_queryset().filter(type=ProposalType.TASK)
def get_required_permission(self):
o = self.get_object()
return 'editclosed_proposal' if o.decided_at_meeting else 'editopen_proposal'
class ProposalCompletedTaskView(ProposalMixin, UpdateView):
""" update a task as completed / un-completed
"""
def post(self, request, *args, **kwargs):
if not self._can_complete_task():
            return HttpResponseForbidden("403 Forbidden")
p = self.get_object()
completed = request.POST.get('completed', None)
if completed:
p.task_completed = completed == '1'
p.save()
return redirect(p)
class ProposalDeleteView(AjaxFormView, ProposalMixin, DeleteView):
def get_required_permission(self):
o = self.get_object()
if o.decided_at_meeting:
return 'editclosed_issue'
return 'add_proposal' if o.created_by == self.request.user else 'editopen_proposal'
def get_success_url(self):
return "" if self.issue.active else "-"
def delete(self, request, *args, **kwargs):
o = self.get_object()
o.active = False
o.save()
return HttpResponse("-")
class VoteResultsView(CommitteeMixin, DetailView):
model = models.Proposal
def get(self, request, *args, **kwargs):
meeting = None
meeting_id = request.GET.get('meeting_id', None)
p = self.get_object()
if meeting_id:
meeting = get_object_or_404(Meeting, id=int(meeting_id))
res = p.get_straw_results(meeting.id)
else:
meeting = self.committee.draft_meeting()
res = p.get_straw_results()
panel = render_to_string('issues/_proposal_vote_results.html',
RequestContext(request, {
'meeting': meeting,
'res': res,
'proposal': p,
}))
return HttpResponse(panel)
class ProposalVoteMixin(CommitteeMixin):
VOTE_OK = 0
VOTE_VER_ERR = 1
VOTE_OVERRIDE_ERR = 2
def _do_vote(self, vote_class, proposal, user_id, value, is_board, voter_group):
if is_board:
# verify
if not voter_group or voter_group == DefaultGroups.MEMBER \
or proposal.decided:
return (None, self.VOTE_VER_ERR)
by_chairman = voter_group == DefaultGroups.CHAIRMAN
vote, created = vote_class.objects.get_or_create(proposal_id=proposal.id,
user_id=user_id)
if not created and by_chairman and not vote.voted_by_chairman:
            # don't allow a chairman's vote to override a board member's existing vote!
return (vote, self.VOTE_OVERRIDE_ERR)
vote.value = value
if is_board:
vote.voted_by_chairman = by_chairman
vote.save()
return (vote, self.VOTE_OK)
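    # Illustrative outcomes (restating the branches above): _do_vote returns
    # (vote, VOTE_OK) on success, (None, VOTE_VER_ERR) when a board vote is
    # attempted without a board-level group or on an already decided proposal,
    # and (vote, VOTE_OVERRIDE_ERR) when a chairman tries to overwrite a vote
    # a board member cast themselves.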
def _vote_values_map(self, key):
vote_map = {
'pro': 1,
'con': -1,
'neut': 0,
'reset': -2,
}
        if not isinstance(key, int):
try:
return vote_map[key]
except KeyError:
return None
else:
for k, val in vote_map.items():
if key == val:
return k
return None
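    # Illustrative mapping sketch (values taken from vote_map above): the helper
    # is bidirectional, so string keys map to vote values and integers map back
    # to their keys, e.g.
    #   self._vote_values_map('pro')   -> 1
    #   self._vote_values_map(-1)      -> 'con'
    #   self._vote_values_map('bogus') -> None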
class ProposalVoteView(ProposalVoteMixin, DetailView):
required_permission_for_post = 'vote'
model = models.Proposal
def post(self, request, *args, **kwargs):
is_board = request.POST.get('board', False)
user_id = request.POST.get('user', request.user.id)
voter = request.user
voter_group = 'board' if has_committee_perm(voter, self.committee, 'proposal_board_vote') else ''
val = request.POST.get('val', None)
if is_board == '1':
# vote for board member by chairman or board member
vote_class = ProposalVoteBoard
else:
# straw vote by member
vote_class = ProposalVote
proposal = self.get_object()
pid = proposal.id
vote_panel_tpl = 'issues/_vote_panel.html' if val == 'reset' \
else 'issues/_vote_reset_panel.html'
res_panel_tpl = 'issues/_board_vote_res.html' if is_board \
else 'issues/_vote_reset_panel.html'
vote_response = {
'result': 'ok',
'html': render_to_string(res_panel_tpl,
{
'proposal': proposal,
'committee': self.committee,
'vote_status': val,
}),
}
value = ''
if val == 'reset':
vote = get_object_or_404(vote_class,
proposal_id=pid, user_id=user_id)
vote.delete()
related_arguments = ProposalVoteArgumentRanking.objects.filter(user=request.user,
argument__proposal_vote__proposal=proposal)
if related_arguments.count():
related_arguments.delete()
vote_response['html'] = render_to_string(vote_panel_tpl,
{
'proposal': proposal,
'committee': self.committee
})
return json_response(vote_response)
else:
value = self._vote_values_map(val)
            if value is None:
return HttpResponseBadRequest('vote value not valid')
vote, valid = self._do_vote(vote_class, proposal, user_id, value, is_board, voter_group)
if valid == ProposalVoteMixin.VOTE_OK:
vote_response['html'] = render_to_string(res_panel_tpl,
{
'proposal': proposal,
'committee': self.committee,
'vote_status': val,
'user': self.request.user
})
if is_board and voter_group == DefaultGroups.CHAIRMAN:
vote_response['sum'] = render_to_string('issues/_member_vote_sum.html',
{
'proposal': proposal,
'committee': self.committee,
'board_attending': board_voters_on_proposal(proposal),
})
else:
vote_response['result'] = 'err'
if valid == ProposalVoteMixin.VOTE_OVERRIDE_ERR:
vote_response['override_fail'] = [{'uid': user_id,
'val': self._vote_values_map(vote.value),
}]
return json_response(vote_response)
def get(self, request, *args, **kwargs):
voter_id = request.user.id
if not request.user.is_authenticated():
return redirect_to_login(request.build_absolute_uri())
is_board = request.GET.get('board', False)
voter_group = DefaultGroups.MEMBER
val = request.GET['val']
vote_class = ProposalVote
proposal = self.get_object()
value = self._vote_values_map(val)
        if value is None:
return redirect(proposal)
vote, valid = self._do_vote(vote_class, proposal, voter_id, value,
is_board, voter_group)
return redirect(proposal)
class MultiProposalVoteView(ProposalVoteMixin, DetailView):
required_permission_for_post = 'chairman_vote'
model = models.Proposal
def post(self, request, *args, **kwargs):
voted_ids = json.loads(request.POST['users'])
proposal = self.get_object()
pid = proposal.id
voter_group = request.user.get_default_group(self.committee.community) \
if request.user.is_authenticated() \
else ''
val = request.POST['val']
value = self._vote_values_map(val)
        if value is None:
return HttpResponseBadRequest('vote value not valid')
vote_failed = []
for user_id in voted_ids:
vote, valid = self._do_vote(ProposalVoteBoard, proposal,
user_id, value, True, voter_group)
if valid == ProposalVoteMixin.VOTE_OVERRIDE_ERR:
vote_failed.append({'uid': user_id, 'val': self._vote_values_map(vote.value), })
return json_response({
'result': 'ok',
'html': render_to_string('issues/_vote_reset_panel.html',
{
'proposal': proposal,
'committee': self.committee,
}),
'override_fail': vote_failed,
'sum': render_to_string('issues/_member_vote_sum.html',
{
'proposal': proposal,
'committee': self.committee,
'board_attending': board_voters_on_proposal(proposal),
})
})
class RankingVoteMixin(ProposalVoteMixin):
VOTE_OK = 0
VOTE_VER_ERR = 1
def _do_vote(self, vote_class, argument, user_id, value):
try:
vote = vote_class.objects.get(argument_id=argument.id,
user_id=user_id)
if argument.proposal_vote.value != ProposalVote.objects.get(user__id=user_id,
proposal=argument.proposal_vote.proposal).value:
return vote, self.VOTE_VER_ERR
if vote.value == value:
vote.delete()
else:
vote.value = value
vote.save()
except vote_class.DoesNotExist:
vote = vote_class.objects.create(argument_id=argument.id,
user_id=user_id,
value=value)
except vote_class.MultipleObjectsReturned:
# Should not happen
raise
except ProposalVote.DoesNotExist:
# Should not happen
raise
return vote, self.VOTE_OK
def _vote_values_map(self, key):
vote_map = {
'up': 1,
'down': -1,
}
        if not isinstance(key, int):
try:
return vote_map[key]
except KeyError:
return None
else:
for k, val in vote_map.items():
if key == val:
return k
return None
class ArgumentRankingVoteView(RankingVoteMixin, DetailView):
required_permission_for_post = 'vote'
model = models.ProposalVoteArgument
def post(self, request, *args, **kwargs):
user_id = request.POST.get('user', request.user.id)
val = request.POST['val']
vote_class = ProposalVoteArgumentRanking
argument = self.get_object()
vote_response = {
'result': 'ok',
}
value = self._vote_values_map(val)
        if value is None:
return HttpResponseBadRequest('vote value not valid')
vote, valid = self._do_vote(vote_class, argument, user_id, value)
if valid != RankingVoteMixin.VOTE_OK:
vote_response['result'] = 'err'
return HttpResponse(argument.argument_score)
def up_down_vote(request, committee_id, arg_id):
if request.method != "POST":
raise Exception("Must be POST")
argument = models.ProposalVoteArgument.objects.get(pk=arg_id)
val = request.POST['val']
value = 1 if val == 'up' else -1
try:
vote = models.ProposalVoteArgumentRanking.objects.get(argument=argument, user=request.user)
if vote.value == value:
vote.delete()
else:
vote.value = value
vote.save()
except ProposalVoteArgumentRanking.DoesNotExist:
obj = models.ProposalVoteArgumentRanking(argument=argument, user=request.user, value=value)
obj.save()
up_votes = ProposalVoteArgumentRanking.objects.filter(argument=argument, value=1).count()
down_votes = ProposalVoteArgumentRanking.objects.filter(argument=argument, value=-1).count()
total_votes = up_votes - down_votes
return HttpResponse(total_votes)
class ProposalVoteArgumentCreateView(CreateView):
model = models.ProposalVoteArgument
form_class = CreateProposalVoteArgumentForm
fields = ['argument', 'proposal_vote', 'created_by']
template_name = 'issues/proposal_vote_argument_form.html'
def get_success_url(self):
return ""
# def form_valid(self, form):
# form.instance.proposal_vote = ProposalVote.objects.get(pk=self.kwargs['vote_id'])
# form.instance.created_by = self.request.user
# return super(ProposalVoteArgumentCreateView, self).form_valid(form)
#
# def form_invalid(self, form):
# return HttpResponse("000")
def post(self, request, *args, **kwargs):
form = forms.CreateProposalVoteArgumentForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
proposal_vote = ProposalVote.objects.get(pk=self.kwargs['vote_id'])
a = ProposalVoteArgument.objects.create(argument=form.cleaned_data['argument'],
created_by=request.user, proposal_vote=proposal_vote)
self.object = a
context = self.get_context_data(arg=a, proposal=proposal_vote.proposal)
if proposal_vote.value == 1:
return render(request, 'issues/_pro_argument.html', context)
else:
return render(request, 'issues/_con_argument.html', context)
class ProposalMoreArgumentsView(DetailView):
model = models.Proposal
template_name = 'issues/_more_arguments_box.html'
def get_context_data(self, **kwargs):
d = super(ProposalMoreArgumentsView, self).get_context_data(**kwargs)
d['proposal'] = self.get_object()
d['user'] = self.request.user
return d
class ProposalArgumentsView(DetailView):
model = models.Proposal
template_name = 'issues/_vote_arguments.html'
context_object_name = 'proposal'
def get_context_data(self, **kwargs):
context = super(ProposalArgumentsView, self).get_context_data(**kwargs)
context['proposal'] = self.get_object()
context['user'] = self.request.user
return context
class ProposalVoteArgumentUpdateView(UpdateView):
model = models.ProposalVoteArgument
fields = ['argument', ]
def post(self, request, *args, **kwargs):
a = self.get_object()
if request.POST.get('argument', None):
a.argument = request.POST.get('argument')
a.save()
return HttpResponse(a.argument)
else:
return HttpResponse("")
class ProposalVoteArgumentDeleteView(DeleteView):
model = models.ProposalVoteArgument
success_url = ""
def post(self, request, *args, **kwargs):
o = self.get_object()
arg_id = o.id
o.delete()
return HttpResponse(arg_id)
def get_argument_value(request, committee_id, arg_id):
""" Return the value of the argument for editing """
arg_value = models.ProposalVoteArgument.objects.get(pk=arg_id)
return HttpResponse(arg_value.argument)
class ChangeBoardVoteStatusView(ProposalMixin, UpdateView):
required_permission_for_post = 'chairman_vote'
model = models.Proposal
def post(self, request, *args, **kwargs):
p = self.get_object()
if request.POST.get('val', None):
p.register_board_votes = request.POST.get('val') == '1'
p.save()
return json_response({'result': 'ok'})
else:
return json_response({'result': 'err'})
class AssignmentsView(ProposalMixin, ListView):
required_permission = 'viewopen_issue'
template_name = 'issues/assignment_list.html'
paginate_by = 75
def __init__(self, **kwargs):
super(AssignmentsView, self).__init__(**kwargs)
self.status = ''
def _get_order(self):
order_by = self.request.GET.get('ord', 'date')
if order_by == 'date':
order_by = '-due_by'
return order_by
def _add_status_qs(self, sqs):
self.status = self.request.GET.get('status', '')
if self.status:
if self.status == 'completed':
sqs = sqs.filter(task_completed=True)
else:
sqs = sqs.filter(task_completed=False)
if self.status == 'opened':
sqs = sqs.exclude(due_by__lt=date.today())
elif self.status == 'late':
sqs = sqs.filter(due_by__lt=date.today())
return sqs
def get_queryset(self):
term = self.request.GET.get('q', '').strip()
sqs = ConfidentialSearchQuerySet().models(Proposal).object_access_control(
user=self.request.user, committee=self.committee).filter(
active=True, committee=self.committee.id,
status=Proposal.statuses.ACCEPTED,
type=ProposalType.TASK).order_by(self._get_order())
sqs = self._add_status_qs(sqs)
if term:
sqs = sqs.filter(content=AutoQuery(term)) \
.filter_or(assignee__contains=term)
return sqs.load_all()
def get_context_data(self, **kwargs):
d = super(AssignmentsView, self).get_context_data(**kwargs)
search_query = self.request.GET.get('q', '').strip()
d['late'] = [p.id for p in list(self.get_queryset()) \
if not p.object.task_completed and p.due_by \
and p.due_by.date() < date.today()]
d['query'] = search_query
d['ord'] = self._get_order()
d['status'] = self.status
d['filter_as_link'] = d['is_paginated'] or d['status']
d['extra_arg'] = '&ord=' + d['ord'] + '&q=' + d['query'] + '&status=' + self.status
return d
class RulesMixin(CommitteeMixin):
def _get_rule_queryset(self):
qs = Proposal.objects.object_access_control(user=self.request.user,
committee=self.committee).filter(
active=True, issue__committee=self.committee,
status=Proposal.statuses.ACCEPTED,
type=ProposalType.RULE)
return qs
class ProceduresView(RulesMixin, ProposalMixin, ListView):
required_permission = 'viewopen_issue'
template_name = 'issues/procedure_list.html'
context_object_name = 'procedure_list'
paginate_by = 75
def __init__(self, **kwargs):
self.order_by = 'date'
super(ProceduresView, self).__init__(**kwargs)
def get_queryset(self):
term = self.request.GET.get('q', '').strip()
if not term:
# try search by tag
term = self.request.GET.get('t', '').strip()
self.order_by = self.request.GET.get('ord', 'date')
ord_term = '-decided_at' if self.order_by == 'date' else 'title'
sqs = ConfidentialSearchQuerySet().object_access_control(
user=self.request.user, committee=self.committee).filter(
active=True, committee=self.committee.id,
status=Proposal.statuses.ACCEPTED,
type=ProposalType.RULE).order_by(ord_term)
if term:
sqs = sqs.filter(content=AutoQuery(term))
return sqs.load_all()
def get_context_data(self, **kwargs):
def _sort_by_popularity(a, b):
return cmp(a[1], b[1])
d = super(ProceduresView, self).get_context_data(**kwargs)
alltags = {}
for p in self._get_rule_queryset():
for t in p.tags.names():
n = alltags.setdefault(t, 0)
alltags[t] = n + 1
sorted_tags = sorted(alltags.items(), _sort_by_popularity, reverse=True)
search_query = self.request.GET.get('q', '').strip()
tag_query = self.request.GET.get('t', '').strip()
d['sorted_tags'] = sorted_tags
d['query'] = search_query or tag_query
d['extra_arg'] = '&ord=' + self.order_by + '&q=' + d['query']
d['ord'] = self.order_by
d['active_tag'] = tag_query
d['tags_as_links'] = (not search_query and d['is_paginated']) or len(d['object_list']) == 0
return d
class AutoCompleteTagView(CommitteeMixin, View):
required_permission = 'editopen_issue'
def get(self, request, *args, **kwargs):
tag = request.GET.get('tag', '')
tag = tag.split(',')[-1].strip()
print 'T: ', tag
if not tag:
return HttpResponse(json.dumps([]))
json_tags = []
tags = set()
proposals = Proposal.objects.filter(
active=True, issue__committee=self.committee,
type=ProposalType.RULE)
for p in proposals:
tags.update(t for t in p.tags.names() if t.startswith(tag))
for t in tags:
json_tags.append({'tokens': [t, ], 'value': t})
# context = self.get_context_data(object_list=proposals)
        return HttpResponse(json.dumps(json_tags), content_type='application/json')
|
|
import random
import string
from django.core.mail import EmailMessage
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from core.models import Event, User
from .utils import DEFAULT_QUESTIONS
QUESTION_TYPES = (
('paragraph', 'Paragraph'),
('text', 'Long text'),
('choices', 'Choices'),
('email', 'Email')
)
APPLICATION_STATES = (
('submitted', 'Application submitted'),
('accepted', 'Application accepted'),
('rejected', 'Application rejected'),
('waitlisted', 'Application on waiting list'),
('declined', 'Applicant declined'),
)
RSVP_WAITING = 'waiting'
RSVP_YES = 'yes'
RSVP_NO = 'no'
RSVP_STATUSES = (
(RSVP_WAITING, 'RSVP: Waiting for response'),
(RSVP_YES, 'RSVP: Confirmed attendance'),
(RSVP_NO, 'RSVP: Rejected invitation')
)
RSVP_LINKS = ['[rsvp-url-yes]', '[rsvp-url-no]']
@python_2_unicode_compatible
class Form(models.Model):
event = models.OneToOneField(Event, null=False)
text_header = models.CharField(
max_length=255, default="Apply for a spot at Django Girls [City]!")
text_description = models.TextField(
default="Yay! We're so excited you want to be a part of our "
"workshop. Please mind that filling out the form below does "
"not give you a place on the workshop, but a chance to get "
"one. The application process is open from {INSERT DATE} "
"until {INSERT DATE}. If you're curious about the criteria "
"we use to choose applicants, you can read about it on "
"<a href='http://blog.djangogirls.org/post/91067112853/"
"djangogirls-how-we-scored-applications'>Django Girls "
"blog</a>. Good luck!")
confirmation_mail = models.TextField(
default="Hi there!"
"This is a confirmation of your application to <a href=\"http://djangogirls.org/{city}\">Django Girls {CITY}</a>. "
"Yay! That's a huge step already, we're proud of you!\n\n"
"Mind that this is not a confirmation of participation in the event, but a confirmation that we received your application.\n\n"
"You'll receive an email from the team that organizes Django Girls {CITY} soon. "
"You can always reach them by answering to this email or by writing to {your event mail}.\n"
"For your reference, we're attaching your answers below.\n\n"
"Hugs, cupcakes and high-fives!\n"
"Django Girls",
help_text="Mail will be sent from your event mail.\nAlso the answers will be attached.")
open_from = models.DateTimeField(
null=True, verbose_name="Application process is open from")
open_until = models.DateTimeField(
null=True, verbose_name="Application process is open until")
def __str__(self):
return 'Application form for {}'.format(self.event.name)
def save(self, *args, **kwargs):
        is_form_new = not self.pk
super(Form, self).save(*args, **kwargs)
if is_form_new:
self.create_default_questions()
    def create_default_questions(self):
        for i, question in enumerate(DEFAULT_QUESTIONS, start=1):
            question['form'] = self
            question['order'] = i
            Question.objects.create(**question)
@property
def number_of_applications(self):
return self.application_set.count()
@property
def application_open(self):
if self.open_from and self.open_until:
return (self.open_from < timezone.now() < self.open_until)
return True
@python_2_unicode_compatible
class Question(models.Model):
form = models.ForeignKey(Form, null=False, blank=False)
title = models.TextField(verbose_name="Question")
help_text = models.TextField(
blank=True, default='', verbose_name="Additional help text to the question?")
question_type = models.CharField(
max_length=50,
choices=QUESTION_TYPES, verbose_name="Type of the question")
is_required = models.BooleanField(
default=True, verbose_name="Is the answer to the question required?")
choices = models.TextField(
blank=True, default='', verbose_name="List all available options, separated with semicolon (;)",
help_text="Used only with 'Choices' question type")
is_multiple_choice = models.BooleanField(
default=False, verbose_name="Are there multiple choices allowed?",
help_text="Used only with 'Choices' question type")
order = models.PositiveIntegerField(
null=False, blank=False, help_text="Position of the question")
class Meta:
ordering = ['order']
def __str__(self):
return self.title
def get_choices_as_list(self):
if self.question_type != 'choices':
raise TypeError(
"You can only get choices for fields that have"
" question_type == choices."
)
return self.choices.split(';')
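    # Illustrative example (hypothetical values): for a 'choices' question with
    # choices == "Python;Django;Git", get_choices_as_list() returns
    # ['Python', 'Django', 'Git']; calling it on any other question_type raises
    # TypeError.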
@python_2_unicode_compatible
class Application(models.Model):
form = models.ForeignKey(Form, null=False, blank=False)
number = models.PositiveIntegerField(default=1, blank=True)
created = models.DateTimeField(auto_now_add=True)
state = models.CharField(
max_length=50,
choices=APPLICATION_STATES, verbose_name="State of the application",
null=True,
default='submitted'
)
email = models.EmailField(null=True, blank=True)
newsletter_optin = models.BooleanField(default=False)
rsvp_status = models.CharField(
max_length=50,
choices=RSVP_STATUSES, verbose_name="RSVP status",
default=RSVP_WAITING
)
rsvp_yes_code = models.CharField(max_length=24, null=True, blank=True)
rsvp_no_code = models.CharField(max_length=24, null=True, blank=True)
class Meta:
unique_together = ("form", "email")
def save(self, *args, **kwargs):
if self.pk is None:
current_max = Application.objects.filter(form=self.form).aggregate(models.Max('number'))['number__max']
self.number = (current_max or 0) + 1
super(Application, self).save(*args, **kwargs)
@property
def average_score(self):
"""
Return the average score for this Application.
"""
scores = [s.score for s in self.scores.all() if (s.score and s.score > 0)]
if not scores:
return 0
else:
return sum(scores) / float(len(scores))
def variance(self):
data = [s.score for s in self.scores.all() if s.score]
n = len(data)
if n == 0:
return 0
c = sum(data) / float(len(data))
if n < 2:
return 0
ss = sum((x-c)**2 for x in data)
ss -= sum((x-c) for x in data)**2/len(data)
assert not ss < 0, 'negative sum of square deviations: %f' % ss
return ss / (n-1)
def stdev(self):
return self.variance() ** 0.5
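    # Worked example (hypothetical scores; zero and empty scores are skipped as
    # in the code above): for scores [3.0, 4.0, 5.0], average_score is 4.0,
    # variance() is 1.0 (sample variance, n-1 denominator) and stdev() is 1.0.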
def generate_code(self):
return ''.join([random.choice(string.ascii_letters + string.digits) for i in range(24)])
def get_rsvp_yes_code(self):
if not self.rsvp_yes_code:
self.rsvp_yes_code = self.generate_code()
self.save()
return self.rsvp_yes_code
def get_rsvp_no_code(self):
if not self.rsvp_no_code:
self.rsvp_no_code = self.generate_code()
self.save()
return self.rsvp_no_code
    @classmethod
    def get_by_rsvp_code(cls, code, event):
        """ Returns application and RSVP status or None """
        try:
            application = cls.objects.get(rsvp_yes_code=code, form__event=event)
            return application, RSVP_YES
        except cls.DoesNotExist:
            try:
                application = cls.objects.get(rsvp_no_code=code, form__event=event)
                return application, RSVP_NO
            except cls.DoesNotExist:
                return None, None
@property
def is_accepted(self):
return self.state == 'accepted'
    def is_scored_by_user(self, user):
        """
        Return True if the given user has given this application a non-zero
        score, and False if they have not scored it (or only scored it zero).
        """
        return self.scores.filter(user=user, score__gt=0).exists()
def __str__(self):
return str(self.pk)
class Answer(models.Model):
application = models.ForeignKey(Application, null=False, blank=False)
question = models.ForeignKey(Question, null=False, blank=False)
answer = models.TextField()
class Meta:
ordering = ('question__order',)
class Score(models.Model):
"""
A score represents the score given by a coach for an application.
"""
user = models.ForeignKey(User, related_name='scores')
application = models.ForeignKey(Application, related_name='scores')
score = models.FloatField(
help_text='5 being the most positive, 1 being the most negative.',
validators=[MaxValueValidator(5), MinValueValidator(0)],
default=0
)
comment = models.TextField(
null=True, blank=True, help_text='Any extra comments?')
class Meta:
unique_together = ('user', 'application',)
@python_2_unicode_compatible
class Email(models.Model):
form = models.ForeignKey(Form)
author = models.ForeignKey(User, related_name="author")
subject = models.CharField(max_length=255)
text = models.TextField(
verbose_name="Content of the email",
help_text="You can use HTML syntax in this message. Preview on the right."
)
recipients_group = models.CharField(
max_length=50, choices=APPLICATION_STATES+RSVP_STATUSES,
verbose_name="Recipients",
help_text="Only people assigned to chosen group will receive this email."
)
number_of_recipients = models.IntegerField(default=0, null=True)
successfuly_sent = models.TextField(null=True, blank=True)
failed_to_sent = models.TextField(null=True, blank=True)
sent_from = models.EmailField()
created = models.DateTimeField(auto_now_add=True)
sent = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.subject
def get_rsvp_link(self, code):
return 'http://djangogirls.org/{}/rsvp/{}'.format(self.form.event.page_url, code)
def add_rsvp_links(self, body, application):
body = body.replace('[rsvp-url-yes]', self.get_rsvp_link(application.get_rsvp_yes_code()))
body = body.replace('[rsvp-url-no]', self.get_rsvp_link(application.get_rsvp_no_code()))
return body
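    # Illustrative substitution (hypothetical values): with page_url 'berlin'
    # and a generated code 'abc123', the placeholder '[rsvp-url-yes]' in the
    # body becomes 'http://djangogirls.org/berlin/rsvp/abc123'.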
def get_applications(self):
application_states = [x[0] for x in APPLICATION_STATES]
rsvp_statuses = [x[0] for x in RSVP_STATUSES]
if self.recipients_group in application_states:
return Application.objects.filter(form=self.form, state=self.recipients_group)
elif self.recipients_group in rsvp_statuses:
return Application.objects.filter(form=self.form, state='accepted', rsvp_status=self.recipients_group)
else:
return Application.objects.none()
def send(self):
recipients = self.get_applications()
self.number_of_recipients = recipients.count()
self.sent_from = (
self.form.event.email or
'{}@djangogirls.org'.format(self.form.event.page_url))
successfuly_sent = []
failed_to_sent = []
for recipient in recipients:
if recipient.email:
body = self.text.replace('\n', '<br />')
for rsvp_link in RSVP_LINKS:
if rsvp_link in body:
body = self.add_rsvp_links(body, recipient)
break
msg = EmailMessage(self.subject, body, self.sent_from, [recipient.email])
msg.content_subtype = "html"
try:
msg.send()
successfuly_sent.append(recipient.email)
                except Exception:
failed_to_sent.append(recipient.email)
self.sent = timezone.now()
self.successfuly_sent = ', '.join(successfuly_sent)
self.failed_to_sent = ', '.join(failed_to_sent)
self.save()
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex,
ToHex,
hash256,
hex_str_to_bytes,
ser_uint256,
sha256,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
from io import BytesIO
MAX_BLOCK_SIGOPS = 20000
# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1515840634
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def create_block(hashprev, coinbase, ntime=None, *, version=1):
"""Create a block (with regtest difficulty)."""
block = CBlock()
block.nVersion = version
if ntime is None:
import time
block.nTime = int(time.time() + 600)
else:
block.nTime = ntime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
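# A minimal usage sketch (illustrative; prev_hash_hex and the height are
# placeholders, and solve() is assumed to be provided by CBlock in messages.py):
#     coinbase = create_coinbase(height=1)
#     block = create_block(int(prev_hash_hex, 16), coinbase, ntime=TIME_GENESIS_BLOCK + 1)
#     block.solve()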
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
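# Usage sketch (illustrative): after appending witness transactions to a block,
# call add_witness_commitment(block) and then re-solve it (e.g. block.solve(),
# assumed to be provided by CBlock in messages.py) before submitting it.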
def script_BIP34_coinbase_height(height):
if height <= 16:
res = CScriptOp.encode_op_n(height)
# Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
return CScript([res, OP_1])
return CScript([CScriptNum(height)])
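# Illustrative encodings (per the branches above): for height <= 16 the height
# becomes a single small-integer opcode padded with OP_1, e.g.
# CScript([CScriptOp.encode_op_n(5), OP_1]); larger heights use a number push,
# e.g. CScript([CScriptNum(500)]).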
def create_coinbase(height, pubkey=None):
"""Create a coinbase transaction, assuming no miner fees.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output."""
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 80 * COIN
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey is not None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
coinbase.calc_sha256()
return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert n < len(prevtx.vout)
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
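# Chaining sketch (illustrative; `block` and the amount are placeholders):
#     tx = create_tx_with_script(block.vtx[0], 0, amount=1 * COIN)
# spends the first output of that block's coinbase into an anyone-can-spend
# output, since no script_pub_key is given.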
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx)))
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
rawtx = node.createrawtransaction(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
signresult = node.signrawtransactionwithwallet(rawtx)
assert_equal(signresult["complete"], True)
return signresult['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
"""Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return pkscript.hex()
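# Illustrative output (hypothetical pubkey hex): witness_script(False, pubkey)
# returns the hex of a P2WPKH script (OP_0 <20-byte key hash>), while
# witness_script(True, pubkey) returns the hex of a P2WSH script
# (OP_0 <32-byte hash> of a 1-of-1 CHECKMULTISIG witness program).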
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransactionwithwallet(tx_to_witness)
assert "errors" not in signed or len(["errors"]) == 0
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = FromHex(CTransaction(), tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = ToHex(tx)
return node.sendrawtransaction(tx_to_witness)
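# End-to-end sketch (illustrative; node, utxo and pubkey are placeholders and
# the node's wallet must be able to sign the input):
#     txid = send_to_witness(False, node, utxo, pubkey, True, 1)
# sends 1 BTC from the utxo to a P2SH-wrapped P2WPKH output for that pubkey.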
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import glob
import os
import re
import sys
import tarfile
import tempfile
import time
from contextlib import closing
from optparse import OptionParser
from xml.dom import minidom
os.environ["PATH"] += os.pathsep + "/var/lib/ambari-agent"
sys.path.append("/usr/lib/ambari-server/lib")
from ambari_server.serverClassPath import JDBC_DRIVER_PATH_PROPERTY
from ambari_server.serverConfiguration import get_value_from_properties, get_ambari_properties
from resource_management.core import File
from resource_management.core import shell
from resource_management.core.environment import Environment
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import Execute
from resource_management.core.source import StaticFile
from resource_management.libraries import ConfigDictionary
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.oozie_prepare_war import prepare_war
from resource_management.libraries.resources.hdfs_resource import HdfsResource
DEFAULT_SQL_DRIVER_PATH = "/var/lib/ambari-server/resources/sqljdbc41.jar"
with Environment() as env:
def get_stack_version():
if not options.hdp_version:
      # On Ubuntu, subprocess32 output may include "stdin: is not a tty".
tmpfile = tempfile.NamedTemporaryFile()
out = None
with open(tmpfile.name, 'r+') as file:
get_stack_version_cmd = '/usr/bin/hdp-select status %s > %s' % ('hadoop-mapreduce-historyserver', tmpfile.name)
code, stdoutdata = shell.call(get_stack_version_cmd)
out = file.read()
pass
if code != 0 or out is None:
Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
(get_stack_version_cmd, str(code), str(out)))
return 1
matches = re.findall(r"([\d\.]+\-\d+)", out)
stack_version = matches[0] if matches and len(matches) > 0 else None
if not stack_version:
Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
return 1
else:
stack_version = options.hdp_version
return stack_version
parser = OptionParser()
parser.add_option("-d", "--database-driver", dest="sql_driver_path",
default=get_value_from_properties(get_ambari_properties(), JDBC_DRIVER_PATH_PROPERTY, DEFAULT_SQL_DRIVER_PATH),
help="Path to JDBC driver")
parser.add_option("-f", "--fs-type", dest="fs_type", default="wasb",
help="Expected protocol of fs.defaultFS")
parser.add_option("-v", "--hdp-version", dest="hdp_version", default="",
help="hdp-version used in path of tarballs")
parser.add_option("-u", "--upgrade", dest="upgrade", action="store_true",
help="flag to indicate script is being run for upgrade", default=False)
(options, args) = parser.parse_args()
if not os.path.exists(options.sql_driver_path):
Logger.error("SQL driver file {} does not exist".format(options.sql_driver_path))
if os.path.exists(DEFAULT_SQL_DRIVER_PATH):
Logger.warning("Fallback to SQL driver {}".format(DEFAULT_SQL_DRIVER_PATH))
options.sql_driver_path = DEFAULT_SQL_DRIVER_PATH
else:
sys.exit(1)
Logger.info("Using SQL driver from {}".format(options.sql_driver_path))
sql_driver_filename = os.path.basename(options.sql_driver_path)
# See if hdfs path prefix is provided on the command line. If yes, use that value, if no
# use empty string as default.
hdfs_path_prefix = ""
if len(args) > 0:
hdfs_path_prefix = args[0]
stack_version = get_stack_version()
def getPropertyValueFromConfigXMLFile(xmlfile, name, defaultValue=None):
xmldoc = minidom.parse(xmlfile)
propNodes = [node.parentNode for node in xmldoc.getElementsByTagName("name") if node.childNodes[0].nodeValue == name]
if len(propNodes) > 0:
for node in propNodes[-1].childNodes:
if node.nodeName == "value":
if len(node.childNodes) > 0:
return node.childNodes[0].nodeValue
else:
return defaultValue
return defaultValue
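  # Illustrative lookup (same file and property as used below):
  #   getPropertyValueFromConfigXMLFile("/etc/hadoop/conf/core-site.xml", "fs.defaultFS")
  # returns the property's <value> text, or the supplied defaultValue when the
  # property is absent from the file.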
def get_fs_root(fsdefaultName=None):
fsdefaultName = "fake"
expected_fs_protocol = options.fs_type + '://'
while True:
fsdefaultName = getPropertyValueFromConfigXMLFile("/etc/hadoop/conf/core-site.xml", "fs.defaultFS")
if fsdefaultName and fsdefaultName.startswith(expected_fs_protocol):
break
Logger.info("Waiting to read appropriate value of fs.defaultFS from /etc/hadoop/conf/core-site.xml ...")
time.sleep(10)
Logger.info("Returning fs.defaultFS -> " + fsdefaultName)
return fsdefaultName
# These values must be the suffix of the properties in cluster-env.xml
TAR_SOURCE_SUFFIX = "_tar_source"
TAR_DESTINATION_FOLDER_SUFFIX = "_tar_destination_folder"
class params:
hdfs_path_prefix = hdfs_path_prefix
hdfs_user = "hdfs"
mapred_user ="mapred"
hadoop_bin_dir="/usr/hdp/" + stack_version + "/hadoop/bin"
hadoop_conf_dir = "/etc/hadoop/conf"
user_group = "hadoop"
security_enabled = False
oozie_user = "oozie"
execute_path = "/usr/hdp/" + stack_version + "/hadoop/bin"
ambari_libs_dir = "/var/lib/ambari-agent/lib"
hdfs_site = ConfigDictionary({'dfs.webhdfs.enabled':False,
})
fs_default = get_fs_root()
slider_home_dir = '/usr/hdp/' + stack_version + '/slider'
slider_lib_dir = slider_home_dir + '/lib'
slider_tarball = slider_lib_dir + "/slider.tar.gz"
oozie_secure = ''
oozie_home="/usr/hdp/" + stack_version + "/oozie"
oozie_setup_sh=format("/usr/hdp/" + stack_version + "/oozie/bin/oozie-setup.sh")
oozie_setup_sh_current="/usr/hdp/current/oozie-server/bin/oozie-setup.sh"
oozie_tmp_dir = "/var/tmp/oozie"
oozie_libext_dir = "/usr/hdp/" + stack_version + "/oozie/libext"
oozie_env_sh_template = \
'''
#!/bin/bash
export OOZIE_CONFIG=${{OOZIE_CONFIG:-/usr/hdp/{0}/oozie/conf}}
export OOZIE_DATA=${{OOZIE_DATA:-/var/lib/oozie/data}}
export OOZIE_LOG=${{OOZIE_LOG:-/var/log/oozie}}
export CATALINA_BASE=${{CATALINA_BASE:-/usr/hdp/{0}/oozie/oozie-server}}
export CATALINA_TMPDIR=${{CATALINA_TMPDIR:-/var/tmp/oozie}}
export CATALINA_PID=${{CATALINA_PID:-/var/run/oozie/oozie.pid}}
export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
'''.format(stack_version)
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
security_enabled = False,
keytab = None,
kinit_path_local = None,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = None,
hdfs_site = hdfs_site,
default_fs = fs_default,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
)
def _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed):
"""
:param source_and_dest_pairs: List of tuples (x, y), where x is the source file in the local file system,
and y is the destination file path in HDFS
:param file_owner: Owner to set for the file copied to HDFS (typically hdfs account)
:param group_owner: Owning group to set for the file copied to HDFS (typically hadoop group)
:param kinit_if_needed: kinit command if it is needed, otherwise an empty string
:return: Returns 0 if at least one file was copied and no exceptions occurred, and 1 otherwise.
Must kinit before calling this function.
"""
for (source, destination) in source_and_dest_pairs:
params.HdfsResource(destination,
action="create_on_execute",
type = 'file',
mode=0444,
owner=file_owner,
group=group_owner,
source=source,
)
def copy_tarballs_to_hdfs(source, dest, stack_select_component_name, component_user, file_owner, group_owner):
"""
  :param source: Path to the tarball or jar on the local file system
  :param dest: Destination folder in HDFS; may contain the {{ stack_version_formatted }} placeholder
:param stack_select_component_name: Component name to get the status to determine the version
:param component_user: User that will execute the Hadoop commands
:param file_owner: Owner of the files copied to HDFS (typically hdfs account)
:param group_owner: Group owner of the files copied to HDFS (typically hadoop group)
:return: Returns 0 on success, 1 if no files were copied, and in some cases may raise an exception.
  In order to call this function, the params class defined above must provide
  security_enabled, hdfs_user, hadoop_bin_dir, hadoop_conf_dir, and HdfsResource as a partial
  function (plus kinit_path_local, hdfs_user_keytab and hdfs_principal_name when security is enabled).
"""
component_tar_source_file, component_tar_destination_folder = source, dest
if not os.path.exists(component_tar_source_file):
Logger.warning("Could not find file: %s" % str(component_tar_source_file))
return 1
file_name = os.path.basename(component_tar_source_file)
destination_file = os.path.join(component_tar_destination_folder, file_name)
destination_file = destination_file.replace("{{ stack_version_formatted }}", stack_version)
kinit_if_needed = ""
if params.security_enabled:
kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
if kinit_if_needed:
Execute(kinit_if_needed,
user=component_user,
path='/bin'
)
source_and_dest_pairs = [(component_tar_source_file, destination_file), ]
return _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed)
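
# Illustrative helper (not invoked by this script): shows how the destination path templating
# used above resolves. The stack version value is an assumed example.
def _example_destination_path():
  example_stack_version = '2.6.5.0-292'
  destination = '/hdp/apps/{{ stack_version_formatted }}/mapreduce/mapreduce.tar.gz'
  return destination.replace('{{ stack_version_formatted }}', example_stack_version)
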
def createHdfsResources():
Logger.info("Creating hdfs directories...")
params.HdfsResource(format('{hdfs_path_prefix}/atshistory'), user='hdfs', change_permissions_for_parents=True, owner='yarn', group='hadoop', type='directory', action= ['create_on_execute'], mode=0755)
params.HdfsResource(format('{hdfs_path_prefix}/user/hcat'), owner='hcat', type='directory', action=['create_on_execute'], mode=0755)
params.HdfsResource(format('{hdfs_path_prefix}/hive/warehouse'), owner='hive', type='directory', action=['create_on_execute'], mode=0777)
params.HdfsResource(format('{hdfs_path_prefix}/user/hive'), owner='hive', type='directory', action=['create_on_execute'], mode=0755)
params.HdfsResource(format('{hdfs_path_prefix}/tmp'), mode=0777, action=['create_on_execute'], type='directory', owner='hdfs')
params.HdfsResource(format('{hdfs_path_prefix}/user/ambari-qa'), type='directory', action=['create_on_execute'], mode=0770)
params.HdfsResource(format('{hdfs_path_prefix}/user/oozie'), owner='oozie', type='directory', action=['create_on_execute'], mode=0775)
params.HdfsResource(format('{hdfs_path_prefix}/app-logs'), recursive_chmod=True, owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
params.HdfsResource(format('{hdfs_path_prefix}/tmp/entity-file-history/active'), owner='yarn', group='hadoop', type='directory', action=['create_on_execute'])
params.HdfsResource(format('{hdfs_path_prefix}/mapred'), owner='mapred', type='directory', action=['create_on_execute'])
params.HdfsResource(format('{hdfs_path_prefix}/mapred/system'), owner='hdfs', type='directory', action=['create_on_execute'])
params.HdfsResource(format('{hdfs_path_prefix}/mr-history/done'), change_permissions_for_parents=True, owner='mapred', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
params.HdfsResource(format('{hdfs_path_prefix}/atshistory/done'), owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=0700)
params.HdfsResource(format('{hdfs_path_prefix}/atshistory/active'), owner='yarn', group='hadoop', type='directory', action=['create_on_execute'], mode=01777)
params.HdfsResource(format('{hdfs_path_prefix}/ams/hbase'), owner='ams', type='directory', action=['create_on_execute'], mode=0775)
params.HdfsResource(format('{hdfs_path_prefix}/amshbase/staging'), owner='ams', type='directory', action=['create_on_execute'], mode=0711)
params.HdfsResource(format('{hdfs_path_prefix}/user/ams/hbase'), owner='ams', type='directory', action=['create_on_execute'], mode=0775)
params.HdfsResource(format('{hdfs_path_prefix}/hdp'), owner='hdfs', type='directory', action=['create_on_execute'], mode=0755)
params.HdfsResource(format('{hdfs_path_prefix}/user/spark'), owner='spark', group='hadoop', type='directory', action=['create_on_execute'], mode=0775)
params.HdfsResource(format('{hdfs_path_prefix}/user/livy'), owner='livy', group='hadoop', type='directory', action=['create_on_execute'], mode=0775)
params.HdfsResource(format('{hdfs_path_prefix}/hdp/spark-events'), owner='spark', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
params.HdfsResource(format('{hdfs_path_prefix}/hdp/spark2-events'), owner='spark', group='hadoop', type='directory', action=['create_on_execute'], mode=0777)
params.HdfsResource(format('{hdfs_path_prefix}/hbase'), owner='hbase', type='directory', action=['create_on_execute'])
params.HdfsResource(format('{hdfs_path_prefix}/apps/hbase/staging'), owner='hbase', type='directory', action=['create_on_execute'], mode=0711)
params.HdfsResource(format('{hdfs_path_prefix}/user/hbase'), owner='hbase', type='directory', action=['create_on_execute'], mode=0755)
params.HdfsResource(format('{hdfs_path_prefix}/apps/zeppelin'), owner='zeppelin', group='hadoop', type='directory', action=['create_on_execute'])
params.HdfsResource(format('{hdfs_path_prefix}/user/zeppelin'), owner='zeppelin', group='hadoop', type='directory', action=['create_on_execute'])
params.HdfsResource(format('{hdfs_path_prefix}/user/zeppelin/test'), owner='zeppelin', group='hadoop', type='directory', action=['create_on_execute'])
def copy_zeppelin_dependencies_to_hdfs(file_pattern):
spark_deps_full_path = glob.glob(file_pattern)
if spark_deps_full_path and os.path.exists(spark_deps_full_path[0]):
copy_tarballs_to_hdfs(spark_deps_full_path[0], hdfs_path_prefix+'/apps/zeppelin/', 'hadoop-mapreduce-historyserver', params.hdfs_user, 'zeppelin', 'zeppelin')
else:
Logger.info('zeppelin-spark-dependencies not found at %s.' % file_pattern)
def putCreatedHdfsResourcesToIgnore(env):
if not 'hdfs_files' in env.config:
Logger.info("Not creating .hdfs_resource_ignore as no resources to use.")
return
file_content = ""
for file in env.config['hdfs_files']:
if not file['target'].startswith(hdfs_path_prefix):
raise Exception("Something created outside hdfs_path_prefix!")
file_content += file['target'][len(hdfs_path_prefix):]
file_content += "\n"
with open("/var/lib/ambari-agent/data/.hdfs_resource_ignore", "a+") as fp:
fp.write(file_content)
def putSQLDriverToOozieShared():
params.HdfsResource(hdfs_path_prefix + '/user/oozie/share/lib/sqoop/{0}'.format(sql_driver_filename),
owner='hdfs', type='file', action=['create_on_execute'], mode=0644, source=options.sql_driver_path)
def recreate_slider_tarball():
"""
Re-create tarball to include extra jars, which were put into slider lib dir.
"""
Logger.info(format("Re-creating {slider_tarball}"))
with closing(tarfile.open(params.slider_tarball, "w:gz")) as tar:
for filepath in glob.glob(format("{slider_lib_dir}/*.jar")):
tar.add(os.path.realpath(filepath), arcname=os.path.basename(filepath))
env.set_params(params)
hadoop_conf_dir = params.hadoop_conf_dir
Directory('/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir',
owner=params.hdfs_user,
group=params.user_group,
mode=01777
)
Directory('/var/log/hadoop',
create_parents = True,
owner='root',
group=params.user_group,
mode=0775,
cd_access='a',
)
Directory('/var/run/hadoop',
create_parents = True,
owner='root',
group='root',
cd_access='a',
)
Directory('/tmp/hadoop-hdfs',
create_parents = True,
owner=params.hdfs_user,
cd_access='a',
)
Directory('/tmp/hbase-hbase',
owner='hbase',
mode=0775,
create_parents = True,
cd_access="a",
)
oozie_libext_dir = params.oozie_libext_dir
oozie_home=params.oozie_home
configure_cmds = []
configure_cmds.append(('tar','-xvf', oozie_home + '/oozie-sharelib.tar.gz','-C', oozie_home))
configure_cmds.append(('cp', "/usr/share/HDP-oozie/ext-2.2.zip", options.sql_driver_path, oozie_libext_dir))
configure_cmds.append(('chown', 'oozie:hadoop', oozie_libext_dir + "/ext-2.2.zip", oozie_libext_dir + "/" + sql_driver_filename))
no_op_test = "ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1"
File("/etc/oozie/conf/oozie-env.sh",
owner=params.oozie_user,
content=params.oozie_env_sh_template
)
hashcode_file = format("{oozie_home}/.hashcode")
skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")
Execute( configure_cmds,
not_if = format("{no_op_test} || {skip_recreate_sharelib}"),
sudo = True,
)
File(hashcode_file,
mode = 0644,
)
prepare_war(params)
oozie_shared_lib = format("/usr/hdp/{stack_version}/oozie/share")
oozie_user = 'oozie'
oozie_hdfs_user_dir = format("{hdfs_path_prefix}/user/{oozie_user}")
kinit_if_needed = ''
recreate_slider_tarball()
if options.upgrade:
Logger.info("Skipping uploading oozie shared lib during upgrade")
else:
params.HdfsResource(format("{oozie_hdfs_user_dir}/share/"),
action="delete_on_execute",
type = 'directory'
)
spark_client_dir = format("/usr/hdp/{stack_version}/spark")
if os.path.exists(spark_client_dir):
try:
# Rename /usr/hdp/{stack_version}/oozie/share/lib/spark to spark-orig
if not os.path.exists(format("{oozie_shared_lib}/lib/spark-orig")):
Execute(("mv",
format("{oozie_shared_lib}/lib/spark"),
format("{oozie_shared_lib}/lib/spark-orig")),
sudo=True)
# Create /usr/hdp/{stack_version}/oozie/share/lib/spark
if not os.path.exists(format("{oozie_shared_lib}/lib/spark")):
Execute(('mkdir', format('{oozie_shared_lib}/lib/spark')),
sudo=True)
# Copy oozie-sharelib-spark from /usr/hdp/{stack_version}/oozie/share/lib/spark-orig to spark
Execute(format("cp -f {oozie_shared_lib}/lib/spark-orig/oozie-sharelib-spark*.jar {oozie_shared_lib}/lib/spark"))
# Copy /usr/hdp/{stack_version}/spark-client/*.jar except spark-examples*.jar
Execute(format("cp -P {spark_client_dir}/lib/*.jar {oozie_shared_lib}/lib/spark"))
Execute(format("find {oozie_shared_lib}/lib/spark/ -type l -delete"))
try:
Execute(format("rm -f {oozie_shared_lib}/lib/spark/spark-examples*.jar"))
except:
Logger.warning("No spark-examples jar files found in Spark client lib.")
# Copy /usr/hdp/{stack_version}/spark-client/python/lib/*.zip & *.jar to /usr/hdp/{stack_version}/oozie/share/lib/spark
Execute(format("cp -f {spark_client_dir}/python/lib/*.zip {oozie_shared_lib}/lib/spark"))
try:
Execute(format("cp -f {spark_client_dir}/python/lib/*.jar {oozie_shared_lib}/lib/spark"))
except:
Logger.warning("No jar files found in Spark client python lib.")
Execute(("chmod", "-R", "0755", format('{oozie_shared_lib}/lib/spark')),
sudo=True)
      # Skipping this step since it might cause issues for automated scripts that rely on hdfs://user/oozie/share/lib
# Rename /usr/hdp/{stack_version}/oozie/share/lib to lib_ts
# millis = int(round(time.time() * 1000))
# Execute(("mv",
# format("{oozie_shared_lib}/lib"),
# format("{oozie_shared_lib}/lib_{millis}")),
# sudo=True)
except Exception, e:
Logger.warning('Exception occurred while preparing oozie share lib: '+ repr(e))
params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
action="create_on_execute",
type = 'directory',
mode=0755,
recursive_chmod = True,
owner=oozie_user,
source = oozie_shared_lib,
)
Logger.info("Copying tarballs...")
# TODO, these shouldn't hardcode the stack root or destination stack name.
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hadoop/mapreduce.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/tez/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/tez/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hive/hive.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/hive/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
# Needed by Hive Interactive
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/tez_hive2/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/tez_hive2/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/pig/pig.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hadoop-mapreduce/hadoop-streaming.jar"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/sqoop/sqoop.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/slider/lib/slider.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/slider/", 'hadoop-mapreduce-historyserver', params.hdfs_user, params.hdfs_user, params.user_group)
createHdfsResources()
copy_zeppelin_dependencies_to_hdfs(format("/usr/hdp/{stack_version}/zeppelin/interpreter/spark/dep/zeppelin-spark-dependencies*.jar"))
putSQLDriverToOozieShared()
putCreatedHdfsResourcesToIgnore(env)
# jar shouldn't be used before (read comment below)
File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
mode=0644,
content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
)
# Create everything in a single fast-hdfs-resource.jar call (this is fast).
# (Note: everything above must have been queued with action="create_on_execute"/"delete_on_execute"
# for this time optimization to work.)
try:
params.HdfsResource(None,
logoutput=True,
action="execute"
)
except:
os.remove("/var/lib/ambari-agent/data/.hdfs_resource_ignore")
raise
Logger.info("Completed tarball copy.")
if not options.upgrade:
Logger.info("Executing stack-selector-tool for stack {0} ...".format(stack_version))
Execute(
('/usr/bin/hdp-select', 'set', 'all', stack_version),
sudo = True
)
Logger.info("Ambari preupload script completed.")
|
|
# -*- coding: utf-8 -*-
"""
molvs.charge
~~~~~~~~~~~~
This module implements tools for manipulating charges on molecules. In particular, :class:`~molvs.charge.Reionizer`,
which competitively reionizes acids such that the strongest acids ionize first, and :class:`~molvs.charge.Uncharger`,
which attempts to neutralize ionized acids and bases on a molecule.
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
import copy
import logging
from rdkit import Chem
from .utils import memoized_property
log = logging.getLogger(__name__)
class AcidBasePair(object):
"""An acid and its conjugate base, defined by SMARTS.
A strength-ordered list of AcidBasePairs can be used to ensure the strongest acids in a molecule ionize first.
"""
def __init__(self, name, acid, base):
"""Initialize an AcidBasePair with the following parameters:
:param string name: A name for this AcidBasePair.
:param string acid: SMARTS pattern for the protonated acid.
:param string base: SMARTS pattern for the conjugate ionized base.
"""
log.debug('Initializing AcidBasePair: %s', name)
self.name = name
self.acid_str = acid
self.base_str = base
@memoized_property
def acid(self):
log.debug('Loading AcidBasePair acid: %s', self.name)
return Chem.MolFromSmarts(self.acid_str)
@memoized_property
def base(self):
log.debug('Loading AcidBasePair base: %s', self.name)
return Chem.MolFromSmarts(self.base_str)
def __repr__(self):
return 'AcidBasePair({!r}, {!r}, {!r})'.format(self.name, self.acid_str, self.base_str)
def __str__(self):
return self.name
#: The default list of AcidBasePairs, sorted from strongest to weakest. This list is derived from the Food and Drug
#: Administration Substance Registration System Standard Operating Procedure guide.
ACID_BASE_PAIRS = (
AcidBasePair('-OSO3H', 'OS(=O)(=O)[OH]', 'OS(=O)(=O)[O-]'),
AcidBasePair('-SO3H', '[!O]S(=O)(=O)[OH]', '[!O]S(=O)(=O)[O-]'),
AcidBasePair('-OSO2H', 'O[SD3](=O)[OH]', 'O[SD3](=O)[O-]'),
AcidBasePair('-SO2H', '[!O][SD3](=O)[OH]', '[!O][SD3](=O)[O-]'),
AcidBasePair('-OPO3H2', 'OP(=O)([OH])[OH]', 'OP(=O)([OH])[O-]'),
AcidBasePair('-PO3H2', '[!O]P(=O)([OH])[OH]', '[!O]P(=O)([OH])[O-]'),
AcidBasePair('-CO2H', 'C(=O)[OH]', 'C(=O)[O-]'),
AcidBasePair('thiophenol', 'c[SH]', 'c[S-]'),
AcidBasePair('(-OPO3H)-', 'OP(=O)([O-])[OH]', 'OP(=O)([O-])[O-]'),
AcidBasePair('(-PO3H)-', '[!O]P(=O)([O-])[OH]', '[!O]P(=O)([O-])[O-]'),
AcidBasePair('phthalimide', 'O=C2c1ccccc1C(=O)[NH]2', 'O=C2c1ccccc1C(=O)[N-]2'),
AcidBasePair('CO3H (peracetyl)', 'C(=O)O[OH]', 'C(=O)O[O-]'),
AcidBasePair('alpha-carbon-hydrogen-nitro group', 'O=N(O)[CH]', 'O=N(O)[C-]'),
AcidBasePair('-SO2NH2', 'S(=O)(=O)[NH2]', 'S(=O)(=O)[NH-]'),
AcidBasePair('-OBO2H2', 'OB([OH])[OH]', 'OB([OH])[O-]'),
AcidBasePair('-BO2H2', '[!O]B([OH])[OH]', '[!O]B([OH])[O-]'),
AcidBasePair('phenol', 'c[OH]', 'c[O-]'),
AcidBasePair('SH (aliphatic)', 'C[SH]', 'C[S-]'),
AcidBasePair('(-OBO2H)-', 'OB([O-])[OH]', 'OB([O-])[O-]'),
AcidBasePair('(-BO2H)-', '[!O]B([O-])[OH]', '[!O]B([O-])[O-]'),
AcidBasePair('cyclopentadiene', 'C1=CC=C[CH2]1', 'c1ccc[cH-]1'),
AcidBasePair('-CONH2', 'C(=O)[NH2]', 'C(=O)[NH-]'),
AcidBasePair('imidazole', 'c1cnc[nH]1', 'c1cnc[n-]1'),
AcidBasePair('-OH (aliphatic alcohol)', '[CX4][OH]', '[CX4][O-]'),
AcidBasePair('alpha-carbon-hydrogen-keto group', 'O=C([!O])[C!H0+0]', 'O=C([!O])[C-]'),
AcidBasePair('alpha-carbon-hydrogen-acetyl ester group', 'OC(=O)[C!H0+0]', 'OC(=O)[C-]'),
AcidBasePair('sp carbon hydrogen', 'C#[CH]', 'C#[C-]'),
AcidBasePair('alpha-carbon-hydrogen-sulfone group', 'CS(=O)(=O)[C!H0+0]', 'CS(=O)(=O)[C-]'),
AcidBasePair('alpha-carbon-hydrogen-sulfoxide group', 'C[SD3](=O)[C!H0+0]', 'C[SD3](=O)[C-]'),
AcidBasePair('-NH2', '[CX4][NH2]', '[CX4][NH-]'),
AcidBasePair('benzyl hydrogen', 'c[CX4H2]', 'c[CX3H-]'),
AcidBasePair('sp2-carbon hydrogen', '[CX3]=[CX3!H0+0]', '[CX3]=[CX2-]'),
AcidBasePair('sp3-carbon hydrogen', '[CX4!H0+0]', '[CX3-]'),
)
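
# Minimal usage sketch (illustrative only, not part of the molvs API): scan the strength-ordered
# ACID_BASE_PAIRS for the first protonated acid present in a molecule, mirroring what
# Reionizer._strongest_protonated does below.
def _example_strongest_protonated_acid(smiles='OC(=O)CCS'):
    mol = Chem.MolFromSmiles(smiles)
    for position, pair in enumerate(ACID_BASE_PAIRS):
        if mol.HasSubstructMatch(pair.acid):
            # The first match is the strongest acid present, e.g. -CO2H for this example molecule.
            return position, pair.name
    return None, None
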
class ChargeCorrection(object):
"""An atom that should have a certain charge applied, defined by a SMARTS pattern."""
def __init__(self, name, smarts, charge):
"""Initialize a ChargeCorrection with the following parameters:
        :param string name: A name for this ChargeCorrection.
:param string smarts: SMARTS pattern to match. Charge is applied to the first atom.
:param int charge: The charge to apply.
"""
log.debug('Initializing ChargeCorrection: %s', name)
self.name = name
self.smarts_str = smarts
self.charge = charge
@memoized_property
def smarts(self):
log.debug('Loading ChargeCorrection smarts: %s', self.name)
return Chem.MolFromSmarts(self.smarts_str)
def __repr__(self):
return 'ChargeCorrection({!r}, {!r}, {!r})'.format(self.name, self.smarts_str, self.charge)
def __str__(self):
return self.name
#: The default list of ChargeCorrections.
CHARGE_CORRECTIONS = (
ChargeCorrection('[Li,Na,K]', '[Li,Na,K;X0+0]', 1),
ChargeCorrection('[Mg,Ca]', '[Mg,Ca;X0+0]', 2),
ChargeCorrection('[Cl]', '[Cl;X0+0]', -1),
# TODO: Extend to other incorrectly charged atoms
)
class Reionizer(object):
"""A class to fix charges and reionize a molecule such that the strongest acids ionize first."""
def __init__(self, acid_base_pairs=ACID_BASE_PAIRS, charge_corrections=CHARGE_CORRECTIONS):
"""Initialize a Reionizer with the following parameter:
:param acid_base_pairs: A list of :class:`AcidBasePairs <molvs.charge.AcidBasePair>` to reionize, sorted from
strongest to weakest.
:param charge_corrections: A list of :class:`ChargeCorrections <molvs.charge.ChargeCorrection>`.
"""
log.debug('Initializing Reionizer')
self.acid_base_pairs = acid_base_pairs
self.charge_corrections = charge_corrections
def __call__(self, mol):
"""Calling a Reionizer instance like a function is the same as calling its reionize(mol) method."""
return self.reionize(mol)
def reionize(self, mol):
"""Enforce charges on certain atoms, then perform competitive reionization.
First, charge corrections are applied to ensure, for example, that free metals are correctly ionized. Then, if
a molecule with multiple acid groups is partially ionized, ensure the strongest acids ionize first.
The algorithm works as follows:
- Use SMARTS to find the strongest protonated acid and the weakest ionized acid.
- If the ionized acid is weaker than the protonated acid, swap proton and repeat.
:param mol: The molecule to reionize.
:type mol: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
:return: The reionized molecule.
:rtype: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
"""
log.debug('Running Reionizer')
start_charge = Chem.GetFormalCharge(mol)
# Apply forced charge corrections
for cc in self.charge_corrections:
for match in mol.GetSubstructMatches(cc.smarts):
atom = mol.GetAtomWithIdx(match[0])
log.info('Applying charge correction %s (%s %+d)',
cc.name, atom.GetSymbol(), cc.charge)
atom.SetFormalCharge(cc.charge)
current_charge = Chem.GetFormalCharge(mol)
charge_diff = Chem.GetFormalCharge(mol) - start_charge
# If molecule is now neutral, assume everything is now fixed
# But otherwise, if charge has become more positive, look for additional protonated acid groups to ionize
if not current_charge == 0:
while charge_diff > 0:
ppos, poccur = self._strongest_protonated(mol)
if ppos is None:
break
log.info('Ionizing %s to balance previous charge corrections',
self.acid_base_pairs[ppos].name)
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
if patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# else:
patom.UpdatePropertyCache()
charge_diff -= 1
already_moved = set()
while True:
ppos, poccur = self._strongest_protonated(mol)
ipos, ioccur = self._weakest_ionized(mol)
if ioccur and poccur and ppos < ipos:
if poccur[-1] == ioccur[-1]:
# Bad! H wouldn't be moved, resulting in infinite loop.
log.warning('Aborted reionization due to unexpected situation')
break
key = tuple(sorted([poccur[-1], ioccur[-1]]))
if key in already_moved:
log.warning(
'Aborting reionization to avoid infinite loop due to it being ambiguous where to put a Hydrogen')
break
already_moved.add(key)
log.info('Moved proton from %s to %s',
self.acid_base_pairs[ppos].name, self.acid_base_pairs[ipos].name)
# Remove hydrogen from strongest protonated
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
# If no implicit Hs to autoremove, and at least 1 explicit H to remove, reduce explicit count by 1
if patom.GetNumImplicitHs() == 0 and patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# TODO: Remove any chiral label on patom?
patom.UpdatePropertyCache()
# Add hydrogen to weakest ionized
iatom = mol.GetAtomWithIdx(ioccur[-1])
iatom.SetFormalCharge(iatom.GetFormalCharge() + 1)
# Increase explicit H count if no implicit, or aromatic N or P, or non default valence state
if (iatom.GetNoImplicit() or
((patom.GetAtomicNum() == 7 or patom.GetAtomicNum() == 15) and patom.GetIsAromatic()) or
iatom.GetTotalValence() not in list(Chem.GetPeriodicTable().GetValenceList(iatom.GetAtomicNum()))):
iatom.SetNumExplicitHs(iatom.GetNumExplicitHs() + 1)
iatom.UpdatePropertyCache()
else:
break
# TODO: Canonical ionization position if multiple equivalent positions?
Chem.SanitizeMol(mol)
return mol
def _strongest_protonated(self, mol):
for position, pair in enumerate(self.acid_base_pairs):
for occurrence in mol.GetSubstructMatches(pair.acid):
return position, occurrence
return None, None
def _weakest_ionized(self, mol):
for position, pair in enumerate(reversed(self.acid_base_pairs)):
for occurrence in mol.GetSubstructMatches(pair.base):
return len(self.acid_base_pairs) - position - 1, occurrence
return None, None
class Uncharger(object):
"""Class for neutralizing ionized acids and bases.
This class uncharges molecules by adding and/or removing hydrogens. For zwitterions, hydrogens are moved to
eliminate charges where possible. However, in cases where there is a positive charge that is not neutralizable, an
attempt is made to also preserve the corresponding negative charge.
The method is derived from the neutralise module in `Francis Atkinson's standardiser tool
<https://github.com/flatkinson/standardiser>`_, which is released under the Apache License v2.0.
"""
def __init__(self):
log.debug('Initializing Uncharger')
#: Neutralizable positive charge (with hydrogens attached)
self._pos_h = Chem.MolFromSmarts('[+!H0!$(*~[-])]')
#: Non-neutralizable positive charge (no hydrogens attached)
self._pos_quat = Chem.MolFromSmarts('[+H0!$(*~[-])]')
#: Negative charge, not bonded to a positive charge with no hydrogens
self._neg = Chem.MolFromSmarts('[-!$(*~[+H0])]')
#: Negative oxygen bonded to [C,P,S]=O, negative aromatic nitrogen?
self._neg_acid = Chem.MolFromSmarts('[$([O-][C,P,S]=O),$([n-]1nnnc1),$(n1[n-]nnc1)]')
def __call__(self, mol):
"""Calling an Uncharger instance like a function is the same as calling its uncharge(mol) method."""
return self.uncharge(mol)
def uncharge(self, mol):
"""Neutralize molecule by adding/removing hydrogens. Attempts to preserve zwitterions.
:param mol: The molecule to uncharge.
:type mol: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
:return: The uncharged molecule.
:rtype: :rdkit:`Mol <Chem.rdchem.Mol-class.html>`
"""
log.debug('Running Uncharger')
mol = copy.deepcopy(mol)
# Get atom ids for matches
p = [x[0] for x in mol.GetSubstructMatches(self._pos_h)]
q = [x[0] for x in mol.GetSubstructMatches(self._pos_quat)]
n = [x[0] for x in mol.GetSubstructMatches(self._neg)]
a = [x[0] for x in mol.GetSubstructMatches(self._neg_acid)]
# Neutralize negative charges
if q:
            # Number of negative charges in excess of the non-neutralizable positive charges
neg_surplus = len(n) - len(q)
if a and neg_surplus > 0:
# zwitterion with more negative charges than quaternary positive centres
while neg_surplus > 0 and a:
# Add hydrogen to first negative acid atom, increase formal charge
# Until quaternary positive == negative total or no more negative acid
atom = mol.GetAtomWithIdx(a.pop(0))
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
neg_surplus -= 1
log.info('Removed negative charge')
else:
                # No non-neutralizable positive charges: neutralize all negative charges by adding hydrogens
for atom in [mol.GetAtomWithIdx(x) for x in n]:
while atom.GetFormalCharge() < 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
log.info('Removed negative charge')
# Neutralize positive charges
for atom in [mol.GetAtomWithIdx(x) for x in p]:
# Remove hydrogen and reduce formal change until neutral or no more hydrogens
while atom.GetFormalCharge() > 0 and atom.GetNumExplicitHs() > 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() - 1)
atom.SetFormalCharge(atom.GetFormalCharge() - 1)
log.info('Removed positive charge')
return mol
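
# Usage sketch (illustrative only, not part of the module): reionize a molecule where the weaker
# acid (phenol) is ionized while the stronger acid (-CO2H) is protonated, and neutralize a simple
# ammonium cation. The SMILES below are example inputs.
def _example_reionize_and_uncharge():
    # A proton is expected to move from the carboxylic acid to the phenolate,
    # because -CO2H is ranked stronger than phenol in ACID_BASE_PAIRS.
    reionized = Reionizer()(Chem.MolFromSmiles('OC(=O)c1ccccc1[O-]'))
    # Methylammonium has a neutralizable positive charge (hydrogens attached),
    # so Uncharger removes a hydrogen along with the charge.
    uncharged = Uncharger()(Chem.MolFromSmiles('C[NH3+]'))
    return Chem.MolToSmiles(reionized), Chem.MolToSmiles(uncharged)
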
|
|
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# Copyright (c) 2008-2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import sys
import types
import codecs
import base64
import binascii
import supybot.utils as utils
from supybot.commands import *
import supybot.utils.minisix as minisix
import supybot.plugins as plugins
import supybot.commands as commands
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('String')
import multiprocessing
class String(callbacks.Plugin):
"""Provides useful commands for manipulating characters and strings."""
@internationalizeDocstring
def ord(self, irc, msg, args, letter):
"""<letter>
Returns the 8-bit value of <letter>.
"""
irc.reply(str(ord(letter)))
ord = wrap(ord, ['letter'])
@internationalizeDocstring
def chr(self, irc, msg, args, i):
"""<number>
Returns the character associated with the 8-bit value <number>
"""
try:
irc.reply(chr(i))
except ValueError:
irc.error(_('That number doesn\'t map to an 8-bit character.'))
chr = wrap(chr, ['int'])
@internationalizeDocstring
def encode(self, irc, msg, args, encoding, text):
"""<encoding> <text>
Returns an encoded form of the given text; the valid encodings are
available in the documentation of the Python codecs module:
<http://docs.python.org/library/codecs.html#standard-encodings>.
"""
# Binary codecs are prefixed with _codec in Python 3
        if encoding in 'base64 bz2 hex quopri uu zlib'.split():
encoding += '_codec'
if encoding.endswith('_codec'):
text = text.encode()
# Do the encoding
try:
encoder = codecs.getencoder(encoding)
except LookupError:
irc.errorInvalid(_('encoding'), encoding)
text = encoder(text)[0]
# If this is a binary codec, re-encode it with base64
if encoding.endswith('_codec') and encoding != 'base64_codec':
text = codecs.getencoder('base64_codec')(text)[0].decode()
# Change result into a string
if minisix.PY2 and isinstance(text, unicode):
text = text.encode('utf-8')
elif minisix.PY3 and isinstance(text, bytes):
text = text.decode()
if encoding in ('base64', 'base64_codec'):
text = text.replace('\n', '')
# Reply
irc.reply(text.rstrip('\n'))
encode = wrap(encode, ['something', 'text'])
@internationalizeDocstring
def decode(self, irc, msg, args, encoding, text):
"""<encoding> <text>
Returns an un-encoded form of the given text; the valid encodings are
available in the documentation of the Python codecs module:
<http://docs.python.org/library/codecs.html#standard-encodings>.
"""
# Binary codecs are prefixed with _codec in Python 3
        if encoding in 'base64 bz2 hex quopri uu zlib'.split():
encoding += '_codec'
# If this is a binary codec, pre-decode it with base64
if encoding.endswith('_codec') and encoding != 'base64_codec':
text = codecs.getdecoder('base64_codec')(text.encode())[0]
# Do the decoding
try:
decoder = codecs.getdecoder(encoding)
except LookupError:
irc.errorInvalid(_('encoding'), encoding)
if minisix.PY3 and not isinstance(text, bytes):
text = text.encode()
try:
text = decoder(text)[0]
except binascii.Error:
irc.errorInvalid(_('base64 string'),
s=_('Base64 strings must be a multiple of 4 in '
'length, padded with \'=\' if necessary.'))
return
# Change result into a string
if minisix.PY2 and isinstance(text, unicode):
text = text.encode('utf-8')
elif minisix.PY3 and isinstance(text, bytes):
try:
text = text.decode()
except UnicodeDecodeError:
pass
# Reply
irc.reply(text)
decode = wrap(decode, ['something', 'text'])
@internationalizeDocstring
def levenshtein(self, irc, msg, args, s1, s2):
"""<string1> <string2>
        Returns the levenshtein distance (also known as the "edit distance")
        between <string1> and <string2>.
"""
max = self.registryValue('levenshtein.max')
if len(s1) > max or len(s2) > max:
irc.error(_('Levenshtein distance is a complicated algorithm, try '
'it with some smaller inputs.'))
else:
irc.reply(str(utils.str.distance(s1, s2)))
levenshtein = thread(wrap(levenshtein, ['something', 'text']))
@internationalizeDocstring
def soundex(self, irc, msg, args, text, length):
"""<string> [<length>]
Returns the Soundex hash to a given length. The length defaults to
4, since that's the standard length for a soundex hash. For unlimited
length, use 0. Maximum length 1024.
"""
if length > 1024:
irc.error("Maximum allowed length is 1024.")
return
irc.reply(utils.str.soundex(text, length))
soundex = wrap(soundex, ['somethingWithoutSpaces', additional('int', 4)])
@internationalizeDocstring
def len(self, irc, msg, args, text):
"""<text>
Returns the length of <text>.
"""
irc.reply(str(len(text)))
len = wrap(len, ['text'])
@internationalizeDocstring
def re(self, irc, msg, args, f, text):
"""<regexp> <text>
If <regexp> is of the form m/regexp/flags, returns the portion of
<text> that matches the regexp. If <regexp> is of the form
s/regexp/replacement/flags, returns the result of applying such a
regexp to <text>.
"""
if f('') and len(f(' ')) > len(f(''))+1: # Matches the empty string.
s = _('You probably don\'t want to match the empty string.')
irc.error(s)
else:
t = self.registryValue('re.timeout')
try:
v = process(f, text, timeout=t, pn=self.name(), cn='re')
if isinstance(v, list):
v = format('%L', v)
irc.reply(v)
except commands.ProcessTimeoutError as e:
irc.error("ProcessTimeoutError: %s" % (e,))
except re.error as e:
irc.error(e.args[0])
re = thread(wrap(re, [first('regexpMatcherMany', 'regexpReplacer'),
'text']))
def xor(self, irc, msg, args, password, text):
"""<password> <text>
Returns <text> XOR-encrypted with <password>. See
http://www.yoe.org/developer/xor.html for information about XOR
encryption.
"""
chars = utils.iter.cycle(password)
ret = [chr(ord(c) ^ ord(next(chars))) for c in text]
irc.reply(''.join(ret))
xor = wrap(xor, ['something', 'text'])
@internationalizeDocstring
def md5(self, irc, msg, args, text):
"""<text>
Returns the md5 hash of a given string. Read
http://www.rsasecurity.com/rsalabs/faq/3-6-6.html for more information
about md5.
"""
irc.reply(utils.crypt.md5(text.encode('utf8')).hexdigest())
md5 = wrap(md5, ['text'])
@internationalizeDocstring
def sha(self, irc, msg, args, text):
"""<text>
Returns the SHA hash of a given string. Read
http://www.secure-hash-algorithm-md5-sha-1.co.uk/ for more information
about SHA.
"""
irc.reply(utils.crypt.sha(text.encode('utf8')).hexdigest())
sha = wrap(sha, ['text'])
Class = String
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
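
# Standalone sketch (not used by the plugin): the xor command above relies on XOR with a
# repeating password being symmetric, so applying the same operation twice recovers the text.
def _xor_roundtrip_demo(password='key', text='secret'):
    import itertools
    chars = itertools.cycle(password)
    encrypted = ''.join(chr(ord(c) ^ ord(next(chars))) for c in text)
    chars = itertools.cycle(password)
    decrypted = ''.join(chr(ord(c) ^ ord(next(chars))) for c in encrypted)
    return decrypted == text
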
|
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace-by-fee
#
import os
import sys
# Add python-bitcoinlib to module search path, prior to any system-wide
# python-bitcoinlib.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinlib"))
import unittest
import bitcoin
bitcoin.SelectParams('regtest')
import bitcoin.rpc
from bitcoin.core import *
from bitcoin.core.script import *
from bitcoin.wallet import *
MAX_REPLACEMENT_LIMIT = 100
class Test_ReplaceByFee(unittest.TestCase):
proxy = None
@classmethod
def setUpClass(cls):
if cls.proxy is None:
cls.proxy = bitcoin.rpc.Proxy()
@classmethod
def mine_mempool(cls):
"""Mine until mempool is empty"""
mempool_size = 1
while mempool_size:
cls.proxy.call('generate', 1)
new_mempool_size = len(cls.proxy.getrawmempool())
# It's possible to get stuck in a loop here if the mempool has
# transactions that can't be mined.
assert(new_mempool_size != mempool_size)
mempool_size = new_mempool_size
@classmethod
def tearDownClass(cls):
# Make sure mining works
cls.mine_mempool()
def make_txout(self, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while self.proxy.getbalance() < amount + fee:
self.proxy.call('generate', 100)
addr = P2SHBitcoinAddress.from_redeemScript(CScript([]))
txid = self.proxy.sendtoaddress(addr, amount + fee)
tx1 = self.proxy.getrawtransaction(txid)
i = None
for i, txout in enumerate(tx1.vout):
if txout.scriptPubKey == addr.to_scriptPubKey():
break
assert i is not None
tx2 = CTransaction([CTxIn(COutPoint(txid, i), CScript([1, CScript([])]), nSequence=0)],
[CTxOut(amount, scriptPubKey)])
tx2_txid = self.proxy.sendrawtransaction(tx2, True)
# If requested, ensure txouts are confirmed.
if confirmed:
self.mine_mempool()
return COutPoint(tx2_txid, 0)
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = self.make_txout(1.1*COIN)
tx1a = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(1*COIN, CScript([b'a']))])
tx1a_txid = self.proxy.sendrawtransaction(tx1a, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(1*COIN, CScript([b'b']))])
try:
tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26) # insufficient fee
else:
self.fail()
# Extra 0.1 BTC fee
tx1b = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(0.9*COIN, CScript([b'b']))])
tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
# tx1a is in fact replaced
with self.assertRaises(IndexError):
self.proxy.getrawtransaction(tx1a_txid)
self.assertEqual(tx1b, self.proxy.getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = self.make_txout(initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction([CTxIn(prevout, nSequence=0)],
[CTxOut(remaining_value, CScript([1]))])
txid = self.proxy.sendrawtransaction(tx, True)
chain_txids.append(txid)
prevout = COutPoint(txid, 0)
        # Whether the double-spend is allowed is evaluated by including all
        # child fees (40 BTC in total), so this attempt, which pays only 30 BTC, is rejected.
dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(initial_nValue - 30*COIN, CScript([1]))])
try:
self.proxy.sendrawtransaction(dbl_tx, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26) # insufficient fee
else:
self.fail()
# Accepted with sufficient fee
dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(1*COIN, CScript([1]))])
self.proxy.sendrawtransaction(dbl_tx, True)
for doublespent_txid in chain_txids:
with self.assertRaises(IndexError):
self.proxy.getrawtransaction(doublespent_txid)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = self.make_txout(initial_nValue)
def branch(prevout, initial_value, max_txs, *, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction([CTxIn(prevout, nSequence=0)],
vout)
self.assertTrue(len(tx.serialize()) < 100000)
txid = self.proxy.sendrawtransaction(tx, True)
yield tx
_total_txs[0] += 1
for i, txout in enumerate(tx.vout):
yield from branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs)
fee = 0.0001*COIN
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
self.assertEqual(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(initial_nValue - fee*n, CScript([1]))])
try:
self.proxy.sendrawtransaction(dbl_tx, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26) # insufficient fee
else:
self.fail()
# 1 BTC fee is enough
dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))])
self.proxy.sendrawtransaction(dbl_tx, True)
for tx in tree_txs:
with self.assertRaises(IndexError):
self.proxy.getrawtransaction(tx.GetHash())
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT, MAX_REPLACEMENT_LIMIT*2):
fee = 0.0001*COIN
tx0_outpoint = self.make_txout(initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
self.assertEqual(len(tree_txs), n)
dbl_tx = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(initial_nValue - fee*n, CScript([1]))])
try:
self.proxy.sendrawtransaction(dbl_tx, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26)
else:
self.fail()
for tx in tree_txs:
self.proxy.getrawtransaction(tx.GetHash())
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = self.make_txout(1.1*COIN)
tx1a = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(1*COIN, CScript([b'a']))])
tx1a_txid = self.proxy.sendrawtransaction(tx1a, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction([CTxIn(tx0_outpoint, nSequence=0)],
[CTxOut(0.001*COIN,
CScript([b'a'*999000]))])
try:
tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26) # insufficient fee
else:
self.fail()
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = self.make_txout(1.2*COIN)
utxo2 = self.make_txout(3.0*COIN)
tx1a = CTransaction([CTxIn(utxo1, nSequence=0)],
[CTxOut(1.1*COIN, CScript([b'a']))])
tx1a_txid = self.proxy.sendrawtransaction(tx1a, True)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction([CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)],
tx1a.vout)
try:
tx2_txid = self.proxy.sendrawtransaction(tx2, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26)
else:
self.fail()
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction([CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)],
[CTxOut(1.0*COIN, CScript([b'a']))])
tx1b_txid = self.proxy.sendrawtransaction(tx1b, True)
tx2 = CTransaction([CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))],
tx1a.vout)
try:
tx2_txid = self.proxy.sendrawtransaction(tx2, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26)
else:
self.fail()
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = self.make_txout(1.1*COIN)
unconfirmed_utxo = self.make_txout(0.1*COIN, False)
tx1 = CTransaction([CTxIn(confirmed_utxo)],
[CTxOut(1.0*COIN, CScript([b'a']))])
tx1_txid = self.proxy.sendrawtransaction(tx1, True)
tx2 = CTransaction([CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)],
tx1.vout)
try:
tx2_txid = self.proxy.sendrawtransaction(tx2, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26)
else:
self.fail()
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = self.make_txout(initial_nValue)
fee = 0.0001*COIN
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction([CTxIn(utxo, nSequence=0)], outputs)
txid = self.proxy.sendrawtransaction(splitting_tx, True)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction([CTxIn(COutPoint(txid, i), nSequence=0)],
[CTxOut(split_value-fee, CScript([b'a']))])
self.proxy.sendrawtransaction(tx_i, True)
# Now create doublespend of the whole lot, should fail
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction(inputs, [CTxOut(double_spend_value, CScript([b'a']))])
try:
self.proxy.sendrawtransaction(double_tx, True)
except bitcoin.rpc.JSONRPCException as exp:
self.assertEqual(exp.error['code'], -26)
self.assertEqual("too many potential replacements" in exp.error['message'], True)
else:
self.fail()
# If we remove an input, it should pass
double_tx = CTransaction(inputs[0:-1],
[CTxOut(double_spend_value, CScript([b'a']))])
self.proxy.sendrawtransaction(double_tx, True)
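
# Back-of-the-envelope sketch (illustrative, not used by the tests): the fee arithmetic behind
# test_simple_doublespend above. tx1a spends a 1.1 BTC outpoint and pays 0.1 BTC in fees; the
# accepted replacement tx1b keeps the same input but outputs only 0.9 BTC, i.e. it pays an
# extra 0.1 BTC over the transaction it replaces.
def _example_replacement_fee_arithmetic():
    funding = 1.1 * COIN
    fee_tx1a = funding - 1.0 * COIN   # 0.1 BTC
    fee_tx1b = funding - 0.9 * COIN   # 0.2 BTC
    return fee_tx1b - fee_tx1a        # 0.1 BTC additional fee paid by the replacement
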
if __name__ == '__main__':
unittest.main()
|
|
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of non-prod utility functions for placement.
All the dependencies in this files should be non-prod.
"""
import datetime
import re
import textwrap
from typing import Dict, Iterator, List, Optional, Text, Tuple
from absl import logging
from circuit_training.environment import plc_client
import numpy as np
# Internal gfile dependencies
def nodes_of_types(plc: plc_client.PlacementCost,
type_list: List[Text]) -> Iterator[int]:
"""Yields the index of a node of certain types."""
i = 0
while True:
node_type = plc.get_node_type(i)
if not node_type:
break
if node_type in type_list:
yield i
i += 1
def extract_attribute_from_comments(attribute: Text,
filenames: List[Text]) -> Optional[Text]:
"""Parses the files' comments section, tries to extract the attribute.
Args:
    attribute: attribute to look for (case sensitive).
filenames: List of protobuf file or a plc file.
Returns:
Attribute name string, or None if not found.
"""
for filename in filenames:
if filename:
f = filename.split(',')[0]
if f:
with open(f, 'rt') as infile:
for line in infile:
if line.startswith('#'):
match = re.search(fr'{attribute} : ([-\w]+)', line)
if match:
return match.group(1)
else:
# Do not parse the rest of the file, since all the comments are at
# the top.
break
return None
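
# Tiny parsing sketch (illustrative, not used elsewhere in this module): a comment line in the
# format handled above, e.g. "# Block : ariane", yields the attribute value via the same regex.
def _example_extract_block_attribute(line: Text = '# Block : ariane') -> Optional[Text]:
  match = re.search(r'Block : ([-\w]+)', line)
  return match.group(1) if match else None
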
def get_blockages_from_comments(
filenames: List[Text]) -> Optional[List[List[float]]]:
"""Returns list of blockages if they exist in the file's comments section."""
for filename in filenames:
if not filename:
continue
blockages = []
# Read the first file if filename is comma separated list.
# Expected blockage info line format is:
# "# Blockage : <float> <float> <float> <float> <float>"
# where first four float numbers correspond to minx, miny, maxx, maxy of
# the rectangular region, the fifth one is the blockage rate. It's usually
# set to 1.
try:
with open(filename, 'rt') as infile:
for line in infile:
if line.startswith('# Blockage : '):
blockages.append([float(x) for x in line.split()[3:8]])
elif not line.startswith('#'):
break
except OSError:
logging.error('could not read file %s.', filename)
if blockages:
return blockages
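
# Tiny parsing sketch (illustrative, not used elsewhere in this module): one blockage comment
# line in the documented format maps to [minx, miny, maxx, maxy, blockage_rate]. Values are examples.
def _example_parse_blockage_line(
    line: Text = '# Blockage : 0.0 0.0 10.5 20.0 1') -> List[float]:
  return [float(x) for x in line.split()[3:8]]
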
def extract_sizes_from_comments(
filenames: List[Text]) -> Optional[Tuple[float, float, int, int]]:
"""Parses the file's comments section, tries to extract canvas/grid sizes.
Args:
filenames: A list of netlist (.pb.txt) or placement (.plc) files.
Returns:
Tuple of canvas_width, canvas_height, grid_cols, grid_rows
"""
for filename in filenames:
if not filename:
continue
canvas_width, canvas_height = None, None
grid_cols, grid_rows = None, None
with open(filename, 'rt') as infile:
for line in infile:
if line.startswith('#'):
fp_re = re.search(
r'FP bbox: \{([\d\.]+) ([\d\.]+)\} \{([\d\.]+) ([\d\.]+)\}', line)
if fp_re:
canvas_width = float(fp_re.group(3))
canvas_height = float(fp_re.group(4))
continue
plc_wh = re.search(r'Width : ([\d\.]+) Height : ([\d\.]+)', line)
if plc_wh:
canvas_width = float(plc_wh.group(1))
canvas_height = float(plc_wh.group(2))
continue
plc_cr = re.search(r'Columns : ([\d]+) Rows : ([\d]+)', line)
if plc_cr:
grid_cols = int(plc_cr.group(1))
grid_rows = int(plc_cr.group(2))
else:
# Do not parse the rest of the file, since all the comments are at the
# top.
break
if canvas_width and canvas_height and grid_cols and grid_rows:
return canvas_width, canvas_height, grid_cols, grid_rows
def fix_port_coordinates(plc: plc_client.PlacementCost):
"""Find all ports and fix their coordinates.
Args:
plc: the placement cost object.
"""
for node in nodes_of_types(plc, ['PORT']):
plc.fix_node_coord(node)
# The routing capacities are calculated based on the public information about
# 7nm technology (https://en.wikichip.org/wiki/7_nm_lithography_process)
# with an arbitrary, yet reasonable, assumption of 18% of the tracks for
# the power grids.
def create_placement_cost(
netlist_file: Text,
init_placement: Optional[Text] = None,
overlap_threshold: float = 4e-3,
congestion_smooth_range: int = 2,
# TODO(b/211039937): Increase macro spacing to 3-5um, after matching the
# performance for Ariane.
macro_macro_x_spacing: float = 0.1,
macro_macro_y_spacing: float = 0.1,
boundary_check: bool = False,
horizontal_routes_per_micron: float = 70.33,
vertical_routes_per_micron: float = 74.51,
macro_horizontal_routing_allocation: float = 51.79,
macro_vertical_routing_allocation: float = 51.79,
) -> plc_client.PlacementCost:
"""Creates a placement_cost object.
Args:
netlist_file: Path to the netlist proto text file.
    init_placement: Path to the initial placement .plc file.
overlap_threshold: Used for macro overlap detection.
congestion_smooth_range: Smoothing factor used for congestion estimation.
      Congestion is distributed to this many neighboring columns/rows.
macro_macro_x_spacing: Macro-to-macro x spacing in microns.
macro_macro_y_spacing: Macro-to-macro y spacing in microns.
boundary_check: Do a boundary check during node placement.
    horizontal_routes_per_micron: Horizontal route capacity per micron.
    vertical_routes_per_micron: Vertical route capacity per micron.
macro_horizontal_routing_allocation: Macro horizontal routing allocation.
macro_vertical_routing_allocation: Macro vertical routing allocation.
Returns:
A PlacementCost object.
"""
if not netlist_file:
raise ValueError('netlist_file should be provided.')
block_name = extract_attribute_from_comments('Block',
[init_placement, netlist_file])
if not block_name:
logging.warning(
'block_name is not set. '
'Please add the block_name in:\n%s\nor in:\n%s', netlist_file,
init_placement)
plc = plc_client.PlacementCost(
netlist_file,
macro_macro_x_spacing,
macro_macro_y_spacing)
blockages = get_blockages_from_comments([netlist_file, init_placement])
if blockages:
for blockage in blockages:
plc.create_blockage(*blockage)
sizes = extract_sizes_from_comments([netlist_file, init_placement])
if sizes:
canvas_width, canvas_height, grid_cols, grid_rows = sizes
if canvas_width and canvas_height and grid_cols and grid_rows:
plc.set_canvas_size(canvas_width, canvas_height)
plc.set_placement_grid(grid_cols, grid_rows)
plc.set_project_name('circuit_training')
plc.set_block_name(block_name or 'unset_block')
plc.set_routes_per_micron(horizontal_routes_per_micron,
vertical_routes_per_micron)
plc.set_macro_routing_allocation(macro_horizontal_routing_allocation,
macro_vertical_routing_allocation)
plc.set_congestion_smooth_range(congestion_smooth_range)
plc.set_overlap_threshold(overlap_threshold)
plc.set_canvas_boundary_check(boundary_check)
plc.make_soft_macros_square()
if init_placement:
plc.restore_placement(init_placement)
fix_port_coordinates(plc)
return plc
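
# Usage sketch (the paths below are placeholders, not real files): build a placement cost object
# from a netlist and an initial placement, then save it with a comment header via
# save_placement(), defined later in this file.
def _example_create_and_save(netlist: Text = '/tmp/netlist.pb.txt',
                             init_plc: Text = '/tmp/initial.plc',
                             out_plc: Text = '/tmp/out.plc') -> None:
  plc = create_placement_cost(netlist_file=netlist, init_placement=init_plc)
  save_placement(plc, out_plc, user_comments='example run')
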
def get_node_type_counts(plc: plc_client.PlacementCost) -> Dict[Text, int]:
"""Returns number of each type of nodes in the netlist.
Args:
plc: the placement cost object.
Returns:
Number of each type of node in a dict.
"""
counts = {
'MACRO': 0,
'STDCELL': 0,
'PORT': 0,
'MACRO_PIN': 0,
'SOFT_MACRO': 0,
'HARD_MACRO': 0,
'SOFT_MACRO_PIN': 0,
'HARD_MACRO_PIN': 0
}
for node_index in nodes_of_types(plc,
['MACRO', 'STDCELL', 'PORT', 'MACRO_PIN']):
node_type = plc.get_node_type(node_index)
counts[node_type] += 1
if node_type == 'MACRO':
if plc.is_node_soft_macro(node_index):
counts['SOFT_MACRO'] += 1
else:
counts['HARD_MACRO'] += 1
if node_type == 'MACRO_PIN':
ref_id = plc.get_ref_node_id(node_index)
if plc.is_node_soft_macro(ref_id):
counts['SOFT_MACRO_PIN'] += 1
else:
counts['HARD_MACRO_PIN'] += 1
return counts
def make_blockage_text(plc: plc_client.PlacementCost) -> Text:
ret = ''
for blockage in plc.get_blockages():
ret += 'Blockage : {}\n'.format(' '.join([str(b) for b in blockage]))
return ret
def save_placement(plc: plc_client.PlacementCost,
filename: Text,
user_comments: Text = '') -> None:
"""Saves the placement file with some information in the comments section."""
cols, rows = plc.get_grid_num_columns_rows()
width, height = plc.get_canvas_width_height()
hor_routes, ver_routes = plc.get_routes_per_micron()
hor_macro_alloc, ver_macro_alloc = plc.get_macro_routing_allocation()
smooth = plc.get_congestion_smooth_range()
info = textwrap.dedent("""\
Placement file for Circuit Training
Source input file(s) : {src_filename}
This file : {filename}
Date : {date}
Columns : {cols} Rows : {rows}
Width : {width:.3f} Height : {height:.3f}
Area : {area}
Wirelength : {wl:.3f}
Wirelength cost : {wlc:.4f}
Congestion cost : {cong:.4f}
Density cost : {density:.4f}
Project : {project}
Block : {block_name}
Routes per micron, hor : {hor_routes:.3f} ver : {ver_routes:.3f}
Routes used by macros, hor : {hor_macro_alloc:.3f} ver : {ver_macro_alloc:.3f}
Smoothing factor : {smooth}
Overlap threshold : {overlap_threshold}
""".format(
src_filename=plc.get_source_filename(),
filename=filename,
date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
cols=cols,
rows=rows,
width=width,
height=height,
area=plc.get_area(),
wl=plc.get_wirelength(),
wlc=plc.get_cost(),
cong=plc.get_congestion_cost(),
density=plc.get_density_cost(),
project=plc.get_project_name(),
block_name=plc.get_block_name(),
hor_routes=hor_routes,
ver_routes=ver_routes,
hor_macro_alloc=hor_macro_alloc,
ver_macro_alloc=ver_macro_alloc,
smooth=smooth,
overlap_threshold=plc.get_overlap_threshold()))
info += '\n' + make_blockage_text(plc) + '\n'
info += '\nCounts of node types:\n'
node_type_counts = get_node_type_counts(plc)
for node_type in sorted(node_type_counts):
info += '{:<15} : {:>9}\n'.format(node_type + 's',
node_type_counts[node_type])
if user_comments:
info += '\nUser comments:\n' + user_comments + '\n'
info += '\nnode_index x y orientation fixed'
return plc.save_placement(filename, info)
def fd_placement_schedule(plc: plc_client.PlacementCost,
num_steps: Tuple[int, ...] = (100, 100, 100),
io_factor: float = 1.0,
move_distance_factors: Tuple[float,
...] = (1.0, 1.0, 1.0),
attract_factor: Tuple[float,
...] = (100.0, 1.0e-3, 1.0e-5),
repel_factor: Tuple[float, ...] = (0.0, 1.0e6, 1.0e7),
use_current_loc: bool = False,
move_macros: bool = False) -> None:
"""A placement schedule that uses force directed method.
Args:
plc: The plc object.
num_steps: Number of steps of the force-directed algorithm during each call.
io_factor: I/O attract factor.
move_distance_factors: Maximum distance relative to canvas size that a node
can move in a single step of the force-directed algorithm.
attract_factor: The spring constants between two connected nodes in the
force-directed algorithm. The FD algorithm is called once per entry in this
list; repel_factor must have the same length.
repel_factor: The repellent factor for spreading the nodes to avoid
congestion in the force-directed algorithm.
use_current_loc: If true, use the current location as the initial location.
move_macros: If true, also move the macros.
"""
assert len(num_steps) == len(move_distance_factors)
assert len(num_steps) == len(repel_factor)
assert len(num_steps) == len(attract_factor)
canvas_size = max(plc.get_canvas_width_height())
max_move_distance = [
f * canvas_size / s for s, f in zip(num_steps, move_distance_factors)
]
move_stdcells = True
log_scale_conns = False
use_sizes = False
plc.optimize_stdcells(use_current_loc, move_stdcells, move_macros,
log_scale_conns, use_sizes, io_factor, num_steps,
max_move_distance, attract_factor, repel_factor)
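# Hedged example (editor addition, not part of the original module): a shorter,
# two-stage force-directed schedule. All factor tuples must have the same
# length as num_steps, per the assertions above; the values are illustrative.
def _example_short_fd_schedule(plc: plc_client.PlacementCost) -> None:
  fd_placement_schedule(plc,
                        num_steps=(50, 50),
                        move_distance_factors=(1.0, 1.0),
                        attract_factor=(100.0, 1.0e-3),
                        repel_factor=(0.0, 1.0e6),
                        move_macros=False)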
def get_ordered_node_indices(mode, plc, exclude_fixed_nodes=True):
"""Returns an ordering of node indices according to the specified mode.
Args:
mode: node ordering mode
plc: placement cost object
exclude_fixed_nodes: Whether fixed nodes should be excluded.
Returns:
Node indices sorted according to the mode.
"""
macro_indices = plc.get_macro_indices()
hard_macro_indices = [
m for m in macro_indices if not plc.is_node_soft_macro(m)
]
soft_macro_indices = [m for m in macro_indices if plc.is_node_soft_macro(m)]
def macro_area(idx):
w, h = plc.get_node_width_height(idx)
return w * h
if mode == 'descending_size_macro_first':
ordered_indices = (
sorted(hard_macro_indices, key=macro_area)[::-1] +
sorted(soft_macro_indices, key=macro_area)[::-1])
elif mode == 'random':
np.random.shuffle(macro_indices)
ordered_indices = macro_indices
elif mode == 'random_macro_first':
np.random.shuffle(hard_macro_indices)
ordered_indices = hard_macro_indices + soft_macro_indices
else:
raise ValueError('{} is an unsupported node placement mode.'.format(mode))
if exclude_fixed_nodes:
ordered_indices = [m for m in ordered_indices if not plc.is_node_fixed(m)]
return ordered_indices
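# Hedged usage sketch (editor addition): chaining the helpers in this module
# once a plc_client.PlacementCost object exists. The output file name is a
# hypothetical placeholder.
def _example_summarize_and_save(plc: plc_client.PlacementCost) -> None:
  counts = get_node_type_counts(plc)
  logging.info('hard macros: %d, soft macros: %d',
               counts['HARD_MACRO'], counts['SOFT_MACRO'])
  order = get_ordered_node_indices('descending_size_macro_first', plc)
  logging.info('first macro to place: %d', order[0] if order else -1)
  save_placement(plc, '/tmp/example_placement.plc', user_comments='fd warm start')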
|
|
import sys
def _print(*args):
for arg in args:
sys.stdout.write('%s '%arg)
sys.stdout.write('\n')
if sys.version_info[0] == 3:
PY_VER = '3.x'
from urllib.parse import quote_plus,parse_qsl
from urllib.request import urlopen,urlretrieve
from hashlib import sha1
new_sha = lambda s: sha1(bytes(s,'utf-8'))
else:
PY_VER = '2.x'
from urllib import quote_plus,urlretrieve
from urllib2 import urlopen, Request, URLError, HTTPError
from cgi import parse_qsl
try: # >= 2.5
from hashlib import sha1 as new_sha
except ImportError: # < 2.5
from sha import new as new_sha
def quote(s):
try:
return quote_plus(s.encode('utf-8'),'+.,:|/?&$=')
except:
return quote_plus(s,'+.,:|/?&$=')
def smart_str(s):
try:
s = quote(s)
except:
pass
if PY_VER == '2.x':
return unicode(s).encode('utf-8') # Py2K
return str(s)#.encode('utf-8') # Py3K
APIPARAMS = ('chxtc', 'chxt', 'chxp', 'chxs', 'chxr', 'chco', 'chtm', 'chld',
'chts', 'chtt', 'chxl', 'chd', 'chf', 'chg', 'chl', 'chm', 'chp', 'chs',
'cht', 'chls', 'chdlp', 'chds', 'chbh', 'chdl', 'choe', 'chst')
MARKERS = 'acdostvVhxrRbBDF'
TYPES = ('bvs', 'p3', 'qr', 'lc', 'p', 'bhg', 'pc', 's', 'r', 'rs', 'bvg', 't',
'v', 'lxy', 'bhs', 'gom', 'ls')
IMGATTRS = ('title','alt','align','border','height','width','ismap','longdesc',
'usemap','id','class','style','lang','xml:lang','onclick','ondblclick','onmousedown',
'onmouseup','onmouseover','onmousemove','onmouseout','onkeypress','onkeydown','onkeyup')
GEO = ('africa','asia','europe','middle_east','south_america','usa','world')
TTAGSATTRS = ('label','title','color','line','grid','bar','marker','fill','legend','axes',
'encoding','scale','size','type','dataset','img','map','bar_width_spacing',
'legend_pos','output_encoding','level_data')
APIURL = 'http://chart.apis.google.com/chart?'
COLOR_MAP = {
'aliceblue': 'F0F8FF',
'antiquewhite': 'FAEBD7',
'aqua': '00FFFF',
'aquamarine': '7FFFD4',
'azure': 'F0FFFF',
'beige': 'F5F5DC',
'bisque': 'FFE4C4',
'black': '000000',
'blanchedalmond': 'FFEBCD',
'blue': '0000FF',
'blueviolet': '8A2BE2',
'brown': 'A52A2A',
'burlywood': 'DEB887',
'cadetblue': '5F9EA0',
'chartreuse': '7FFF00',
'chocolate': 'D2691E',
'coral': 'FF7F50',
'cornflowerblue': '6495ED',
'cornsilk': 'FFF8DC',
'crimson': 'DC143C',
'cyan': '00FFFF',
'darkblue': '00008B',
'darkcyan': '008B8B',
'darkgoldenrod': 'B8860B',
'darkgray': 'A9A9A9',
'darkgreen': '006400',
'darkkhaki': 'BDB76B',
'darkmagenta': '8B008B',
'darkolivegreen': '556B2F',
'darkorange': 'FF8C00',
'darkorchid': '9932CC',
'darkred': '8B0000',
'darksalmon': 'E9967A',
'darkseagreen': '8FBC8F',
'darkslateblue': '483D8B',
'darkslategray': '2F4F4F',
'darkturquoise': '00CED1',
'darkviolet': '9400D3',
'deeppink': 'FF1493',
'deepskyblue': '00BFFF',
'dimgray': '696969',
'dodgerblue': '1E90FF',
'firebrick': 'B22222',
'floralwhite': 'FFFAF0',
'forestgreen': '228B22',
'fuchsia': 'FF00FF',
'gainsboro': 'DCDCDC',
'ghostwhite': 'F8F8FF',
'gold': 'FFD700',
'goldenrod': 'DAA520',
'gray': '808080',
'green': '008000',
'greenyellow': 'ADFF2F',
'honeydew': 'F0FFF0',
'hotpink': 'FF69B4',
'indianred': 'CD5C5C',
'indigo': '4B0082',
'ivory': 'FFFFF0',
'khaki': 'F0E68C',
'lavender': 'E6E6FA',
'lavenderblush': 'FFF0F5',
'lawngreen': '7CFC00',
'lemonchiffon': 'FFFACD',
'lightblue': 'ADD8E6',
'lightcoral': 'F08080',
'lightcyan': 'E0FFFF',
'lightgoldenrodyellow': 'FAFAD2',
'lightgrey': 'D3D3D3',
'lightgreen': '90EE90',
'lightpink': 'FFB6C1',
'lightsalmon': 'FFA07A',
'lightseagreen': '20B2AA',
'lightskyblue': '87CEFA',
'lightslategray': '778899',
'lightsteelblue': 'B0C4DE',
'lightyellow': 'FFFFE0',
'lime': '00FF00',
'limegreen': '32CD32',
'linen': 'FAF0E6',
'magenta': 'FF00FF',
'maroon': '800000',
'mediumaquamarine': '66CDAA',
'mediumblue': '0000CD',
'mediumorchid': 'BA55D3',
'mediumpurple': '9370D8',
'mediumseagreen': '3CB371',
'mediumslateblue': '7B68EE',
'mediumspringgreen': '00FA9A',
'mediumturquoise': '48D1CC',
'mediumvioletred': 'C71585',
'midnightblue': '191970',
'mintcream': 'F5FFFA',
'mistyrose': 'FFE4E1',
'moccasin': 'FFE4B5',
'navajowhite': 'FFDEAD',
'navy': '000080',
'oldlace': 'FDF5E6',
'olive': '808000',
'olivedrab': '6B8E23',
'orange': 'FFA500',
'orangered': 'FF4500',
'orchid': 'DA70D6',
'palegoldenrod': 'EEE8AA',
'palegreen': '98FB98',
'paleturquoise': 'AFEEEE',
'palevioletred': 'D87093',
'papayawhip': 'FFEFD5',
'peachpuff': 'FFDAB9',
'peru': 'CD853F',
'pink': 'FFC0CB',
'plum': 'DDA0DD',
'powderblue': 'B0E0E6',
'purple': '800080',
'red': 'FF0000',
'rosybrown': 'BC8F8F',
'royalblue': '4169E1',
'saddlebrown': '8B4513',
'salmon': 'FA8072',
'sandybrown': 'F4A460',
'seagreen': '2E8B57',
'seashell': 'FFF5EE',
'sienna': 'A0522D',
'silver': 'C0C0C0',
'skyblue': '87CEEB',
'slateblue': '6A5ACD',
'slategray': '708090',
'snow': 'FFFAFA',
'springgreen': '00FF7F',
'steelblue': '4682B4',
'tan': 'D2B48C',
'teal': '008080',
'thistle': 'D8BFD8',
'tomato': 'FF6347',
'turquoise': '40E0D0',
'violet': 'EE82EE',
'wheat': 'F5DEB3',
'white': 'FFFFFF',
'whitesmoke': 'F5F5F5',
'yellow': 'FFFF00',
'yellowgreen': '9ACD32'
}
PIN_TYPES = ('pin_letter','pin_icon','xpin_letter','xpin_icon','spin')
PIN_ICONS = ('home', 'home', 'WC', 'WCfemale', 'WCmale', 'accomm', 'airport',
'baby', 'bar', 'bicycle', 'bus', 'cafe', 'camping', 'car', 'caution', 'cinema',
'computer', 'corporate', 'dollar', 'euro', 'fire', 'flag', 'floral', 'helicopter',
'home', 'info', 'landslide', 'legal', 'location', 'locomotive', 'medical',
'mobile', 'motorcycle', 'music', 'parking', 'pet', 'petrol', 'phone', 'picnic',
'postal', 'pound', 'repair', 'restaurant', 'sail', 'school', 'scissors', 'ship',
'shoppingbag', 'shoppingcart', 'ski', 'snack', 'snow', 'sport', 'swim', 'taxi',
'train', 'truck', 'wheelchair', 'yen')
PIN_SHAPES = ('pin','star','sleft','sright')
NOTE_TYPES = ('note_title','note','weather')
NOTE_IMAGES = ('arrow_d', 'balloon', 'pinned_c', 'sticky_y', 'taped_y', 'thought')
NOTE_WEATHERS = ('clear-night-moon', 'cloudy-heavy', 'cloudy-sunny', 'cloudy',
'rain', 'rainy-sunny', 'snow', 'snowflake', 'snowy-sunny', 'sunny-cloudy',
'sunny', 'thermometer-cold', 'thermometer-hot', 'thunder', 'windy')
BUBBLE_TYPES = ('icon_text_small','icon_text_big','icon_texts_big','texts_big')
BUBBLE_SICONS = ('WC', 'WCfemale', 'WCmale', 'accomm', 'airport', 'baby', 'bar',
'bicycle', 'bus', 'cafe', 'camping', 'car', 'caution', 'cinema', 'computer',
'corporate', 'dollar', 'euro', 'fire', 'flag', 'floral', 'helicopter', 'home',
'info', 'landslide', 'legal', 'location', 'locomotive', 'medical', 'mobile',
'motorcycle', 'music', 'parking', 'pet', 'petrol', 'phone', 'picnic', 'postal',
'pound', 'repair', 'restaurant', 'sail', 'school', 'scissors', 'ship', 'shoppingbag',
'shoppingcart', 'ski', 'snack', 'snow', 'sport', 'swim', 'taxi', 'train',
'truck', 'wheelchair', 'yen')
BUBBLE_LICONS = ('beer', 'bike', 'car', 'house', 'petrol', 'ski', 'snack')
LEGEND_POSITIONS = ('b','t','r','l','bv','tv')
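# Hedged example (editor addition): assembling a minimal chart URL from the
# constants above. 'cht', 'chs', 'chd' and 'chco' are real Chart API parameters
# (see APIPARAMS); the data values are made up for illustration.
def _example_chart_url():
    params = {'cht': 'lc', 'chs': '250x100', 'chd': 't:10,20,30',
              'chco': COLOR_MAP['steelblue']}
    query = '&'.join('%s=%s' % (k, smart_str(v)) for k, v in sorted(params.items()))
    return APIURL + query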
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from libcloud.common.base import JsonResponse
from libcloud.common.base import ConnectionUserAndKey
from libcloud.utils.py3 import b
from libcloud.common.types import ProviderError
__all__ = [
'API_HOST',
'LiquidWebException',
'LiquidWebResponse',
'LiquidWebConnection',
]
# Endpoint for liquidweb api.
API_HOST = 'api.stormondemand.com'
class LiquidWebException(ProviderError):
"""The base class for other Liquidweb exceptions"""
def __init__(self, value, http_code, extra=None):
"""
:param value: message contained in error
:type value: ``str``
:param http_code: error code
:type http_code: ``int``
:param extra: extra fields specific to error type
:type extra: ``list``
"""
self.extra = extra
super(LiquidWebException, self).__init__(value, http_code, driver=None)
def __str__(self):
return "%s %s" % (self.http_code, self.value)
def __repr__(self):
return "LiquidWebException %s %s" % (self.http_code, self.value)
class APIException(LiquidWebException):
def __init__(self, error_class, full_msg, http_code, extra=None):
self.error_class = error_class
super(APIException, self).__init__(full_msg, http_code, extra=extra)
def __str__(self):
return "%s: %s" % (self.error_class, self.value)
def __repr__(self):
return "%s: %s" % (self.error_class, self.value)
EXCEPTIONS_FIELDS = {
'LW::Exception::API::Internal': {
'fields': []
},
'LW::Exception::API::InvalidEncoding': {
'fields': ['encoding']
},
'LW::Exception::API::InvalidMethod': {
'fields': ['method']
},
'LW::Exception::API::Maintenance': {
'fields': []
},
'LW::Exception::API::RateLimit': {
'fields': ['account', 'ip', 'method']
},
'LW::Exception::Authorization': {
'fields': ['username']
},
'LW::Exception::DNS::NoResponse': {
'fields': ['nameservers']
},
'LW::Exception::DNS::Servfail': {
'fields': ['nameservers']
},
'LW::Exception::Deserialize': {
'fields': ['data', 'encoding']
},
'LW::Exception::DuplicateRecord': {
'fields': ['field', 'input', 'statement']
},
'LW::Exception::Forbidden': {
'fields': []
},
'LW::Exception::Incapable': {
'fields': ['capability', 'thing']
},
'LW::Exception::Input': {
'fields': ['field']
},
'LW::Exception::Input::Disallowed': {
'fields': ['field']
},
'LW::Exception::Input::Multiple': {
'fields': ['errors', 'field', 'type']
},
'LW::Exception::Input::NotInRealm': {
'fields': ['field', 'valid', 'value']
},
'LW::Exception::Input::OutOfBounds': {
'fields': ['field', 'max', 'min', 'value']
},
'LW::Exception::Input::Required': {
'fields': ['field', 'position']
},
'LW::Exception::Input::Unknown': {
'fields': ['field', 'value']
},
'LW::Exception::Input::Validation': {
'fields': ['field', 'type', 'value']
},
'LW::Exception::Permission': {
'fields': ['account', 'identifier']
},
'LW::Exception::RecordNotFound': {
'fields': ['field', 'input']
},
'LW::Exception::RemoteService::Authorization': {
'fields': ['url']
},
'LW::Exception::Resource': {
'fields': ['resource']
},
'LW::Exception::Resource::Insufficient': {
'fields': ['available', 'requested', 'resource']
},
'LW::Exception::Resource::Unavailable': {
'fields': ['resource']
},
'LW::Exception::Serialize': {
'fields': ['data', 'encoding']
},
'LW::Exception::Workflow::Conflict': {
'fields': ['conflict', 'workflow']
}
}
class LiquidWebResponse(JsonResponse):
objects = None
errors = None
error_dict = {}
def __init__(self, response, connection):
self.errors = []
super(LiquidWebResponse, self).__init__(response=response,
connection=connection)
self.objects, self.errors = self.parse_body_and_errors()
if self.errors:
error = self.errors.pop()
raise self._make_excp(error, self.status)
def parse_body_and_errors(self):
data = []
errors = []
js = super(LiquidWebResponse, self).parse_body()
if 'items' in js:
data.append(js['items'])
if 'name' in js:
data.append(js)
if 'deleted' in js:
data.append(js['deleted'])
if 'error_class' in js:
errors.append(js)
return (data, errors)
def success(self):
"""
Returns ``True`` if our request is successful.
"""
return (len(self.errors) == 0)
def _make_excp(self, error, status):
"""
Build an APIException from the given error payload.
"""
exc_type = error.get('error_class')
message = error.get('full_message')
extra = {}
try:
_type = EXCEPTIONS_FIELDS[exc_type]
fields = _type.get('fields')
except KeyError:
fields = []
for field in fields:
extra[field] = error.get(field)
return APIException(exc_type, message, status, extra=extra)
class LiquidWebConnection(ConnectionUserAndKey):
host = API_HOST
responseCls = LiquidWebResponse
def add_default_headers(self, headers):
b64string = b('%s:%s' % (self.user_id, self.key))
encoded = base64.b64encode(b64string).decode('utf-8')
authorization = 'Basic ' + encoded
headers['Authorization'] = authorization
headers['Content-Type'] = 'application/json'
return headers
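# Hedged example (editor addition): how an error payload from the API maps onto
# APIException via the EXCEPTIONS_FIELDS table used by LiquidWebResponse.
# The payload values are made up for illustration.
def _example_error_mapping():
    error = {'error_class': 'LW::Exception::RecordNotFound',
             'full_message': 'No such record', 'field': 'id', 'input': '42'}
    fields = EXCEPTIONS_FIELDS[error['error_class']]['fields']
    extra = dict((field, error.get(field)) for field in fields)
    return APIException(error['error_class'], error['full_message'], 404, extra=extra)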
|
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file: html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','ti.touchid.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','TiTouchidModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release OTHER_CFLAGS=\"-fembed-bitcode\"")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release OTHER_CFLAGS=\"-fembed-bitcode\"")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
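# Hedged usage note (editor addition): assuming this script is the module's
# build.py and is run from the module root (next to manifest, LICENSE and
# titanium.xcconfig):
#
#   python build.py              # build, document and package the module
#   python build.py --skip-docs  # skip apidoc generation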
|
|
# -*- coding: utf-8 -*-
"""
wakatime.languages.templates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Templates.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
from ..compat import u
""" If these keywords are found in the source file, treat them as a dependency.
Must be lower-case strings.
"""
KEYWORDS = [
'_',
'$',
'angular',
'assert', # probably mocha
'backbone',
'batman',
'c3',
'can',
'casper',
'chai',
'chaplin',
'd3',
'define', # probably require
'describe', # mocha or jasmine
'eco',
'ember',
'espresso',
'expect', # probably jasmine
'exports', # probably npm
'express',
'gulp',
'handlebars',
'highcharts',
'jasmine',
'jquery',
'jstz',
'ko', # probably knockout
'm', # probably mithril
'marionette',
'meteor',
'moment',
'monitorio',
'mustache',
'phantom',
'pickadate',
'pikaday',
'qunit',
'react',
'reactive',
'require', # probably the commonjs spec
'ripple',
'rivets',
'socketio',
'spine',
'thorax',
'underscore',
'vue',
'way',
'zombie',
]
class LassoJavascriptParser(TokenParser):
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if u(token) == 'Token.Name.Other':
self._process_name(token, content)
elif u(token) == 'Token.Literal.String.Single' or u(token) == 'Token.Literal.String.Double':
self._process_literal_string(token, content)
def _process_name(self, token, content):
if content.lower() in KEYWORDS:
self.append(content.lower())
def _process_literal_string(self, token, content):
if 'famous/core/' in content.strip('"').strip("'"):
self.append('famous')
class HtmlDjangoParser(TokenParser):
tags = []
getting_attrs = False
current_attr = None
current_attr_value = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if u(token) == 'Token.Name.Tag':
self._process_tag(token, content)
elif u(token) == 'Token.Literal.String':
self._process_string(token, content)
elif u(token) == 'Token.Name.Attribute':
self._process_attribute(token, content)
@property
def current_tag(self):
return None if len(self.tags) == 0 else self.tags[0]
def _process_tag(self, token, content):
if content.startswith('</') or content.startswith('/'):
try:
self.tags.pop(0)
except IndexError:
# ignore errors from malformed markup
pass
self.getting_attrs = False
elif content.startswith('<'):
self.tags.insert(0, content.replace('<', '', 1).strip().lower())
self.getting_attrs = True
elif content.startswith('>'):
self.getting_attrs = False
self.current_attr = None
def _process_attribute(self, token, content):
if self.getting_attrs:
self.current_attr = content.lower().strip('=')
else:
self.current_attr = None
self.current_attr_value = None
def _process_string(self, token, content):
if self.getting_attrs and self.current_attr is not None:
if content.endswith('"') or content.endswith("'"):
if self.current_attr_value is not None:
self.current_attr_value += content
if self.current_tag == 'script' and self.current_attr == 'src':
self.append(self.current_attr_value)
self.current_attr = None
self.current_attr_value = None
else:
if len(content) == 1:
self.current_attr_value = content
else:
if self.current_tag == 'script' and self.current_attr == 'src':
self.append(content)
self.current_attr = None
self.current_attr_value = None
elif content.startswith('"') or content.startswith("'"):
if self.current_attr_value is None:
self.current_attr_value = content
else:
self.current_attr_value += content
class VelocityHtmlParser(HtmlDjangoParser):
pass
class MyghtyHtmlParser(HtmlDjangoParser):
pass
class MasonParser(HtmlDjangoParser):
pass
class MakoHtmlParser(HtmlDjangoParser):
pass
class CheetahHtmlParser(HtmlDjangoParser):
pass
class HtmlGenshiParser(HtmlDjangoParser):
pass
class RhtmlParser(HtmlDjangoParser):
pass
class HtmlPhpParser(HtmlDjangoParser):
pass
class HtmlSmartyParser(HtmlDjangoParser):
pass
class EvoqueHtmlParser(HtmlDjangoParser):
pass
class ColdfusionHtmlParser(HtmlDjangoParser):
pass
class LassoHtmlParser(HtmlDjangoParser):
pass
class HandlebarsHtmlParser(HtmlDjangoParser):
pass
class YamlJinjaParser(HtmlDjangoParser):
pass
class TwigHtmlParser(HtmlDjangoParser):
pass
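# Hedged example (editor addition): the name-matching rule used by
# LassoJavascriptParser._process_name, extracted as a standalone helper so it
# can be exercised without a pygments token stream.
def _example_keyword_match(identifier):
    name = identifier.lower()
    return name if name in KEYWORDS else None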
|
|
# TODO merge this with normal nnet
import gnumpy as gp
import numpy as np
import ctc_fast as ctc
#import ctc
# debug tool
#from IPython import embed
#from ipsh import *
#DEBUG = TRUE
def relu_hard(x, computeGrad = False):
if (not computeGrad):
f = (1/2.)*(x+gp.sign(x)*x)
return f
g = np.sign(x)
return g
def relu(x, computeGrad = False):
negslope = .01
a = (1+negslope)/2.; b = (1-negslope)/2.
if (not computeGrad):
f = a*x + b*gp.sign(x)*x
return f
g = a + b*gp.sign(x)
return g
def sigmoid(x, computeGrad = False):
if (not computeGrad):
f = gp.logistic(x)
return f
g = x * (1.-x)
return g
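# Hedged example (editor addition): the activation helpers above return the
# forward value by default and the element-wise derivative when
# computeGrad=True. Note that sigmoid's derivative is expressed in terms of
# its *output*, which is why the backward pass feeds activations (not
# pre-activations) back in.
def _example_activation_usage():
    x = gp.garray([[-1.0, 0.5], [2.0, -0.25]])
    forward = relu(x)
    grad = relu(x, computeGrad=True)
    return forward, grad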
class RNNet:
def __init__(self,inputDim,outputDim,layerSizes,train=True,
activation='relu', temporalLayer = -1):
"""
temporalLayer indicates which layer is recurrent. <= 0 indicates no recurrence.
"""
self.outputDim = outputDim
self.inputDim = inputDim
self.layerSizes = layerSizes
self.temporalLayer = temporalLayer
self.stack = None
self.train = train
self.funcdict = {
"relu_hard" : relu_hard,
"relu" : relu,
"sigmoid" : sigmoid,
}
self.activation = self.funcdict[activation]
self.hist = {}
def initParams(self):
"""
Initialize parameters uniformly in [-s, s] with s = sqrt(6)/sqrt(fanin + fanout).
"""
sizes = [self.inputDim]+self.layerSizes+[self.outputDim]
scales = [gp.sqrt(6)/gp.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]
self.stack = [[gp.rand(m,n)*2*s-s,gp.zeros((m,1))] \
for n,m,s in zip(sizes[:-1],sizes[1:],scales)]
if self.temporalLayer > 0:
rs = sizes[self.temporalLayer]
s = gp.sqrt(6)/ rs
# temporal layer stored at end of stack
self.stack.append([gp.rand(rs,rs) * 2 * s - s, gp.zeros((2,1))])
if self.train:
#TODO why store all deltas?
#self.deltas = [gp.empty((s,self.mbSize)) for s in sizes[1:]]
#NOTE if a temporal layer is used it's already added to stack so will have a grad
self.grad = [[gp.empty(w.shape),gp.empty(b.shape)] for w,b in self.stack]
def updateParams(self,scale, update):
"""
Update parameters in place. Behaves the same whether or not the network is recurrent.
"""
self.stack = [[ws[0]+scale*wsDelta[0],ws[1]+scale*wsDelta[1]]
for ws,wsDelta in zip(self.stack,update)]
def vecToStack(self,vec):
start = 0
sizes = [self.inputDim]+self.layerSizes+[self.outputDim]
for n,m,i in zip(sizes[:-1],sizes[1:],range(len(sizes)-1)):
# TODO why is this unpacking into a list for w,b instead of tuple?
self.stack[i] = [gp.garray(vec[start:start+m*n]).reshape(m,n),\
gp.garray(vec[start+m*n:start+m*(n+1)]).reshape(m,1)]
start += m*(n+1)
if self.temporalLayer > 0:
rs = self.layerSizes[self.temporalLayer-1]
self.stack[-1] = [gp.garray(vec[start:start+rs*rs]).reshape(rs,rs),\
gp.garray(vec[start+rs*rs:])]
def vectorize(self, x):
"""
Converts a stack object into a single parameter vector
x is a stack
returns a single numpy array
XXX or does this return a list of lists?
"""
return [v for layer in x for wb in layer for w_or_b in wb for v in w_or_b]
# r = []
# for l in x:
# for wb in l:
# for w_or_b in wb:
# r.extend(w_or_b.reshape(-1).tolist())
#r = [(v for v in w_or_b) if isinstance(w_or_b, np.ndarray) else w_or_b for layer in x for wb in layer for w_or_b in wb]
#return [v for layer in x for wb in layer for w_or_b in wb for v in w_or_b]
def paramVec(self):
return self.vectorize(self.stack)
def costAndGradVec(self,params,data,labels):
"""
Vectorized version of CTC cost
data is a single utterance. Each column is a time index [0...T]
"""
self.vecToStack(params)
cost,grad = self.costAndGrad(data,labels)
if grad is not None:
vecgrad = self.vectorize(grad)
return cost,vecgrad
def costAndGrad(self,data,labels,key=None):
"""
Forward prop entire utterance
Call CTC cost function
Compute gradient
data is a 2-D matrix where each column is a single time frame
Number of input frames changes across iterations
labels is a vector of symbol ids, length unknown and does not
depend on the number of time frames
"""
## forward prop
T = data.shape[1]
sizes = [self.inputDim]+self.layerSizes+[self.outputDim]
stackMax = len(self.stack)-1
if self.temporalLayer > 0:
stackMax -= 1
self.hActs = [gp.empty((s,T)) for s in sizes]
self.hActs[0] = data
#for t in range(T):
i = 1
for l in range(stackMax+1):
w,b = self.stack[l]
self.hActs[i] = w.dot(self.hActs[i-1]) + b
# loop over time for recurrent layer
if (self.temporalLayer-1) == l:
for t in range(T):
if t > 0:
self.hActs[i][:,t] += self.stack[-1][0].dot(self.hActs[i][:,t-1])
# nonlinearity
if i <= stackMax:
self.hActs[i][:,t] = self.activation(self.hActs[i][:,t])
# hidden layer activation function for batch forward prop
elif i <= stackMax:
self.hActs[i] = self.activation(self.hActs[i])
# w_t,b_t = self.stack[-1][0]
# self.hActs[i][:,t] += self.stack[-1][0].dot(self.hActs[i][:,t-1])
i += 1
# convert final layer to probs after all time iteration complete
probs = self.hActs[-1]-gp.max(self.hActs[-1],axis=0)
probs = gp.as_numpy_array(probs)
probs = np.exp(probs)
probs = probs/np.sum(probs,axis=0)
## pass probs and label string to ctc loss
# TODO how much does passing to different function cost us?
cost, delta_output, skip = ctc.ctc_loss(probs, labels.squeeze(), blank=0)
# Store probabilities and error signal for a given key
if key is not None and key in self.hist:
self.hist[key].append((probs,delta_output))
if not self.train:
return cost,None
delta_output = gp.garray(delta_output)
## back prop through time
# zero gradients
self.grad = [[gp.zeros(w.shape),gp.zeros(b.shape)] for w,b in self.stack]
if self.temporalLayer > 0:
delta_t = np.zeros(self.layerSizes[self.temporalLayer-1])
for t in reversed(range(T)):
# get delta from loss function
delta = delta_output[:,t].T
# compute gradient for output layer
#print self.hActs[-2].shape, delta.shape, self.stack[stackMax][0].shape
#print delta.reshape(-1,1).shape, self.hActs[-2][:,t].reshape(-1,1).shape
# TODO can we get rid of some of these annoying reshape -1 1?
self.grad[stackMax][0] += delta.reshape(-1,1).dot(self.hActs[-2][:,t].reshape(-1,1).T)
self.grad[stackMax][1] += delta.reshape(-1, 1)
# push delta through output layer
delta = self.stack[stackMax][0].T.dot(delta)
# iterate over lower layers
i = len(self.layerSizes)-1
while i >= 0:
# add the temporal delta if this is the recurrent layer
if (self.temporalLayer-1) == i:
#print delta.shape, delta_t.shape
delta += delta_t
# push delta through activation function for this layer
#print i, stackMax, delta.shape, self.hActs[i+1][:,t].shape
delta = delta * self.activation(self.hActs[i+1][:,t], True)
#embed()
# compute the gradient
#print i, delta.shape, self.hActs[i][:,t].T.reshape(1,-1).shape, self.grad[i][0].shape
self.grad[i][0] += delta.reshape(-1,1).dot(self.hActs[i][:,t].T.reshape(1,-1))
self.grad[i][1] += delta.reshape(-1,1)
# add the temporal delta if this is the recurrent layer
if (self.temporalLayer-1) == i and t > 0:
self.grad[-1][0] += delta.reshape(-1,1).dot(self.hActs[i+1][:,t-1].T.reshape(1,-1))
# push delta through temporal connections
delta_t = self.stack[-1][0].T.dot(delta)
# HACK no bias for temporal layer. Give it a gradient of 0
self.grad[-1][1] = np.zeros((2,1))
# push the delta downward
w,b = self.stack[i]
delta = w.T.dot(delta)
i -= 1
#print self.grad
return cost,self.grad, skip
def toFile(self,fid):
"""
Saves only the network parameters to the given fd.
"""
import cPickle as pickle
pickle.dump(self.stack,fid)
def fromFile(self,fid):
import cPickle as pickle
self.stack = pickle.load(fid)
if __name__=='__main__':
inputDim = 3
numPhones = 6
outputDim = numPhones + 1
seq_len_out = 2
seq_len_in = 5
# can't output more symbols than input times
assert seq_len_in >= seq_len_out
layerSizes = [10, 4]
# make sure seq labels do not have '0' which is our blank index
label_seq = np.floor(np.random.rand(seq_len_out,1)*numPhones)
label_seq = 1 + label_seq.astype(np.int32)
data = np.random.randn(inputDim,seq_len_in)
# make nnet
nn = RNNet(inputDim, outputDim, layerSizes, train=True, temporalLayer=2)
nn.initParams()
# run
cost,grad = nn.costAndGrad(data,label_seq)
print cost
#print grad
# timing for a larger model
if True:
import timeit
setup = '''
import numpy as np
import rnnet
inputDim = 300
numPhones = 61
outputDim = numPhones + 1
seq_len_out = 100
seq_len_in = 400
layerSizes = [2048, 2048]
label_seq = np.floor(np.random.rand(seq_len_out,1)*numPhones)
label_seq = 1 + label_seq.astype(np.int32)
data = np.random.randn(inputDim,seq_len_in)
nn = rnnet.RNNet(inputDim, outputDim, layerSizes, train=True, temporalLayer=2)
nn.initParams()
'''
# run
print timeit.timeit('nn.costAndGrad(data,label_seq)', setup=setup, number=1)
|
|
"""Ansible integration test infrastructure."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import contextlib
import json
import os
import shutil
import tempfile
from .. import types as t
from ..target import (
analyze_integration_target_dependencies,
walk_integration_targets,
)
from ..config import (
IntegrationConfig,
NetworkIntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
)
from ..util import (
ApplicationError,
display,
make_dirs,
COVERAGE_CONFIG_NAME,
MODE_DIRECTORY,
MODE_DIRECTORY_WRITE,
MODE_FILE,
to_bytes,
)
from ..util_common import (
named_temporary_file,
write_text_file,
ResultType,
)
from ..coverage_util import (
generate_coverage_config,
)
from ..cache import (
CommonCache,
)
from ..cloud import (
CloudEnvironmentConfig,
)
from ..data import (
data_context,
)
def setup_common_temp_dir(args, path):
"""
:type args: IntegrationConfig
:type path: str
"""
if args.explain:
return
os.mkdir(path)
os.chmod(path, MODE_DIRECTORY)
if args.coverage:
coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME)
coverage_config = generate_coverage_config(args)
write_text_file(coverage_config_path, coverage_config)
os.chmod(coverage_config_path, MODE_FILE)
coverage_output_path = os.path.join(path, ResultType.COVERAGE.name)
os.mkdir(coverage_output_path)
os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
def generate_dependency_map(integration_targets):
"""
:type integration_targets: list[IntegrationTarget]
:rtype: dict[str, set[IntegrationTarget]]
"""
targets_dict = dict((target.name, target) for target in integration_targets)
target_dependencies = analyze_integration_target_dependencies(integration_targets)
dependency_map = {}
invalid_targets = set()
for dependency, dependents in target_dependencies.items():
dependency_target = targets_dict.get(dependency)
if not dependency_target:
invalid_targets.add(dependency)
continue
for dependent in dependents:
if dependent not in dependency_map:
dependency_map[dependent] = set()
dependency_map[dependent].add(dependency_target)
if invalid_targets:
raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
return dependency_map
def get_files_needed(target_dependencies):
"""
:type target_dependencies: list[IntegrationTarget]
:rtype: list[str]
"""
files_needed = []
for target_dependency in target_dependencies:
files_needed += target_dependency.needs_file
files_needed = sorted(set(files_needed))
invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
if invalid_paths:
raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
return files_needed
def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None
"""Check the given inventory for issues."""
if args.docker or args.remote:
if os.path.exists(inventory_path):
with open(inventory_path) as inventory_file:
inventory = inventory_file.read()
if 'ansible_ssh_private_key_file' in inventory:
display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str
"""Return the inventory path used for the given integration configuration relative to the content root."""
inventory_names = {
PosixIntegrationConfig: 'inventory',
WindowsIntegrationConfig: 'inventory.winrm',
NetworkIntegrationConfig: 'inventory.networking',
} # type: t.Dict[t.Type[IntegrationConfig], str]
return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None
"""Make the given inventory available during delegation."""
if isinstance(args, PosixIntegrationConfig):
return
def inventory_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
"""
Add the inventory file to the payload file list.
This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
"""
if data_context().content.collection:
working_path = data_context().content.collection.directory
else:
working_path = ''
inventory_path = os.path.join(working_path, get_inventory_relative_path(args))
if os.path.isfile(inventory_path_src) and os.path.relpath(inventory_path_src, data_context().content.root) != inventory_path:
originals = [item for item in files if item[1] == inventory_path]
if originals:
for original in originals:
files.remove(original)
display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
else:
display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
files.append((inventory_path_src, inventory_path))
data_context().register_payload_callback(inventory_callback)
@contextlib.contextmanager
def integration_test_environment(args, target, inventory_path_src):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
:type inventory_path_src: str
"""
ansible_config_src = args.get_ansible_config()
ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
inventory_path = inventory_path_src
ansible_config = ansible_config_src
vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
return
root_temp_dir = os.path.expanduser('~/.ansible/test/tmp')
prefix = '%s-' % target.name
suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
suffix = '-ansible'
if args.explain:
temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
else:
make_dirs(root_temp_dir)
temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
try:
display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
inventory_relative_path = get_inventory_relative_path(args)
inventory_path = os.path.join(temp_dir, inventory_relative_path)
cache = IntegrationCache(args)
target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
files_needed = get_files_needed(target_dependencies)
integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path)
ansible_config = os.path.join(temp_dir, ansible_config_relative)
vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path)
file_copies = [
(ansible_config_src, ansible_config),
(inventory_path_src, inventory_path),
]
if os.path.exists(vars_file_src):
file_copies.append((vars_file_src, vars_file))
file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
integration_targets_relative_path = data_context().content.integration_targets_path
directory_copies = [
(
os.path.join(integration_targets_relative_path, target.relative_path),
os.path.join(temp_dir, integration_targets_relative_path, target.relative_path)
)
for target in target_dependencies
]
directory_copies = sorted(set(directory_copies))
file_copies = sorted(set(file_copies))
if not args.explain:
make_dirs(integration_dir)
for dir_src, dir_dst in directory_copies:
display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
if not args.explain:
shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True)
for file_src, file_dst in file_copies:
display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
if not args.explain:
make_dirs(os.path.dirname(file_dst))
shutil.copy2(file_src, file_dst)
yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
finally:
if not args.explain:
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def integration_test_config_file(args, env_config, integration_dir):
"""
:type args: IntegrationConfig
:type env_config: CloudEnvironmentConfig
:type integration_dir: str
"""
if not env_config:
yield None
return
config_vars = (env_config.ansible_vars or {}).copy()
config_vars.update(dict(
ansible_test=dict(
environment=env_config.env_vars,
module_defaults=env_config.module_defaults,
)
))
config_file = json.dumps(config_vars, indent=4, sort_keys=True)
with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path:
filename = os.path.relpath(path, integration_dir)
display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
yield path
class IntegrationEnvironment:
"""Details about the integration environment."""
def __init__(self, integration_dir, targets_dir, inventory_path, ansible_config, vars_file):
self.integration_dir = integration_dir
self.targets_dir = targets_dir
self.inventory_path = inventory_path
self.ansible_config = ansible_config
self.vars_file = vars_file
class IntegrationCache(CommonCache):
"""Integration cache."""
@property
def integration_targets(self):
"""
:rtype: list[IntegrationTarget]
"""
return self.get('integration_targets', lambda: list(walk_integration_targets()))
@property
def dependency_map(self):
"""
:rtype: dict[str, set[IntegrationTarget]]
"""
return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
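# Hedged usage sketch (editor addition): how the pieces above are typically
# combined by a caller. `args` is an IntegrationConfig, `target` an
# IntegrationTarget and `inventory_path_src` a path string; `env_config` may be
# None when no cloud environment is involved.
#
# with integration_test_environment(args, target, inventory_path_src) as test_env:
#     with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path:
#         ...  # run the integration command using test_env and config_path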
|
|
import re
import numpy as np
import math
import os
from skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE
from skbeam.core.constants.xrf import XrfElement
from skbeam.core.fitting.lineshapes import gaussian
from ..model.load_data_from_db import save_data_to_hdf5
from ..core.quant_analysis import ParamQuantEstimation
from ..core.xrf_utils import generate_eline_list
import logging
logger = logging.getLogger(__name__)
def _get_elemental_line_parameters(*, elemental_line, incident_energy):
r"""
Retrieve information on all emission lines for the given element and the group (K, L or M)
at given incident energy. For each emission line, the information includes emission line name,
energy and ratio. The data is used for simulation of emission spectra of elements.
Parameters
----------
elemental_line: str
Elemental line name in the format ``Fe_K``, ``Ca_L``, etc.
incident_energy: float
Incident energy in keV
Returns
-------
List of dictionaries. Keys: ``name`` - emission line name, ``energy`` - energy of the
emission line, "ratio" - ratio of the emission line area to the area of ``a1`` line.
Raises
------
RuntimeError
Elemental line is not in the list of supported lines or the emission line is
incorrectly formatted.
"""
# Check format (Fe_K, Y_L, W_M etc.)
if not re.search(r"^[A-Z][a-z]?_[KLM]$", elemental_line):
raise RuntimeError(f"Elemental line {elemental_line} has incorrect format")
element, line = elemental_line.split("_")
line_name = elemental_line
ALL_LINES = K_LINE + L_LINE + M_LINE
if line_name not in ALL_LINES:
raise RuntimeError(f"Elemental line {line_name} is not supported")
elemental_lines = []
# XrfElement class provides convenient access to xraylib library functions
e = XrfElement(element)
# Check if the emission line is activated (check if 'a1' line is active)
em_line_a1 = f"{line.lower()}a1"
i_line_a1 = e.cs(incident_energy)[em_line_a1]
if i_line_a1 > 0:
for num, item in enumerate(e.emission_line.all):
l_name = item[0] # Line name (ka1, kb2 etc.)
energy_v = item[1] # Energy (in keV)
if line.lower() not in l_name:
continue
i_line = e.cs(incident_energy)[l_name]
ratio_v = i_line / i_line_a1
if energy_v == 0 or ratio_v == 0:
continue
elemental_lines.append({"name": l_name, "energy": energy_v, "ratio": ratio_v})
return elemental_lines
def gen_xrf_spectrum(
element_line_groups=None,
*,
incident_energy=12.0,
n_spectrum_points=4096,
e_offset=0.0,
e_linear=0.01,
e_quadratic=0.0,
fwhm_offset=0.102333594,
fwhm_fanoprime=0.000113169,
epsilon=3.85,
):
r"""
Computes simulated XRF spectrum for the set of element line groups. Returns the spectrum
as ndarray with 'n_spectrum_points' elements.
Parameters
----------
element_line_groups: dict(dict)
Dictionary of element line groups that need to be included in the spectrum: key - element
line group (``K``, ``L`` or ``M`` group supported by scikit beam, e.g. ``Si_K``, ``Ba_L``,
``Pt_M`` etc.); value - dictionary that contains spectrum parameters for the group.
Currently only the parameter ``area`` is supported, which defines the area under the
spectrum composed of all emission lines that belong to the group expressed in counts
(must be positive floating point number).
Example: ``{"Si_K": {'area' : 800}, "Ba_L": {'area' : 900}, "Pt_M": {'area' : 1000}}``
incident_energy: float
incident energy of the beam (used in simulation)
n_spectrum_points: int
the number of spectrum points. Currently PyXRF works with 4096-point spectra.
e_offset, e_linear, e_quadratic: float
parameters used to compute energy values for the energy axis.
The energy value for point ``nn`` is computed as ``e_offset + e_linear * nn + e_quadratic * np.square(nn)``,
where ``nn = 0 .. n_spectrum_points - 1``. The default values should be typically used.
fwhm_offset, fwhm_fanoprime, epsilon: float
parameters that determine the shape of the emission line peaks. The default values
should be typically used.
Returns
-------
spectrum_total: ndarray(float)
The spectrum that contains active emission lines of the specified group.
Size: 'n_spectrum_points'.
xx_energy: ndarray(float)
The values for the energy axis. Size: 'n_spectrum_points'.
Raises
------
RuntimeError
Raised if the list of emission line groups contains incorrectly formatted or not supported
emission lines. Also raised if ``n_spectrum_points`` is zero or negative.
"""
if n_spectrum_points < 1:
raise RuntimeError(f"Spectrum must contain at least one point (n_spectrum_points={n_spectrum_points})")
if (element_line_groups is not None) and (not isinstance(element_line_groups, dict)):
raise RuntimeError(
f"Parameter 'element_line_groups' has invalid type {type(element_line_groups)} "
f"(must be None or dict)"
)
spectrum_total = np.zeros((n_spectrum_points,), dtype="float")
# Energy axis
nn = np.asarray(range(n_spectrum_points))
xx_energy = e_offset + e_linear * nn + e_quadratic * np.square(nn)
if element_line_groups is not None:
for element_line_group, parameters in element_line_groups.items():
element_area = parameters["area"]
spectrum = np.zeros((n_spectrum_points,), dtype="float")
elemental_lines = _get_elemental_line_parameters(
elemental_line=element_line_group, incident_energy=incident_energy
)
for line in elemental_lines:
sigma = fwhm_offset / 2.0 / math.sqrt(2 * math.log(2))
sigma = math.sqrt(sigma**2 + line["energy"] * epsilon * fwhm_fanoprime)
spectrum += gaussian(x=xx_energy, area=line["ratio"], center=line["energy"], sigma=sigma)
# Normalize the spectrum, make the area equal to 'element_area'
spectrum *= element_area / spectrum.sum()
spectrum_total += spectrum
return spectrum_total, xx_energy
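# Hedged usage example (editor addition): simulate a two-element spectrum at the
# default 12 keV incident energy. The areas are arbitrary illustration values;
# "Fe_K" and "Ca_K" are K-line groups supported by scikit-beam.
def _example_gen_xrf_spectrum():
    element_line_groups = {"Fe_K": {"area": 800}, "Ca_K": {"area": 500}}
    spectrum, xx_energy = gen_xrf_spectrum(element_line_groups, incident_energy=12.0)
    return spectrum, xx_energy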
def gen_xrf_map_const(
element_line_groups=None,
*,
nx=10,
ny=5,
incident_energy=12.0,
n_spectrum_points=4096,
background_area=0,
spectrum_parameters=None,
):
r"""
Generate ny (vertical) by nx (horizontal) XRF map with an identical spectrum for each pixel.
Parameters
----------
element_line_groups: dict(dict)
Dictionary of element line groups that need to be included in the spectrum: key - element
line group (``K``, ``L`` or ``M`` group supported by scikit beam, e.g. ``Si_K``, ``Ba_L``,
``Pt_M`` etc.); value - dictionary that contains spectrum parameters for the group.
Currently only the parameter ``area`` is supported, which defines the area under the
spectrum composed of all emission lines that belong to the group expressed in counts
(must be positive floating point number).
Example: ``{"Si_K": {'area' : 800}, "Ba_L": {'area' : 900}, "Pt_M": {'area' : 1000}}``
nx: int
Horizontal dimension (axis 1) of the XRF map
ny: int
Vertical dimension (axis 0) of the XRF map
incident_energy: float
incident energy of the beam (used in simulation)
n_spectrum_points: int
the number of spectrum points. Currently PyXRF works with 4096-point spectra.
background_area: float
The area of the background. The background is spread uniformly over all
``n_spectrum_points`` points of the spectrum.
spectrum_parameters: dict
dict of optional spectrum parameters, which is passed to ``gen_xrf_spectrum``.
May be None.
Returns
-------
xrf_map: ndarray(np.float32)
XRF map with the shape ``(ny, nx, n_spectrum_points)``.
Raw XRF spectra are represented with 32-bit precision.
xx_energy: ndarray(float)
The values for the energy axis. Size: 'n_spectrum_points'.
Raises
------
RuntimeError
Raised if the list of emission line groups contains incorrectly formatted or not supported
emission lines. Also raised if ``n_spectrum_points`` is zero or negative, or map with zero
points is generated.
"""
if spectrum_parameters is None:
spectrum_parameters = {}
if nx < 1 or ny < 1:
raise RuntimeError(f"XRF map has zero pixels: nx={nx}, ny={ny}")
spectrum, xx_energy = gen_xrf_spectrum(
element_line_groups,
incident_energy=incident_energy,
n_spectrum_points=n_spectrum_points,
**spectrum_parameters,
)
background = background_area / spectrum.size
spectrum += background
# One spectrum is computed. Now change precision to 32 bit before using it to create a map
spectrum = np.float32(spectrum)
xrf_map = np.broadcast_to(spectrum, shape=[ny, nx, len(spectrum)])
return xrf_map, xx_energy
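# Hedged usage example (editor addition): a small constant map with a flat
# background; the element areas are arbitrary illustration values.
def _example_gen_xrf_map():
    xrf_map, xx_energy = gen_xrf_map_const(
        {"Si_K": {"area": 800}, "Ba_L": {"area": 900}},
        nx=20, ny=10, incident_energy=12.0, background_area=100,
    )
    return xrf_map.shape  # (10, 20, 4096)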
def create_xrf_map_data(
*,
scan_id,
element_line_groups=None,
num_det_channels=3,
nx=10,
ny=5,
incident_energy=12.0,
n_spectrum_points=4096,
background_area=0,
spectrum_parameters=None,
):
r"""
Generates a complete simulated XRF dataset based on set of element lines, XRF map size,
incident energy etc. The dataset may be used for testing of XRF map processing functions.
Parameters
----------
scan_id: str or int
Scan ID that is included in metadata of the generated dataset.
element_line_groups: dict(dict)
Dictionary of element line groups that need to be included in the spectrum: key - element
line group (``K``, ``L`` or ``M`` group supported by scikit beam, e.g. ``Si_K``, ``Ba_L``,
``Pt_M`` etc.); value - dictionary that contains spectrum parameters for the group.
Currently only the parameter ``area`` is supported, which defines the area under the
spectrum composed of all emission lines that belong to the group expressed in counts
(must be positive floating point number).
Example: ``{"Si_K": {'area' : 800}, "Ba_L": {'area' : 900}, "Pt_M": {'area' : 1000}}``
num_det_channels: int
The number of detector channels to simulate. Must be a positive integer; values less than 1 are treated as 1.
nx: int
Horizontal dimension (axis 1) of the XRF map
ny: int
Vertical dimension (axis 0) of the XRF map
incident_energy: float
incident energy of the beam (used in simulation)
n_spectrum_points: int
the number of spectrum points. Currently PyXRF works with 4096-point spectra.
background_area: float
The area of the background. The background represents a rectangle, which occupies
all ``n_spectrum_points``. If the generated spectrum is truncated later, the value
of the area will change proportionally.
spectrum_parameters: dict
dict of optional spectrum parameters, which is passed to ``gen_xrf_map_const``.
May be None.
Returns
-------
data_xrf: dict(ndarray)
The dictionary of the datasets. The dictionary keys are ``det_sum``, ``det1``, ``det2`` etc.
The values are 3D arrays with ``shape = (ny, nx, n_spectrum_points)``.
data_scalers: dict
The dictionary with scaler information. The dictionary has two entries:
``data_scalers["scaler_names"]`` contains the list of scaler names (currently
``["i0", "time", "time_diff"]``). ``data_scalers["scaler_data"]`` contains 3D array
with scaler data (``shape = (ny, nx, N_SCALERS)``, ``N_SCALERS`` is equal to the number
of scaler names).
data_pos: dict
The dictionary of positional data: ``data_pos["pos_names"]=["x_pos", "y_pos"]``,
``data_pos["pos_data"]`` is 3D array with ``shape=(N_POS, ny, nx)``, where ``N_POS``
is equal to the number of position names (currently 2). Note, that the ``y_pos``
is measured along vertical dimension (axis 0 of the array) and ``x_pos`` is measured
along horizontal dimension (axis 1 of the array).
metadata: dict
        dictionary of metadata values, which include ``scan_id`` (passed to the function),
        ``scan_uid`` (randomly generated), and ``instrument_mono_incident_energy``
        (the incident energy passed to the function).
Raises
------
RuntimeError
        Raised if the list of emission line groups contains incorrectly formatted or unsupported
        emission lines. Also raised if ``n_spectrum_points`` is zero or negative, or if a map
        with zero pixels is requested (``nx`` or ``ny`` is 0).
"""
if spectrum_parameters is None:
spectrum_parameters = {}
if num_det_channels < 1:
num_det_channels = 1
# Generate XRF map (sum of all channels)
xrf_map, _ = gen_xrf_map_const(
element_line_groups,
nx=nx,
ny=ny,
incident_energy=incident_energy,
n_spectrum_points=n_spectrum_points,
background_area=background_area,
**spectrum_parameters,
)
# Distribute total fluorescence into 'num_det_channels'
channel_coef = np.arange(num_det_channels) + 10.0
channel_coef /= np.sum(channel_coef)
# Create datasets
data_xrf = {}
data_xrf["det_sum"] = xrf_map # 'xrf_map' is np.float32
for n in range(num_det_channels):
data_xrf[f"det{n + 1}"] = xrf_map * channel_coef[n] # The arrays remain np.float32
data_scalers = {}
scaler_names = ["i0", "time", "time_diff"]
data_scalers["scaler_names"] = scaler_names
# Scaler 'i0'
scaler_data = np.zeros(shape=(ny, nx, len(scaler_names)), dtype=float)
scaler_data[:, :, 0] = np.ones(shape=(ny, nx), dtype=float) * 0.1
# Time
time = np.arange(nx, dtype=float) * 2.0
time = np.broadcast_to(time, shape=(ny, nx))
scaler_data[:, :, 1] = time
# Time difference
scaler_data[:, :, 2] = np.ones(shape=(ny, nx), dtype=float) * 2.0
data_scalers["scaler_data"] = scaler_data
# Generate positions
data_pos = {}
data_pos["pos_names"] = ["x_pos", "y_pos"]
x_pos_line = np.arange(nx) * 0.01 + 2.0
x_pos = np.broadcast_to(x_pos_line, shape=(ny, nx))
y_pos_column = np.arange(ny) * 0.02 + 1
y_pos = np.broadcast_to(y_pos_column, shape=(nx, ny))
y_pos = np.transpose(y_pos)
data_pos["pos_data"] = np.zeros(shape=(2, ny, nx), dtype=float)
data_pos["pos_data"][0, :, :] = x_pos
data_pos["pos_data"][1, :, :] = y_pos
# Generate metadata
metadata = {}
metadata["scan_id"] = scan_id
def _gen_rs(n):
uid_characters = list("abcdef0123456789")
s = ""
for i in range(n):
s += np.random.choice(uid_characters)
return s
metadata["scan_uid"] = _gen_rs(8) + "-" + _gen_rs(4) + "-" + _gen_rs(4) + "-" + _gen_rs(4) + "-" + _gen_rs(12)
metadata["instrument_mono_incident_energy"] = incident_energy
return data_xrf, data_scalers, data_pos, metadata
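# A minimal usage sketch (illustrative only; the values are arbitrary examples):
# generate a small simulated dataset and unpack the four returned dictionaries.
def _example_create_xrf_map_data():
    data_xrf, data_scalers, data_pos, metadata = create_xrf_map_data(
        scan_id=1000,
        element_line_groups={"Fe_K": {"area": 500}},
        num_det_channels=3,
        nx=10,
        ny=5,
        incident_energy=12.0,
    )
    # Expected: data_xrf contains 'det_sum', 'det1', 'det2' and 'det3', each with
    # shape (5, 10, 4096); data_scalers['scaler_data'] has shape (5, 10, 3).
    return data_xrf, data_scalers, data_pos, metadata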
def create_hdf5_xrf_map_const(
*,
scan_id,
wd=None,
fln_suffix=None,
element_line_groups=None,
save_det_sum=True,
save_det_channels=True,
num_det_channels=3,
nx=10,
ny=5,
incident_energy=12.0,
n_spectrum_points=4096,
background_area=0,
spectrum_parameters=None,
):
r"""
    Generates and saves simulated XRF map data to a file. The file may be loaded in PyXRF
    and used for testing processing functions. The function overwrites existing files.
Parameters
----------
scan_id: str or int
        Scan ID that is included in the file metadata and used in the file name
        (typically a positive integer).
wd: str or None
        The directory where the data is to be saved. If None, then the current directory is used.
fln_suffix: str or None
        File name suffix, which is appended to the file name (before the extension). May be
        used to add information that makes the file easier to identify visually.
element_line_groups: dict(dict)
Dictionary of element lines, see docstring for ``create_xrf_map_data`` for detailed
information.
save_det_sum: bool
Indicates if the sum of detector channels should be saved (currently the sum is always
saved, so False value is ignored)
save_det_channels: bool
Indicates if the individual detector channels are saved.
num_det_channels: int
The number of the detector channels. The area specified in ``element_line_groups`` is
distributed between the detector channels.
nx, ny: int
The dimensions along vertical (``ny``, axis 0) and horizontal (``nx``, axis 1) axes.
incident_energy: float
Incident beam energy, used for dataset generation and also saved in metadata.
n_spectrum_points: int
The number of points in the spectrum
background_area: float
The area of the simulated background, see docstring for ``create_xrf_map_data``
for detailed discussion.
spectrum_parameters: dict
dict of optional spectrum parameters, which is passed to ``create_xrf_map_data``.
May be None.
Returns
-------
fpath: str
The path to the saved file.
Raises
------
RuntimeError
        Raised if the list of emission line groups contains incorrectly formatted or unsupported
        emission lines. Also raised if ``n_spectrum_points`` is zero or negative, or if a map
        with zero pixels is requested (``nx`` or ``ny`` is 0).
    IOError
        Raised in case of I/O errors.
"""
if not save_det_sum:
logger.warning("The sum of the detector channels is always saved. ")
# Prepare file name
fln = f"scan2D_{scan_id}_sim"
if fln_suffix:
fln += f"_{fln_suffix}"
fln += ".h5"
if wd:
wd = os.path.expanduser(wd)
os.makedirs(wd, exist_ok=True)
fpath = os.path.join(wd, fln)
else:
fpath = fln
data_xrf, data_scalers, data_pos, metadata = create_xrf_map_data(
scan_id=scan_id,
element_line_groups=element_line_groups,
num_det_channels=num_det_channels,
nx=nx,
ny=ny,
incident_energy=incident_energy,
n_spectrum_points=n_spectrum_points,
background_area=background_area,
spectrum_parameters=spectrum_parameters,
)
data = {}
data.update(data_xrf)
data.update(data_scalers)
data.update(data_pos)
save_data_to_hdf5(
fpath, data, metadata=metadata, file_overwrite_existing=True, create_each_det=save_det_channels
)
return fpath
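# A minimal usage sketch (illustrative only; the values are arbitrary examples):
# 'wd' is omitted, so the file 'scan2D_1000_sim_demo.h5' is written to the
# current directory and its path is returned.
def _example_create_hdf5_xrf_map_const():
    fpath = create_hdf5_xrf_map_const(
        scan_id=1000,
        fln_suffix="demo",
        element_line_groups={"Fe_K": {"area": 500}},
        nx=10,
        ny=5,
        incident_energy=12.0,
    )
    return fpath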
def gen_hdf5_qa_dataset(*, wd=None, standards_serials=None, test_elements=None):
r"""
Create a set of data files for testing quantitative analysis features.
The following files are created:
    -- one simulated raw .h5 file is created for each reference standard in the list
       ``standards_serials``. The file name is scan2D_<scanID>_sim_<serial>.h5,
       where ``scanID`` is an integer that starts at 1000 and increments for each
       saved file and ``serial`` is the serial number of the respective reference standard
from the list. In order for the function to work, the descriptions of all used
standards must exist in the built-in file ``xrf_quant_standards.yaml`` or
in the file ``quantitative_standards.yaml`` in ``~/.pyxrf`` directory.
-- one simulated raw .h5 file for the set of elements specified in ``test_elements``.
Any elements may be included (both present and not present in calibration references).
``test_elements`` is a dictionary, with the key representing element name (such as Fe, Ca, K),
and the value is the dictionary of element spectrum parameters. Currently the only
required parameter is ``density``. The following code will create the dictionary with
three elements (density is typically expressed in ug/cm^2):
test_elements = {}
test_elements["Fe"] = {"density": 50}
test_elements["W"] = {"density": 70}
test_elements["Au"] = {"density": 80}
-- log file ``qa_files_log.txt`` with information on each saved file
Parameters
----------
wd: str (optional)
        Working directory, where the files are saved. Files are saved in the current
        directory if ``wd`` is not specified.
standards_serials: list(str)
The list of serial numbers of standards. One simulated reference data file will be
generated for each serial number. See the description above
test_elements: dict(dict)
The dictionary with parameters of element spectra for generation of the test files.
See the description above.
Returns
-------
    files_saved: list(str)
        The list of saved files (the log file is included as the last entry).
"""
if not standards_serials:
raise RuntimeError(
"There must be at least one standard loaded. Pass the list "
"of standards as value of the parameter 'standard_list'"
)
nx, ny = 30, 20
incident_energy = 13.0
# For simplicity use the same emission intensity for all lines
# This should be the same value for reference and test files
counts_per_unit = 10.0
# If there are no test elements, then the test file is not generated
if test_elements is None:
test_elements = {}
# Load standards
param_quant_estimation = ParamQuantEstimation()
param_quant_estimation.load_standards()
element_lines = []
lines_for_testing = {}
scan_id = 1000 # Starting scan ID for reference scans
# Go through the list of reference scans and save the data on present element lines
# in 'element_lines' list. If an exception is raised (one of the serials is not found)
# then no files are saved.
for serial in standards_serials:
standard = param_quant_estimation.find_standard(serial, key="serial")
# ALL standards must exist
if not standard:
raise RuntimeError(f"Standard with serial #{serial} is not found.")
param_quant_estimation.set_selected_standard(standard)
param_quant_estimation.gen_fluorescence_data_dict(incident_energy=incident_energy)
element_lines.append(param_quant_estimation.fluorescence_data_dict["element_lines"].copy())
files_saved = []
# Log file that contains brief data on every saved file
fln_log = "qa_files_log.txt"
if wd:
wd = os.path.expanduser(wd)
os.makedirs(wd, exist_ok=True)
fln_log = os.path.join(wd, fln_log)
with open(fln_log, "wt") as f_log:
for serial, elines in zip(standards_serials, element_lines):
el_grp = {}
for line, info in elines.items():
el_grp[line] = {"area": info["density"] * counts_per_unit}
el = line.split("_")[0] # Element: e.g. Fe_K -> Fe
if line not in lines_for_testing:
if el in test_elements:
density = test_elements[el]["density"]
lines_for_testing[line] = {
"area": density * counts_per_unit,
"counts_per_unit": counts_per_unit,
"density": density,
"in_reference": True,
}
fln = create_hdf5_xrf_map_const(
scan_id=scan_id,
wd=wd,
fln_suffix=f"{serial}",
element_line_groups=el_grp,
nx=nx,
ny=ny,
incident_energy=incident_energy,
)
s = f"Reference standard file: '{fln}'\n Standard serial: {serial}\n Emission lines:\n"
for line, info in elines.items():
s += f" {line}: density = {info['density']}\n"
f_log.write(f"{s}\n")
files_saved.append(fln)
scan_id += 1
test_elines = generate_eline_list(list(test_elements.keys()), incident_energy=incident_energy)
for line in test_elines:
if line not in lines_for_testing:
el = line.split("_")[0]
density = test_elements[el]["density"]
lines_for_testing[line] = {
"area": density * counts_per_unit,
"counts_per_unit": counts_per_unit,
"density": density,
"in_reference": False,
}
fln_suffix = "test" + "_" + "_".join(test_elements.keys())
fln = create_hdf5_xrf_map_const(
scan_id=2000,
wd=wd,
fln_suffix=fln_suffix,
element_line_groups=lines_for_testing,
nx=nx,
ny=ny,
incident_energy=incident_energy,
)
s = f"Test file '{fln}'\n Emission lines:\n"
for line, info in lines_for_testing.items():
s += (
f" {line}: density = {info['density']}, "
f"counts_per_unit = {info['counts_per_unit']}, "
f"area = {info['area']}, "
f"in_reference = {info['in_reference']}\n"
)
f_log.write(f"{s}\n")
files_saved.append(fln)
files_saved.append(fln_log)
return files_saved
def gen_hdf5_qa_dataset_preset_1(*, wd=None):
r"""
Generate a set of HDF5 files for testing of quantitative analysis procedures.
The following files are created:
calibration (reference) files:
``scan2D_1000_sim_41151.h5`` - based on standard with serial 41151
``scan2D_1001_sim_41163.h5`` - based on standard with serial 41163
test file with elements Fe, W and Au:
        ``scan2D_2000_sim_test_Fe_W_Au.h5``
It is required that standards 41151 and 41163 are present in the list of
quantitative standards (``xrf_quant_standards.yaml``).
The test file contains the elements with the following densities:
        ``Fe``: 50 ug/cm^2
        ``W``: 70 ug/cm^2
        ``Au``: 80 ug/cm^2
The files contain scaler ``i0`` which could be used for normalization.
Additionally, the log file ``qa_files_log.txt`` with information on the dataset is saved.
Parameters
----------
wd: str (optional)
        Working directory, where the files are saved. Files are saved in the current
        directory if ``wd`` is not specified.
"""
standards_serials = ["41151", "41163"]
test_elements = {}
test_elements["Fe"] = {"density": 50} # Density in ug/cm^2 (for simulated test scan)
test_elements["W"] = {"density": 70}
test_elements["Au"] = {"density": 80}
files_saved = gen_hdf5_qa_dataset(wd=wd, standards_serials=standards_serials, test_elements=test_elements)
# Print saved file names
f_names = [f" '{_}'" for _ in files_saved]
f_names = "\n".join(f_names)
s = "Success. The following files were created:\n" + f_names
logger.info(s)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def _like_rnncell(cell):
"""Checks that a given object is an RNNCell by using duck typing."""
conditions = [hasattr(cell, "output_size"), hasattr(cell, "state_size"),
hasattr(cell, "zero_state"), callable(cell)]
return all(conditions)
def _concat(prefix, suffix, static=False):
"""Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if `prefix` or `suffix` was `None` and a dynamic `Tensor`
      was requested (i.e. `static=False`).
"""
if isinstance(prefix, ops.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if p.shape.ndims == 0:
p = array_ops.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError("prefix tensor must be either a scalar or vector, "
"but saw tensor: %s" % p)
else:
p = tensor_shape.as_shape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
if p.is_fully_defined() else None)
if isinstance(suffix, ops.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if s.shape.ndims == 0:
s = array_ops.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError("suffix tensor must be either a scalar or vector, "
"but saw tensor: %s" % s)
else:
s = tensor_shape.as_shape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
if s.is_fully_defined() else None)
if static:
shape = tensor_shape.as_shape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError("Provided a prefix or suffix of None: %s and %s"
% (prefix, suffix))
shape = array_ops.concat((p, s), 0)
return shape
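# A minimal sketch (illustrative only) of the `static=True` mode of `_concat`:
# combining a known batch size with a known state size into a static shape list.
def _example_concat_static():
  # Expected result: [4, 7]
  return _concat(4, 7, static=True)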
def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
c_static = _concat(batch_size, s, static=True)
size = array_ops.zeros(c, dtype=dtype)
size.set_shape(c_static)
return size
return nest.map_structure(get_state_shape, state_size)
class RNNCell(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size x self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size x s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
if scope is not None:
with vs.variable_scope(scope,
custom_getter=self._rnn_get_variable) as scope:
return super(RNNCell, self).__call__(inputs, state, scope=scope)
else:
with vs.variable_scope(vs.get_variable_scope(),
custom_getter=self._rnn_get_variable):
return super(RNNCell, self).__call__(inputs, state)
def _rnn_get_variable(self, getter, *args, **kwargs):
variable = getter(*args, **kwargs)
trainable = (variable in tf_variables.trainable_variables() or
(isinstance(variable, tf_variables.PartitionedVariable) and
list(variable)[0] in tf_variables.trainable_variables()))
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def build(self, _):
# This tells the parent Layer object that it's OK to call
# self.add_variable() inside the call() method.
pass
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
state_size = self.state_size
return _zero_state_tensors(state_size, batch_size, dtype)
class BasicRNNCell(RNNCell):
"""The most basic RNN cell.
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
def __init__(self, num_units, activation=None, reuse=None):
super(BasicRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
output = self._activation(_linear([inputs, state], self._num_units, True))
return output, output
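# A minimal usage sketch (illustrative only; shapes are arbitrary example values):
# one step of `BasicRNNCell` on placeholder inputs, starting from the zero state.
def _example_basic_rnn_cell_step():
  batch_size, input_size, num_units = 32, 16, 64
  inputs = array_ops.placeholder(dtypes.float32, [batch_size, input_size])
  cell = BasicRNNCell(num_units)
  state = cell.zero_state(batch_size, dtypes.float32)
  output, new_state = cell(inputs, state)  # both have shape [32, 64]
  return output, new_state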
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(GRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope("gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
bias_ones = self._bias_initializer
if self._bias_initializer is None:
dtype = [a.dtype for a in [inputs, state]][0]
bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
value = math_ops.sigmoid(
_linear([inputs, state], 2 * self._num_units, True, bias_ones,
self._kernel_initializer))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
with vs.variable_scope("candidate"):
c = self._activation(
_linear([inputs, r * state], self._num_units, True,
self._bias_initializer, self._kernel_initializer))
new_h = u * state + (1 - u) * c
return new_h, new_h
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(c, h)`, in that order.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if c.dtype != h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
  It does not allow cell clipping, a projection layer, or peep-hole
  connections: it is the basic baseline.
For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
that follows.
"""
def __init__(self, num_units, forget_bias=1.0,
state_is_tuple=True, activation=None, reuse=None):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BasicLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Long short-term memory cell (LSTM)."""
sigmoid = math_ops.sigmoid
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
new_c = (
c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
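# A minimal usage sketch (illustrative only; shapes are arbitrary example values):
# one step of `BasicLSTMCell` with the default tuple state. The new state is an
# `LSTMStateTuple` whose `c` and `h` members both have shape [batch_size, num_units].
def _example_basic_lstm_cell_step():
  batch_size, input_size, num_units = 8, 5, 12
  inputs = array_ops.placeholder(dtypes.float32, [batch_size, input_size])
  cell = BasicLSTMCell(num_units)
  state = cell.zero_state(batch_size, dtypes.float32)
  output, new_state = cell(inputs, state)
  return output, new_state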
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=None, reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(LSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
      inputs: input Tensor, 2D, batch x input_size.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:
if self._num_unit_shards is not None:
unit_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_unit_shards))
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope) as projection_scope:
if self._num_unit_shards is not None:
projection_scope.set_partitioner(None)
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection") as proj_scope:
if self._num_proj_shards is not None:
proj_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_proj_shards))
m = _linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
array_ops.concat([c, m], 1))
return m, new_state
def _enumerated_map_structure(map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure(enumerated_fn, *args, **kwargs)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
state_keep_prob=1.0, variational_recurrent=False,
input_size=None, dtype=None, seed=None):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
    then the same dropout mask is applied at every step, as described in:
Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
Otherwise a different dropout mask is applied at every time step.
Args:
      cell: an RNNCell.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, state keep
        probability; if it is constant and 1, no state dropout will be added.
        State dropout is performed on the *output* states of the cell.
variational_recurrent: Python bool. If `True`, then the same
dropout pattern is applied across all time steps per run call.
If this parameter is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff**
`variational_recurrent = True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
if not _like_rnncell(cell):
raise TypeError("The parameter cell is not a RNNCell.")
with ops.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError("Parameter %s must be between 0 and 1: %d"
% (attr, const_prob))
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set cell, variational_recurrent, seed before running the code below
self._cell = cell
self._variational_recurrent = variational_recurrent
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return array_ops.concat(
([1], tensor_shape.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure(
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure(
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure(
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob):
"""Decides whether to perform standard dropout or recurrent dropout."""
if not self._variational_recurrent:
def dropout(i, v):
return nn_ops.dropout(
v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
return _enumerated_map_structure(dropout, values)
else:
def dropout(i, v, n):
return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
return _enumerated_map_structure(dropout, values, recurrent_noise)
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
def _should_dropout(p):
return (not isinstance(p, float)) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, "input",
self._recurrent_input_noise,
self._input_keep_prob)
output, new_state = self._cell(inputs, state, scope)
if _should_dropout(self._state_keep_prob):
new_state = self._dropout(new_state, "state",
self._recurrent_state_noise,
self._state_keep_prob)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, "output",
self._recurrent_output_noise,
self._output_keep_prob)
return output, new_state
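# A minimal usage sketch (illustrative only): standard (non-variational) dropout
# applied to the inputs and outputs of a `GRUCell`; the state is left untouched
# because `state_keep_prob` keeps its default value of 1.0.
def _example_dropout_wrapper():
  batch_size, input_size, num_units = 8, 5, 12
  inputs = array_ops.placeholder(dtypes.float32, [batch_size, input_size])
  cell = DropoutWrapper(GRUCell(num_units),
                        input_keep_prob=0.9, output_keep_prob=0.9)
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)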
class ResidualWrapper(RNNCell):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
"""
self._cell = cell
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(
lambda inp, out: inp + out, inputs, outputs)
return (res_outputs, new_state)
class DeviceWrapper(RNNCell):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
"""
self._cell = cell
self._device = device
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
with ops.device(self._device):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell on specified device."""
with ops.device(self._device):
return self._cell(inputs, state, scope=scope)
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
super(MultiRNNCell, self).__init__()
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
if not nest.is_sequence(cells):
raise TypeError(
"cells must be a list or tuple, but saw: %s." % cells)
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._state_is_tuple:
return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
else:
# We know here that state_size of each cell is not a tuple and
# presumably does not contain TensorArrays or anything else fancy
return super(MultiRNNCell, self).zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run this multi-layer cell on inputs, starting from state."""
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s" %
(len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(state, [0, cur_state_pos],
[-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
array_ops.concat(new_states, 1))
return cur_inp, new_states
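# A minimal usage sketch (illustrative only): two stacked GRU layers. With the
# default `state_is_tuple=True`, the returned state is a 2-tuple with one entry
# per layer.
def _example_multi_rnn_cell():
  batch_size, input_size, num_units = 8, 5, 12
  inputs = array_ops.placeholder(dtypes.float32, [batch_size, input_size])
  cell = MultiRNNCell([GRUCell(num_units) for _ in range(2)])
  state = cell.zero_state(batch_size, dtypes.float32)
  return cell(inputs, state)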
class _SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
raise TypeError("cell_fn %s needs to be callable", cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
    ValueError: if some of the arguments have an unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
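# A minimal usage sketch (illustrative only): project two inputs of widths 3 and 5
# onto 7 output units with a bias term, inside a dedicated variable scope. The
# created variables are named 'kernel' and 'bias' (see the constants above).
def _example_linear():
  a = array_ops.placeholder(dtypes.float32, [None, 3])
  b = array_ops.placeholder(dtypes.float32, [None, 5])
  with vs.variable_scope("example_linear"):
    return _linear([a, b], 7, bias=True)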
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id$
#
import os
import sys
import string
import MySQLdb
import subprocess
import traceback
import logging
import threading
import time
import usermanagement
from zoni.data.infostore import InfoStore
from zoni.extra.util import checkSuper, createKey
from zoni.agents.dhcpdns import DhcpDns
class ResourceQuerySql(InfoStore):
def __init__(self, config, verbose=None):
self.config = config
self.verbose = verbose
self.host = config['dbHost']
self.user = config['dbUser']
self.passwd = config['dbPassword']
self.db = config['dbInst']
self.port = config['dbPort']
self._isDb = 1
self.vlan_max = config['vlanMax']
if self.port == "":
self.port = 3306
self.log = logging.getLogger(__name__)
self.conn = self.createConn()
def createConn(self):
# Connect to DB
try:
return MySQLdb.connect(host = self.host, port = self.port, user = self.user, passwd = self.passwd, db = self.db)
#return MySQLdb.connect(host = self.config['dbHost'], port = self.config['dbPort'], user = self.config['dbUser'], passwd = self.config['dbPassword'], db = self.config['dbInst'])
except MySQLdb.OperationalError, e:
if e[0] == 2005:
print "ERROR :" + str(e[1])
return
else:
print "ERROR : ", e
return
def keepAlive(self):
while True:
if not self.conn.open:
self.log.info("Reinitializing DB connection")
self.conn = self.createConn()
time.sleep(10)
def getNote(self):
return "Created by Zoni"
def addDomain(self, name, desc, vlanInfo):
# Check if vlans exist
vlans = []
for val in vlanInfo.split(","):
try:
ret = self.getVlanId(val.split(":")[0])
vlans.append(val)
except Exception, e:
print e
exit()
if desc == None:
desc = self.getNote()
if self.__checkDup("domaininfo", "domain_name", name):
self.log.error("Domain (%s) already exists" % (name))
return -1
# Create a key for the reservation
domainKey = createKey(name)
query = "insert into domaininfo (domain_name, domain_desc, domain_key) values ('%s','%s', '%s')" % (name, desc, domainKey)
try:
result = self.insertDb(query)
mesg = "Adding domain %s(%s)" % (name, desc)
self.log.info(mesg)
except Exception, e:
mesg = "Adding domain %s(%s) failed : %s" % (name, desc, e)
self.log.error(mesg)
# Get the domain_id
domainId = int(self.getDomainIdFromKey(domainKey))
# Map domain to vlan
for i in vlans:
vlanId = int(i.split(":")[0])
vlanType = i.split(":")[1]
query = "insert into domainmembermap values (%d, %d, '%s')" % (domainId, vlanId, vlanType)
try:
result = self.insertDb(query)
except Exception, e:
print e
def getDomainMembership(self, sys_id):
query = "select v.vlan_num, q.vlan_type from allocationinfo a, vlanmembermap q, vlaninfo v where q.vlan_id = v.vlan_id and a.allocation_id = q.allocation_id and sys_id = '%s'" % (sys_id)
result = self.selectDb(query)
data = {}
if result.rowcount:
for i in result.fetchall():
data[int(i[0])] = i[1]
return data
else:
return -1
def removeDomain(self, name):
domainId = self.__getSomething("domain_id", "domaininfo", "domain_name", name)
mesg = "Removing domain %s" % (name)
self.log.info(mesg)
query = "delete from domaininfo where domain_name = '%s'" % (name)
result = self.__deleteDb(query)
# Need to remove any vlans attached to this domain
query = "delete from domainmembermap where domain_id = '%s'" % (domainId)
result = self.__deleteDb(query)
def showDomains(self):
usermgt = eval("usermanagement.%s" % (self.config['userManagement']) + "()")
query = "select r.reservation_id, r.user_id, d.domain_name, d.domain_desc from domaininfo d, allocationinfo a, reservationinfo r where d.domain_id = a.domain_id and a.reservation_id = r.reservation_id"
result = self.selectDb(query)
desc = result.description
if result.rowcount > 0:
print "%s\t%s\t%s\t%s" % (result.description[0][0], result.description[1][0], result.description[2][0], result.description[3][0])
print "------------------------------------------------------------"
for row in result.fetchall():
resName = usermgt.getUserName(row[1])
print "%s\t\t%s\t%s\t\t%s" % (row[0], resName, row[2], row[3])
return 0
else:
mesg = "No Domains exist"
self.log.info(mesg)
return -1
def addVlan(self, vnum, desc=None):
vnumber = vnum
if ":" in vnum:
vnumber = vnum.split(":")[0]
desc = vnum.split(":")[1:len(vnumber)][0]
print vnumber, desc
if desc == None:
desc = "Created by Zoni"
if int(vnumber) > self.vlan_max:
self.log.error("Max vlan size is %s" % (self.vlan_max))
return -1
if self.__checkDup("vlaninfo", "vlan_num", vnumber):
self.log.error("Vlan %s already exists" % (vnumber))
return -1
query = "insert into vlaninfo (vlan_num, vlan_desc) values ('%s','%s')" % (vnumber, desc)
try:
result = self.insertDb(query)
mesg = "Adding vlan %s(%s)" % (vnumber, desc)
self.log.info(mesg)
except Exception, e:
mesg = "Adding vlan %s(%s) failed : %s" % (vnumber, desc, e)
self.log.error(mesg)
def removeVlan(self, vnumber):
query = "delete from vlaninfo where vlan_num = '%s'" % (vnumber)
result = self.__deleteDb(query)
if result > 0:
mesg = "Successfully removed vlan %s" % (vnumber)
self.log.info(mesg)
return 0
else:
mesg = "Failed to removed vlan %s" % (vnumber)
self.log.info(mesg)
return -1
# Need to remove any vlans attached to this vlan
def showVlans (self):
query = "select vlan_num, vlan_desc from vlaninfo order by vlan_num"
try:
result = self.selectDb(query)
print "%s\t%s\n-------------------------------------" % (result.description[0][0], result.description[1][0])
for row in result.fetchall():
print "%s\t\t%s" % (row[0], row[1])
return 0
except Exception, e:
mesg = "No Vlans defined: %s" % (e)
self.log.info(mesg)
return -1
def assignVlan(self, vlan, domain, force=None):
domainId = self.__getSomething("domain_id", "domaininfo", "domain_name", domain)
vlanId = self.__getSomething("vlan_id", "vlaninfo", "vlan_num", vlan)
query = "select * from domainmembermap m, vlaninfo v, domaininfo d where d.domain_id = '%s' and v.vlan_id = %s and v.vlan_id = m.vlan_id and m.domain_id = d.domain_id" % (int(domainId), int(vlanId))
if self.selectDb(query).rowcount > 0:
self.log.warning("Vlan %s already assigned to domain %s" % (vlan, domain));
return 0
# warning if vlan already assigned to another domain
query = "select * from domainmembermap where vlan_id = %s" % (vlanId)
if self.selectDb(query).rowcount > 0:
self.log.warning("Vlan %s already assigned to a domain" % (vlan));
if not force:
return -1
self.log.info("Assigning vlan %s to domain %s" % (vlan, domain))
query = "insert into domainmembermap (domain_id, vlan_id) values (%s, %s)" % (domainId, vlanId)
self.insertDb(query)
def __getSomething(self, fieldname, table, critField, crit):
query = "select %s from %s where %s = '%s'" % (fieldname, table, critField, crit)
result = self.selectDb(query)
if result.rowcount == 0:
mesg = "No entry for '%s' found" % (crit)
self.log.error(mesg)
#return []
return -1
return result.fetchall()[0][0]
def __checkDup (self, table, colname, value, colname2=None, value2=None):
cond = "where %s = '%s' " % (colname, value)
if (colname2 != None and value2 != None):
cond += " and %s = '%s'" % (colname2, value2)
query = "select * from %s %s" % (table, cond)
result = self.selectDb(query)
if result.rowcount == 0:
return []
return result.fetchall()
def __create_queryopts(self, cmdargs, extra=None):
cmdlen = len(cmdargs)
queryopt = ""
if extra:
queryopt += extra
if cmdlen == 0:
pass
else:
num = cmdlen
if extra:
queryopt += " and "
for k, v in cmdargs.iteritems():
if k == "num_procs":
queryopt += k + " = " + v + " "
if k == "mem_total":
queryopt += k + " >= " + v + " "
if k == "clock_speed":
queryopt += k + " >= " + v + " "
if k == "num_cores":
queryopt += k + " = " + v + " "
if k == "cpu_flags":
queryopt += k + " like \"%" + v + "%\" "
if k == "sys_id":
queryopt += " location = " + "\'" + v + "\' "
if num > 1:
queryopt += " and "
num -= 1
if queryopt:
tmp = " where " + queryopt
queryopt = tmp
return queryopt
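# A hedged sketch of what __create_queryopts produces (the argument values are
# illustrative only; the ordering of the two conditions depends on dict iteration order):
#   self.__create_queryopts({"num_procs": "2", "mem_total": "4096"})
#   -> " where num_procs = 2  and mem_total >= 4096 "
#   self.__create_queryopts({}, extra="j.image_id = i.image_id")
#   -> " where j.image_id = i.image_id"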
def updateDatabase(self, table, query):
pass
def showResources(self, cmdargs):
queryopt = ""
defaultFields = "mac_addr, location, num_procs, num_cores, clock_speed, mem_total "
#defaultFields = "*"
queryopt = self.__create_queryopts(cmdargs)
#query = "show fields from sysinfo"
#results = self.selectDb(query)
query = "select " + defaultFields + "from sysinfo " + queryopt
result = self.selectDb(query)
line = ""
for i in defaultFields.split(","):
#line += string.strip(str(i)) + "\t"
line += str(i.center(20))
# header
print line
sum = {}
for row in result.fetchall():
line = ""
sum['totProc'] = sum.get('totProc', 0)
sum['totProc'] += int(row[2])
sum['totCores'] = sum.get('totCores', 0)
sum['totCores'] += int(row[3])
sum['totMemory'] = sum.get('totMemory', 0)
sum['totMemory'] += int(row[5])
for val in row:
line += str(val).center(20)
print line
print "\n%s systems registered - %d procs | %d cores | %d bytes RAM" % (str(result.rowcount), sum['totProc'], sum['totCores'], sum['totMemory'],)
def getAvailableResources(self):
# Maybe should add a status flag?
query = "select sys_id from allocationinfo"
result = self.selectDb(query)
allocatedNodes = []
if result.rowcount:
for row in result.fetchall():
allocatedNodes.append(int(row[0]))
query = "select sys_id, location, num_procs, num_cores, mem_total, clock_speed, proc_model from sysinfo where state_id = 0 or state_id = 1"
result = self.selectDb(query)
desc = result.description
res = {}
for i in result.fetchall():
sys_id = int(i[0])
if sys_id not in allocatedNodes:
location = i[1]
res[location] = res.get(location, {})
res[location][desc[0][0]] = sys_id
res[location][desc[1][0]] = location
res[location][desc[2][0]] = int(i[2])
res[location][desc[3][0]] = int(i[3])
res[location][desc[4][0]] = int(i[4])
res[location][desc[5][0]] = int(i[5])
res[location][desc[6][0]] = i[6]
return res
def getMyResources(self, key):
query = "select s.sys_id, s.location, s.num_procs, s.num_cores, s.mem_total, s.clock_speed, s.proc_model from sysinfo s, allocationinfo a, domaininfo d where a.sys_id = s.sys_id and a.domain_id = d.domain_id and d.domain_key = '%s'" % key
result = self.selectDb(query)
desc = result.description
res = {}
for i in result.fetchall():
sys_id = int(i[0])
location = i[1]
res[location] = res.get(location, {})
res[location][desc[0][0]] = sys_id
res[location][desc[1][0]] = location
res[location][desc[2][0]] = int(i[2])
res[location][desc[3][0]] = int(i[3])
res[location][desc[4][0]] = int(i[4])
res[location][desc[5][0]] = int(i[5])
res[location][desc[6][0]] = i[6]
return res
def getDomainFromKey(self, key):
return self.__getSomething("domain_name", "domaininfo", "domain_key", key)
def getDomainIdFromKey(self, key):
return self.__getSomething("domain_id", "domaininfo", "domain_key", key)
def getKeyFromDomain(self, domain):
return self.__getSomething("domain_key", "domaininfo", "domain_name", domain)
def getLocationFromSysId (self, nodeId):
query = "select location from sysinfo where sys_id = \"" + str(nodeId) + "\""
result = self.selectDb(query)
return result.fetchall()[0][0]
def getSysIdFromLocation(self, location):
return self.__getSomething("sys_id", "sysinfo", "location", location)
def getMacFromSysId(self, nodeId):
query = "select mac_addr from sysinfo where sys_id = \"" + str(nodeId) + "\""
result = self.selectDb(query)
return result.fetchall()[0][0]
def getIpFromSysId(self, nodeId):
query = "select ip_addr from sysinfo where sys_id = \"" + str(nodeId) + "\""
result = self.selectDb(query)
return result.fetchall()[0][0]
def getAllSwitches(self):
switchList = []
query = "select hw_name from hardwareinfo where hw_type = \"switch\""
result = self.selectDb(query)
for switch in result.fetchall():
switchList.append(switch[0])
return switchList
def getAvailableVlan(self):
# Get list of available vlans
query = "select vlan_num from vlaninfo where domain = 'private'"
result = self.selectDb(query)
myvlan = None
for row in result.fetchall():
vlan = row[0]
if self.isVlanAvailable(vlan):
myvlan = vlan
break
if not myvlan:
mesg = "No Vlans for you! You Go Now\n"
self.log.info(mesg)
return myvlan
def isVlanAvailable(self, vlan):
query = "select a.vlan_id, v.vlan_num from allocationinfo a, vlaninfo v where a.vlan_id = v.vlan_id and v.vlan_num = " + str(vlan)
result = self.selectDb(query)
if result.rowcount > 1:
return 0
else:
return 1
def getVlanId(self, vlan):
query = "select vlan_id from vlaninfo where vlan_num = \"" + str(vlan) + "\""
result = self.selectDb(query)
#print result.rowcount
if result.rowcount > 0:
return int(result.fetchall()[0][0])
else:
mesg = "VLAN does not exist: " + str(vlan)
self.log.error(mesg)
raise Exception, mesg
def isIpAvailable(self, ip_addr, vlan_id):
query = "select * from allocationinfo where ip_addr = \"" + str(ip_addr) + "\" and vlan_id = \"" + str(vlan_id) + "\""
#print "query ", query
result = self.selectDb(query)
#print "select row count is ", result.rowcount
if result.rowcount > 0:
return 0
else:
return 1
def getDomainIp(self, vlan):
ip_start = 30
query = "select ip_network from vlaninfo where vlan_num = " + str(vlan)
result = self.selectDb(query)
ip_network = result.fetchall()[0][0]
v = ip_network.split(".")
ip_base = v[0] + "." + v[1] + "." + v[2]
# Check for other allocations and assign IP address
query = "select a.vlan_id, v.vlan_num from allocationinfo a, vlaninfo v where a.vlan_id = v.vlan_id and v.vlan_num = " + str(vlan)
#print "ip is ", ip_network
query = "select a.ip_addr from allocationinfo a, vlaninfo v where a.vlan_id = v.vlan_id and v.vlan_num = " + str(vlan);
result = self.selectDb(query)
#print "row count is ", result.rowcount
if result.rowcount > 0:
for ip in xrange(ip_start, 255):
ip_check = ip_base + "." + str(ip)
check = self.isIpAvailable(ip_check, self.getVlanId(vlan))
if check:
ip_addr = ip_check
break
else:
ip_addr = ip_base + "." + str(ip_start)
#print "ip_addr", ip_addr
return ip_addr
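# Hedged example: if vlaninfo.ip_network for vlan 999 were "10.10.1.0",
# getDomainIp(999) returns "10.10.1.30" when that vlan has no allocations yet,
# otherwise the first address at or above .30 not already present in
# allocationinfo for the vlan (the vlan number and network are made up).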
def showAllocation(self, userId=None):
#from IPython.Shell import IPShellEmbed
#shell = IPShellEmbed(argv="")
#shell(local_ns=locals(), global_ns=globals())
# specify usermanagement - ldap or files
usermgt = getattr(usermanagement, self.config['userManagement'])()
#select a.reservation_id, a.sys_id, r.user_id, s.location, s.num_cores, s.mem_total, a.hostname, ii.image_name
#from allocationinfo a, reservationinfo r, sysinfo s, imageinfo ii, imagemap im
#where r.reservation_id = a.reservation_id and a.sys_id = s.sys_id and im.image_id = ii.image_id and im.allocation_id = a.allocation_id;
query = "select r.user_id, d.domain_name, s.location, s.num_cores, s.mem_total, r.reservation_expiration, r.notes, r.reservation_id, a.hostname, a.notes, ii.image_name, a.allocation_id from sysinfo s, imageinfo ii, allocationinfo a, domaininfo d, reservationinfo r, imagemap im where im.image_id = ii.image_id and r.reservation_id = a.reservation_id and d.domain_id = a.domain_id and s.sys_id = a.sys_id and im.allocation_id = a.allocation_id"
if userId:
myid = userId
if type(userId) == str:
# convert username to id
myid = usermgt.getUserId(userId)
query += " and user_id = '%s' " % (myid)
query += " order by r.reservation_id asc, s.location"
result = self.selectDb(query)
print "NODE ALLOCATION\n"
sum = {}
if self.verbose:
print "%-5s%-10s%-10s%-10s%-13s%-12s%-10s%-34s%-20s%s" % ("Res", "User", "Host", "Domain", "Cores/Mem","Expiration", "Hostname", "Boot Image Name", "Vlan Member", "Notes")
else:
print "%-10s%-10s%-10s%-13s%-12s%s" % ("User", "Node", "Domain", "Cores/Mem","Expiration", "Notes")
for i in result.fetchall():
uid = i[0]
domain = i[1]
host = i[2]
cores = i[3]
memory = i[4]
expire = str(i[5])[0:10]
if expire == "None":
expire = "0000-00-00"
rnotes = i[6]
resId= i[7]
hostname = i[8]
if hostname == None:
hostname = host
anotes = i[9]
image_name = i[10]
allocation_id = i[11]
userName = usermgt.getUserName(uid)
combined_notes = str(rnotes) + "|" + str(anotes)
sum['totCores'] = sum.get('totCores', 0)
sum['totCores'] += cores
sum['totMemory'] = sum.get('totMemory', 0)
sum['totMemory'] += memory
if self.verbose:
query = "select v.vlan_num, m.vlan_type from vlaninfo v, vlanmembermap m where v.vlan_id = m.vlan_id and allocation_id = '%d' order by vlan_num asc" % allocation_id
vlanRes = self.selectDb(query)
vlanList = []
for i in vlanRes.fetchall():
tag = string.upper(str(i[1][0]))
mytag = "%s(%s)" %(str(i[0]), tag )
vlanList.append(mytag)
vlanMember = string.join(vlanList, ",")
print "%-5s%-10s%-10s%-10s%-2s/%-10s%-12s%-10s%-34s%-20s%s" % (resId, userName, host, domain, cores, memory,expire, hostname, image_name, vlanMember,combined_notes)
else:
print "%-10s%-10s%-10s%-2s/%-10s%-12s%s" % (userName, host, domain, cores, memory,expire, combined_notes)
print "\n%s systems allocated - %d cores| %d bytes RAM" % (str(result.rowcount), sum['totCores'], sum['totMemory'])
def showReservation(self, userId=None):
#from IPython.Shell import IPShellEmbed
#shell = IPShellEmbed(argv="")
#shell(local_ns=locals(), global_ns=globals())
# specify usermanagement - ldap or files
usermgt = usermanagement.ldap()
query = "select reservation_id, user_id, \
reservation_expiration, notes \
from reservationinfo order by reservation_id"
if self.verbose:
query = "select r.reservation_id, r.user_id, r.reservation_expiration, r.notes, count(a.reservation_id) \
from reservationinfo r, allocationinfo a \
where r.reservation_id = a.reservation_id \
group by r.reservation_id order by reservation_id"
#if userId:
#myid = userId
#if type(userId) == str:
## convert username to id
#myid = usermgt.getUserId(userId)
#query += " and user_id = " + myid + " "
#query += "order by r.user_id, s.location"
result = self.selectDb(query)
print "RESERVATIONS"
print "---------------------------------------------------------------------------------"
if self.verbose:
print "%-7s%-10s%-12s%-7s%s" % ("ResId", "UserName", "Expire", "Total", "Notes")
else:
print "%-7s%-10s%-12s%s" % ("ResId", "UserName", "Expire", "Notes")
total = 0
for i in result.fetchall():
resId= i[0]
uid = i[1]
expire = str(i[2])[0:10]
if expire == "None":
expire = "0000-00-00"
notes = i[3]
userName = usermgt.getUserName(uid)
if self.verbose:
num_nodes = i[4]
total += num_nodes
#print "%s \t%s \t%s\t%s\t\t%s " % (resId, userName, expire, num_nodes, notes)
print "%-7s%-10s%-12s%-7s%s" % (resId, userName, expire, num_nodes, notes)
else:
print "%-7s%-10s%-12s%s" % (resId, userName, expire, notes)
if self.verbose:
print "---------------------------------------------------------------------------------"
print "Total number of nodes - %s" % (total)
def getPxeImages(self):
query = "select image_name from imageinfo"
result = self.selectDb(query)
row = result.fetchall()
desc = result.description
imagelist = []
for i in row:
imagelist.append(i[0])
return imagelist
def showPxeImages(self):
query = "select image_name, dist, dist_ver from imageinfo"
result = self.selectDb(query)
row = result.fetchall()
desc = result.description
for i in row:
print i
def getKernelOptions(self,image):
val = {}
query = "select i.image_name, k.kernel_name, k.kernel_arch, n.initrd_name, n.initrd_options from imageinfo i, kernelinfo k, initrdinfo n where k.kernel_id = i.kernel_id and i.initrd_id = n.initrd_id and i.image_name = '%s'" % image
result = self.selectDb(query)
row = result.fetchone()
desc = result.description
val[desc[0][0]] = row[0]
val[desc[1][0]] = row[1]
val[desc[2][0]] = row[2]
val[desc[3][0]] = row[3]
val[desc[4][0]] = row[4]
return val
def showPxeImagesToSystemMap(self, cmdargs):
extra = "j.image_id = i.image_id"
queryopt = self.__create_queryopts(cmdargs, extra=extra)
query = "select s.location, s.mac_addr, i.image_name from sysinfo s , imageinfo i, imagemap j " + queryopt + " order by s.location"
#print query
result = self.selectDb(query)
for i in result.fetchall():
print i
def close(self):
self.conn.close()
def getHwAccessMethod(self):
pass
mylist = []
return mylist
def getHostInfo(self, node):
host = {}
query = "select sys_id, mac_addr, num_procs, num_cores, mem_total, clock_speed, sys_vendor, sys_model, proc_vendor, proc_model, proc_cache, cpu_flags, bios_rev, location, system_serial_number, ip_addr from sysinfo where location = \"" + node + "\""
result = self.selectDb(query)
if result.rowcount > 1:
mesg = "Multiple entries for system exist. Please correct"
self.log.info(mesg)
exit()
if result.rowcount < 1:
mesg = "node does not exist :" + str(node) + "\n"
self.log.error(mesg)
exit()
for i in result.fetchall():
host['mac_addr'] = host.get("mac_addr", "")
host['sys_id'] = int(i[0])
host['mac_addr'] = i[1]
host['num_procs'] = int(i[2])
host['num_cores'] = int(i[3])
host['mem_total'] = int(i[4])
host['clock_speed'] = int(i[5])
host['sys_vendor'] = i[6]
host['sys_model'] = i[7]
host['proc_vendor'] = i[8]
host['proc_model'] = i[9]
host['proc_cache'] = i[10]
host['cpu_flags'] = i[11]
host['bios_rev'] = i[12]
host['location'] = i[13]
host['system_serial_number'] = i[14]
host['ip_addr'] = i[15]
'''
for k, v in host.iteritems():
print k, v, "\n"
'''
# Get IPMI info
query = "select h.hw_userid, h.hw_password, h.hw_ipaddr from hardwareinfo h, portmap p, sysinfo s where p.sys_id = s.sys_id and h.hw_id = p.hw_id and h.hw_type = 'ipmi' and s.sys_id = " + str(host['sys_id']) + ""
result = self.selectDb(query)
if result.rowcount> 1:
print "Multiple entries for system exist. Please correct"
return
for i in result.fetchall():
host['ipmi_user'] = i[0]
host['ipmi_password'] = i[1]
host['ipmi_addr'] = i[2]
# Get image info
query = "select image_name from imagemap i, imageinfo j where i.image_id = j.image_id"
result = self.selectDb(query)
if result.rowcount == 0:
host['pxe_image_name'] = "None"
else:
for i in result.fetchall():
host['pxe_image_name'] = i[0]
# Get switch info
query = "select h.hw_id, h.hw_name, h.hw_model, h.hw_ipaddr, h.hw_userid, h.hw_password, p.port_num from hardwareinfo h, portmap p where p.hw_id = h.hw_id and hw_type = 'switch' and sys_id = " + str(host['sys_id'])
result = self.selectDb(query)
for i in result.fetchall():
host['hw_id'] = int(i[0])
host['hw_name'] = i[1]
host['hw_model'] = i[2]
host['hw_ipaddr'] = i[3]
host['hw_userid'] = i[4]
host['hw_password'] = i[5]
host['hw_port'] = int(i[6])
# Get drac info
query = "select h.hw_id, h.hw_name, h.hw_model, h.hw_ipaddr, h.hw_userid, h.hw_password, p.port_num from hardwareinfo h, portmap p where p.hw_id = h.hw_id and hw_type = 'drac' and sys_id = " + str(host['sys_id'])
result = self.selectDb(query)
if result.rowcount > 0:
for i in result.fetchall():
host['drac_id'] = int(i[0])
host['drac_name'] = i[1]
host['drac_model'] = i[2]
host['drac_ipaddr'] = i[3]
host['drac_userid'] = i[4]
host['drac_password'] = i[5]
host['drac_port'] = int(i[6])
# Get PDU info
query = "select h.hw_id, h.hw_name, h.hw_model, h.hw_ipaddr, h.hw_userid, h.hw_password, p.port_num from hardwareinfo h, portmap p where p.hw_id = h.hw_id and h.hw_type = 'pdu' and p.sys_id = " + str(host['sys_id'])
result = self.selectDb(query)
for i in result.fetchall():
host['pdu_id'] = int(i[0])
host['pdu_name'] = i[1]
host['pdu_model'] = i[2]
host['pdu_ipaddr'] = i[3]
host['pdu_userid'] = i[4]
host['pdu_password'] = i[5]
host['pdu_port'] = int(i[6])
#print "host is ", host
return host
def getSwitchInfo(self, switchName):
host = {}
# Get switch info
#switchList = self.getAllSwitches()
query = "select h.hw_id, h.hw_name, h.hw_model, h.hw_ipaddr, h.hw_userid, h.hw_password from hardwareinfo h where h.hw_name = \"" + str(switchName) + "\""
result = self.selectDb(query)
for i in result.fetchall():
host['hw_id'] = int(i[0])
host['hw_name'] = i[1]
host['hw_model'] = i[2]
host['hw_ipaddr'] = i[3]
host['hw_userid'] = i[4]
host['hw_password'] = i[5]
return host
def __queryDb(self, query):
self.conn.ping(True)
cursor = self.conn.cursor()
row = ()
try:
cursor.execute (query)
self.conn.commit()
row = cursor.fetchall()
desc = cursor.description
except MySQLdb.OperationalError, e:
msg = "%s : %s" % (e[1], query)
self.log.error(msg)
#traceback.print_exc(sys.exc_info())
return row
def execQuery(self, query):
self.conn.ping(True)
cursor = self.conn.cursor()
try:
cursor.execute (query)
self.conn.commit()
#except Exception:
#traceback.print_exc(sys.exc_info())
except MySQLdb.OperationalError, e:
msg = "%s : %s" % (e[1], query)
self.log.error(msg)
#traceback.print_exc(sys.exc_info())
return
return cursor
def selectDb(self, query):
self.conn.ping(True)
cursor = self.conn.cursor()
try:
cursor.execute (query)
#except Exception:
#traceback.print_exc(sys.exc_info())
except MySQLdb.OperationalError, e:
msg = "SELECT Failed : %s : %s" % (e[1], query)
self.log.error(msg)
#traceback.print_exc(sys.exc_info())
return -1
return cursor
def __deleteDb(self, query):
self.conn.ping(True)
cursor = self.conn.cursor()
try:
cursor.execute (query)
self.conn.commit()
except MySQLdb.OperationalError, e:
msg = "DELETE Failed : %s : %s" % (e[1], query)
sys.stderr.write(msg)
self.log.error(msg)
#traceback.print_exc(sys.exc_info())
return -1
return cursor
def __updateDb(self, query):
self.conn.ping(True)
cursor = self.conn.cursor()
try:
cursor.execute (query)
self.conn.commit()
except MySQLdb.OperationalError, e:
msg = "UPDATE Failed : %s : %s" % (e[1], query)
sys.stderr.write(msg)
self.log.error(msg)
#traceback.print_exc(sys.exc_info())
return -1
return cursor
def insertDb(self, query):
self.conn.ping(True)
cursor = self.conn.cursor()
try:
cursor.execute (query)
self.conn.commit()
#except Exception:
#traceback.print_exc(sys.exc_info())
except MySQLdb.OperationalError, e:
msg = "INSERT Failed : %s : %s" % (e[1], query)
self.log.error(msg)
#traceback.print_exc(sys.exc_info())
return -1
return cursor
def updateReservation (self, reservationId, userId=None, reservationDuration=None, vlanIsolate=None, allocationNotes=None):
mesg = "Updating reservation %s" % (str(reservationId))
self.log.info(mesg)
if reservationDuration:
resDuration = str(reservationDuration)
if len(resDuration) == 8:
expireDate = resDuration
elif len(resDuration) < 4:
numdays = resDuration
cmd = "date +%Y%m%d --date=\"" + numdays + " day\""
p = os.popen(cmd)
expireDate = string.strip(p.read())
else:
mesg = "ERROR: Invalid reservation duration"
self.log.error(mesg)
return
mesg = "Updating reservationDuration :" + resDuration
self.log.info(mesg)
query = "update reservationinfo set reservation_expiration = \"" + expireDate + "\" where reservation_id = \"" + str(reservationId) + "\""
self.__updateDb(query)
if allocationNotes:
mesg = "Updating allocationNotes to " + allocationNotes
self.log.info(mesg)
query = "update reservationinfo set notes = \"" + allocationNotes + "\" where reservation_id = \"" + str(reservationId) + "\""
self.__updateDb(query)
if vlanIsolate:
mesg = "UPDATING Vlan: "
self.log.info(mesg)
query = "update reservationinfo set vlan_num = " + vlanIsolate + " where reservation_id = \"" + str(reservationId) + "\""
self.__updateDb(query)
if userId:
mesg = "UPDATING USER:"
self.log.info(mesg)
query = "update reservationinfo set user_id = " + userId + " where reservation_id = \"" + str(reservationId) + "\""
self.__updateDb(query)
def addReservation (self, userId, reservationDuration=None, reservationNotes=None):
# set default for reservation duration to 15 days
if not reservationDuration:
resDuration = str(15)
else:
resDuration = str(reservationDuration)
if len(resDuration) == 8:
expireDate = resDuration
elif len(resDuration) < 4:
numdays = resDuration
cmd = "date +%Y%m%d --date=\"" + numdays + " day\""
p = os.popen(cmd)
expireDate = string.strip(p.read())
else:
mesg = "ERROR: Invalid reservation duration\n"
self.log.info(mesg)
return
# Create the reservation
print userId, expireDate,reservationNotes
query = "insert into reservationinfo (user_id, reservation_expiration, notes) values ('%s', '%s', '%s')" % (str(userId), str(expireDate), str(reservationNotes))
mesg = "Creating new reservation : %s" % query
self.log.info(mesg)
self.insertDb(query)
# Get the res_id
query = "select max(reservation_id) from reservationinfo"
res_id = self.selectDb(query).fetchone()[0]
mesg = " Reservation created - ID : %s" % str(res_id)
self.log.info(mesg)
return res_id
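# Duration handling examples (hedged; the user id and values are illustrative):
#   self.addReservation("15")              -> expires 15 days from today (default)
#   self.addReservation("15", "30")        -> a value shorter than 4 chars is a day count
#   self.addReservation("15", "20250101")  -> an 8-char value is taken as a literal YYYYMMDD date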
#def archiveAllocation(self, nodeId, ip_addr, hostName, vlan_id, user_id, reservation_type, res_notes, notes):
#combined_notes = str(res_notes) + "|" + str(notes)
#mesg = "Insert to allocation archive:"
#query = "insert into allocationarchive (sys_id, ip_addr, hostname, vlan_id, user_id, reservation_type, notes) \
#values (\"" + \
#str(nodeId) + "\", \"" + str(ip_addr) + "\", \"" + \
#str(hostName) + "\", \"" + str(vlan_id) + "\", \"" + \
#str(user_id) + "\", \"" + str(reservation_type) + "\", \"" + \
#str(combined_notes) + "\")"
#
#self.insertDb(query)
def allocateNode(self, reservationId, domain, sysId, vlanInfo, imageName, newHostName=None, notes=None):
print "reservationId", reservationId, domain, sysId, vlanInfo, imageName, notes
# Check if node is already allocated
result = self.__checkDup("allocationinfo", "sys_id", sysId)
if len(result) > 0:
mesg = "Node already allocated : %s" % (result)
self.log.info(mesg)
return -1
# Check if reservation exists
result = self.__checkDup("reservationinfo", "reservation_id", reservationId)
if len(result) == 0:
mesg = "Reservation does not exist: " + reservationId + "\n"
self.log.error(mesg)
return -1
else:
resinfo = result[0]
# Check if domain exists
domainId = self.__getSomething("domain_id", "domaininfo", "domain_name", domain)
if len(self.__checkDup("domaininfo", "domain_id", domainId)) == 0:
mesg = "Domain does not exist: %s(%s)" % (domainId, domain)
self.log.error(mesg)
return -1
imageId = self.__getSomething("image_id", "imageinfo", "image_name", imageName)
if len(self.__checkDup("imageinfo", "image_id", imageId)) == 0:
mesg = "Image does not exist: %s(%s)" % (imageName, imageId)
self.log.error(mesg)
return -1
# Check that all the vlans exist
for i in vlanInfo.split(","):
v = i.split(":")[0]
try:
self.getVlanId(v)
except Exception, e:
print e
# Insert to allocationinfo
nodeName = self.getLocationFromSysId(sysId)
mesg = "allocateNode %s : domain %s : reservation %s(%s)" % (nodeName, domain, reservationId, resinfo[4])
self.log.info(mesg)
query = "insert into allocationinfo (sys_id, reservation_id, domain_id, hostname, notes) values ('%s', '%s', '%s', '%s', '%s')" % (sysId, reservationId, domainId, newHostName, notes)
result = self.insertDb(query)
allocationId = result.lastrowid
# Parse vlan info and add to vlanmembermap
for i in vlanInfo.split(","):
v = i.split(":")[0]
t = i.split(":")[1]
self.addNodeToVlan(nodeName, v, t)
# Insert into imagemap
query = "insert into imagemap (allocation_id, image_id) values ('%s', '%s')" % (allocationId, imageId)
result = self.insertDb(query)
self.__updateSysState(sysId, 1)
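# vlanInfo is a comma-separated list of "vlan_num:vlan_type" pairs, e.g.
# (all values below are hypothetical):
#   self.allocateNode(12, "mydomain", 42, "999:native,1000:untagged", "ubuntu-image")
# registers vlan 999 as the node's native vlan and vlan 1000 as untagged.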
def addNodeToVlan(self, nodeName, v, t):
sysId = self.getSysIdFromLocation(nodeName)
allocationId = self.__getSomething("allocation_id", "allocationinfo", "sys_id", sysId)
vId = self.getVlanId(v)
if t == "native":
query = "select vlan_id from vlanmembermap where allocation_id = %d and vlan_type = 'native'" % (allocationId)
result = self.selectDb(query)
if result.rowcount > 0:
query = "update vlanmembermap set vlan_type = 'untagged' where allocation_id = %d and vlan_id = %d" % (allocationId, result.fetchall()[0][0])
else:
query = "delete from vlanmembermap where allocation_id = %d and vlan_id = %d" % (allocationId, vId)
result = self.selectDb(query)
if self.__checkDup("vlanmembermap", "vlan_id", vId, "allocation_id", allocationId):
self.log.error("Vlan %s already exists" % (v))
return -1
query = "insert into vlanmembermap (allocation_id, vlan_id, vlan_type) values ('%s', '%s', '%s')" % (allocationId, vId, t)
result = self.insertDb(query)
mesg = "Adding vlan %s to node %s" % (v, nodeName)
self.log.info(mesg)
def removeNodeFromVlan(self, nodeName, v):
sysId = self.getSysIdFromLocation(nodeName)
allocationId = self.__getSomething("allocation_id", "allocationinfo", "sys_id", sysId)
vId = self.getVlanId(v)
query = "delete from vlanmembermap where allocation_id = '%s' and vlan_id = '%s'" % (allocationId, vId)
result = self.insertDb(query)
mesg = "Removing vlan %s from node %s" % (v, nodeName)
self.log.info(mesg)
def __updateSysState(self, sysId, stateId):
query = "update sysinfo set state_id = '%s' where sys_id = '%s'" % (stateId, sysId)
return self.__updateDb(query)
def removeReservation(self, res):
mesg = "Removing reservation (%s)" % str(res)
self.log.info(mesg)
query = "delete from reservationinfo where reservation_id = " + str(res)
self.__updateDb(query)
query = "delete from allocationinfo where reservation_id = " + str(res)
self.__updateDb(query)
def releaseNode(self, location):
# Get the nodeId
sysId = self.__getSomething("sys_id", "sysinfo", "location", location)
query = "select * from allocationinfo where sys_id = '%s'" % (sysId)
result = self.selectDb(query)
if result.rowcount == 0:
mesg = "Node %s not allocated" % (location)
self.log.error(mesg)
return
if result.rowcount > 1:
mesg = "Node %s allocated multiple times(%s)" % (location, str(result.rowcount))
self.log.warning(mesg)
return
# Eventually should add a LIMIT 1 so deletes don't get out of control
query = "delete from allocationinfo where sys_id = '%s'" % (sysId)
result = self.__deleteDb(query)
# Set state to available
self.__updateSysState(sysId, 0)
return
def addImage(self, imageName):
name = ""
dist = ""
dist_ver = ""
if len(imageName.split(":")) > 1:
name = imageName.split(":")[0]
if len(imageName.split(":")) > 2:
dist = imageName.split(":")[1]
if len(imageName.split(":")) >= 3:
dist_ver = imageName.split(":")[2]
query = "select * from imageinfo where image_name = \"" + name + "\""
result = self.selectDb(query)
if result.rowcount > 0:
mesg = "ERROR: Image already exists\n"
sys.stderr.write(mesg)
return
if name == "":
mesg = "ERROR: Image details not specified\n"
self.log.error(mesg)
mesg = "Example amd64-rgass-testing:Ubuntu:8.04\n"
mesg += "or amd64-rgass-testing::\n"
sys.stderr.write(mesg)
return
query = "insert into imageinfo (image_name, dist, dist_ver) values(\"" + name + "\", \"" + dist + "\", \"" + dist_ver + "\")"
self.insertDb(query)
def delImage(self, imageName):
query = "delete from imageinfo where image_name = \"" + imageName + "\""
result = self.__deleteDb(query)
if result.rowcount == 0:
mesg = "ERROR: No images match your entry\n"
sys.stderr.write(mesg)
return
def assignImagetoHost(self, host, image):
# imagemap db should be sys_id instead of mac_addr
# change later
cur_image = host['pxe_image_name']
# Get the id of the new image
query = "select image_id from imageinfo where image_name = " + "\"" + image + "\""
row = self.__queryDb(query)
if len(row) < 1:
mesg = "ERROR: Image \"" + image + "\" does not exist"
self.log.error(mesg)
return
new_image_id = str(row[0][0])
# check for an existing entry and delete it if present
query = "select * from imagemap where mac_addr = \"" + host['mac_addr'] + "\""
result = self.selectDb(query)
if result.rowcount > 0:
query = "delete from imagemap where mac_addr = \"" + host['mac_addr'] + "\""
result = self.__deleteDb(query)
# update the database entry with the new image for the host
query = "insert into imagemap (mac_addr, image_id) values (\"" + host['mac_addr'] + "\", " + new_image_id + ")"
self.insertDb(query)
# Update tftp link
# XXX move to pxe.py
mac_addr = "01-" + string.lower(string.replace(host['mac_addr'], ":", "-"))
maclink = self.tftpImageDir + "/" + mac_addr
#print "mac link is ", maclink
# Check if it exists first
if os.path.exists(maclink):
try:
os.unlink(maclink)
except OSError:
mesg = "Cannot modify file. Please use sudo\n"
sys.stderr.write(mesg)
return 1
except Exception, e:
traceback.print_exc(sys.exc_info())
print e
return 1
# Relink
newlink = os.path.basename(self.tftpBootOptionsDir) + "/" + image
try:
os.symlink(newlink, maclink)
mesg = "Image assignment successful " + host['location'] + " " + host['mac_addr'] + " " + image
self.log.info(mesg)
except OSError:
mesg = "Cannot modify file. Please use sudo\n"
sys.stderr.write(mesg)
return 1
except Exception, e:
print e
return 1
return 0
def getHardwareCapabilities(self, nodeName):
''' getHardwareCapabilities return a list of lists with
[0] = hw method
[1] = hw method userid
[2] = hw method password '''
sysId = self.getSysIdFromLocation(nodeName)
query = "select h.hw_type, h.hw_userid, h.hw_password from hardwareinfo h, portmap p where p.hw_id = h.hw_id and p.sys_id = '%s'" % sysId
result = self.selectDb(query)
cap = []
val = []
for i in result.fetchall():
if i[0] in self.config['hardwareControl']:
val.append(i)
# order the hardware capabilities according to order in config file
for i in self.config['hardwareControl']:
for j in val:
if i in j:
cap.append(j)
return cap
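# Hedged example of the returned structure, assuming self.config['hardwareControl']
# lists 'ipmi' before 'drac' (the location string and credentials are made up):
#   self.getHardwareCapabilities("r1r2u10")
#   -> [('ipmi', 'admin', 'secret'), ('drac', 'root', 'calvin')]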
# print out data in a consistent format
def __showIt(self, data):
pass
def registerHardware(self, data):
if len(self.__checkDup("hardwareinfo", "hw_name", data['hw_name'])) == 0:
statement = "insert into hardwareinfo ("
fields = []
entries = []
for key, value in data.iteritems():
fields.append(key)
entries.append(value)
c = len(fields)
count = 1
for i in fields:
if c != count:
statement += i + ","
else:
statement += i + ")"
count += 1
statement += "values ("
c = len(entries)
count = 1
for i in entries:
if c != count:
statement += "'" + i + "', "
else:
statement += "'" + i + "') "
count += 1
try:
self.insertDb(statement)
mesg = "Device (%s) registered successfully\n" % (data['hw_name'])
self.log.info(mesg)
except Exception, e:
mesg = "Registration failed to add Device (%s) - %s\n" % (data['hw_name'], e)
self.log.warning(mesg)
else:
mesg = "INFO: Device (%s) already registered\n" % (data['hw_name'])
sys.stderr.write(mesg)
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the profile page."""
from constants import constants
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class SignupTest(test_utils.GenericTestBase):
def test_signup_page_does_not_have_top_right_menu(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
# 'Sign in' can't appear inside an HTML tag, but may appear inside JS code.
response.mustcontain(no=['Logout'])
self.logout()
def test_going_somewhere_else_while_signing_in_logs_user_out(self):
exp_services.load_demo('0')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 302)
self.assertIn('Logout', response.headers['location'])
self.assertIn('create', response.headers['location'])
self.logout()
def test_accepting_terms_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': 'Hasta la vista!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('you will need to accept', response_dict['error'])
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'myusername'},
csrf_token=csrf_token)
self.logout()
def test_username_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '!a!', 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abcde', 'agreed_to_terms': True},
csrf_token=csrf_token)
self.logout()
def test_default_dashboard_for_new_users(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
# This user should have the creator dashboard as default.
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'creatoruser',
'default_dashboard': constants.DASHBOARD_TYPE_CREATOR,
'can_receive_email_updates': None},
csrf_token)
user_id = user_services.get_user_id_from_username('creatoruser')
user_settings = user_services.get_user_settings(user_id)
self.assertEqual(
user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
self.logout()
self.login(self.VIEWER_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
# This user should have the learner dashboard as default.
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'learneruser',
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER,
'can_receive_email_updates': None},
csrf_token)
user_id = user_services.get_user_id_from_username('learneruser')
user_settings = user_services.get_user_settings(user_id)
self.assertEqual(
user_settings.default_dashboard, constants.DASHBOARD_TYPE_LEARNER)
self.logout()
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
def test_username_check(self):
self.signup('[email protected]', username='abc')
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': True
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
csrf_token=csrf_token)
self.assertEqual(response_dict, {
'username_is_taken': False
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL,
{'username': self.UNICODE_TEST_STRING},
csrf_token=csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(response_dict['code'], 400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
self.logout()
class EmailPreferencesTests(test_utils.GenericTestBase):
def test_user_not_setting_email_prefs_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
# The email update preference should be whatever the setting in feconf
# is.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_user_allowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': True},
csrf_token=csrf_token)
# The email update preference should be True in all cases.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_user_disallowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(feconf.SIGNUP_URL)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': False},
csrf_token=csrf_token)
# The email update preference should be False in all cases.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_email_preferences_updates(self):
"""Test that Preferences Handler correctly updates the email
preferences of the user.
"""
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
payload = {
'update_type': 'email_preferences',
'data': {
'can_receive_email_updates': True,
'can_receive_editor_role_email': True,
'can_receive_feedback_message_email': True,
'can_receive_subscription_email': True
}
}
# Allow all emails.
self.put_json(
'/preferenceshandler/data', payload, csrf_token=csrf_token)
email_preferences = user_services.get_email_preferences(editor_id)
self.assertTrue(email_preferences.can_receive_email_updates)
self.assertTrue(email_preferences.can_receive_editor_role_email)
self.assertTrue(email_preferences.can_receive_feedback_message_email)
self.assertTrue(email_preferences.can_receive_subscription_email)
payload = {
'update_type': 'email_preferences',
'data': {
'can_receive_email_updates': False,
'can_receive_editor_role_email': False,
'can_receive_feedback_message_email': False,
'can_receive_subscription_email': False
}
}
# Disallow all emails.
self.put_json(
'/preferenceshandler/data', payload, csrf_token=csrf_token)
email_preferences = user_services.get_email_preferences(editor_id)
self.assertFalse(email_preferences.can_receive_email_updates)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
class PreferencesHandlerTests(test_utils.GenericTestBase):
EXP_ID = 'exp_id'
EXP_TITLE = 'Exploration title'
def setUp(self):
super(PreferencesHandlerTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def test_can_see_subscriptions(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.PREFERENCES_DATA_URL)
self.assertEqual(len(response['subscription_list']), 0)
# Subscribe to user.
subscription_services.subscribe_to_creator(
self.viewer_id, self.owner_id)
response = self.get_json(feconf.PREFERENCES_DATA_URL)
self.assertEqual(len(response['subscription_list']), 1)
self.assertEqual(
response['subscription_list'][0]['creator_username'],
self.OWNER_USERNAME)
# Unsubscribe from user.
subscription_services.unsubscribe_from_creator(
self.viewer_id, self.owner_id)
response = self.get_json(feconf.PREFERENCES_DATA_URL)
self.assertEqual(len(response['subscription_list']), 0)
class ProfileLinkTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = '[email protected]'
PROFILE_PIC_URL = '/preferenceshandler/profile_picture_by_username/'
def test_get_profile_picture_invalid_username(self):
response = self.testapp.get(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME), expect_errors=True
)
self.assertEqual(response.status_int, 404)
def test_get_profile_picture_valid_username(self):
self.signup(self.EMAIL, self.USERNAME)
response_dict = self.get_json(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME)
)
# Every user must have a profile picture.
self.assertEqual(
response_dict['profile_picture_data_url_for_username'],
user_services.DEFAULT_IDENTICON_DATA_URL)
class ProfileDataHandlerTests(test_utils.GenericTestBase):
def test_preference_page_updates(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
original_preferences = self.get_json('/preferenceshandler/data')
self.assertEqual(
['en'], original_preferences['preferred_language_codes'])
self.assertIsNone(original_preferences['preferred_site_language_code'])
self.assertIsNone(original_preferences['preferred_audio_language_code'])
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_site_language_code', 'data': 'en'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_audio_language_code', 'data': 'hi-en'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_language_codes', 'data': ['de']},
csrf_token=csrf_token)
new_preferences = self.get_json('/preferenceshandler/data')
self.assertEqual(new_preferences['preferred_language_codes'], ['de'])
self.assertEqual(new_preferences['preferred_site_language_code'], 'en')
self.assertEqual(
new_preferences['preferred_audio_language_code'], 'hi-en')
def test_profile_data_is_independent_of_currently_logged_in_user(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new editor bio'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'subject_interests', 'data': ['editor', 'editing']},
csrf_token=csrf_token)
self.logout()
self.signup(self.VIEWER_EMAIL, username=self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
response = self.testapp.get('/preferences')
csrf_token = self.get_csrf_token_from_response(response)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new viewer bio'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'subject_interests', 'data': ['viewer', 'viewing']},
csrf_token=csrf_token)
self.logout()
# Viewer looks at editor's profile page.
self.login(self.VIEWER_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
self.logout()
# Editor looks at their own profile page.
self.login(self.EDITOR_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
self.logout()
# Logged-out user looks at editor's profile page.
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
class FirstContributionDateTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = '[email protected]'
def test_contribution_msec(self):
# Test the contribution time shows up correctly as None.
self.signup(self.EMAIL, self.USERNAME)
self.login(self.EMAIL)
user_id = self.get_user_id_from_email(self.EMAIL)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertIsNone(response_dict['first_contribution_msec'])
# Update the first_contribution_msec to the current time in
# milliseconds.
first_time_in_msecs = utils.get_current_time_in_millisecs()
user_services.update_first_contribution_msec_if_not_set(
user_id, first_time_in_msecs)
# Test that the contribution date correctly changes to first_time_in_msecs.
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_msec'],
first_time_in_msecs)
# Test that the contribution date is not changed after the first time it
# is set.
second_time_in_msecs = utils.get_current_time_in_millisecs()
user_services.update_first_contribution_msec_if_not_set(
user_id, second_time_in_msecs)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_msec'],
first_time_in_msecs)
class UserContributionsTests(test_utils.GenericTestBase):
USERNAME_A = 'a'
EMAIL_A = '[email protected]'
USERNAME_B = 'b'
EMAIL_B = '[email protected]'
EXP_ID_1 = 'exp_id_1'
def test_null_case(self):
# Check that the profile page for a user with no contributions shows
# that they have 0 created/edited explorations.
self.signup(self.EMAIL_A, self.USERNAME_A)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_A)
self.assertEqual(
response_dict['created_exp_summary_dicts'], [])
self.assertEqual(
response_dict['edited_exp_summary_dicts'], [])
def test_created(self):
# Check that the profile page for a user who has created
# a single exploration shows 1 created and 1 edited exploration.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
user_a = user_services.UserActionsInfo(user_a_id)
self.save_new_valid_exploration(
self.EXP_ID_1, user_a_id, end_state_name='End')
rights_manager.publish_exploration(user_a, self.EXP_ID_1)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_A)
self.assertEqual(len(
response_dict['created_exp_summary_dicts']), 1)
self.assertEqual(len(
response_dict['edited_exp_summary_dicts']), 1)
self.assertEqual(
response_dict['created_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
def test_edited(self):
# Check that the profile page for a user who has created
# a single exploration shows 0 created and 1 edited exploration.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
user_b_id = self.get_user_id_from_email(self.EMAIL_B)
user_a = user_services.UserActionsInfo(user_a_id)
self.save_new_valid_exploration(
self.EXP_ID_1, user_a_id, end_state_name='End')
rights_manager.publish_exploration(user_a, self.EXP_ID_1)
exp_services.update_exploration(user_b_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
}], 'Test edit')
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_B)
self.assertEqual(len(
response_dict['created_exp_summary_dicts']), 0)
self.assertEqual(len(
response_dict['edited_exp_summary_dicts']), 1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['objective'],
'the objective')
class SiteLanguageHandlerTests(test_utils.GenericTestBase):
def test_save_site_language_handler(self):
"""Test the language is saved in the preferences when handler is called.
"""
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
language_code = 'es'
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/preferences')
self.assertEqual(response.status_int, 200)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('/preferenceshandler/data', {
'update_type': 'preferred_site_language_code',
'data': language_code,
}, csrf_token)
preferences = self.get_json('/preferenceshandler/data')
self.assertIsNotNone(preferences)
self.assertEqual(
preferences['preferred_site_language_code'], language_code)
self.logout()
class LongUserBioHandlerTests(test_utils.GenericTestBase):
USERNAME_A = 'a'
EMAIL_A = '[email protected]'
USERNAME_B = 'b'
EMAIL_B = '[email protected]'
def test_userbio_within_limit(self):
self.signup(self.EMAIL_A, self.USERNAME_A)
self.login(self.EMAIL_A)
response = self.testapp.get('/preferences')
self.assertEqual(response.status_int, 200)
csrf_token = self.get_csrf_token_from_response(response)
self.put_json('/preferenceshandler/data', {
'update_type': 'user_bio',
'data': 'I am within 2000 char limit',
}, csrf_token)
preferences = self.get_json('/preferenceshandler/data')
self.assertIsNotNone(preferences)
self.assertEqual(
preferences['user_bio'], 'I am within 2000 char limit')
self.logout()
def test_user_bio_exceeds_limit(self):
self.signup(self.EMAIL_B, self.USERNAME_B)
self.login(self.EMAIL_B)
response = self.testapp.get('/preferences')
self.assertEqual(response.status_int, 200)
csrf_token = self.get_csrf_token_from_response(response)
user_bio_response = self.put_json(
'/preferenceshandler/data', {
'update_type': 'user_bio',
'data': 'I am not within 2000 char limit' * 200
},
csrf_token=csrf_token,
expect_errors=True,
expected_status_int=400)
self.assertEqual(user_bio_response['code'], 400)
self.assertIn('User bio exceeds maximum character limit: 2000',
user_bio_response['error'])
self.logout()
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
config.py
Configuration settings and storage.
"""
import logging
import collections
import ConfigParser
from locale import getdefaultlocale
DEF_ENC = getdefaultlocale()[1]
if DEF_ENC is None:
DEF_ENC = "UTF-8"
import directories
import weakref
log = logging.getLogger(__name__)
class Config(object):
def __init__(self, definitions):
self.load()
self._sections = {}
for (sectionKey, sectionName), items in definitions.iteritems():
self._sections[sectionKey] = ConfigSection(self.config, sectionName, items)
setattr(self, sectionKey, self._sections[sectionKey])
self.save()
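# A minimal sketch of the expected "definitions" layout; the section and
# option names below are illustrative, not taken from MCEdit itself:
#
#   definitions = {
#       ("settings", "Settings"): [
#           ("viewDistance", "View Distance", 8),
#           ("flyMode", "Fly Mode", False),
#       ],
#   }
#   config = Config(definitions)
#   config.settings.viewDistance.get()   # -> 8, or the value stored in mcedit.ini
#   config["settings"]["flyMode"].set(True)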
def __getitem__(self, section):
return self._sections[section]
@staticmethod
def getPath():
return directories.configFilePath
@staticmethod
def transformKey(value, i=0):
if 'left' in value and len(value) > 5:
value = value[5:]
elif 'right' in value and len(value) > 6:
value = value[6:]
if 'a' <= value <= 'z':
value = value.replace(value[0], value[0].upper(), 1)
if i >= 36 and "Ctrl-" not in value:
value = "Ctrl-" + value
if value == "Mouse3":
value = "Button 3"
elif value == "Mouse4":
value = "Scroll Up"
elif value == "Mouse5":
value = "Scroll Down"
elif value == "Mouse6":
value = "Button 4"
elif value == "Mouse7":
value = "Button 5"
return value
@staticmethod
def convert(key):
vals = key.replace('-', ' ').translate(None, '()').lower().split(' ')
return vals[0] + "".join(x.title() for x in vals[1:])
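# Examples of convert(): "block buffer" -> "blockBuffer",
# "Fill Block (M)" -> "fillBlockM".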
def reset(self):
for section in self.config.sections():
self.config.remove_section(section)
def transformConfig(self):
if self.config.has_section("Version") and self.config.has_option("Version", "version"):
version = self.config.get("Version", "version")
else:
self.reset()
return
if version == "1.1.1.1":
i = 1
for (name, value) in self.config.items("Keys"):
if name != "Swap View" and name != "Toggle Fps Counter":
self.config.set("Keys", name, self.transformKey(value, i))
elif name == "Swap View":
self.config.set("Keys", "View Distance", self.transformKey(value, i))
self.config.set("Keys", "Swap View", "None")
elif name == "Toggle Fps Counter":
self.config.set("Keys", "Debug Overlay", self.transformKey(value, i))
self.config.set("Keys", "Toggle Fps Counter", "None")
i += 1
if self.config.get("Keys", "Brake") == "Space":
version = "1.1.2.0-update"
else:
version = "1.1.2.0-new"
self.config.set("Version", "version", version)
self.save()
def load(self):
log.info("Loading config...")
self.config = ConfigParser.RawConfigParser([], ConfigDict)
self.config.observers = {}
try:
self.config.read(self.getPath())
except Exception, e:
log.warn("Error while reading configuration file mcedit.ini: {0}".format(e))
self.transformConfig()
def save(self):
try:
cf = file(self.getPath(), 'w')
self.config.write(cf)
cf.close()
except Exception, e:
try:
log.error("Error saving configuration settings to mcedit.ini: {0}".format(e))
except:
pass
class ConfigSection(object):
def __init__(self, config, section, items):
self.section = section
if not config.has_section(section):
config.add_section(section)
self._items = {}
for item in items:
if isinstance(item, ConfigValue):
value = item
elif type(item[2]) in [list, tuple]:
value = ListValue(item[0], item[1], item[2])
else:
value = ConfigValue(item[0], item[1], item[2])
value.config = config
value.section = section
self._items[value.key] = value
value.get()
def __getitem__(self, key):
return self._items[key]
def __getattr__(self, key):
return self.__getitem__(key)
def items(self):
return [(i.name, i.get()) for k, i in self._items.iteritems()]
class ConfigValue(object):
allowedTypes = [int, float, bool, basestring, str, unicode]
def __init__(self, key, name, default=None):
if default is None:
default = name
name = key
self.key = key
self.name = name
self.default = default
self.type = type(default)
if self.type not in self.allowedTypes:
raise TypeError("Invalid config type %s" % repr(self.type))
def get(self):
try:
if self.type is bool:
return self.config.getboolean(self.section, self.name)
if self.type is unicode:
return self.type(self.config.get(self.section, self.name).decode(DEF_ENC))
return self.type(self.config.get(self.section, self.name))
except:
if self.default is None:
raise
self.set(self.default)
return self.default
def getRaw(self):
return self.config.get(self.section, self.name)
def _setter(self, setter):
def _s(s, value):
if setter is not None:
setter(s, value)
return self.set(value)
return _s
def set(self, value):
log.debug("Property Change: %15s %30s = %s", self.section, self.name, value)
if self.type is unicode and type(value) is unicode:
value = value.encode(DEF_ENC)
self.config.set(self.section, self.name, str(value))
self._notifyObservers(value)
def addObserver(self, target, attr=None, callback=None):
""" Register 'target' for changes in the config var named by section and name.
When the config is changed, calls setattr with target and attr.
attr may be None; it will be created from the name by lowercasing the first
word, uppercasing the rest, and removing spaces.
e.g. "block buffer" becomes "blockBuffer"
"""
observers = self.config.observers.setdefault((self.section, self.name), {})
if not attr:
attr = self.key
log.debug("Subscribing %s.%s", target, attr)
attr = intern(attr)
targetref = weakref.ref(target)
observers.setdefault((targetref, attr), callback)
val = self.get()
setattr(target, attr, val)
if callback:
callback(val)
def _notifyObservers(self, value):
observers = self.config.observers.get((self.section, self.name), {})
newObservers = {}
for targetref, attr in observers:
target = targetref()
if target:
log.debug("Notifying %s", target)
setattr(target, attr, value)
callback = observers[targetref, attr]
if callback:
callback(value)
newObservers[targetref, attr] = callback
self.config.observers[(self.section, self.name)] = newObservers
def property(self, setter=None):
assert self.default is not None
this = self
def _getter(self):
return this.get()
return property(_getter, self._setter(setter), None)
def __repr__(self):
return "<%s>" % " ".join((
self.__class__.__name__,
"section=%r" % self.section,
"key=%r" % self.key,
"name=%r" % self.name,
"default=%s" % self.default,
"type=%s" % self.type
))
def __int__(self):
return int(self.get())
def __float__(self):
return float(self.get())
def __bool__(self):
return bool(self.get())
class ListValue(ConfigValue):
allowedTypes = [list, tuple]
def __init__(self, key, name, default=None):
if default is None or len(default) < 1:
raise ValueError("Default value %s is empty." % repr(default))
self.subtype = type(default[0])
super(ListValue, self).__init__(key, name, default)
def get(self):
try:
            return self.type(self.subtype(x.strip()) for x in self.config.get(self.section, self.name).translate(None, '[]()').split(','))
except:
if self.default is None:
raise
self.set(self.default)
return self.default
def __repr__(self):
return "<%s>" % " ".join((
self.__class__.__name__,
"section=%r" % self.section,
"key=%r" % self.key,
"name=%r" % self.name,
"default=%s" % self.default,
"type=%s" % self.type,
"subtype=%s" % self.subtype
))
class ColorValue(ListValue):
allowedTypes = [tuple]
defaultColors = {}
def __init__(self, key, name, default=None):
super(ColorValue, self).__init__(key, name, default)
ColorValue.defaultColors[name] = self
def get(self):
values = super(ColorValue, self).get()
return tuple(min(max(x, 0.0), 1.0) for x in values)
class ConfigDict(collections.MutableMapping):
def __init__(self, *args, **kwargs):
self.dict = dict(*args, **kwargs)
self.keyorder = []
def keys(self):
return list(self.keyorder)
def items(self):
return list(self.__iteritems__())
def __iteritems__(self):
return ((k, self.dict[k]) for k in self.keys())
def __iter__(self):
return self.keys().__iter__()
def __getitem__(self, k):
return self.dict[k]
def __setitem__(self, k, v):
self.dict[k] = v
if k not in self.keyorder:
self.keyorder.append(k)
def __delitem__(self, k):
del self.dict[k]
if k in self.keyorder:
self.keyorder.remove(k)
def __contains__(self, k):
return self.dict.__contains__(k)
def __len__(self):
return self.dict.__len__()
def copy(self):
k = ConfigDict()
k.dict = self.dict.copy()
k.keyorder = list(self.keyorder)
return k
# Quick Reference:
# 7 Bedrock
# 9 Still_Water
# 11 Still_Lava
# 14 Gold_Ore
# 15 Iron_Ore
# 16 Coal_Ore
# 21 Lapis_Lazuli_Ore
# 24 Sandstone
# 49 Obsidian
# 56 Diamond_Ore
# 73 Redstone_Ore
# 129 Emerald_Ore
# 153 Nether_Quartz_Ore
hiddableOres = [7, 16, 15, 21, 73, 14, 56, 153]
definitions = {
("keys", "Keys"): [
("forward", "forward", "W"),
("back", "back", "S"),
("left", "left", "A"),
("right", "right", "D"),
("up", "up", "Space"),
("down", "down", "Shift"),
("brake", "brake", "C"),
("rotateClone", "rotate (clone)", "E"),
("rollClone", "roll (clone)", "R"),
("flip", "flip", "F"),
("mirror", "mirror", "G"),
("rotateBrush", "rotate (brush)", "E"),
("rollBrush", "roll (brush)", "G"),
("increaseBrush", "increase brush", "R"),
("decreaseBrush", "decrease brush", "F"),
("replaceShortcut", "replace shortcut", "R"),
("swap", "swap", "X"),
("panLeft", "pan left", "J"),
("panRight", "pan right", "L"),
("panUp", "pan up", "I"),
("panDown", "pan down", "K"),
("toggleView", "toggle view", "Tab"),
("resetReach", "reset reach", "Button 3"),
("increaseReach", "increase reach", "Scroll Up"),
("decreaseReach", "decrease reach", "Scroll Down"),
("confirmConstruction", "confirm construction", "Return"),
("openLevel", "open level", "O"),
("newLevel", "new level", "N"),
("deleteBlocks", "delete blocks", "Delete"),
("lineTool", "line tool", "Z"),
("longDistanceMode", "long-distance mode", "Alt-Z"),
("flyMode", "fly mode", "None"),
("debugOverlay", "debug overlay", "0"),
("showBlockInfo", "show block info", "Alt"),
("pickBlock", "pick block", "Alt"),
("selectChunks", "select chunks", "Z"),
("deselectChunks", "deselect chunks", "Alt"),
("brushLineTool", "brush line tool", "Z"),
("snapCloneToAxis", "snap clone to axis", "Ctrl"),
("blocksOnlyModifier", "blocks-only modifier", "Alt"),
("fastIncrementModifier", "fast increment modifier", "Ctrl"),
("fastNudge", "fast nudge", "None"),
("takeAScreenshot", "take a screenshot", "F6"),
("quit", "quit", "Ctrl-Q"),
("viewDistance", "view distance", "Ctrl-F"),
("selectAll", "select all", "Ctrl-A"),
("deselect", "deselect", "Ctrl-D"),
("cut", "cut", "Ctrl-X"),
("copy", "copy", "Ctrl-C"),
("paste", "paste", "Ctrl-V"),
("reloadWorld", "reload world", "Ctrl-R"),
("open", "open", "Ctrl-O"),
("quickLoad", "quick load", "Ctrl-L"),
("undo", "undo", "Ctrl-Z"),
("redo", "redo", "Ctrl-Y"),
("save", "save", "Ctrl-S"),
("saveAs", "save as", "Ctrl-Alt-S"),
("newWorld", "new world", "Ctrl-N"),
("closeWorld", "close world", "Ctrl-W"),
("worldInfo", "world info", "Ctrl-I"),
("gotoPanel", "goto panel", "Ctrl-G"),
("exportSelection", "export selection", "Ctrl-E"),
("toggleRenderer", "toggle renderer", "Ctrl-M"),
("uploadWorld", "upload world", "Ctrl-U"),
],
("version", "Version"): [
("version", "version", "1.1.2.0")
],
("settings", "Settings"): [
("flyMode", "Fly Mode", False),
("enableMouseLag", "Enable Mouse Lag", False),
("longDistanceMode", "Long Distance Mode", False),
("shouldResizeAlert", "Window Size Alert", True),
("closeMinecraftWarning", "Close Minecraft Warning", True),
("skin", "MCEdit Skin", "[Current]"),
("fov", "Field of View", 70.0),
("spaceHeight", "Space Height", 64),
("blockBuffer", "Block Buffer", 256 * 1048576),
("reportCrashes", "report crashes new", False),
("reportCrashesAsked", "report crashes asked", False),
("staticCommandsNudge", "Static Coords While Nudging", False),
("moveSpawnerPosNudge", "Change Spawners While Nudging", False),
("rotateBlockBrush", "rotateBlockBrushRow", True),
("langCode", "Language String", "English (US)"),
("viewDistance", "View Distance", 8),
("targetFPS", "Target FPS", 30),
("windowWidth", "window width", 1152),
("windowHeight", "window height", 864),
("windowX", "window x", 0),
("windowY", "window y", 0),
("windowShowCmd", "window showcmd", 1),
("setWindowPlacement", "SetWindowPlacement", True),
("showHiddenOres", "show hidden ores", False),
("hiddableOres", "hiddable ores", hiddableOres),
] + [
("showOre%s" % i, "show ore %s" % i, True) for i in hiddableOres
] + [
("fastLeaves", "fast leaves", True),
("roughGraphics", "rough graphics", False),
("showChunkRedraw", "show chunk redraw", True),
("drawSky", "draw sky", True),
("drawFog", "draw fog", True),
("showCeiling", "show ceiling", True),
("drawEntities", "draw entities", True),
("drawMonsters", "draw monsters", True),
("drawItems", "draw items", True),
("drawTileEntities", "draw tile entities", True),
("drawTileTicks", "draw tile ticks", False),
("drawUnpopulatedChunks", "draw unpopulated chunks", True),
("drawChunkBorders", "draw chunk borders", False),
("vertexBufferLimit", "vertex buffer limit", 384),
("vsync", "vertical sync", 0),
("viewMode", "View Mode", "Camera"),
("undoLimit", "Undo Limit", 20),
("recentWorlds", "Recent Worlds", ['']),
("resourcePack", "Resource Pack", u"Default"),
("maxCopies", "Copy stack size", 32),
("superSecretSettings", "Super Secret Settings", False),
("compassToggle", "Compass Toggle", True),
("compassSize", "Compass Size", 75),
("fontProportion", "Fonts Proportion", 100),
("downloadPlayerSkins", "Download Player Skins", True),
],
("controls", "Controls"): [
("mouseSpeed", "mouse speed", 5.0),
("cameraAccel", "camera acceleration", 125.0),
("cameraDrag", "camera drag", 100.0),
("cameraMaxSpeed", "camera maximum speed", 60.0),
("cameraBrakingSpeed", "camera braking speed", 8.0),
("invertMousePitch", "invert mouse pitch", False),
("autobrake", "autobrake", True),
("swapAxes", "swap axes looking down", False)
],
("brush", "Brush"): [
("brushSizeL", "Brush Shape L", 3),
("brushSizeH", "Brush Shape H", 3),
("brushSizeW", "Brush Shape W", 3),
("updateBrushOffset", "Update Brush Offset", False),
("chooseBlockImmediately", "Choose Block Immediately", False),
("alpha", "Alpha", 0.66)
],
("clone", "Clone"): [
("copyAir", "Copy Air", True),
("copyWater", "Copy Water", True),
("copyBiomes", "Copy Biomes", False),
("staticCommands", "Change Coordinates", False),
("moveSpawnerPos", "Change Spawners Pos", False),
("regenerateUUID", "Regenerate UUIDs", True),
("placeImmediately", "Place Immediately", True)
],
("fill", "Fill"): [
("chooseBlockImmediately", "Choose Block Immediately", True)
],
("spawn", "Spawn"): [
("spawnProtection", "Spawn Protection", True)
],
("selection", "Selection"): [
("showPreviousSelection", "Show Previous Selection", True),
("color", "Color", "white")
],
("selectionColors", "Selection Colors"): [
ColorValue("white", "white", (1.0, 1.0, 1.0)),
ColorValue("blue", "blue", (0.75, 0.75, 1.0)),
ColorValue("green", "green", (0.75, 1.0, 0.75)),
ColorValue("red", "red", (1.0, 0.75, 0.75)),
ColorValue("teal", "teal", (0.75, 1.0, 1.0)),
ColorValue("pink", "pink", (1.0, 0.75, 1.0)),
ColorValue("yellow", "yellow", (1.0, 1.0, 0.75)),
ColorValue("grey", "grey", (0.6, 0.6, 0.6)),
ColorValue("black", "black", (0.0, 0.0, 0.0))
],
("fastNudgeSettings", "Fast Nudge Settings"): [
("blocksWidth", "Blocks Width", False),
("blocksWidthNumber", "Blocks Width Number", 16),
("selectionWidth", "Selection Width", False),
("selectionWidthNumber", "Selection Width Number", 16),
("pointsWidth", "Points Width", False),
("pointsWidthNumber", "Points Width Number", 16),
("cloneWidth", "clone Width", True),
("cloneWidthNumber", "Clone Width Number", 16),
("importWidth", "Import Width", False),
("importWidthNumber", "Import Width Number", 8),
],
("nbtTreeSettings", "NBT Tree Settings"): [
("useBulletStyles", "Use Bullet Styles", True),
("useBulletText", "Use Bullet Text", False),
("useBulletImages", "Use Bullet Images", True),
("bulletFileName", "Bullet Images File", directories.os.path.join(directories.getDataDir(), 'Nbtsheet.png')),
("showAllTags", "Show all the tags in the tree", False),
],
("Filter Keys", "Filter Keys"): [],
}
config = None
if config is None:
config = Config(definitions)
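# Minimal usage sketch (not executed; names taken from the definitions above):
#
#     from config import config
#     fov = config.settings.fov.get()    # read a value (70.0 by default)
#     config.settings.fov.set(90.0)      # write it back and notify observers
#     config.save()                      # persist to mcedit.ini
#
# ConfigValue.property() can also expose a setting as a class-level property,
# e.g. `fov = config.settings.fov.property()` inside a class definition.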
|
|
#!/usr/bin/env python3
#
# This script bootstraps an NSQ cluster in EC2 and runs benchmarks.
#
# Requires python3 and the following packages:
# - boto3
# - paramiko
# - tornado
#
# AWS authentication is delegated entirely to the boto3 environment, see:
#
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
#
# EC2 instances are launched into EC2 Classic, expecting a 'default' security group
# that allows SSH (port 22) from 0.0.0.0/0, and an EC2 key pair
# (named 'default' by default, configurable via --ssh-key-name).
#
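#
# A typical session might look like this (the script filename is assumed here;
# the trailing positional argument selects a key of cmd_map at the bottom of
# this file, with bootstrap as the default):
#
#   python bench.py --nsqd_count=3 --worker_count=3 --msg_size=200 bootstrap
#   python bench.py run
#   python bench.py decomm
#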
import sys
import logging
import time
import datetime
import socket
import warnings
import hashlib
import boto3
import paramiko.client
import paramiko.ssh_exception
import tornado.options
def ssh_connect_with_retries(host, retries=3, timeout=30):
for i in range(retries):
try:
ssh_client = paramiko.client.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.client.WarningPolicy())
ssh_client.connect(host, username='ubuntu', timeout=timeout)
return ssh_client
except (socket.error, paramiko.ssh_exception.SSHException):
if i == retries - 1:
raise
logging.warning('... re-trying to connect to %s:%d in 15s', host, 22)
time.sleep(15)
def ssh_cmd_async(ssh_client, cmd):
transport = ssh_client.get_transport()
chan = transport.open_session()
chan.exec_command(cmd)
return chan
def ssh_cmd(ssh_client, cmd, timeout=2):
transport = ssh_client.get_transport()
chan = transport.open_session()
chan.settimeout(timeout)
chan.exec_command(cmd)
stdout = b''
stderr = b''
while True:
if chan.recv_ready():
stdout += chan.recv(4096)
continue
if chan.recv_stderr_ready():
stderr += chan.recv_stderr(4096)
continue
if chan.exit_status_ready():
exit_status = chan.recv_exit_status()
break
time.sleep(0.1)
if exit_status != 0:
raise Exception('%r' % stderr)
return stdout, stderr
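# Usage sketch (hypothetical command): ssh_cmd blocks until the remote command
# exits and raises on a non-zero exit status, e.g.
#     stdout, stderr = ssh_cmd(ssh_client, 'uname -a', timeout=10)
# while ssh_cmd_async returns the channel immediately, so long-running
# processes (nsqd, the bench workers) can be polled later.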
def get_session():
return boto3.session.Session(region_name=tornado.options.options.region)
def _bootstrap(addr):
commit = tornado.options.options.commit
golang_version = tornado.options.options.golang_version
ssh_client = ssh_connect_with_retries(addr)
for cmd in [
'wget https://storage.googleapis.com/golang/go%s.linux-amd64.tar.gz' % golang_version,
'sudo -S tar -C /usr/local -xzf go%s.linux-amd64.tar.gz' % golang_version,
'sudo -S apt-get update',
'sudo -S apt-get -y install git mercurial',
'mkdir -p go/src/github.com/nsqio',
'cd go/src/github.com/nsqio && git clone https://github.com/nsqio/nsq',
'cd go/src/github.com/nsqio/nsq && git checkout %s' % commit,
'cd go/src/github.com/nsqio/nsq/apps/nsqd && GO111MODULE=on /usr/local/go/bin/go build',
'cd go/src/github.com/nsqio/nsq/bench/bench_writer && GO111MODULE=on /usr/local/go/bin/go build',
'cd go/src/github.com/nsqio/nsq/bench/bench_reader && GO111MODULE=on /usr/local/go/bin/go build',
'sudo -S mkdir -p /mnt/nsq',
'sudo -S chmod 777 /mnt/nsq']:
ssh_cmd(ssh_client, cmd, timeout=10)
def bootstrap():
session = get_session()
ec2 = session.resource('ec2')
total_count = tornado.options.options.nsqd_count + tornado.options.options.worker_count
logging.info('launching %d instances', total_count)
instances = ec2.create_instances(
ImageId=tornado.options.options.ami,
MinCount=total_count,
MaxCount=total_count,
KeyName=tornado.options.options.ssh_key_name,
InstanceType=tornado.options.options.instance_type,
SecurityGroups=['default'])
logging.info('waiting for instances to launch...')
while any(i.state['Name'] != 'running' for i in instances):
waiting_for = [i.id for i in instances if i.state['Name'] != 'running']
logging.info('... sleeping for 5s (waiting for %s)', ', '.join(waiting_for))
time.sleep(5)
for instance in instances:
instance.load()
for instance in instances:
if not instance.tags:
instance.create_tags(Tags=[{'Key': 'nsq_bench', 'Value': '1'}])
try:
c = 0
for i in instances:
c += 1
logging.info('(%d) bootstrapping %s (%s)', c, i.public_dns_name, i.id)
_bootstrap(i.public_dns_name)
except Exception:
logging.exception('bootstrap failed')
decomm()
def run():
instances = _find_instances()
logging.info('launching nsqd on %d host(s)', tornado.options.options.nsqd_count)
nsqd_chans = []
nsqd_hosts = instances[:tornado.options.options.nsqd_count]
for instance in nsqd_hosts:
try:
ssh_client = ssh_connect_with_retries(instance.public_dns_name)
for cmd in [
'sudo -S pkill -f nsqd',
'sudo -S rm -f /mnt/nsq/*.dat',
'GOMAXPROCS=32 ./go/src/github.com/nsqio/nsq/apps/nsqd/nsqd \
--data-path=/mnt/nsq \
--mem-queue-size=10000000 \
--max-rdy-count=%s' % (tornado.options.options.rdy)]:
nsqd_chans.append((ssh_client, ssh_cmd_async(ssh_client, cmd)))
except Exception:
logging.exception('failed')
nsqd_tcp_addrs = [i.public_dns_name for i in nsqd_hosts]
dt = datetime.datetime.utcnow()
deadline = dt + datetime.timedelta(seconds=30)
logging.info('launching %d producer(s) on %d host(s)',
tornado.options.options.nsqd_count * tornado.options.options.worker_count,
tornado.options.options.worker_count)
worker_chans = []
producer_hosts = instances[tornado.options.options.nsqd_count:]
for instance in producer_hosts:
for nsqd_tcp_addr in nsqd_tcp_addrs:
topic = hashlib.md5(instance.public_dns_name.encode('utf-8')).hexdigest()
try:
ssh_client = ssh_connect_with_retries(instance.public_dns_name)
for cmd in [
'GOMAXPROCS=2 \
./go/src/github.com/nsqio/nsq/bench/bench_writer/bench_writer \
--topic=%s --nsqd-tcp-address=%s:4150 --deadline=\'%s\' --size=%d' % (
topic, nsqd_tcp_addr, deadline.strftime('%Y-%m-%d %H:%M:%S'),
tornado.options.options.msg_size)]:
worker_chans.append((ssh_client, ssh_cmd_async(ssh_client, cmd)))
except Exception:
logging.exception('failed')
if tornado.options.options.mode == 'pubsub':
logging.info('launching %d consumer(s) on %d host(s)',
tornado.options.options.nsqd_count * tornado.options.options.worker_count,
tornado.options.options.worker_count)
consumer_hosts = instances[tornado.options.options.nsqd_count:]
for instance in consumer_hosts:
for nsqd_tcp_addr in nsqd_tcp_addrs:
topic = hashlib.md5(instance.public_dns_name.encode('utf-8')).hexdigest()
try:
ssh_client = ssh_connect_with_retries(instance.public_dns_name)
for cmd in [
'GOMAXPROCS=8 \
./go/src/github.com/nsqio/nsq/bench/bench_reader/bench_reader \
--topic=%s --nsqd-tcp-address=%s:4150 --deadline=\'%s\' --size=%d \
--rdy=%d' % (
topic, nsqd_tcp_addr, deadline.strftime('%Y-%m-%d %H:%M:%S'),
tornado.options.options.msg_size, tornado.options.options.rdy)]:
worker_chans.append((ssh_client, ssh_cmd_async(ssh_client, cmd)))
except Exception:
logging.exception('failed')
stats = {
'bench_reader': {
'durations': [],
'mbytes': [],
'ops': []
},
'bench_writer': {
'durations': [],
'mbytes': [],
'ops': []
}
}
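    # The parsing below assumes bench_writer/bench_reader emit a stderr line of
    # roughly this shape (illustrative values only):
    #   [bench_writer] ... duration: 10.0s - 95.3mb/s - 499875.1ops/s
    # i.e. the bracketed program name first, then duration, throughput and ops
    # separated by ' - ', which is what the rsplit/split logic picks apart.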
while worker_chans:
for ssh_client, chan in worker_chans[:]:
if chan.recv_ready():
                sys.stdout.write(chan.recv(4096).decode('utf-8'))
sys.stdout.flush()
continue
if chan.recv_stderr_ready():
line = chan.recv_stderr(4096).decode('utf-8')
if 'duration:' in line:
kind = line.split(' ')[0][1:-1]
parts = line.rsplit('duration:')[1].split('-')
stats[kind]['durations'].append(float(parts[0].strip()[:-1]))
stats[kind]['mbytes'].append(float(parts[1].strip()[:-4]))
stats[kind]['ops'].append(float(parts[2].strip()[:-5]))
sys.stdout.write(line)
sys.stdout.flush()
continue
if chan.exit_status_ready():
worker_chans.remove((ssh_client, chan))
time.sleep(0.1)
for kind, data in stats.items():
if not data['durations']:
continue
max_duration = max(data['durations'])
total_mb = sum(data['mbytes'])
total_ops = sum(data['ops'])
logging.info('[%s] %fs - %fmb/s - %fops/s - %fus/op',
kind, max_duration, total_mb, total_ops,
max_duration / total_ops * 1000 * 1000)
for ssh_client, chan in nsqd_chans:
chan.close()
def _find_instances():
session = get_session()
ec2 = session.resource('ec2')
return [i for i in ec2.instances.all() if
i.state['Name'] == 'running' and any(t['Key'] == 'nsq_bench' for t in i.tags)]
def decomm():
instances = _find_instances()
logging.info('terminating instances %s' % ','.join(i.id for i in instances))
for instance in instances:
instance.terminate()
if __name__ == '__main__':
tornado.options.define('region', type=str, default='us-east-1',
help='EC2 region to launch instances')
tornado.options.define('nsqd_count', type=int, default=3,
help='how many nsqd instances to launch')
tornado.options.define('worker_count', type=int, default=3,
help='how many worker instances to launch')
# ubuntu 18.04 HVM instance store us-east-1
tornado.options.define('ami', type=str, default='ami-0938f2289b3fa3f5b',
help='AMI ID')
tornado.options.define('ssh_key_name', type=str, default='default',
help='SSH key name')
tornado.options.define('instance_type', type=str, default='c3.2xlarge',
help='EC2 instance type')
tornado.options.define('msg_size', type=int, default=200,
help='size of message')
tornado.options.define('rdy', type=int, default=10000,
help='RDY count to use for bench_reader')
tornado.options.define('mode', type=str, default='pubsub',
help='the benchmark mode (pub, pubsub)')
tornado.options.define('commit', type=str, default='master',
help='the git commit')
tornado.options.define('golang_version', type=str, default='1.14.3',
help='the go version')
tornado.options.parse_command_line()
logging.getLogger('paramiko').setLevel(logging.WARNING)
warnings.simplefilter('ignore')
cmd_name = sys.argv[-1]
cmd_map = {
'bootstrap': bootstrap,
'run': run,
'decomm': decomm
}
cmd = cmd_map.get(cmd_name, bootstrap)
sys.exit(cmd())
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase
import errno
from formencode import Invalid
import mock
from tg import expose, config
from nose.tools import assert_equal, assert_raises
from webob.exc import HTTPUnauthorized
from alluratest.controller import TestController
from allura.tests import decorators as td
from allura.lib import helpers as h
from forgeimporters import base
class TestProjectExtractor(TestCase):
@mock.patch('forgeimporters.base.h.urlopen')
@mock.patch('forgeimporters.base.urllib2.Request')
def test_urlopen(self, Request, urlopen):
r = base.ProjectExtractor.urlopen('myurl', data='foo')
Request.assert_called_once_with('myurl', data='foo')
req = Request.return_value
req.add_header.assert_called_once_with(
'User-Agent', 'Allura Data Importer (https://forge-allura.apache.org/p/allura/)')
urlopen.assert_called_once_with(req, retries=3, codes=(408,))
self.assertEqual(r, urlopen.return_value)
@mock.patch.object(base, 'datetime')
@mock.patch.object(base, 'M')
@mock.patch.object(base, 'object_from_path')
@mock.patch.object(base, 'c')
@mock.patch.object(base, 'g')
def test_import_tool(g, c, object_from_path, M, _datetime):
c.project = mock.Mock(name='project')
c.user = mock.Mock(name='user')
object_from_path.return_value = importer = mock.Mock()
importer.return_value.source = 'source'
importer.return_value.tool_label = 'label'
base.import_tool(
'forgeimporters.base.ToolImporter', project_name='project_name',
mount_point='mount_point', mount_label='mount_label')
app = importer.return_value.import_tool.return_value
importer.return_value.import_tool.assert_called_once_with(
c.project,
c.user, project_name='project_name', mount_point='mount_point',
mount_label='mount_label')
M.Project.query.update.assert_called_once_with(
{'_id': c.project._id},
{'$set': {'last_updated': _datetime.utcnow()}})
g.director.create_activity.assert_called_once_with(
c.user, "imported",
app.config, related_nodes=[c.project], tags=['import'])
g.post_event.assert_called_once_with(
'import_tool_task_succeeded',
'source',
'label',
)
@mock.patch.object(base.traceback, 'format_exc')
@mock.patch.object(base, 'ToolImporter')
@mock.patch.object(base, 'g')
def test_import_tool_failed(g, ToolImporter, format_exc):
format_exc.return_value = 'my traceback'
importer = mock.Mock(source='importer_source',
tool_label='importer_tool_label')
importer.import_tool.side_effect = RuntimeError('my error')
ToolImporter.return_value = importer
assert_raises(
RuntimeError, base.import_tool, 'forgeimporters.base.ToolImporter',
project_name='project_name')
g.post_event.assert_called_once_with(
'import_tool_task_failed',
error=str(importer.import_tool.side_effect),
traceback='my traceback',
importer_source='importer_source',
importer_tool_label='importer_tool_label',
project_name='project_name',
)
def ep(name, source=None, importer=None, **kw):
mep = mock.Mock(name='mock_ep', **kw)
mep.name = name
if importer is not None:
mep.load.return_value = importer
else:
mep.load.return_value.source = source
mep.lv = mep.load.return_value.return_value
mep.lv.source = source
return mep
class TestProjectImporter(TestCase):
@mock.patch.object(base.h, 'iter_entry_points')
def test_tool_importers(self, iep):
eps = iep.return_value = [
ep('ep1', 'foo'), ep('ep2', 'bar'), ep('ep3', 'foo')]
pi = base.ProjectImporter(mock.Mock(name='neighborhood'))
pi.source = 'foo'
self.assertEqual(pi.tool_importers,
{'ep1': eps[0].lv, 'ep3': eps[2].lv})
iep.assert_called_once_with('allura.importers')
@mock.patch.object(base.ToolImporter, 'by_name')
@mock.patch.object(base, 'redirect')
@mock.patch.object(base, 'flash')
@mock.patch.object(base, 'import_tool')
@mock.patch.object(base, 'M')
@mock.patch.object(base, 'c')
def test_process(self, c, M, import_tool, flash, redirect, by_name):
base.ToolImporter.target_app_ep_names = []
by_name.return_value = base.ToolImporter()
pi = base.ProjectImporter(mock.Mock())
pi.source = 'Source'
pi.after_project_create = mock.Mock()
pi.neighborhood.register_project.return_value.script_name = 'script_name/'
kw = {
'project_name': 'project_name',
'project_shortname': 'shortname',
'tools': ['tool'],
}
with mock.patch.dict(base.config, {'site_name': 'foo'}):
pi.process(**kw)
pi.neighborhood.register_project.assert_called_once_with(
'shortname', project_name='project_name')
pi.after_project_create.assert_called_once_with(c.project, **kw)
import_tool.post.assert_called_once_with(
'forgeimporters.base.ToolImporter', **kw)
M.AuditLog.log.assert_called_once_with('import project from Source')
self.assertEqual(flash.call_count, 1)
redirect.assert_called_once_with('script_name/admin/overview')
@mock.patch.object(base.h, 'request')
@mock.patch.object(base, 'require_access')
@mock.patch.object(base.h, 'c')
def test_login_overlay(self, c, require_access, request):
pi = base.ProjectImporter(mock.Mock())
require_access.side_effect = HTTPUnauthorized
c.show_login_overlay = False
request.path = '/test-importer/'
pi._check_security()
self.assertEqual(c.show_login_overlay, True)
c.show_login_overlay = False
request.path = '/test-importer/check_names/'
pi._check_security()
self.assertEqual(c.show_login_overlay, True)
c.show_login_overlay = False
request.path = '/test-importer/process/'
with td.raises(HTTPUnauthorized):
pi._check_security()
self.assertEqual(c.show_login_overlay, False)
TA1 = mock.Mock(tool_label='foo', tool_description='foo_desc')
TA2 = mock.Mock(tool_label='qux', tool_description='qux_desc')
TA3 = mock.Mock(tool_label='baz', tool_description='baz_desc')
class TI1Controller(base.ToolImportController):
@expose()
def index(self, *a, **kw):
return 'test importer 1 controller webpage'
class TI1(base.ToolImporter):
target_app = TA1
controller = TI1Controller
class TI2(base.ToolImporter):
target_app = TA2
tool_label = 'bar'
tool_description = 'bar_desc'
class TI3(base.ToolImporter):
target_app = [TA2, TA2]
class TestToolImporter(TestCase):
@mock.patch.object(base.h, 'iter_entry_points')
def test_by_name(self, iep):
eps = iep.return_value = [ep('my-name', 'my-source')]
importer = base.ToolImporter.by_name('my-name')
iep.assert_called_once_with('allura.importers', 'my-name')
self.assertEqual(importer, eps[0].lv)
iep.reset_mock()
iep.return_value = []
importer = base.ToolImporter.by_name('other-name')
iep.assert_called_once_with('allura.importers', 'other-name')
self.assertEqual(importer, None)
@mock.patch.object(base.h, 'iter_entry_points')
def test_by_app(self, iep):
eps = iep.return_value = [
ep('importer1', importer=TI1),
ep('importer2', importer=TI2),
ep('importer3', importer=TI3),
]
importers = base.ToolImporter.by_app(TA2)
self.assertEqual(set(importers.keys()), set([
'importer2',
'importer3',
]))
self.assertIsInstance(importers['importer2'], TI2)
self.assertIsInstance(importers['importer3'], TI3)
def test_tool_label(self):
self.assertEqual(TI1().tool_label, 'foo')
self.assertEqual(TI2().tool_label, 'bar')
self.assertEqual(TI3().tool_label, 'qux')
def test_tool_description(self):
self.assertEqual(TI1().tool_description, 'foo_desc')
self.assertEqual(TI2().tool_description, 'bar_desc')
self.assertEqual(TI3().tool_description, 'qux_desc')
class TestToolsValidator(TestCase):
def setUp(self):
self.tv = base.ToolsValidator('good-source')
@mock.patch.object(base.ToolImporter, 'by_name')
def test_empty(self, by_name):
self.assertEqual(self.tv.to_python(''), [])
self.assertEqual(by_name.call_count, 0)
@mock.patch.object(base.ToolImporter, 'by_name')
def test_no_ep(self, by_name):
eps = by_name.return_value = None
with self.assertRaises(Invalid) as cm:
self.tv.to_python('my-value')
self.assertEqual(cm.exception.msg, 'Invalid tool selected: my-value')
by_name.assert_called_once_with('my-value')
@mock.patch.object(base.ToolImporter, 'by_name')
def test_bad_source(self, by_name):
eps = by_name.return_value = ep('ep1', 'bad-source').lv
with self.assertRaises(Invalid) as cm:
self.tv.to_python('my-value')
self.assertEqual(cm.exception.msg, 'Invalid tool selected: my-value')
by_name.assert_called_once_with('my-value')
@mock.patch.object(base.ToolImporter, 'by_name')
def test_multiple(self, by_name):
eps = by_name.side_effect = [
ep('ep1', 'bad-source').lv, ep('ep2', 'good-source').lv, ep('ep3', 'bad-source').lv]
with self.assertRaises(Invalid) as cm:
self.tv.to_python(['value1', 'value2', 'value3'])
self.assertEqual(cm.exception.msg,
'Invalid tools selected: value1, value3')
self.assertEqual(by_name.call_args_list, [
mock.call('value1'),
mock.call('value2'),
mock.call('value3'),
])
@mock.patch.object(base.ToolImporter, 'by_name')
def test_valid(self, by_name):
eps = by_name.side_effect = [
ep('ep1', 'good-source').lv, ep('ep2', 'good-source').lv, ep('ep3', 'bad-source').lv]
self.assertEqual(
self.tv.to_python(['value1', 'value2']), ['value1', 'value2'])
self.assertEqual(by_name.call_args_list, [
mock.call('value1'),
mock.call('value2'),
])
class TestProjectToolsImportController(TestController):
def test_pages(self):
admin_page = self.app.get('/admin/')
with mock.patch.object(base.h, 'iter_entry_points') as iep:
iep.return_value = [
ep('importer1', importer=TI1),
ep('importer2', importer=TI2),
ep('importer3', importer=TI3),
]
import_main_page = admin_page.click('Import')
url = import_main_page.environ['PATH_INFO']
assert url.endswith('/admin/ext/import/'), url
with mock.patch.object(base.ToolImporter, 'by_name') as by_name:
by_name.return_value = TI1
import1_page = import_main_page.click('Import', href=r'importer1$')
url = import1_page.environ['PATH_INFO']
assert url.endswith('/admin/ext/import/importer1'), url
assert_equal(import1_page.body, 'test importer 1 controller webpage')
@mock.patch.object(base.h, 'iter_entry_points')
def test_hidden(self, iep):
iep.return_value = [
ep('importer1', importer=TI1),
ep('importer2', importer=TI2),
]
admin_page = self.app.get('/admin/')
with h.push_config(config, hidden_importers='importer1'):
import_main_page = admin_page.click('Import')
url = import_main_page.environ['PATH_INFO']
assert url.endswith('/admin/ext/import/'), url
assert not import_main_page.html.find('a', href='importer1')
assert import_main_page.html.find('a', href='importer2')
def test_get_importer_upload_path():
project = mock.Mock(
shortname='prefix/shortname',
is_nbhd_project=False,
is_user_project=False,
is_root=False,
url=lambda: 'n_url/',
neighborhood=mock.Mock(url_prefix='p/'),
)
with h.push_config(config, importer_upload_path='path/{nbhd}/{project}'):
assert_equal(base.get_importer_upload_path(project), 'path/p/prefix')
project.is_nbhd_project = True
assert_equal(base.get_importer_upload_path(project), 'path/p/n_url')
project.is_nbhd_project = False
project.is_user_project = True
assert_equal(base.get_importer_upload_path(project),
'path/p/shortname')
project.is_user_project = False
project.is_root = True
assert_equal(base.get_importer_upload_path(project),
'path/p/prefix/shortname')
@mock.patch.object(base, 'os')
@mock.patch.object(base, 'get_importer_upload_path')
def test_save_importer_upload(giup, os):
os.path.join = lambda *a: '/'.join(a)
giup.return_value = 'path'
os.makedirs.side_effect = OSError(errno.EEXIST, 'foo')
_open = mock.MagicMock()
fp = _open.return_value.__enter__.return_value
with mock.patch('__builtin__.open', _open):
base.save_importer_upload('project', 'file', 'data')
os.makedirs.assert_called_once_with('path')
_open.assert_called_once_with('path/file', 'w')
fp.write.assert_called_once_with('data')
os.makedirs.side_effect = OSError(errno.EACCES, 'foo')
assert_raises(OSError, base.save_importer_upload,
'project', 'file', 'data')
class TestFile(object):
@mock.patch.object(base, 'ProjectExtractor')
def test_type(self, PE):
PE().page = {
'content-type': 'image/png',
'data': 'data',
}
f = base.File('http://example.com/barbaz.jpg')
assert_equal(f.type, 'image/jpeg')
f = base.File('http://example.com/barbaz')
assert_equal(f.type, 'image/png')
|
|
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
The lexicon is constructed by calling
``lexicon.fromstring(<lexicon string>)``.
In order to construct a parser, you also need a rule set.
The standard English rules are provided in chart as
``chart.DefaultRuleSet``.
The parser can then be constructed by calling, for example:
``parser = chart.CCGChartParser(<lexicon>, <ruleset>)``
Parsing is then performed by running
``parser.parse(<sentence>.split())``.
While this returns a list of trees, the default representation
of the produced trees is not very enlightening, particularly
given that it uses the same tree class as the CFG parsers.
It is probably better to call:
``chart.printCCGDerivation(<parse tree extracted from list>)``
which should print a nice representation of the derivation.
This entire process is shown far more clearly in the demonstration:
python chart.py
"""
from __future__ import print_function, division, unicode_literals
import itertools
from nltk.parse import ParserI
from nltk.parse.chart import AbstractChartRule, EdgeI, Chart
from nltk.tree import Tree
from nltk.ccg.lexicon import fromstring, Token
from nltk.ccg.combinator import (ForwardT, BackwardT, ForwardApplication,
BackwardApplication, ForwardComposition,
BackwardComposition, ForwardSubstitution,
BackwardBx, BackwardSx)
from nltk.compat import python_2_unicode_compatible, string_types
from nltk.ccg.combinator import *
from nltk.ccg.logic import *
from nltk.sem.logic import *
# Based on the EdgeI class from NLTK.
# A number of the properties of the EdgeI interface don't
# transfer well to CCGs, however.
class CCGEdge(EdgeI):
def __init__(self, span, categ, rule):
self._span = span
self._categ = categ
self._rule = rule
self._comparison_key = (span, categ, rule)
# Accessors
def lhs(self): return self._categ
def span(self): return self._span
def start(self): return self._span[0]
def end(self): return self._span[1]
    def length(self): return self._span[1] - self._span[0]
def rhs(self): return ()
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
def nextsym(self): return None
def categ(self): return self._categ
def rule(self): return self._rule
class CCGLeafEdge(EdgeI):
'''
Class representing leaf edges in a CCG derivation.
'''
def __init__(self, pos, token, leaf):
self._pos = pos
self._token = token
self._leaf = leaf
self._comparison_key = (pos, token.categ(), leaf)
# Accessors
def lhs(self): return self._token.categ()
def span(self): return (self._pos, self._pos+1)
def start(self): return self._pos
def end(self): return self._pos + 1
def length(self): return 1
def rhs(self): return self._leaf
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
def nextsym(self): return None
def token(self): return self._token
def categ(self): return self._token.categ()
def leaf(self): return self._leaf
@python_2_unicode_compatible
class BinaryCombinatorRule(AbstractChartRule):
'''
Class implementing application of a binary combinator to a chart.
Takes the directed combinator to apply.
'''
NUMEDGES = 2
def __init__(self,combinator):
self._combinator = combinator
# Apply a combinator
def apply(self, chart, grammar, left_edge, right_edge):
# The left & right edges must be touching.
if not (left_edge.end() == right_edge.start()):
return
# Check if the two edges are permitted to combine.
# If so, generate the corresponding edge.
if self._combinator.can_combine(left_edge.categ(),right_edge.categ()):
for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
new_edge = CCGEdge(span=(left_edge.start(), right_edge.end()),categ=res,rule=self._combinator)
if chart.insert(new_edge,(left_edge,right_edge)):
yield new_edge
# The representation of the combinator (for printing derivations)
def __str__(self):
return "%s" % self._combinator
# Type-raising must be handled slightly differently from the other rules, as the
# resulting edges only span a single input edge, rather than both edges.
@python_2_unicode_compatible
class ForwardTypeRaiseRule(AbstractChartRule):
'''
Class for applying forward type raising
'''
NUMEDGES = 2
def __init__(self):
self._combinator = ForwardT
def apply(self, chart, grammar, left_edge, right_edge):
if not (left_edge.end() == right_edge.start()):
return
for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
new_edge = CCGEdge(span=left_edge.span(),categ=res,rule=self._combinator)
if chart.insert(new_edge,(left_edge,)):
yield new_edge
def __str__(self):
return "%s" % self._combinator
@python_2_unicode_compatible
class BackwardTypeRaiseRule(AbstractChartRule):
'''
Class for applying backward type raising.
'''
NUMEDGES = 2
def __init__(self):
self._combinator = BackwardT
def apply(self, chart, grammar, left_edge, right_edge):
if not (left_edge.end() == right_edge.start()):
return
for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
new_edge = CCGEdge(span=right_edge.span(),categ=res,rule=self._combinator)
if chart.insert(new_edge,(right_edge,)):
yield new_edge
def __str__(self):
return "%s" % self._combinator
# Common sets of combinators used for English derivations.
ApplicationRuleSet = [BinaryCombinatorRule(ForwardApplication),
BinaryCombinatorRule(BackwardApplication)]
CompositionRuleSet = [BinaryCombinatorRule(ForwardComposition),
BinaryCombinatorRule(BackwardComposition),
BinaryCombinatorRule(BackwardBx)]
SubstitutionRuleSet = [BinaryCombinatorRule(ForwardSubstitution),
BinaryCombinatorRule(BackwardSx)]
TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()]
# The standard English rule set.
DefaultRuleSet = ApplicationRuleSet + CompositionRuleSet + \
SubstitutionRuleSet + TypeRaiseRuleSet
class CCGChartParser(ParserI):
'''
Chart parser for CCGs.
Based largely on the ChartParser class from NLTK.
'''
def __init__(self, lexicon, rules, trace=0):
self._lexicon = lexicon
self._rules = rules
self._trace = trace
def lexicon(self):
return self._lexicon
# Implements the CYK algorithm
def parse(self, tokens):
tokens = list(tokens)
chart = CCGChart(list(tokens))
lex = self._lexicon
# Initialize leaf edges.
for index in range(chart.num_leaves()):
for token in lex.categories(chart.leaf(index)):
new_edge = CCGLeafEdge(index, token, chart.leaf(index))
chart.insert(new_edge, ())
# Select a span for the new edges
for span in range(2,chart.num_leaves()+1):
for start in range(0,chart.num_leaves()-span+1):
# Try all possible pairs of edges that could generate
# an edge for that span
for part in range(1,span):
lstart = start
mid = start + part
rend = start + span
for left in chart.select(span=(lstart,mid)):
for right in chart.select(span=(mid,rend)):
# Generate all possible combinations of the two edges
for rule in self._rules:
edges_added_by_rule = 0
for newedge in rule.apply(chart,lex,left,right):
edges_added_by_rule += 1
# Output the resulting parses
return chart.parses(lex.start())
class CCGChart(Chart):
def __init__(self, tokens):
Chart.__init__(self, tokens)
    # Constructs the trees for a given parse. Unfortunately, the parse trees need to be
    # constructed slightly differently from those in the default Chart class, so it has to
    # be reimplemented.
def _trees(self, edge, complete, memo, tree_class):
assert complete, "CCGChart cannot build incomplete trees"
if edge in memo:
return memo[edge]
if isinstance(edge,CCGLeafEdge):
word = tree_class(edge.token(), [self._tokens[edge.start()]])
leaf = tree_class((edge.token(), "Leaf"), [word])
memo[edge] = [leaf]
return [leaf]
memo[edge] = []
trees = []
for cpl in self.child_pointer_lists(edge):
child_choices = [self._trees(cp, complete, memo, tree_class)
for cp in cpl]
for children in itertools.product(*child_choices):
lhs = (Token(self._tokens[edge.start():edge.end()], edge.lhs(), compute_semantics(children, edge)), str(edge.rule()))
trees.append(tree_class(lhs, children))
memo[edge] = trees
return trees
def compute_semantics(children, edge):
if children[0].label()[0].semantics() is None:
return None
    if len(children) == 2:
if isinstance(edge.rule(), BackwardCombinator):
children = [children[1],children[0]]
combinator = edge.rule()._combinator
function = children[0].label()[0].semantics()
argument = children[1].label()[0].semantics()
if isinstance(combinator, UndirectedFunctionApplication):
return compute_function_semantics(function, argument)
elif isinstance(combinator, UndirectedComposition):
return compute_composition_semantics(function, argument)
elif isinstance(combinator, UndirectedSubstitution):
return compute_substitution_semantics(function, argument)
else:
            raise AssertionError('Unsupported combinator \'' + str(combinator) + '\'')
else:
return compute_type_raised_semantics(children[0].label()[0].semantics())
#--------
# Displaying derivations
#--------
def printCCGDerivation(tree):
# Get the leaves and initial categories
leafcats = tree.pos()
leafstr = ''
catstr = ''
# Construct a string with both the leaf word and corresponding
# category aligned.
for (leaf, cat) in leafcats:
str_cat = "%s" % cat
nextlen = 2 + max(len(leaf), len(str_cat))
lcatlen = (nextlen - len(str_cat)) // 2
rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
catstr += ' '*lcatlen + str_cat + ' '*rcatlen
lleaflen = (nextlen - len(leaf)) // 2
rleaflen = lleaflen + (nextlen - len(leaf)) % 2
leafstr += ' '*lleaflen + leaf + ' '*rleaflen
print(leafstr.rstrip())
print(catstr.rstrip())
# Display the derivation steps
printCCGTree(0,tree)
# Prints the sequence of derivation steps.
def printCCGTree(lwidth,tree):
rwidth = lwidth
# Is a leaf (word).
# Increment the span by the space occupied by the leaf.
if not isinstance(tree, Tree):
return 2 + lwidth + len(tree)
# Find the width of the current derivation step
for child in tree:
rwidth = max(rwidth, printCCGTree(rwidth,child))
# Is a leaf node.
# Don't print anything, but account for the space occupied.
if not isinstance(tree.label(), tuple):
return max(rwidth,2 + lwidth + len("%s" % tree.label()),
2 + lwidth + len(tree[0]))
(token, op) = tree.label()
if op == 'Leaf':
return rwidth
# Pad to the left with spaces, followed by a sequence of '-'
# and the derivation rule.
print(lwidth*' ' + (rwidth-lwidth)*'-' + "%s" % op)
# Print the resulting category on a new line.
str_res = "%s" % (token.categ())
if token.semantics() is not None:
str_res += " {" + str(token.semantics()) + "}"
respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
print(respadlen*' ' + str_res)
return rwidth
### Demonstration code
# Construct the lexicon
lex = fromstring('''
:- S, NP, N, VP # Primitive categories, S is the target primitive
Det :: NP/N # Family of words
Pro :: NP
TV :: VP/NP
Modal :: (S\\NP)/VP # Backslashes need to be escaped
I => Pro # Word -> Category mapping
you => Pro
the => Det
# Variables have the special keyword 'var'
# '.' prevents permutation
# ',' prevents composition
and => var\\.,var/.,var
which => (N\\N)/(S/NP)
will => Modal # Categories can be either explicit, or families.
might => Modal
cook => TV
eat => TV
mushrooms => N
parsnips => N
bacon => N
''')
def demo():
parser = CCGChartParser(lex, DefaultRuleSet)
for parse in parser.parse("I might cook and eat the bacon".split()):
printCCGDerivation(parse)
if __name__ == '__main__':
demo()
|
|
import pygatt
import time
import json
import redis
import uuid
import requests
import pexpect
import api
from discovery import discover
from multiprocessing import Process, Queue
r = redis.StrictRedis(host="localhost", port=6379, db=0)
def start():
workers = {}
devices = discover(2)
for mac_address in devices:
if mac_address not in workers\
or not workers[mac_address]["process"].is_alive():
workers[mac_address] = {}
workers[mac_address]["queue"] = Queue()
workers[mac_address]["process"] = Process(target=device_worker,
args=(workers[mac_address]["queue"], mac_address))
workers[mac_address]["process"].start()
# def communication_loop():
# workers = {}
# while True:
# devices = discover(2)
# for mac_address in devices:
# if mac_address not in workers\
# or not workers[mac_address]["process"].is_alive():
# workers[mac_address] = {}
# workers[mac_address]["queue"] = Queue()
# workers[mac_address]["process"] = Process(target=device_worker,
# args=(workers[mac_address]["queue"], mac_address))
# workers[mac_address]["process"].start()
# time.sleep(30)
# # Kill all workers before rescanning
# any_alive = True
# while any_alive:
# any_alive = False
# for mac_address in workers:
# if workers[mac_address]["process"].is_alive():
# workers[mac_address]["queue"].put("stop")
# any_alive = True
# Need to make our own connect method; the library one doesn't pass the MAC
# address to the connect command, which causes the connection to fail
def bluetooth_connect(dev, mac_address, timeout):
"""Connect to the device."""
dev._logger.info('Connecting with timeout=%s', timeout)
try:
with dev.connection_lock:
dev.con.sendline('connect {}'.format(mac_address))
dev.con.expect(r'Connection successful.*\[LE\]>', timeout)
except pexpect.TIMEOUT:
raise pygatt.exceptions.BluetoothLEError(
"Timed-out connecting to device after %s seconds." % timeout)
def handle_device_message(dev, mac_address, message):
# This is a bodge to ignore the random crap spewed out by the Arduino on
# boot - Need to have proper messages coming from the device
print("Received message from {}: {}".format(mac_address, message))
message_parts = message.split(",")
if (message_parts[0] == "card_scan"):
        handle_card_scan(dev, mac_address, message_parts[1])
elif (message_parts[0] == "request_bill"):
request_bill(dev, mac_address)
elif (message_parts[0] == "call_waiter"):
call_waiter(dev, mac_address)
elif (message_parts[0] == "leave_table"):
leave_table(dev, mac_address)
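# The messages handled above are assumed to be comma-separated ASCII commands
# arriving over the BLE characteristic, for example (hypothetical payloads):
#   "card_scan,<card_id>"   "request_bill"   "call_waiter"   "leave_table"
# Only card_scan carries an argument; the rest are bare commands.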
def handle_card_scan(dev, mac_address, card_id):
table_id = api.get_table_id(mac_address)
customer_id = api.get_customer_id(card_id)
table_available = api.table_available(table_id, customer_id)
if customer_id and table_available:
        # If the table already has an occupancy, update it to set it as occupied;
        # if not, create a new one
occupancy = api.get_occupancy(table_id)
if occupancy:
api.set_occupied(table_id, occupancy)
else:
api.create_new_occupancy(table_id, customer_id)
response = b"access1\x00" if table_available and customer_id\
else b"access0\x00"
dev.char_write(0x0012, bytearray(response))
def request_bill(dev, mac_address):
table_id = api.get_table_id(mac_address)
api.request_bill(table_id)
dev.char_write(0x0012, bytearray(b"ok\x00"))
def call_waiter(dev, mac_address):
table_id = api.get_table_id(mac_address)
api.call_waiter(table_id)
dev.char_write(0x0012, bytearray(b"ok\x00"))
def leave_table(dev, mac_address):
table_id = api.get_table_id(mac_address)
api.leave_table(table_id)
dev.char_write(0x0012, bytearray(b"ok\x00"))
def get_order_status(dev, mac_address):
table_id = api.get_table_id(mac_address)
order_status = api.get_order_status(table_id)
    dev.char_write(0x0012, bytearray(order_status.encode("utf-8") + b"\x00"))
def device_worker(queue, mac_address):
try:
dev = pygatt.pygatt.BluetoothLEDevice(mac_address,
app_options="-t random")
bluetooth_connect(dev, mac_address, 5)
def callback(_, message):
try:
handle_device_message(dev, mac_address,
message.decode("utf-8").strip())
except UnicodeDecodeError:
print("Could not understand message from device")
dev.subscribe("0000ffe1-0000-1000-8000-00805f9b34fb", callback)
        # Do the same as the library's run method, but make it possible
        # to stop!
while dev.running:
            # This allows the parent to stop the process
if not queue.empty():
queue_entry = queue.get()
if queue_entry == "stop":
print("Killing worker")
return
with dev.connection_lock:
try:
dev._expect("fooooooo", timeout=.1)
except pygatt.exceptions.BluetoothLEError:
pass
            # TODO need some delay to avoid aggressively grabbing the lock,
            # blocking out the others. worst case is 1 second delay for async
            # not received as a part of another request
time.sleep(.001)
# while True:
# print("working")
# time.sleep(0.1)
# # Atomically get and delete the latest messages for this device
# pipe = r.pipeline()
# messages = pipe.get(mac_address)
# pipe.delete(mac_address)
# messages, _ = pipe.execute()
#
# if messages:
# # Remove trailing comma, wrap in [] then decode as JSON
# messages = json.loads("[{}]".format(messages[:-1]))
# for message in messages:
# if "message" in message and \
# type(message["message"]) == dict:
# process_message(dev, message["message"])
# # Read data from device and make POST requests as required
# value = dev.char_read_hnd(0x0e)
# # If button is pressed,send request
# if value == [0x01]:
# requests.post("http://burger.bleepr.io/buttons/{}".format(
# uuid.uuid4()))
# dev.char_write(0x0e, bytearray([0x00]))
except pygatt.exceptions.BluetoothLEError as ex:
print("Bluetooth error ({}), killing worker for {}".format(str(ex),
mac_address))
return
def process_message(dev, message):
print(message)
displaytext = message["button"] + "\x00"
dev.char_write(0x0012, bytearray(displaytext.encode("UTF-8")))
# if message["button"] == "ledon":
# dev.char_write(0x0e, bytearray([0x00]))
# elif message["button"] == "ledoff":
# dev.char_write(0x0e, bytearray([0x01]))
# try:
# dev = pygatt.pygatt.BluetoothLEDevice(mac_address,
# app_options="-t random")
# dev.connect()
# while True:
# led_status = dev.char_read_hnd(0x0e)
# if led_status[0] == 0x00:
# dev.char_write(0x0e, bytearray([0x01]))
# elif led_status[0] == 0x01:
# dev.char_write(0x0e, bytearray([0x00]))
#
# # This allowes the parent to stop the process
# if not queue.empty():
# queue_entry = queue.get()
# if queue_entry == "stop":
# print("Killing worker")
# return
# except pygatt.exceptions.BluetoothLEError:
# print("Bluetooth error, killing worker for {}".format(mac_address))
# return
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# palette.py
# palette_detect
#
"""
Detect the main colors used in an image.
"""
import colorsys
import matplotlib.colors as colors
import multiprocessing
import sys
from PIL import Image, ImageChops, ImageDraw
from collections import Counter, namedtuple
from colormath.color_objects import RGBColor
from operator import itemgetter, mul, attrgetter
import numpy as np
import scipy
from scipy.misc import imread
import config
Color = namedtuple('Color', ['value', 'prominence'])
Palette = namedtuple('Palette', 'colors bgcolor')
Histogram = namedtuple('Histogram', 'colors bgcolor')
def color_stream_st(istream=sys.stdin, save_palette=False, **kwargs):
"""
Read filenames from the input stream and detect their palette.
"""
for line in istream:
filename = line.strip()
try:
palette = extract_colors(filename, **kwargs)
except Exception, e:
print >> sys.stderr, filename, e
continue
print_colors(filename, palette)
if save_palette:
save_palette_as_image(filename, palette)
def color_stream_mt(istream=sys.stdin, n=config.N_PROCESSES, **kwargs):
"""
Read filenames from the input stream and detect their palette using
multiple processes.
"""
queue = multiprocessing.Queue(1000)
lock = multiprocessing.Lock()
pool = [multiprocessing.Process(target=color_process, args=(queue, lock),
kwargs=kwargs) for i in xrange(n)]
for p in pool:
p.start()
block = []
for line in istream:
block.append(line.strip())
if len(block) == config.BLOCK_SIZE:
queue.put(block)
block = []
if block:
queue.put(block)
for i in xrange(n):
queue.put(config.SENTINEL)
for p in pool:
p.join()
def color_process(queue, lock):
"Receive filenames and get the colors from their images."
while True:
block = queue.get()
if block == config.SENTINEL:
break
for filename in block:
try:
palette = extract_colors(filename)
            except:  # TODO: this exception clause is too broad.
continue
lock.acquire()
try:
print_colors(filename, palette)
finally:
lock.release()
def distance(c1, c2):
"""
Calculate the visual distance between the two colors.
"""
return RGBColor(*c1).delta_e(RGBColor(*c2), method='cmc')
def rgb_to_hex(color):
return '#%.02x%.02x%.02x' % color
def hex_to_rgb(color):
assert color.startswith('#') and len(color) == 7
return int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)
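# For example: rgb_to_hex((255, 0, 128)) -> '#ff0080' and
# hex_to_rgb('#ff0080') -> (255, 0, 128).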
def extract_colors(
filename_or_img, min_saturation=config.MIN_SATURATION,
min_distance=config.MIN_DISTANCE, max_colors=config.MAX_COLORS,
min_prominence=config.MIN_PROMINENCE, n_quantized=config.N_QUANTIZED, hist=False):
"""
Determine what the major colors are in the given image.
"""
if Image.isImageType(filename_or_img):
im = filename_or_img
else:
im = Image.open(filename_or_img)
# get point color count
if im.mode != 'RGB':
im = im.convert('RGB')
im = autocrop(im, config.WHITE) # assume white box
im = im.convert(
'P', palette=Image.ADAPTIVE, colors=n_quantized).convert('RGB')
data = im.getdata()
dist = Counter(data)
n_pixels = mul(*im.size)
# aggregate colors
to_canonical = {config.WHITE: config.WHITE, config.BLACK: config.BLACK}
aggregated = Counter({config.WHITE: 0, config.BLACK: 0})
sorted_cols = sorted(dist.iteritems(), key=itemgetter(1), reverse=True)
for c, n in sorted_cols:
if c in aggregated:
# exact match!
aggregated[c] += n
else:
d, nearest = min((distance(c, alt), alt) for alt in aggregated)
if d < min_distance:
# nearby match
aggregated[nearest] += n
to_canonical[c] = nearest
else:
# no nearby match
aggregated[c] = n
to_canonical[c] = c
# order by prominence
colors = sorted(
[Color(c, n / float(n_pixels)) for c, n in aggregated.iteritems()],
key=attrgetter('prominence'), reverse=True)
colors, bg_color = detect_background(im, colors, to_canonical)
# keep any color which meets the minimum saturation
sat_colors = [c for c in colors if meets_min_saturation(c, min_saturation)]
if bg_color and not meets_min_saturation(bg_color, min_saturation):
bg_color = None
if sat_colors:
colors = sat_colors
else:
# keep at least one color
colors = colors[:1]
# keep any color within 10% of the majority color
color_list = []
histogram = []
color_count = 0
for color in colors:
if (color.prominence >= colors[0].prominence * min_prominence and color_count < max_colors):
color_list.append(color)
color_count += 1
if (color_count >= max_colors and hist == False):
break
elif hist == True:
histogram.append(color)
if len(histogram)<5:
result = Palette(color_list, bg_color)
else:
result = Palette(color_list, bg_color),Histogram(histogram, bg_color)
return result
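# Illustrative usage sketch, not part of the original module; 'photo.jpg' is a
# placeholder path. With hist=False (the default) extract_colors() returns a
# single Palette whose .colors are ordered by prominence and whose .bgcolor
# may be None.
def _extract_colors_example(path='photo.jpg'):
    palette = extract_colors(path, max_colors=5)
    print_colors(path, palette)
    return palette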
def hist(filename_or_img):
if Image.isImageType(filename_or_img):
im = filename_or_img
else:
im = Image.open(filename_or_img)
hist = np.array(im.histogram())
print hist.shape
if hist.shape[0] > 257:
R,G,B = np.split(hist,3)
result = [R,G,B]
else:
result = hist
return result
def hist_hsv(filename_or_img):
#img = scipy.misc.imread(filename_or_img)
if Image.isImageType(filename_or_img):
img = filename_or_img
else:
img = Image.open(filename_or_img)
array = np.asarray(img)
arr = (array.astype(float))/255.0
img_hsv = colors.rgb_to_hsv(arr[...,:3])
h = np.histogram(img_hsv[..., 0].flatten() * 255, bins=256, range=(0.0, 255.0))[0]
s = np.histogram(img_hsv[..., 1].flatten() * 255, bins=256, range=(0.0, 255.0))[0]
v = np.histogram(img_hsv[..., 2].flatten() * 255, bins=256, range=(0.0, 255.0))[0]
print 'H'
print h.shape
hsv = np.array((h,s,v))
print 'hsv'
print hsv.shape
return hsv
def hist_2d():
#see http://opencvpython.blogspot.dk/2013/03/histograms-3-2d-histograms.html
import cv2
img = cv2.imread('home.jpg')
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
hist = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
plt.imshow(hist,interpolation = 'nearest')
plt.show()
def norm_color(c):
r, g, b = c
return r / 255.0, g / 255.0, b / 255.0
def detect_background(im, colors, to_canonical):
    # more than half the image means background
if colors[0].prominence >= config.BACKGROUND_PROMINENCE:
return colors[1:], colors[0]
# work out the background color
w, h = im.size
points = [
(0, 0), (0, h / 2), (0, h - 1), (w / 2, h - 1), (w - 1, h - 1),
(w - 1, h / 2), (w - 1, 0), (w / 2, 0)]
edge_dist = Counter(im.getpixel(p) for p in points)
(majority_col, majority_count), = edge_dist.most_common(1)
if majority_count >= 3:
# we have a background color
canonical_bg = to_canonical[majority_col]
bg_color, = [c for c in colors if c.value == canonical_bg]
colors = [c for c in colors if c.value != canonical_bg]
else:
# no background color
bg_color = None
return colors, bg_color
def print_colors(filename, palette):
colors = '%s\t%s\t%s' % (
filename, ','.join(rgb_to_hex(c.value) for c in palette.colors),
palette.bgcolor and rgb_to_hex(palette.bgcolor.value) or '')
print(colors)
sys.stdout.flush()
def save_palette_as_image(filename, palette):
"Save palette as a PNG with labeled, colored blocks"
output_filename = '%s_palette.png' % filename[:filename.rfind('.')]
size = (80 * len(palette.colors), 80)
im = Image.new('RGB', size)
draw = ImageDraw.Draw(im)
for i, c in enumerate(palette.colors):
v = colorsys.rgb_to_hsv(*norm_color(c.value))[2]
(x1, y1) = (i * 80, 0)
(x2, y2) = ((i + 1) * 80 - 1, 79)
draw.rectangle([(x1, y1), (x2, y2)], fill=c.value)
if v < 0.6:
# white with shadow
draw.text((x1 + 4, y1 + 4), rgb_to_hex(c.value), (90, 90, 90))
draw.text((x1 + 3, y1 + 3), rgb_to_hex(c.value))
else:
# dark with bright "shadow"
draw.text((x1 + 4, y1 + 4), rgb_to_hex(c.value), (230, 230, 230))
draw.text((x1 + 3, y1 + 3), rgb_to_hex(c.value), (0, 0, 0))
im.save(output_filename, "PNG")
def save_size_palette_as_image(filename, palette):
"Save palette as a PNG with labeled, colored blocks"
output_filename = '%s_palette.png' % filename[:filename.rfind('.')]
sizes = [i.prominence*2000.0 for i in palette.colors]
x_size = np.sum(sizes)
y_size = np.max(sizes)
size = (int(x_size), int(y_size))
im = Image.new('RGBA', size)
draw = ImageDraw.Draw(im)
x_pos = 0.0
for i, c in enumerate(palette.colors):
v = colorsys.rgb_to_hsv(*norm_color(c.value))[2]
(x1, y1) = (x_pos+1, y_size)
(x2, y2) = (x_pos+sizes[i], y_size-sizes[i])
x_pos = x_pos + sizes[i]
draw.rectangle([(x1, y1), (x2, y2)], fill=c.value)
im.save(output_filename, "PNG")
def meets_min_saturation(c, threshold):
return colorsys.rgb_to_hsv(*norm_color(c.value))[1] > threshold
def autocrop(im, bgcolor):
"Crop away a border of the given background color."
if im.mode != "RGB":
im = im.convert("RGB")
bg = Image.new("RGB", im.size, bgcolor)
diff = ImageChops.difference(im, bg)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
return im # no contents, don't crop to nothing
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx
import mxnet.lr_scheduler as lr_scheduler
import unittest
from nose.tools import raises
import math
from mxnet.test_utils import *
def test_learning_rate():
o1 = mx.optimizer.Optimizer(learning_rate=0.01)
o1.set_learning_rate(0.2)
assert o1.learning_rate == 0.2
lr_s = lr_scheduler.FactorScheduler(step=1)
o2 = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)
assert o2.learning_rate == 0.3
o2.lr_scheduler.base_lr = 0.4
assert o2.learning_rate == 0.4
@raises(UserWarning)
def test_learning_rate_expect_user_warning():
lr_s = lr_scheduler.FactorScheduler(step=1)
o = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)
o.set_learning_rate(0.5)
def test_lr_wd_mult():
data = mx.sym.Variable('data')
bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)
fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)
fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)
mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())
mod.bind(data_shapes=[('data', (5,10))])
mod.init_params(initializer=mx.init.Uniform(1.0))
mod.init_optimizer(optimizer_params={'learning_rate': 1.0})
args1, _ = mod.get_params()
args1 = {k: v.asnumpy() for k, v in args1.items()}
mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)
mod.backward(mod.get_outputs())
mod.update()
args2, _ = mod.get_params()
args2 = {k: v.asnumpy() for k, v in args2.items()}
assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}
assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}
assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)
assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)
assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)
def compare_ndarray_tuple(t1, t2, rtol=None, atol=None):
if t1 is not None and t2 is not None:
if isinstance(t1, tuple):
for s1, s2 in zip(t1, t2):
compare_ndarray_tuple(s1, s2, rtol, atol)
else:
assert_almost_equal(t1.asnumpy(), t2.asnumpy(), rtol=rtol, atol=atol)
def compare_optimizer(opt1, opt2, shape, dtype, w_stype='default', g_stype='default'):
if w_stype == 'default':
w2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
w1 = w2.copyto(default_context())
elif w_stype == 'row_sparse' or w_stype == 'csr':
w2 = rand_ndarray(shape, w_stype, density=1, dtype=dtype)
w1 = w2.copyto(default_context()).tostype('default')
else:
raise Exception("type not supported yet")
if g_stype == 'default':
g2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
g1 = g2.copyto(default_context())
elif g_stype == 'row_sparse' or g_stype == 'csr':
g2 = rand_ndarray(shape, g_stype, dtype=dtype)
g1 = g2.copyto(default_context()).tostype('default')
else:
raise Exception("type not supported yet")
state1 = opt1.create_state_multi_precision(0, w1)
state2 = opt2.create_state_multi_precision(0, w2)
compare_ndarray_tuple(state1, state2)
opt1.update_multi_precision(0, w1, g1, state1)
opt2.update_multi_precision(0, w2, g2, state2)
compare_ndarray_tuple(state1, state2, rtol=1e-4, atol=1e-5)
assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=1e-4, atol=1e-5)
# SGD
class PySGD(mx.optimizer.Optimizer):
"""python reference implemenation of sgd"""
def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):
super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
self.multi_precision = multi_precision
def create_state(self, index, weight):
"""Create additional optimizer state: momentum
Parameters
----------
weight : NDArray
The weight data
"""
momentum = None
weight_master_copy = None
do_multi_precision = self.multi_precision and weight.dtype == np.float16
if do_multi_precision:
if self.momentum != 0.0:
momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)
weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)
return (momentum, weight_master_copy)
else:
if self.momentum != 0.0:
momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
return momentum
def create_state_multi_precision(self, index, weight):
return self.create_state(index, weight)
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
use_multi_precision = isinstance(state, list) or isinstance(state, tuple)
if not use_multi_precision:
if self.momentum == 0.0:
if self.clip_gradient is not None:
weight[:] = ((1 - lr*wd)*weight -
lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
else:
weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad
else:
mom = state
if self.clip_gradient is not None:
mom[:] = (self.momentum*mom - lr*wd*weight -
lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight += mom
else:
mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad
weight += mom
else:
grad32 = array(grad, ctx=grad.context, dtype=np.float32)
mom = state[0]
weight32 = state[1]
if self.momentum == 0.0:
if self.clip_gradient is not None:
weight32[:] = ((1 - lr*wd)*weight32 -
lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
else:
weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32
else:
if self.clip_gradient is not None:
mom[:] = (self.momentum*mom - lr*wd*weight32 -
lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight32 += mom
else:
mom[:] = self.momentum*mom - lr*wd*weight32 - lr*self.rescale_grad*grad32
weight32 += mom
tmp = weight32.astype(weight.dtype)
tmp.copyto(weight)
def update_multi_precision(self, index, weight, grad, state):
self.update(index, weight, grad, state)
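# Worked example of the momentum branch implemented above (illustrative
# numbers): with momentum=0.9, lr=0.1, wd=0, rescale_grad=1 and no gradient
# clipping the update reduces to
#     mom    <- 0.9 * mom - 0.1 * grad
#     weight <- weight + mom
# so starting from mom=0, weight=1.0 and grad=0.5 the first step gives
# mom=-0.05 and weight=0.95.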
def test_sgd():
mx.random.seed(0)
opt1 = PySGD
opt2 = mx.optimizer.SGD
shape = (3, 4, 5)
mom_options = [{}, {'momentum': 0.9}]
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32, np.float64]:
for mom_option in mom_options:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(mom_option)
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
# test operator fallback on cpu
if (default_context() == mx.cpu()):
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
g_stype='row_sparse')
if dtype != np.float16:
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape[:2],
dtype, w_stype='csr', g_stype='csr')
class PySparseSGD(mx.optimizer.Optimizer):
"""python reference implemenation of sgd"""
def __init__(self, learning_rate=0.01, momentum=0.0, **kwargs):
super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
def create_state(self, index, weight):
"""Create additional optimizer state: momentum
Parameters
----------
weight : NDArray
The weight data
"""
if self.momentum == 0.0:
return None
else:
return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
num_rows = weight.shape[0]
if self.momentum == 0.0:
# Update on a per row basis, skip all-zero rows
for row in range(num_rows):
grad_row = grad[row].asnumpy()
all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
if all_zeros:
continue
if self.clip_gradient is not None:
weight[row] = ((1 - lr*wd)*weight[row] -
lr*mx.nd.clip(grad[row]*self.rescale_grad,
-self.clip_gradient, self.clip_gradient))
else:
weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]
else:
mom = state
for row in range(num_rows):
grad_row = grad[row].asnumpy()
all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
if all_zeros:
continue
if self.clip_gradient is not None:
mom[row] = (self.momentum*mom[row] - lr*wd*weight[row] -
lr*mx.nd.clip(grad[row]*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight[row] += mom[row]
else:
mom[row] = self.momentum*mom[row] - lr*wd*weight[row] - lr*self.rescale_grad*grad[row]
weight[row] += mom[row]
def test_sparse_sgd():
mx.random.seed(0)
opt1 = PySparseSGD
opt2 = mx.optimizer.SGD
shape = (3, 4, 5)
mom_options = [{}, {'momentum': 0.9}]
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float32]:
for mom_option in mom_options:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(mom_option)
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,
w_stype='row_sparse', g_stype='row_sparse')
# ADAM
class PyAdam(mx.optimizer.Optimizer):
"""python reference implemenation of adam"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
decay_factor=(1 - 1e-8), sparse_update=False, **kwargs):
super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.decay_factor = decay_factor
self.sparse_update = sparse_update
def create_state(self, index, weight):
"""Create additional optimizer state: mean, variance
Parameters
----------
weight : NDArray
The weight data
"""
return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
self._update_count(index)
t = self._index_update_count[index]
mean, variance = state
wd = self._get_wd(index)
num_rows = weight.shape[0]
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
for row in range(num_rows):
# check row slices of all zeros
all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
# skip zeros during sparse update
if all_zeros and self.sparse_update:
continue
grad[row] = grad[row] * self.rescale_grad + wd * weight[row]
# clip gradients
if self.clip_gradient is not None:
mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
# update mean
mean[row] *= self.beta1
mean[row] += grad[row] * (1. - self.beta1)
# update variance
variance[row] *= self.beta2
variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])
# update weight
weight[row] -= lr*mean[row]/(mx.nd.sqrt(variance[row]) + self.epsilon)
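# Note on the learning rate scaling above: it is the standard Adam bias
# correction,
#     lr_t = lr * sqrt(1 - beta2**t) / (1 - beta1**t)
# and the per-row step is lr_t * mean / (sqrt(variance) + epsilon), where mean
# and variance track the rescaled (and optionally clipped) gradient per row.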
def test_adam():
mx.random.seed(0)
opt1 = PyAdam
opt2 = mx.optimizer.Adam
shape = (3, 4, 5)
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32, np.float64]:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
if (default_context() == mx.cpu()):
compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,
dtype, w_stype='row_sparse', g_stype='row_sparse')
# RMSProp
class PyRMSProp(mx.optimizer.Optimizer):
"""RMSProp optimizer of Tieleman & Hinton, 2012,
For centered=False, the code follows the version in
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012
For centered=True, the code follows the version in
http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.
Parameters
----------
learning_rate : float, optional
Step size.
Default value is set to 0.001.
gamma1: float, optional
decay factor of moving average for gradient, gradient^2.
Default value is set to 0.9.
gamma2: float, optional
"momentum" factor.
        Default value is set to 0.9.
Only used if centered=True
epsilon : float, optional
Default value is set to 1e-8.
centered : boolean, optional
        Use Graves' or Tieleman & Hinton's version of RMSProp
wd : float, optional
        L2 regularization coefficient added to all the weights
rescale_grad : float, optional
rescaling factor of gradient.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
clip_weights : float, optional
clip weights in range [-clip_weights, clip_weights]
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.centered = centered
self.gamma1 = gamma1
self.gamma2 = gamma2
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
"""Create additional optimizer state.
For centered=False: n
For centered=True: n, g, delta
Parameters
----------
weight : NDArray
The weight data
"""
if self.centered:
return (mx.nd.zeros(weight.shape, weight.context), # n
mx.nd.zeros(weight.shape, weight.context), # g
mx.nd.zeros(weight.shape, weight.context)) # delta
else:
return (mx.nd.zeros(weight.shape, weight.context), ) # n
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad + wd * weight
if not self.centered:
(n, ) = state
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))
else:
n, g, delta = state
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
g[:] = (1 - self.gamma1) * grad + self.gamma1 * g
delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))
weight[:] += delta
if self.clip_weights:
mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)
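# Restating the update implemented above: for centered=True (Graves, 2013,
# Eqs. (38)-(45))
#     n     <- gamma1 * n + (1 - gamma1) * g^2
#     g_avg <- gamma1 * g_avg + (1 - gamma1) * g
#     delta <- gamma2 * delta - lr * g / sqrt(n - g_avg^2 + epsilon)
#     w     <- w + delta
# while for centered=False the step is simply w <- w - lr * g / sqrt(n + epsilon).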
@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8230")
def test_rms():
mx.random.seed(0)
opt1 = PyRMSProp
opt2 = mx.optimizer.RMSProp
shape = (3, 4, 5)
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
cw_options = [{}, {'clip_weights': 0.01}]
center_options = [{}, {'centered': False}, {'centered': True}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32]:
for cw_option in cw_options:
for cg_option in cg_options:
for center_option in center_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(cw_option)
kwarg.update(cg_option)
kwarg.update(center_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
if (default_context() == mx.cpu()):
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')
class PyFtrl(mx.optimizer.Optimizer):
"""The Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
Parameters
----------
lamda1 : float, optional
L1 regularization coefficient.
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
eta :
.. math::
            \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}
"""
def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, sparse_update=False, **kwargs):
super(PyFtrl, self).__init__(**kwargs)
self.lamda1 = lamda1
self.beta = beta
self.lr = learning_rate
self.sparse_update = sparse_update
def create_state(self, index, weight):
return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # dn
mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # n
def update(self, index, weight, grad, state):
self._update_count(index)
wd = self._get_wd(index)
lr = self._get_lr(index)
num_rows = weight.shape[0]
dn, n = state
for row in range(num_rows):
all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
if all_zeros and self.sparse_update:
continue
grad[row] = grad[row] * self.rescale_grad
if self.clip_gradient is not None:
mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
#update dn, n
dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr
n[row] += grad[row] * grad[row]
# update weight
weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)
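# Restating the per-row update above in the usual FTRL-Proximal form (the
# accumulator dn plays the role of z):
#     sigma = (sqrt(n + g^2) - sqrt(n)) / lr
#     z    += g - sigma * w
#     n    += g^2
#     w     = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / lr + wd)  if |z| > lamda1,
#             0 otherwise.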
def test_ftrl():
mx.random.seed(0)
opt1 = PyFtrl
opt2 = mx.optimizer.Ftrl
shape = (3, 4, 5)
kwargs = [{},
{'clip_gradient': 0.5},
{'clip_gradient': 0.4, 'rescale_grad': 0.14},
{'rescale_grad': 0.8},
{'clip_gradient': 0.5, 'wd': 0.07},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},
{'rescale_grad': 0.8, 'wd': 0.05},
{'rescale_grad': 0.8, 'wd': 0.05, 'lamda1': 0.01},
{'clip_gradient': 0.5, 'wd': 0.07, 'lamda1': 1.0}]
for kwarg in kwargs:
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)
compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,
np.float32, w_stype='row_sparse', g_stype='row_sparse')
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# -*- coding: utf-8 -*-
"""
Created on Feb 20, 2014
@author: Aaron Ponti
"""
import re
from ch.systemsx.cisd.openbis.dss.etl.dto.api import SimpleImageDataConfig
from ch.systemsx.cisd.openbis.dss.etl.dto.api import SimpleImageContainerDataConfig
from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColor
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageIdentifier
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageMetadata
from ch.systemsx.cisd.openbis.dss.etl.dto.api import OriginalDataStorageFormat
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColorRGB
from ch.systemsx.cisd.openbis.dss.etl.dto.api import Channel
import xml.etree.ElementTree as ET
from GlobalSettings import GlobalSettings
class MicroscopySingleDatasetConfig(SimpleImageContainerDataConfig):
"""Image data configuration class for single image files (with
optional multiple series)."""
_DEBUG = False
# List of metadata attributes obtained either from the settings XML
# file generated by the Annotation Tool or returned by
# BioFormatsProcessor.getMetadata(asXML=False)
# (for all series in the file, sorted by series).
_allSeriesMetadata = None
# Number of the series to register (for a multi-series dataset).
_seriesNum = 0
# Logger
_logger = None
def __init__(self, allSeriesMetadata, logger, seriesNum=0):
"""Constructor.
@param allSeriesMetadata: list of metadata attributes generated either
by the Annotation Tool and parsed from the
settings XML file, or from BioFormatsProcessor
and returned via:
BioFormatsProcessor.getMetadataXML(asXML=False)
@param seriesNum: Int Number of the series to register. All
other series in the file will be ignored.
Set to -1 to register all series to the
same dataset.
@param logger: logger object
"""
# Store the logger
self._logger = logger
# Store the series metadata
self._allSeriesMetadata = allSeriesMetadata
# Store the series number
self._seriesNum = seriesNum
# This is microscopy data
self.setMicroscopyData(True)
# Store raw data in original form
self.setOriginalDataStorageFormat(OriginalDataStorageFormat.UNCHANGED)
# Set the image library
self.setImageLibrary("BioFormats")
# Disable thumbnail generation by ImageMagick
self.setUseImageMagicToGenerateThumbnails(False)
# Specify resolution of image representations explicitly
resolutions = GlobalSettings.ImageResolutions
if not resolutions:
self._logger.info("Skipping thumbnails generation.")
self.setGenerateThumbnails(False)
else:
self._logger.info("Creating thumbnails at resolutions: " + str(resolutions))
self.setGenerateImageRepresentationsUsingImageResolutions(resolutions)
self.setGenerateThumbnails(True)
# Set the recognized extensions to match those in the Annotation Tool
self.setRecognizedImageExtensions([\
"czi", "dv", "ics", "ids", "ims", "lei", "lif",
"liff", "lsm", "nd", "nd2", "oib", "oif", "ome",
"r3d", "stk", "tif", "tiff", "zvi"])
# Set the dataset type
self.setDataSetType("MICROSCOPY_IMG")
# Create representative image (MIP) for series 0 only
if int(seriesNum) == 0:
self.setImageGenerationAlgorithm(
MaximumIntensityProjectionGenerationAlgorithm(
"MICROSCOPY_IMG_THUMBNAIL", 256, 256, "thumbnail.png"))
def createChannel(self, channelCode):
"""Create a channel from the channelCode with the name as read from
the file via the MetadataReader and the color (RGB) as read.
@param channelCode Code of the channel as generated by extractImagesMetadata().
"""
# Get the indices of series and channel from the channel code
(seriesIndx, channelIndx) = self._getSeriesAndChannelNumbers(channelCode)
if self._seriesNum != -1 and seriesIndx != self._seriesNum:
return
# Get the channel name
name = self._getChannelName(seriesIndx, channelIndx)
# Get the channel color (RGB)
colorRGB = self._getChannelColor(seriesIndx, channelIndx)
# Log
if self._DEBUG:
self._logger.info("MICROSCOPYSINGLEDATASETCONFIG::createChannel(): " +
"channel (s = " + str(seriesIndx) + ", c = " +
str(channelIndx) + ") has code " + channelCode +
", color (" + str(colorRGB) + " and name " + name)
# Return the channel with given name and color (the code is set to
# be the same as the channel name).
return Channel(channelCode, name, colorRGB)
def extractImagesMetadata(self, imagePath, imageIdentifiers):
"""Overrides extractImagesMetadata method making sure to store
both series and channel indices in the channel code to be reused
later to extract color information and other metadata.
The channel code is in the form SERIES-(\d+)_CHANNEL-(\d+).
Only metadata for the relevant series number is returned!
@param imagePath Full path to the file to process
@param imageIdentifiers Array of ImageIdentifier's
@see constructor.
"""
# Initialize array of metadata entries
metaData = []
# Iterate over all image identifiers
for id in imageIdentifiers:
# Extract the info from the image identifier
ch = int(id.colorChannelIndex)
plane = int(id.focalPlaneIndex)
series = int(id.seriesIndex)
timepoint = int(id.timeSeriesIndex)
# Make sure to process only the relevant series
if self._seriesNum != -1 and series != self._seriesNum:
continue
# Build the channel code
channelCode = "SERIES-" + str(series) + "_CHANNEL-" + str(ch)
# Initialize a new ImageMetadata object
            imageMetadata = ImageMetadata()
# Fill in all information
imageMetadata.imageIdentifier = id
imageMetadata.seriesNumber = series
imageMetadata.timepoint = timepoint
imageMetadata.depth = plane
imageMetadata.channelCode = channelCode
imageMetadata.tileNumber = 1 # + self._seriesNum
imageMetadata.well = "IGNORED"
# Append metadata for current image
metaData.append(imageMetadata)
# Log image geometry information
if self._DEBUG:
self._logger.info("MICROSCOPYSINGLEDATASETCONFIG::extractImagesMetadata(): " +
"Current image: series = " + str(series) +
" channel = " + str(ch) +
" plane = " + str(plane) +
" timepoint = " + str(timepoint) +
" channel code = " + str(channelCode))
# Now return the metaData array
return metaData
def _getChannelName(self, seriesIndx, channelIndx):
"""Returns the channel name (from the parsed metadata) for
a given channel in a given series."
"""
# Get the metadata for the requested series
metadata = self._allSeriesMetadata[seriesIndx]
# Try extracting the name for the given series and channel
try:
key = "channelName" + str(channelIndx)
name = metadata[key]
except KeyError:
err = "MICROSCOPYSINGLEDATASETCONFIG::createChannel(): " + \
"Could not create channel name for channel " + str(channelIndx) + \
" and series " + str(seriesIndx) + "for key = " + \
key + " from metadata = " + \
str(metadata)
self._logger.error(err)
            raise Exception(err)
# In case no name was found, assign default name
if name == "":
name = "No name"
return name
def _getChannelColor(self, seriesIndx, channelIndx):
"""Returns the channel color (from the parsed metadata) for
a given channel in a given series."
"""
# Get the metadata for the requested series
metadata = self._allSeriesMetadata[seriesIndx]
# Try extracting the color for the given series and channel
try:
color = metadata["channelColor" + str(channelIndx)]
except KeyError:
err = "MICROSCOPYSINGLEDATASETCONFIG::createChannel(): " + \
"Could not extract channel color for channel " + \
                  str(channelIndx) + " and series " + str(seriesIndx) + \
" from metadata."
self._logger.error(err)
            raise Exception(err)
# Try extracting the color for current channel
colorComponents = color.split(",")
assert(len(colorComponents) == 4)
try:
R = int(float(colorComponents[0]))
G = int(float(colorComponents[1]))
B = int(float(colorComponents[2]))
except:
err = "MICROSCOPYSINGLEDATASETCONFIG::createChannel(): " + \
"Could not extract color with index " + str(channelIndx)
self._logger.error(err)
            raise Exception(err)
# Create the ChannelColorRGB object
colorRGB = ChannelColorRGB(R, G, B)
return colorRGB
def _getSeriesAndChannelNumbers(self, channelCode):
"""Extract series and channel number from channel code in
the form SERIES-(\d+)_CHANNEL-(\d+) to a tuple
(seriesIndx, channelIndx).
@param channelCode Code of the channel as generated by extractImagesMetadata().
"""
# Get the indices of series and channel from the channel code
p = re.compile("SERIES-(\d+)_CHANNEL-(\d+)")
m = p.match(channelCode)
if m is None or len(m.groups()) != 2:
err = "MICROSCOPYSINGLEDATASETCONFIG::_getSeriesAndChannelNumbers(): " + \
"Could not extract series and channel number!"
self._logger.error(err)
raise Exception(err)
# Now assign the indices
seriesIndx = int(m.group(1))
channelIndx = int(m.group(2))
# Return them
return seriesIndx, channelIndx
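# Illustrative sketch (not part of the original class): standalone parsing of
# the SERIES-(\d+)_CHANNEL-(\d+) channel code format used above.
def _example_parse_channel_code(channelCode="SERIES-2_CHANNEL-0"):
    m = re.match(r"SERIES-(\d+)_CHANNEL-(\d+)", channelCode)
    # returns (2, 0) for the default input
    return int(m.group(1)), int(m.group(2))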
|
|
from lxml import etree
from bioagents import add_agent_type, infer_agent_type
from indra.sources.trips import process_xml
from kqml import KQMLList
from indra.statements import RefContext, BioContext, Agent
class EKB(object):
def __init__(self, graph, term_node):
self.graph = graph
self.graph.draw('test.pdf')
self.root_term = term_node
self.ekb = None
self.type = None
self.components = []
self.stack = []
self._stack_history = []
self.build()
def _dump_stack_history(self):
ret = ''
for stack, pol, term in self._stack_history:
ret += ('+' if pol > 0 else '-') + term + ' = ' + str(stack) + '\n'
return ret
def _add_to_stack(self, term_id):
self.stack.append(term_id)
self._stack_history.append((self.stack[:], 1, term_id))
def _pop_stack(self, term_id):
stack_id = self.stack[-1]
assert term_id == stack_id, \
("Bad stack: %s\n removing id=%s but top of stack=%s.\n"
"history:\n%s"
% (self.stack, term_id, stack_id, self._dump_stack_history()))
self.stack.pop()
self._stack_history.append((self.stack[:], -1, term_id))
self.components.append(term_id)
def _is_new_id(self, id):
return id not in (self.components + self.stack)
def build(self):
self.ekb = etree.Element('ekb')
# Determine if the root term is a TERM or EVENT
root_node = self.graph.nodes[self.root_term]
if root_node['category'] == 'ONT::TERM' \
or root_node['type'] == 'ONT::SIGNALING':
self.term_to_ekb(self.root_term)
elif root_node['category'] == 'ONT::CC':
self.cc_to_ekb(self.root_term)
else:
self.generic_event_to_ekb(self.root_term)
def to_string(self):
ekb_str = etree.tounicode(self.ekb, pretty_print=True)
ekb_str = '<?xml version="1.0"?>' + ekb_str
return ekb_str
def set_cell_line_context_for_stmts(self, stmts):
cell_line_context = get_cell_line(self.ekb)
if cell_line_context:
set_cell_line_context(stmts, cell_line_context)
return get_cell_line(self.ekb)
def get_entity(self):
ekb_str = self.to_string()
# Now process the EKB using the TRIPS processor to extract Statements
tp = process_xml(ekb_str)
# If there are any statements then we can return the CL-JSON of those
if tp.statements:
self.set_cell_line_context_for_stmts(tp.statements)
res = tp.statements
# Otherwise, we try extracting an Agent and return that
else:
agent = tp._get_agent_by_id(self.root_term, None)
if agent is None:
return None
# Set the TRIPS ID in db_refs
agent.db_refs['TRIPS'] = 'ONT::' + self.root_term
# Fix some namings
if self.type.upper() in {'ONT::SIGNALING-PATHWAY',
'ONT::SIGNALING'}:
simple_name = agent.name.lower().replace('-', ' ')
if not simple_name.endswith('signaling pathway'):
agent.name += ' signaling pathway'
elif agent.name.isupper() \
and ' ' not in agent.name \
and '-' in agent.name:
agent.name = simple_name
agent.db_refs['TEXT'] = agent.name
elif self.type.upper() == 'ONT::RNA':
agent.name = (agent.db_refs['TEXT']
.upper()
.replace('-', '')
.replace('PUNCMINUS', '-'))
# Set the agent type
inferred_type = infer_agent_type(agent)
if inferred_type is not None \
and self.type not in {'ONT::SIGNALING-PATHWAY',
'ONT::SIGNALING'}:
agent.db_refs['TYPE'] = inferred_type
elif self.type:
agent.db_refs['TYPE'] = self.type.upper()
res = agent
return res
def event_to_ekb(self, event_node):
node = self.graph.nodes[event_node]
if node['type'].upper() in {'ONT::ATTACH', 'ONT::BIND'}:
self.binding_to_ekb(event_node)
else:
self.generic_event_to_ekb(event_node)
def binding_to_ekb(self, event_node):
self._add_to_stack(event_node)
event = etree.Element('EVENT', id=event_node)
type = etree.Element('type')
event.append(type)
type.text = 'ONT::BIND'
ekb_args = {'affected': ('arg1', ':AGENT'),
'affected1': ('arg2', ':AFFECTED'),
'agent': ('arg2', ':AFFECTED')}
for kqml_link, (tag_name, tag_type) in ekb_args.items():
arg = self.graph.get_matching_node(event_node, link=kqml_link)
if arg is None:
continue
if self._is_new_id(arg):
self.term_to_ekb(arg)
arg_tag = etree.Element(tag_name, id=arg, type=tag_type)
event.append(arg_tag)
negation = self.graph.get_matching_node(event_node, 'negation')
if negation:
neg_tag = etree.Element('negation')
neg_tag.text = '+'
event.append(neg_tag)
self._pop_stack(event_node)
self.components.append(event_node)
self.ekb.append(event)
def cc_to_ekb(self, cc_node):
self._add_to_stack(cc_node)
node = self.graph.nodes[cc_node]
cc = etree.Element('CC', id=cc_node)
type = etree.Element('type')
type.text = node['type']
self.type = node['type']
cc.append(type)
arg_counter = 1
possible_event_args = ['factor', 'outcome']
for event_arg in possible_event_args:
arg_node = self.graph.get_matching_node(cc_node, link=event_arg)
if arg_node:
tag_name = 'arg'
tag_type = ':%s' % event_arg.upper()
arg_tag = etree.Element(tag_name, id=arg_node, role=tag_type)
cc.append(arg_tag)
arg_counter += 1
if self._is_new_id(arg_node):
self.term_to_ekb(arg_node)
self._pop_stack(cc_node)
self.ekb.append(cc)
def generic_event_to_ekb(self, event_node):
self._add_to_stack(event_node)
node = self.graph.nodes[event_node]
event = etree.Element('EVENT', id=event_node)
type = etree.Element('type')
type.text = node['type']
self.type = node['type']
event.append(type)
arg_counter = 1
possible_event_args = ['affected', 'affected1', 'agent',
'affected-result']
for event_arg in possible_event_args:
arg_node = self.graph.get_matching_node(event_node, link=event_arg)
if arg_node:
tag_name = 'arg%d' % arg_counter
tag_type = ':%s' % event_arg.upper()
arg_tag = etree.Element(tag_name, id=arg_node, role=tag_type)
event.append(arg_tag)
arg_counter += 1
if self._is_new_id(arg_node):
self.term_to_ekb(arg_node)
# Extract any sites attached to the event
site_node = self.graph.get_matching_node(event_node, link='site')
if site_node:
site_tag = etree.Element('site', id=site_node)
event.append(site_tag)
site_term = self.get_site_term(site_node)
self.ekb.append(site_term)
# Extract manner-undo if available
modn = self.graph.get_matching_node(event_node, link='modn')
if modn:
manner_label = self.graph.nodes[modn].get('label')
if manner_label and manner_label.lower() == 'ont::manner-undo':
mods_tag = etree.Element('mods')
mod_tag = etree.Element('mod')
type_tag = etree.Element('type')
type_tag.text = 'ONT::MANNER-UNDO'
mod_tag.append(type_tag)
mods_tag.append(mod_tag)
event.append(mods_tag)
self._pop_stack(event_node)
self.ekb.append(event)
def get_site_term(self, site_node):
site_term = etree.Element('TERM', id=site_node)
type_elem = etree.Element('type')
site_term.append(type_elem)
type_elem.text = 'ONT::MOLECULAR-SITE'
# Now we need to look for the site
site_dbname = self.graph.get_matching_node_value(site_node, link='dbname')
site_name = self.graph.get_matching_node_value(site_node, link='site-name')
site_code = self.graph.get_matching_node_value(site_node, link='site-code')
        if site_dbname:
            # Assumed fix (not in the original): use the database name as the
            # displayed label, otherwise `label` is undefined below when only
            # a dbname is given.
            label = site_dbname
            if site_dbname.lower().startswith('serine'):
                code = 'S'
            elif site_dbname.lower().startswith('threonine'):
                code = 'T'
            elif site_dbname.lower().startswith('tyrosine'):
                code = 'Y'
elif site_code:
label = site_name
code = site_code
else:
raise ValueError('No site code found')
site_pos = self.graph.get_matching_node_value(site_node, link='site-pos')
name_elem = etree.Element('name')
name_elem.text = label
site_term.append(name_elem)
features_tag = etree.Element('features')
site_tag = etree.Element('site')
site_name_tag = etree.Element('name')
site_name_tag.text = label
site_code_tag = etree.Element('code')
site_code_tag.text = code
site_pos_tag = etree.Element('pos')
site_pos_tag.text = site_pos
site_tag.append(site_name_tag)
site_tag.append(site_code_tag)
site_tag.append(site_pos_tag)
features_tag.append(site_tag)
site_term.append(features_tag)
return site_term
def get_term_name(self, term_id):
"""Find the name of the TERM and get the value with W:: stripped"""
name_node = self.graph.get_matching_node(term_id, link='name')
if not name_node:
name_node = self.graph.get_matching_node(term_id, link='W')
if name_node:
name_val = self.graph.nodes[name_node]['label']
if name_val.startswith('W::'):
name_val = name_val[3:]
else:
name_val = ''
return name_val
def term_to_ekb(self, term_id):
self._add_to_stack(term_id)
node = self.graph.nodes[term_id]
term = etree.Element('TERM', id=term_id)
# Set the type of the TERM
type = etree.Element('type')
type.text = node['type']
term.append(type)
self.type = node['type']
if node['type'].upper() == 'ONT::MACROMOLECULAR-COMPLEX':
c1 = self.graph.get_matching_node(term_id, link='m-sequence')
c2 = self.graph.get_matching_node(term_id, link='m-sequence1')
components = etree.Element('components')
if c1:
self.term_to_ekb(c1)
c1tag = etree.Element('component', id=c1)
components.append(c1tag)
if c2:
self.term_to_ekb(c2)
c2tag = etree.Element('component', id=c2)
components.append(c2tag)
term.append(components)
self._pop_stack(term_id)
self.ekb.append(term)
return
elif node['type'].upper() == 'ONT::SEQUENCE':
aggregate = etree.Element('aggregate', operator='AND')
for seq_counter in [''] + list(range(1, 10)):
member = \
self.graph.get_matching_node(
term_id,
link='sequence%s' % seq_counter)
if member is None:
break
membert = etree.Element('member', id=member)
self.term_to_ekb(member)
aggregate.append(membert)
term.append(aggregate)
self._pop_stack(term_id)
self.ekb.append(term)
return
# Handle the case of the signaling pathways.
# Note: It turns out this will be wiped out by TRIPS further down the
# line.
elif node['type'].upper() in {'ONT::SIGNALING-PATHWAY',
'ONT::SIGNALING'}:
path_subject_id = self.graph.get_matching_node(term_id,
link='assoc-with')
if not path_subject_id:
name_val = self.get_term_name(term_id)
else:
path_subject_name = self.get_term_name(path_subject_id)
name_val = path_subject_name.upper() + '-SIGNALING-PATHWAY'
# This is a LITTLE bit hacky: all further information should come
# from this associated-with term, because the root term has no
# information.
self._pop_stack(term_id)
term_id = path_subject_id
self._add_to_stack(term_id)
# Handle the case where this is just another protein.
else:
name_val = self.get_term_name(term_id)
name = etree.Element('name')
name.text = name_val
term.append(name)
# Now deal with DRUM content
drum_node = self.graph.get_matching_node(term_id, link='drum')
if drum_node:
drum_kqml = KQMLList.from_string(
self.graph.nodes[drum_node]['kqml'])
drum_terms = etree.Element('drum-terms')
for drum_term in drum_kqml[0][1:]:
dt = drum_term_to_ekb(drum_term)
if dt is not None:
drum_terms.append(dt)
term.append(drum_terms)
# Deal next with modifier events
mod = self.graph.get_matching_node(term_id, link='mod')
activity_id = self.graph.get_matching_node(term_id, link='active')
cell_line = self.graph.get_matching_node(term_id, link='cell-line')
if mod or activity_id or cell_line:
features = etree.Element('features')
if mod:
if self._is_new_id(mod):
self.event_to_ekb(mod)
event = self.graph.nodes[mod]
activity = event['type'].upper()[5:]
if activity in {'ACTIVE', 'INACTIVE'}:
active = etree.Element('active')
if activity == 'ACTIVE':
active.text = 'TRUE'
else:
active.text = 'FALSE'
features.append(active)
else:
inevent = etree.Element('inevent', id=mod)
features.append(inevent)
if activity_id:
activity = self.graph.nodes[activity_id]
if activity.get('label') == 'ONT::TRUE':
active = etree.Element('active')
active.text = 'TRUE'
features.append(active)
if cell_line:
cle = etree.Element('cell-line', id=cell_line)
cle_type = etree.Element('type')
cle.append(cle_type)
features.append(cle)
self.term_to_ekb(cell_line)
term.append(features)
self._pop_stack(term_id)
self.ekb.append(term)
def drum_term_to_ekb(drum_term):
def get_dbid(drum_id):
term_id_ns, term_id_id = drum_id.split('::')
term_id_id = term_id_id.strip('|')
dbid = '%s:%s' % (term_id_ns, term_id_id)
return dbid
# Get dbid attribute
term_id = drum_term.gets('id')
if term_id is None:
return None
dbid = get_dbid(term_id)
# Get the first element of matches and its content
match = drum_term.get('matches')[0]
match_score = match.gets('score')
match_matched = match.gets('matched')
match_input = match.gets('input')
# NOTE: these two below don't seem to be added to the EKB
# match_status = match.gets('status')
# match_exact = int(match.gets('exact'))
# Get the xrefs
dbxrefs_entry = drum_term.get('dbxrefs')
if dbxrefs_entry:
dbxrefs = [get_dbid(xr.string_value())
for xr in dbxrefs_entry]
else:
dbxrefs = []
# Get ont type
ont_type = drum_term.get('ont-types')[0].string_value()
# Now that we have all the pieces we can assemble
# the XML structure
dt = etree.Element('drum-term', dbid=dbid, name=match_matched)
dt.attrib['match-score'] = match_score
dt.attrib['matched-name'] = match_matched
if match_input:
dt.attrib['input'] = match_input
types = etree.Element('types')
type = etree.Element('type')
type.text = ont_type
types.append(type)
if dbxrefs:
xrefs = etree.Element('xrefs')
for dbxref in dbxrefs:
xref = etree.Element('xref', dbid=dbxref)
xrefs.append(xref)
dt.append(xrefs)
return dt
def get_cell_line(ekb):
# Look for a term representing a cell line
cl_tag = ekb.find("TERM/[type='ONT::CELL-LINE']")
if cl_tag is not None:
name_tag = cl_tag.find('name')
if name_tag is not None:
name = name_tag.text
name = name.replace('CELLS', '')
name = name.replace('CELL', '')
name = name.replace('-', '')
# TODO: add grounding here if available
clc = RefContext(name)
return clc
return None
def set_cell_line_context(stmts, context):
# Set cell line context if available
for stmt in stmts:
ev = stmt.evidence[0]
if not ev.context:
ev.context = BioContext(cell_line=context)
def agent_from_term(graph, term_id):
ekb = EKB(graph, term_id)
agent = ekb.get_entity()
if not isinstance(agent, Agent):
return None
return agent
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Arm(R) Ethos(TM)-N NPU supported operators."""
from enum import Enum
import tvm.ir
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from ... import qnn as _qnn
from ...dataflow_pattern import is_constant, is_op, wildcard
from . import _ethosn as support
from .register import register_pattern_table
class Available(Enum):
UNAVAILABLE = 0
SW_ONLY = 1
SW_AND_HW = 2
def __bool__(self):
return self != Available.UNAVAILABLE
def ethosn_available():
"""Return whether Ethos-N software and hardware support is available"""
if not tvm.get_global_func("relay.ethos-n.query", True):
print("skip because Ethos-N module is not available")
return Available.UNAVAILABLE
hw = tvm.get_global_func("relay.ethos-n.query")()
return Available.SW_AND_HW if hw else Available.SW_ONLY
def partition_for_ethosn77(mod, params=None, **opts):
"""Partition the graph greedily offloading supported
operators to Arm Ethos-N NPU.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
Returns
-------
ret : annotated and partitioned module.
"""
if opts:
tops = opts.get("tops", None)
ple_ratio = opts.get("ple_ratio", None)
sram_size = opts.get("sram_size", None)
if tops or ple_ratio or sram_size:
raise ValueError(
"Setting tops, ple_ratio or sram_size has no effect when targeting Ethos(TM)-N77"
)
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("ethos-n"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
return seq(mod)
def partition_for_ethosn78(mod, params=None, **opts):
"""Partition the graph greedily offloading supported
operators to Arm Ethos-N NPU.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
Returns
-------
ret : annotated and partitioned module.
"""
if not opts or opts.get("variant", "").lower() != "ethos-n78":
raise ValueError("When targeting Ethos(TM)-N78, -variant=Ethos-N78 should be set.")
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("ethos-n"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
return seq(mod)
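# Illustrative usage sketch (assumption: `mod` and `params` come from a
# quantized model already imported into Relay, e.g. via a TFLite frontend):
#
#     mod = partition_for_ethosn78(mod, params, variant="Ethos-N78")
#
# after which the supported operators are grouped into "ethos-n" external
# functions for the NPU compiler.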
@register_pattern_table("ethos-n")
def pattern_table():
"""Get the Ethos-N compiler pattern table."""
def qnn_conv_pattern():
pattern = is_op("nn.pad")(wildcard(), wildcard()) | wildcard()
pattern = is_op("qnn.conv2d")(
pattern, is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = is_op("nn.bias_add")(pattern, is_constant())
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_fc_pattern():
pattern = is_op("qnn.dense")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = is_op("nn.bias_add")(pattern, is_constant())
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_avg_pool2d_pattern():
pattern = is_op("cast")(wildcard())
pattern = is_op("nn.avg_pool2d")(pattern)
pattern = is_op("cast")(pattern)
return pattern
def qnn_sigmoid_pattern():
pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
pattern = is_op("sigmoid")(pattern)
pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
return pattern
def check_conv2d(extract):
"""Check if a conv2d is supported by Ethos-N."""
if not ethosn_available():
return False
return support.conv2d(extract)
def check_fc(extract):
"""Check if a fully connected is supported by Ethos-N."""
if not ethosn_available():
return False
return support.fc(extract)
def check_avg_pool2d(extract):
"""Check if a avg pool2d is supported by Ethos-N."""
if not ethosn_available():
return False
return support.avg_pool2d(extract)
def check_sigmoid(extract):
"""Check if a sigmoid is supported by Ethos-N."""
if not ethosn_available():
return False
if extract.attrs.out_dtype != "uint8":
return False
return support.sigmoid(extract)
return [
("ethos-n.qnn_conv2d", qnn_conv_pattern(), check_conv2d),
("ethos-n.qnn_avg_pool2d", qnn_avg_pool2d_pattern(), check_avg_pool2d),
("ethos-n.qnn_sigmoid", qnn_sigmoid_pattern(), check_sigmoid),
("ethos-n.qnn_fc", qnn_fc_pattern(), check_fc),
]
def _is_ethosn_composite(node):
if isinstance(node, tvm.relay.expr.Call) and isinstance(node.op, tvm.relay.Function):
if "Composite" in node.op.attrs:
comp_name = node.op.attrs["Composite"]
return comp_name.split(".")[0] == "ethos-n"
return False
@tvm.ir.register_op_attr("nn.max_pool2d", "target.ethos-n")
def max_pool2d(expr):
"""Check if a max pool2d is supported by Ethos-N."""
if not ethosn_available():
return False
attrs, args = expr.attrs, expr.args
pool = tvm.relay.nn.max_pool2d(*args, **attrs)
return support.max_pool2d(pool)
@tvm.ir.register_op_attr("reshape", "target.ethos-n")
def reshape(expr):
"""Check if a reshape is supported by Ethos-N."""
if not ethosn_available():
return False
attrs, args = expr.attrs, expr.args
if not _is_ethosn_composite(args[0]):
return False
rs = tvm.relay.op.reshape(*args, attrs["newshape"])
return support.reshape(rs)
@tvm.ir.register_op_attr("qnn.add", "target.ethos-n")
def qnn_add(expr):
"""Check if an addition is supported by Ethos-N."""
if not ethosn_available():
return False
args = expr.args
add = _qnn.op.add(*args)
return support.addition(add)
@tvm.ir.register_op_attr("qnn.concatenate", "target.ethos-n")
def qnn_concatenate(expr):
"""Check if a concatenate is supported by Ethos-N."""
if not ethosn_available():
return False
attrs, args = expr.attrs, expr.args
conc = _qnn.op.concatenate(*args, **attrs)
if not support.concatenate(conc):
return False
# Support library has some unenforced restrictions on qnn params
min_range = 1e9
max_range = -1e9
qnn_params = []
for i in range(len(args[1].fields)):
scale = args[1].fields[i].data.numpy()
zero_point = args[2].fields[i].data.numpy()
min_range = min(-1 * zero_point * scale, min_range)
max_range = max((255 - zero_point) * scale, max_range)
qnn_params.append((scale, zero_point))
scale = (max_range - min_range) / 255
zero_point = int(-min_range / scale)
if (scale, zero_point) in qnn_params:
return True
return False
@tvm.ir.register_op_attr("split", "target.ethos-n")
def split(expr):
"""Check if a split is supported by Ethos-N."""
if not ethosn_available():
return False
attrs, args = expr.attrs, expr.args
if isinstance(attrs["indices_or_sections"], tvm.tir.IntImm):
sp = tvm.relay.split(
*args, indices_or_sections=attrs["indices_or_sections"].value, axis=attrs["axis"]
)
else:
sp = tvm.relay.split(
*args, indices_or_sections=attrs["indices_or_sections"], axis=attrs["axis"]
)
if not support.split(sp.astuple()):
return False
return True
@tvm.ir.register_op_attr("nn.depth_to_space", "target.ethos-n")
def depth_to_space(expr):
"""Check if a depth_to_space is supported by Ethos-N."""
if not ethosn_available():
return False
attrs, args = expr.attrs, expr.args
depth = tvm.relay.nn.depth_to_space(*args, **attrs)
if not support.depth_to_space(depth):
return False
return True
@tvm.ir.register_op_attr("clip", "target.ethos-n")
def clip(expr):
"""Check if a clip is supported by Ethos-N."""
if not ethosn_available():
return False
attrs, args = expr.attrs, expr.args
c = tvm.relay.clip(*args, **attrs)
if not support.relu(c):
return False
return True
|
|
__author__ = 'Christoph Jansen, HTW Berlin'
import theano
import theano.tensor as T
from theano import dot
from theano.tensor.nnet import sigmoid as sigm
from theano.tensor import tanh
from theano.tensor.nnet import softmax
from theano.tensor.nnet import categorical_crossentropy
import os
import numpy as np
from datetime import datetime
import helper
home_dir = os.path.expanduser('~')
### BEGIN SETTINGS ###
# text corpus
corpus_file = os.path.join(home_dir, 'brown_tagged.txt')
# work dir will contain pickled lstm weights and pickled list of training errors
work_dir = os.path.join(home_dir, 'training_lstm_word_level_lm')
# if training should be continued from existing weights, timestamp, start_epoch and start_iteration must be given
# every training is identified by a generated timestamp
# else set values to None
timestamp = None # string
start_epoch = None # int
start_iteration = None # int
# number of neurons in hidden layer of lstm
hidden_layer_size = 512
# 40% of occurrences of these tokens will be excluded from training corpus for cv and test
preserve_tokens = ['than', 'then', 'except', 'accept', 'well', 'good']
# number of training epochs
# complete corpus will be given to lstm for training once per epoch
max_epochs = 1
# after training lstm language model will be applied to this confusion set
# order matters: algorithm will generate rules for occurrences of first word in list
confusion_set = ['than', 'then']
# minimum occurrence of tokens in training data
# tokens with fewer occurrences will be substituted with 'U' for unknown
# 'U' can also serve as substitute for unseen tokens at test time
min_occurrence = 20
### END SETTINGS ###
# init
if not os.path.exists(work_dir):
os.makedirs(work_dir)
with open(corpus_file) as f:
sents = [[helper.normalization(twp.split('|')[0].lower()) for twp in line.split()] for line in f]
train_sents = list(helper.acs(sents, preserve_tokens))
token_embeddings = helper.TokenEmbeddings(train_sents, min_occurrence)
if timestamp and start_epoch and start_iteration:
errors = helper.load_errors('%s-%d-%d.errors' % (timestamp, start_epoch, start_iteration), work_dir)
load_weights = '%s-%d-%d.weights' % (timestamp, start_epoch, start_iteration)
print('init previous states...')
print('timestamp: ', timestamp)
print('start_epoch: ', start_epoch)
print('start_iteration: ', start_iteration)
else:
errors = []
start_epoch = 0
start_iteration = 0
timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
load_weights = None
print('init new states...')
print('timestamp: ', timestamp)
print()
# initialize lstm weights
io_size = token_embeddings.num_tokens  # input/output size
h_size = hidden_layer_size # hidden size
if not load_weights:
W_xi = helper.init_weights((io_size, h_size))
W_hi = helper.init_weights((h_size, h_size))
W_ci = helper.init_weights((h_size, h_size))
b_i = helper.init_zero_vec(h_size)
W_xf = helper.init_weights((io_size, h_size))
W_hf = helper.init_weights((h_size, h_size))
W_cf = helper.init_weights((h_size, h_size))
b_f = helper.init_zero_vec(h_size)
W_xc = helper.init_weights((io_size, h_size))
W_hc = helper.init_weights((h_size, h_size))
b_c = helper.init_zero_vec(h_size)
W_xo = helper.init_weights((io_size, h_size))
W_ho = helper.init_weights((h_size, h_size))
W_co = helper.init_weights((h_size, h_size))
b_o = helper.init_zero_vec(h_size)
W_hy = helper.init_weights((h_size, io_size))
b_y = helper.init_zero_vec(io_size)
else:
W_xi, W_hi, W_ci, b_i, \
W_xf, W_hf, W_cf, b_f, \
W_xc, W_hc, b_c, \
W_xo, W_ho, W_co, b_o, \
W_hy, b_y = helper.load_states(load_weights, work_dir)
# LSTM code
S_h = helper.init_zero_vec(h_size) # init values for hidden units
S_c = helper.init_zero_vec(h_size) # init values for cell units
S_x = T.matrix() # inputs
Y = T.matrix() # targets
# BEGIN code inspired by Christian Herta
# http://christianherta.de/lehre/dataScience/machineLearning/neuralNetworks/LSTM.php
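# The step function below implements one peephole-LSTM time step:
#   S_i: input gate, S_f: forget gate, S_o: output gate (all sigmoid),
#   S_c: cell state (forget-gated previous cell plus input-gated tanh candidate),
#   S_h: hidden state (output gate applied to tanh of the cell),
#   S_y: unnormalised output logits (softmax is applied later in the cost).
# The W_ci / W_cf / W_co terms are the peephole connections from the cell state
# to the gates.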
def step(S_x, S_h, S_c,
W_xi, W_hi, W_ci, b_i,
W_xf, W_hf, W_cf, b_f,
W_xc, W_hc, b_c,
W_xo, W_ho, W_co, b_o,
W_hy, b_y):
S_i = sigm(dot(S_x, W_xi) + dot(S_h, W_hi) + dot(S_c, W_ci) + b_i)
S_f = sigm(dot(S_x, W_xf) + dot(S_h, W_hf) + dot(S_c, W_cf) + b_f)
S_c = S_f * S_c + S_i * tanh(dot(S_x, W_xc) + dot(S_h, W_hc) + b_c)
S_o = sigm(dot(S_x, W_xo) + dot(S_h, W_ho) + dot(S_c, W_co) + b_o)
S_h = S_o * tanh(S_c)
S_y = dot(S_h, W_hy) + b_y
return [S_h, S_c, S_y]
# scan loops through input sequence and applies step function to each time step
(S_h_r, S_c_r, S_y_r), _ = theano.scan(fn = step,
sequences = S_x,
outputs_info = [S_h, S_c, None],
non_sequences = [W_xi, W_hi, W_ci, b_i,
W_xf, W_hf, W_cf, b_f,
W_xc, W_hc, b_c,
W_xo, W_ho, W_co, b_o,
W_hy, b_y])
# END code inspired by Christian Herta
# cost and gradient descent
cost = T.mean(categorical_crossentropy(softmax(S_y_r), Y))
def gradient_descent(cost, weights, lr=0.05):
grads = T.grad(cost=cost, wrt=weights)
updates = []
for w, g in zip(weights, grads):
updates.append([w, w - lr * g])
return updates
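# gradient_descent builds plain SGD updates: for every weight w the update is
# w <- w - lr * dCost/dw. The update list is passed to theano.function below,
# so each call to train() performs one gradient step.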
updates = gradient_descent(cost,
[W_xi, W_hi, W_ci, b_i,
W_xf, W_hf, W_cf, b_f,
W_xc, W_hc, b_c,
W_xo, W_ho, W_co, b_o,
W_hy, b_y])
# training theano function
train = theano.function(inputs=[S_x, Y],
outputs=cost,
updates=updates,
allow_input_downcast=True)
# prediction theano function
predict = theano.function(inputs=[S_x],
outputs=S_y_r,
allow_input_downcast=True)
# sampling theano functions
S_h_v = T.vector()
S_c_v = T.vector()
S_h_s, S_c_s, S_y_s = step(S_x, S_h_v, S_c_v,
W_xi, W_hi, W_ci, b_i,
W_xf, W_hf, W_cf, b_f,
W_xc, W_hc, b_c,
W_xo, W_ho, W_co, b_o,
W_hy, b_y)
sampling = theano.function(inputs = [S_x, S_h_v, S_c_v],
outputs = [S_h_s, S_c_s, S_y_s],
allow_input_downcast=True)
# sampling python functions
def apply_sampling(token_embeddings, hid, start='S', end='E', t=1.0, max_tokens=50):
token_list = list(token_embeddings.token_to_index.keys())
S_x = token_embeddings.token_to_vec(start)
S_h = np.zeros(hid, dtype=theano.config.floatX)
S_c = np.zeros(hid, dtype=theano.config.floatX)
sampled_tokens = [start]
counter = 0
while sampled_tokens[-1] != end:
if counter == max_tokens:
sampled_tokens.append(end)
break
S_x = np.reshape(S_x, (1, -1))
S_h, S_c, S_y = sampling(S_x, S_h.flatten(), S_c.flatten())
S_y = S_y.flatten()
distribution = helper.t_softmax(S_y, t=t)
S_x = np.random.multinomial(n=1, pvals=distribution)
idx = helper.vec_to_index(S_x)
sampled_token = token_list[idx]
sampled_tokens.append(sampled_token)
counter += 1
return sampled_tokens[1:-1]
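# helper.t_softmax is not shown in this file; a minimal sketch of what a
# temperature softmax typically computes (the real helper may differ) is:
#   def t_softmax(x, t=1.0):
#       e = np.exp((x - np.max(x)) / t)  # stabilised exponentials, scaled by 1/t
#       return e / np.sum(e)
# Lower t sharpens the distribution, higher t flattens it.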
def resample(token_embeddings, hid, min_tokens=0, max_tokens=50, trials=100, t=1.0):
for i in range(trials):
try:
sample = apply_sampling(token_embeddings, hid, t=t, max_tokens=max_tokens)
if len(sample) < min_tokens:
continue
return ' '.join(sample)
        except Exception:
            pass
return 'NO SAMPLE IN %d STEPS' % trials
# training
print('start training...')
print()
log_steps = 500
save_steps = 5000
weights_changed = False
for e in range(max_epochs):
if e < start_epoch:
continue
error = 0
for i, (inp, tar) in enumerate(helper.token_sequence_generator(train_sents, token_embeddings)):
if e == start_epoch and i < start_iteration:
continue
cost = train(inp, tar)
error += cost
weights_changed = True
if (i+1) % log_steps == 0:
error /= log_steps
errors.append(error)
print('epoch: %d\titerations: %d\terror: %f' %(e, (i+1), error))
print(resample(token_embeddings, h_size))
print()
error = 0
if (i+1) % save_steps == 0:
helper.save_states([W_xi, W_hi, W_ci, b_i,
W_xf, W_hf, W_cf, b_f,
W_xc, W_hc, b_c,
W_xo, W_ho, W_co, b_o,
W_hy, b_y],
'%s-%d-%d.weights' % (timestamp, e, (i+1)), work_dir)
helper.save_errors(errors, '%s-%d-%d.errors' % (timestamp, e, (i+1)), work_dir)
weights_changed = False
print('weights saved:')
print('%s-%d-%d.weights' % (timestamp, e, (i+1)))
print('errors saved:')
print('%s-%d-%d.errors' % (timestamp, e, (i+1)))
print()
print('end training')
print()
# save current weights if training has been performed
if weights_changed:
helper.save_states([W_xi, W_hi, W_ci, b_i,
W_xf, W_hf, W_cf, b_f,
W_xc, W_hc, b_c,
W_xo, W_ho, W_co, b_o,
W_hy, b_y], '%s-%d-%d.weights' % (timestamp, e, (i+1)), work_dir)
helper.save_errors(errors, '%s-%d-%d.errors' % (timestamp, e, (i+1)), work_dir)
print('final weights saved:')
print('%s-%d-%d.weights' % (timestamp, e, (i+1)))
print('final errors saved:')
print('%s-%d-%d.errors' % (timestamp, e, (i+1)))
print()
# generate samples
min_tokens = 5
max_tokens = 50
num_samples = 20
print('generate samples')
print('minimum number of tokens per sample: ', min_tokens)
print()
for t in [0.8, 1.0, 1.2]:
print('temperature: ', t)
print()
for i in range(num_samples):
print(resample(token_embeddings, h_size, min_tokens=min_tokens, max_tokens=max_tokens, trials=100, t=t))
print()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ...tf3d.semantic_segmentation.preprocessor."""
import math
import numpy as np
import six
import tensorflow as tf
from tf3d import standard_fields
from tf3d.semantic_segmentation import preprocessor
class PreprocessTest(tf.test.TestCase):
def test_rotate_points_and_normals_around_axis(self):
points = tf.constant([[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], dtype=tf.float32)
normals = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
motions = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
rotation_angle = 90.0 * math.pi / 180.0
(rotated_points, rotated_normals, rotated_motions
) = preprocessor.rotate_points_and_normals_motions_around_axis(
points=points,
normals=normals,
motions=motions,
rotation_angle=rotation_angle,
axis=2)
expected_rotated_points = np.array([[-1.0, 1.0, 1.0],
[0.0, 1.0, 0.0]], dtype=np.float32)
expected_rotated_normals = np.array([[-1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]], dtype=np.float32)
expected_rotated_motions = np.array([[-1.0, 1.0, 0.0], [0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(rotated_points.numpy(), expected_rotated_points)
self.assertAllClose(rotated_normals.numpy(), expected_rotated_normals)
self.assertAllClose(rotated_motions.numpy(), expected_rotated_motions)
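  # Sanity check for the expected values above: a 90-degree rotation about the
  # z-axis maps (x, y, z) -> (-y, x, z), so [1, 1, 1] -> [-1, 1, 1] and
  # [1, 0, 0] -> [0, 1, 0].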
def test_rotate_randomly(self):
points = tf.constant([[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], dtype=tf.float32)
normals = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
motions = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
(rotated_points, rotated_normals,
rotated_motions) = preprocessor.rotate_randomly(
points=points,
normals=normals,
motions=motions,
x_min_degree_rotation=-10,
x_max_degree_rotation=10,
y_min_degree_rotation=-10,
y_max_degree_rotation=10,
z_min_degree_rotation=-180,
z_max_degree_rotation=180)
points_norm = tf.norm(points, axis=1)
normals_norm = tf.norm(normals, axis=1)
motions_norm = tf.norm(motions, axis=1)
rotated_points_norm = tf.norm(rotated_points, axis=1)
rotated_normals_norm = tf.norm(rotated_normals, axis=1)
rotated_motions_norm = tf.norm(rotated_motions, axis=1)
self.assertAllClose(points_norm.numpy(), rotated_points_norm.numpy())
self.assertAllClose(normals_norm.numpy(), rotated_normals_norm.numpy())
self.assertAllClose(motions_norm.numpy(), rotated_motions_norm.numpy())
self.assertAllEqual(rotated_points.shape, [2, 3])
self.assertAllEqual(rotated_normals.shape, [2, 3])
self.assertAllEqual(rotated_motions.shape, [2, 3])
def test_flip_points_and_normals(self):
points = tf.constant([[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], dtype=tf.float32)
normals = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
motions = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
(rotated_points, rotated_normals,
rotated_motions) = preprocessor.flip_points_and_normals_motions(
points=points,
normals=normals,
motions=motions,
x_rotate=tf.convert_to_tensor(-1.0, dtype=tf.float32),
y_rotate=tf.convert_to_tensor(-1.0, dtype=tf.float32))
expected_rotated_points = np.array([[-1.0, -1.0, 1.0],
[-1.0, 0.0, 0.0]], dtype=np.float32)
expected_rotated_normals = np.array([[-1.0, -1.0, 0.0],
[-1.0, 0.0, 1.0]], dtype=np.float32)
expected_rotated_motions = np.array([[-1.0, -1.0, 0.0], [-1.0, 0.0, 1.0]],
dtype=np.float32)
self.assertAllClose(rotated_points.numpy(), expected_rotated_points)
self.assertAllClose(rotated_normals.numpy(), expected_rotated_normals)
self.assertAllClose(rotated_motions.numpy(), expected_rotated_motions)
def test_flip_randomly_points_and_normals(self):
points = tf.constant([[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], dtype=tf.float32)
normals = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
motions = tf.constant([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=tf.float32)
(rotated_points, rotated_normals,
rotated_motions) = preprocessor.flip_randomly_points_and_normals_motions(
points=points, normals=normals, motions=motions, is_training=True)
points_norm = tf.norm(points, axis=1)
normals_norm = tf.norm(normals, axis=1)
motions_norm = tf.norm(motions, axis=1)
rotated_points_norm = tf.norm(rotated_points, axis=1)
rotated_normals_norm = tf.norm(rotated_normals, axis=1)
rotated_motions_norm = tf.norm(rotated_motions, axis=1)
self.assertAllClose(points_norm, rotated_points_norm.numpy())
self.assertAllClose(normals_norm, rotated_normals_norm.numpy())
self.assertAllClose(motions_norm, rotated_motions_norm.numpy())
self.assertAllEqual(rotated_points.shape, [2, 3])
self.assertAllEqual(rotated_normals.shape, [2, 3])
self.assertAllEqual(rotated_motions.shape, [2, 3])
def test_randomly_crop_points(self):
mesh_inputs = {
standard_fields.InputDataFields.point_positions:
tf.random.uniform([100000, 3],
minval=-10.0,
maxval=10.0,
dtype=tf.float32)
}
preprocessor.randomly_crop_points(
mesh_inputs=mesh_inputs,
view_indices_2d_inputs={},
x_random_crop_size=1.0,
y_random_crop_size=2.0)
cropped_points = mesh_inputs[
standard_fields.InputDataFields.point_positions]
min_cropped_points = tf.reduce_min(cropped_points, axis=0)
max_cropped_points = tf.reduce_max(cropped_points, axis=0)
self.assertLessEqual(max_cropped_points.numpy()[0] - 1.0,
min_cropped_points.numpy()[0])
self.assertLessEqual(max_cropped_points.numpy()[1] - 2.0,
min_cropped_points.numpy()[1])
def test_pick_labeled_image(self):
view_image_inputs = {
'rgb_image':
tf.random.uniform([10, 200, 300, 3],
minval=0.0,
maxval=255.0,
dtype=tf.float32),
'depth_image':
tf.random.uniform([4, 100, 150, 1],
minval=0.0,
maxval=10.0,
dtype=tf.float32),
}
mesh_inputs = {
standard_fields.InputDataFields.point_loss_weights:
tf.random.uniform([10000, 1],
minval=0.0,
maxval=1.0,
dtype=tf.float32),
standard_fields.InputDataFields.point_positions:
tf.random.uniform([10000, 3],
minval=-2.0,
maxval=2.0,
dtype=tf.float32),
}
view_indices_2d_inputs = {
'rgb_image':
tf.random.uniform([10, 10000, 2],
minval=0,
maxval=10,
dtype=tf.int32),
'depth_image':
tf.random.uniform([4, 10000, 2],
minval=-1,
maxval=10,
dtype=tf.int32)
}
preprocessor.pick_labeled_image(
mesh_inputs=mesh_inputs,
view_image_inputs=view_image_inputs,
view_indices_2d_inputs=view_indices_2d_inputs,
view_name='rgb_image')
self.assertAllEqual(view_image_inputs['rgb_image'].shape,
np.array([1, 200, 300, 3]))
self.assertAllEqual(view_image_inputs['depth_image'].shape,
np.array([4, 100, 150, 1]))
self.assertEqual(
mesh_inputs[
standard_fields.InputDataFields.point_loss_weights].shape[1], 1)
self.assertEqual(
mesh_inputs[
standard_fields.InputDataFields.point_positions].shape[1], 3)
self.assertEqual(
mesh_inputs[
standard_fields.InputDataFields.point_loss_weights].shape[0],
mesh_inputs[
standard_fields.InputDataFields.point_positions].shape[0])
self.assertEqual(
mesh_inputs[
standard_fields.InputDataFields.point_loss_weights].shape[0],
view_indices_2d_inputs['rgb_image'].shape[1])
self.assertEqual(view_indices_2d_inputs['rgb_image'].shape[0], 1)
self.assertEqual(view_indices_2d_inputs['rgb_image'].shape[2], 2)
def test_empty_inputs_raises_value_error(self):
with self.assertRaises(ValueError):
empty_input = {}
preprocessor.preprocess(inputs=empty_input)
def test_inputs_missing_image_raises_value_error(self):
inputs = {
'depth': tf.ones((50, 50, 1)),
'ignore_label': 255,
}
with self.assertRaises(ValueError):
preprocessor.preprocess(inputs=inputs)
def test_points_with_wrong_dimension_raises_value_error(self):
inputs = {
'points': tf.zeros((1000, 1, 3)),
'normals': tf.zeros((1000, 1, 3)),
'colors': tf.zeros((1000, 3), dtype=tf.uint8),
'semantic_labels': tf.zeros((1000, 1), dtype=tf.int32),
}
with self.assertRaises(ValueError):
preprocessor.preprocess(inputs=inputs)
def test_preprocess_points(self):
for is_training in [True, False]:
points = tf.random.uniform(
(1000, 3), minval=10.0, maxval=50.0, dtype=tf.float32)
normals = tf.random.uniform(
(1000, 3), minval=-0.5, maxval=0.5, dtype=tf.float32)
colors = tf.random.uniform((1000, 3),
minval=0,
maxval=255,
dtype=tf.int32)
colors = tf.cast(colors, dtype=tf.uint8)
semantic_labels = tf.random.uniform((1000, 1),
minval=0,
maxval=10,
dtype=tf.int32)
points_centered = points - tf.expand_dims(
tf.reduce_mean(points, axis=0), axis=0)
inputs = {
'points': points,
'normals': normals,
'colors': colors,
'semantic_labels': semantic_labels,
'ignore_label': 255,
}
inputs = preprocessor.preprocess(
inputs=inputs,
z_min_degree_rotation=-50.0,
z_max_degree_rotation=50.0,
is_training=is_training)
self.assertEqual(inputs['ignore_label'], 255)
inputs = {
k: v for k, v in six.iteritems(inputs) if isinstance(v, tf.Tensor)
}
self.assertEqual(
[1000, 3],
list(inputs[standard_fields.InputDataFields.point_positions].shape))
self.assertAllClose(
points_centered.numpy()[:, 2],
inputs[standard_fields.InputDataFields.point_positions].numpy()[:, 2])
def test_preprocess_points_with_padding(self):
for is_training in [True, False]:
points = tf.random.uniform(
(1000, 3), minval=10.0, maxval=50.0, dtype=tf.float32)
normals = tf.random.uniform(
(1000, 3), minval=-0.5, maxval=0.5, dtype=tf.float32)
colors = tf.random.uniform((1000, 3),
minval=0,
maxval=255,
dtype=tf.int32)
semantic_labels = tf.random.uniform((1000, 1),
minval=0,
maxval=10,
dtype=tf.int32)
colors = tf.cast(colors, dtype=tf.uint8)
points_centered = points - tf.expand_dims(
tf.reduce_mean(points, axis=0), axis=0)
points_centered = tf.pad(points_centered, paddings=[[0, 1000], [0, 0]])
inputs = {
'points': points,
'normals': normals,
'colors': colors,
'semantic_labels': semantic_labels,
'ignore_label': 255,
}
inputs = preprocessor.preprocess(
inputs=inputs,
z_min_degree_rotation=-50.0,
z_max_degree_rotation=50.0,
is_training=is_training,
points_pad_or_clip_size=2000)
self.assertEqual(inputs['ignore_label'], 255)
inputs = {
k: v for k, v in six.iteritems(inputs) if isinstance(v, tf.Tensor)
}
self.assertEqual(
[2000, 3],
list(inputs[standard_fields.InputDataFields.point_positions].shape))
self.assertEqual(
[2000, 3],
list(inputs[standard_fields.InputDataFields.point_normals].shape))
self.assertEqual(
[2000, 3],
list(inputs[standard_fields.InputDataFields.point_colors].shape))
self.assertEqual(
[2000, 1],
list(
inputs[standard_fields.InputDataFields.point_loss_weights].shape))
self.assertEqual(1000,
inputs[standard_fields.InputDataFields.num_valid_points])
self.assertAllClose(
points_centered.numpy()[:, 2],
inputs[standard_fields.InputDataFields.point_positions].numpy()[:, 2])
def test_preprocess_points_without_normals_and_colors(self):
for is_training in [True, False]:
points = tf.random.uniform(
(1000, 3), minval=10.0, maxval=50.0, dtype=tf.float32)
semantic_labels = tf.random.uniform((1000, 1),
minval=0,
maxval=10,
dtype=tf.int32)
points_centered = points - tf.expand_dims(
tf.reduce_mean(points, axis=0), axis=0)
points_centered = tf.pad(points_centered, paddings=[[0, 1000], [0, 0]])
inputs = {
'points': points,
'semantic_labels': semantic_labels,
'ignore_label': 255,
}
inputs = preprocessor.preprocess(
inputs=inputs,
z_min_degree_rotation=-50.0,
z_max_degree_rotation=50.0,
is_training=is_training,
points_pad_or_clip_size=2000)
self.assertEqual(inputs['ignore_label'], 255)
inputs = {
k: v for k, v in six.iteritems(inputs) if isinstance(v, tf.Tensor)
}
self.assertEqual(
[2000, 3],
list(inputs[standard_fields.InputDataFields.point_positions].shape))
self.assertEqual(
[2000, 1],
list(
inputs[standard_fields.InputDataFields.point_loss_weights].shape))
self.assertEqual(
1000,
inputs[standard_fields.InputDataFields.num_valid_points].numpy())
self.assertAllClose(
points_centered.numpy()[:, 2],
inputs[standard_fields.InputDataFields.point_positions].numpy()[:, 2])
if __name__ == '__main__':
tf.test.main()
|
|
"""Undocumented Module"""
__all__ = ['DirectRadioButton']
from pandac.PandaModules import *
import DirectGuiGlobals as DGG
from DirectButton import *
from DirectLabel import *
class DirectRadioButton(DirectButton):
"""
DirectRadioButton(parent) - Create a DirectGuiWidget which responds
    to mouse clicks by setting the given value on the given variable and
    executing a callback function (passing that state through) if defined
"""
def __init__(self, parent = None, **kw):
# Inherits from DirectButton
# A Direct Frame can have:
# - A background texture (pass in path to image, or Texture Card)
# - A midground geometry item (pass in geometry)
# - A foreground text Node (pass in text string or Onscreen Text)
# For a direct button:
# Each button has 4 states (ready, press, rollover, disabled)
# The same image/geom/text can be used for all four states or each
# state can have a different text/geom/image
# State transitions happen automatically based upon mouse interaction
        # Responds to click event and calls command if not None
self.colors = None
optiondefs = (
('indicatorValue', 0, self.setIndicatorValue),
# variable is a list whose value will be set by this radio button
('variable', [], None),
# value is the value to be set when this radio button is selected
('value', [], None),
# others is a list of other radio buttons sharing same variable
('others', [], None),
# boxBorder defines the space created around the check box
('boxBorder', 0, None),
# boxPlacement maps left, above, right, below
('boxPlacement', 'left', None),
# boxGeom defines geom to indicate current radio button is selected or not
('boxGeom', None, None),
('boxGeomColor', None, None),
('boxGeomScale', 1.0, None),
('boxImage', loader.loadModel('models/gui/radio_button_gui'), None),
('boxImageScale', 1.0, None),
('boxImageColor', VBase4(1, 1, 1, 1), None),
('boxRelief', None, None),
)
# Merge keyword options with default options
self.defineoptions(kw, optiondefs)
# Initialize superclasses
DirectButton.__init__(self, parent)
self.indicator = self.createcomponent("indicator", (), None,
DirectLabel, (self,),
numStates = 2,
image = self['boxImage'],
image_scale = self['boxImageScale'],
image_color = self['boxImageColor'],
geom = self['boxGeom'],
geom_scale = self['boxGeomScale'],
geom_color = self['boxGeomColor'],
state = 'disabled',
text = ('X', 'X'),
relief = self['boxRelief'],
)
# Call option initialization functions
self.initialiseoptions(DirectRadioButton)
# After initialization with X giving it the correct size, put back space
if self['boxGeom'] == None:
self.indicator['text'] = (' ', '*')
self.indicator['text_pos'] = (0, -.5)
else:
self.indicator['text'] = (' ', ' ')
if self['boxGeomColor'] != None and self['boxGeom'] != None:
self.colors = [VBase4(1, 1, 1, 0), self['boxGeomColor']]
self.component('indicator')['geom_color'] = VBase4(1, 1, 1, 0)
needToCheck = True
if len(self['value']) == len(self['variable']) != 0:
for i in range(len(self['value'])):
if self['variable'][i] != self['value'][i]:
needToCheck = False
break
if needToCheck:
self.check()
    # Override the resetFrameSize of DirectGuiWidget in order to provide space for the label
def resetFrameSize(self):
self.setFrameSize(fClearFrame = 1)
def setFrameSize(self, fClearFrame = 0):
if self['frameSize']:
# Use user specified bounds
self.bounds = self['frameSize']
frameType = self.frameStyle[0].getType()
ibw = self.indicator['borderWidth']
else:
# Use ready state to compute bounds
frameType = self.frameStyle[0].getType()
if fClearFrame and (frameType != PGFrameStyle.TNone):
self.frameStyle[0].setType(PGFrameStyle.TNone)
self.guiItem.setFrameStyle(0, self.frameStyle[0])
# To force an update of the button
self.guiItem.getStateDef(0)
# Clear out frame before computing bounds
self.getBounds()
# Restore frame style if necessary
if (frameType != PGFrameStyle.TNone):
self.frameStyle[0].setType(frameType)
self.guiItem.setFrameStyle(0, self.frameStyle[0])
# Ok, they didn't set specific bounds,
# let's add room for the label indicator
# get the difference in height
ibw = self.indicator['borderWidth']
indicatorWidth = (self.indicator.getWidth() + (2*ibw[0]))
indicatorHeight = (self.indicator.getHeight() + (2*ibw[1]))
diff = (indicatorHeight + (2*self['boxBorder']) -
(self.bounds[3] - self.bounds[2]))
            # If background is smaller than the indicator, enlarge the background
if diff > 0:
if self['boxPlacement'] == 'left': #left
self.bounds[0] += -(indicatorWidth + (2*self['boxBorder']))
self.bounds[3] += diff/2
self.bounds[2] -= diff/2
elif self['boxPlacement'] == 'below': #below
self.bounds[2] += -(indicatorHeight+(2*self['boxBorder']))
elif self['boxPlacement'] == 'right': #right
self.bounds[1] += indicatorWidth + (2*self['boxBorder'])
self.bounds[3] += diff/2
self.bounds[2] -= diff/2
else: #above
self.bounds[3] += indicatorHeight + (2*self['boxBorder'])
# Else make space on correct side for indicator
else:
if self['boxPlacement'] == 'left': #left
self.bounds[0] += -(indicatorWidth + (2*self['boxBorder']))
elif self['boxPlacement'] == 'below': #below
self.bounds[2] += -(indicatorHeight + (2*self['boxBorder']))
elif self['boxPlacement'] == 'right': #right
self.bounds[1] += indicatorWidth + (2*self['boxBorder'])
else: #above
self.bounds[3] += indicatorHeight + (2*self['boxBorder'])
# Set frame to new dimensions
if ((frameType != PGFrameStyle.TNone) and
(frameType != PGFrameStyle.TFlat)):
bw = self['borderWidth']
else:
bw = (0, 0)
# Set frame to new dimensions
self.guiItem.setFrame(
self.bounds[0] - bw[0],
self.bounds[1] + bw[0],
self.bounds[2] - bw[1],
self.bounds[3] + bw[1])
# If they didn't specify a position, put it in the center of new area
if not self.indicator['pos']:
bbounds = self.bounds
lbounds = self.indicator.bounds
newpos = [0, 0, 0]
if self['boxPlacement'] == 'left': #left
newpos[0] += bbounds[0]-lbounds[0] + self['boxBorder'] + ibw[0]
dropValue = (bbounds[3]-bbounds[2]-lbounds[3]+lbounds[2])/2 + self['boxBorder']
newpos[2] += (bbounds[3]-lbounds[3] + self['boxBorder'] -
dropValue)
elif self['boxPlacement'] == 'right': #right
newpos[0] += bbounds[1]-lbounds[1] - self['boxBorder'] - ibw[0]
dropValue = (bbounds[3]-bbounds[2]-lbounds[3]+lbounds[2])/2 + self['boxBorder']
newpos[2] += (bbounds[3]-lbounds[3] + self['boxBorder']
- dropValue)
elif self['boxPlacement'] == 'above': #above
newpos[2] += bbounds[3]-lbounds[3] - self['boxBorder'] - ibw[1]
else: #below
newpos[2] += bbounds[2]-lbounds[2] + self['boxBorder'] + ibw[1]
self.indicator.setPos(newpos[0], newpos[1], newpos[2])
def commandFunc(self, event):
if len(self['value']) == len(self['variable']) != 0:
for i in range(len(self['value'])):
self['variable'][i] = self['value'][i]
self.check()
def check(self):
self['indicatorValue'] = 1
self.setIndicatorValue()
for other in self['others']:
if other != self:
other.uncheck()
if self['command']:
# Pass any extra args to command
apply(self['command'], self['extraArgs'])
def setOthers(self, others):
self['others'] = others
def uncheck(self):
self['indicatorValue'] = 0
if self.colors != None:
self.component('indicator')['geom_color'] = self.colors[self['indicatorValue']]
def setIndicatorValue(self):
self.component('indicator').guiItem.setState(self['indicatorValue'])
if self.colors != None:
self.component('indicator')['geom_color'] = self.colors[self['indicatorValue']]
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import enum
from typing import Optional, Type
import simplejson as json
from croniter import croniter
from flask import flash, g
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import gettext as __, lazy_gettext as _
from wtforms import BooleanField, StringField
from superset import app, appbuilder, db, security_manager
from superset.exceptions import SupersetException
from superset.models.core import Dashboard, Slice
from superset.models.schedules import (
DashboardEmailSchedule,
ScheduleType,
SliceEmailSchedule,
)
from superset.tasks.schedules import schedule_email_report
from superset.utils.core import get_email_address_list, json_iso_dttm_ser
from superset.views.core import json_success
from .base import DeleteMixin, SupersetModelView
class EmailScheduleView(SupersetModelView, DeleteMixin):
_extra_data = {"test_email": False, "test_email_recipients": None}
schedule_type: Optional[Type] = None
schedule_type_model: Optional[Type] = None
page_size = 20
add_exclude_columns = [
"user",
"created_on",
"changed_on",
"created_by",
"changed_by",
]
edit_exclude_columns = add_exclude_columns
description_columns = {
"deliver_as_group": "If enabled, send a single email to all "
"recipients (in email/To: field)",
"crontab": "Unix style crontab schedule to deliver emails. "
"Changes to schedules reflect in one hour.",
"delivery_type": "Indicates how the rendered content is delivered",
}
add_form_extra_fields = {
"test_email": BooleanField(
"Send Test Email",
default=False,
description="If enabled, we send a test mail on create / update",
),
"test_email_recipients": StringField(
"Test Email Recipients",
default=None,
description="List of recipients to send test email to. "
"If empty, we send it to the original recipients",
),
}
edit_form_extra_fields = add_form_extra_fields
def process_form(self, form, is_created):
if form.test_email_recipients.data:
test_email_recipients = form.test_email_recipients.data.strip()
else:
test_email_recipients = None
self._extra_data["test_email"] = form.test_email.data
self._extra_data["test_email_recipients"] = test_email_recipients
def pre_add(self, obj):
try:
recipients = get_email_address_list(obj.recipients)
obj.recipients = ", ".join(recipients)
except Exception:
raise SupersetException("Invalid email list")
obj.user = obj.user or g.user
if not croniter.is_valid(obj.crontab):
raise SupersetException("Invalid crontab format")
def pre_update(self, obj):
self.pre_add(obj)
def post_add(self, obj):
        # Schedule a test mail if the user requested it.
if self._extra_data["test_email"]:
recipients = self._extra_data["test_email_recipients"] or obj.recipients
args = (self.schedule_type, obj.id)
kwargs = dict(recipients=recipients)
schedule_email_report.apply_async(args=args, kwargs=kwargs)
        # Notify the user that schedule changes will be activated only in the
        # next hour
if obj.active:
flash("Schedule changes will get applied in one hour", "warning")
def post_update(self, obj):
self.post_add(obj)
@has_access
@expose("/fetch/<int:item_id>/", methods=["GET"])
def fetch_schedules(self, item_id):
query = db.session.query(self.datamodel.obj)
query = query.join(self.schedule_type_model).filter(
self.schedule_type_model.id == item_id
)
schedules = []
for schedule in query.all():
info = {"schedule": schedule.id}
for col in self.list_columns + self.add_exclude_columns:
info[col] = getattr(schedule, col)
if isinstance(info[col], enum.Enum):
info[col] = info[col].name
elif isinstance(info[col], security_manager.user_model):
info[col] = info[col].username
info["user"] = schedule.user.username
info[self.schedule_type] = getattr(schedule, self.schedule_type).id
schedules.append(info)
return json_success(json.dumps(schedules, default=json_iso_dttm_ser))
class DashboardEmailScheduleView(EmailScheduleView):
schedule_type = ScheduleType.dashboard.value
schedule_type_model = Dashboard
add_title = _("Schedule Email Reports for Dashboards")
edit_title = add_title
list_title = _("Manage Email Reports for Dashboards")
datamodel = SQLAInterface(DashboardEmailSchedule)
order_columns = ["user", "dashboard", "created_on"]
list_columns = [
"dashboard",
"active",
"crontab",
"user",
"deliver_as_group",
"delivery_type",
]
add_columns = [
"dashboard",
"active",
"crontab",
"recipients",
"deliver_as_group",
"delivery_type",
"test_email",
"test_email_recipients",
]
edit_columns = add_columns
search_columns = [
"dashboard",
"active",
"user",
"deliver_as_group",
"delivery_type",
]
label_columns = {
"dashboard": _("Dashboard"),
"created_on": _("Created On"),
"changed_on": _("Changed On"),
"user": _("User"),
"active": _("Active"),
"crontab": _("Crontab"),
"recipients": _("Recipients"),
"deliver_as_group": _("Deliver As Group"),
"delivery_type": _("Delivery Type"),
}
def pre_add(self, obj):
if obj.dashboard is None:
raise SupersetException("Dashboard is mandatory")
super(DashboardEmailScheduleView, self).pre_add(obj)
class SliceEmailScheduleView(EmailScheduleView):
schedule_type = ScheduleType.slice.value
schedule_type_model = Slice
add_title = _("Schedule Email Reports for Charts")
edit_title = add_title
list_title = _("Manage Email Reports for Charts")
datamodel = SQLAInterface(SliceEmailSchedule)
order_columns = ["user", "slice", "created_on"]
list_columns = [
"slice",
"active",
"crontab",
"user",
"deliver_as_group",
"delivery_type",
"email_format",
]
add_columns = [
"slice",
"active",
"crontab",
"recipients",
"deliver_as_group",
"delivery_type",
"email_format",
"test_email",
"test_email_recipients",
]
edit_columns = add_columns
search_columns = [
"slice",
"active",
"user",
"deliver_as_group",
"delivery_type",
"email_format",
]
label_columns = {
"slice": _("Chart"),
"created_on": _("Created On"),
"changed_on": _("Changed On"),
"user": _("User"),
"active": _("Active"),
"crontab": _("Crontab"),
"recipients": _("Recipients"),
"deliver_as_group": _("Deliver As Group"),
"delivery_type": _("Delivery Type"),
"email_format": _("Email Format"),
}
def pre_add(self, obj):
if obj.slice is None:
raise SupersetException("Slice is mandatory")
super(SliceEmailScheduleView, self).pre_add(obj)
def _register_schedule_menus():
appbuilder.add_separator("Manage")
appbuilder.add_view(
DashboardEmailScheduleView,
"Dashboard Email Schedules",
label=__("Dashboard Emails"),
category="Manage",
category_label=__("Manage"),
icon="fa-search",
)
appbuilder.add_view(
SliceEmailScheduleView,
"Chart Emails",
label=__("Chart Email Schedules"),
category="Manage",
category_label=__("Manage"),
icon="fa-search",
)
if app.config["ENABLE_SCHEDULED_EMAIL_REPORTS"]:
_register_schedule_menus()
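# The menu entries above are registered only when scheduled email reports are
# enabled, e.g. via the Superset config (illustrative snippet):
#   ENABLE_SCHEDULED_EMAIL_REPORTS = True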
|
|
import unittest
from troposphere import (
MAX_MAPPINGS,
MAX_OUTPUTS,
MAX_PARAMETERS,
MAX_RESOURCES,
Output,
Parameter,
Template,
)
from troposphere.s3 import Bucket
from troposphere.serverless import Globals
class TestInitArguments(unittest.TestCase):
def test_description_default(self):
template = Template()
self.assertIsNone(template.description)
def test_description(self):
value = "foo"
template = Template(Description=value)
self.assertEqual(template.description, value)
def test_metadata_default(self):
template = Template()
self.assertEqual(template.metadata, {})
def test_metadata(self):
value = "foo"
template = Template(Metadata=value)
self.assertEqual(template.metadata, value)
def test_transform(self):
transform = "AWS::Serverless-2016-10-31"
template = Template()
template.set_transform(transform)
self.assertEqual(template.transform, transform)
def test_globals(self):
template = Template()
globals = Globals()
with self.assertRaises(ValueError):
template.set_globals(globals)
transform = "AWS::Serverless-2016-10-31"
template.set_transform(transform)
template.set_globals(globals)
self.assertEqual(template.globals, globals)
with self.assertRaises(ValueError):
template.set_transform("other_transform")
class TestValidate(unittest.TestCase):
def test_max_parameters(self):
template = Template()
for i in range(0, MAX_PARAMETERS):
template.add_parameter(Parameter(str(i), Type="String"))
with self.assertRaises(ValueError):
template.add_parameter(Parameter("parameter", Type="String"))
def test_max_resources(self):
template = Template()
for i in range(0, MAX_RESOURCES):
template.add_resource(Bucket(str(i)))
with self.assertRaises(ValueError):
template.add_resource(Bucket("bucket"))
def test_max_outputs(self):
template = Template()
for i in range(0, MAX_OUTPUTS):
template.add_output(Output(str(i), Value=str(i)))
with self.assertRaises(ValueError):
template.add_output(Output("output", Value="output"))
def test_max_mappings(self):
template = Template()
for i in range(0, MAX_MAPPINGS):
template.add_mapping(str(i), {"n": "v"})
with self.assertRaises(ValueError):
template.add_mapping("mapping", {"n": "v"})
class TestEquality(unittest.TestCase):
def test_eq(self):
metadata = "foo"
description = "bar"
resource = Bucket("Baz")
output = Output("qux", Value="qux")
t1 = Template(Description=description, Metadata=metadata)
t1.add_resource(resource)
t1.add_output(output)
t2 = Template(Description=description, Metadata=metadata)
t2.add_resource(resource)
t2.add_output(output)
self.assertEqual(t1, t2)
def test_ne(self):
t1 = Template(Description="foo1", Metadata="bar1")
t1.add_resource(Bucket("Baz1"))
t1.add_output(Output("qux1", Value="qux1"))
t2 = Template(Description="foo2", Metadata="bar2")
t2.add_resource(Bucket("Baz2"))
t2.add_output(Output("qux2", Value="qux2"))
self.assertNotEqual(t1, t2)
def test_hash(self):
metadata = "foo"
description = "bar"
resource = Bucket("Baz")
output = Output("qux", Value="qux")
t1 = Template(Description=description, Metadata=metadata)
t1.add_resource(resource)
t1.add_output(output)
t2 = Template(Description=description, Metadata=metadata)
t2.add_resource(resource)
t2.add_output(output)
self.assertEqual(len(set([t1, t2])), 1)
class TestAwsInterface(unittest.TestCase):
def test_parameter_label(self):
t = Template()
p1 = t.add_parameter(Parameter("Foo"))
t.add_parameter(Parameter("Bar"))
t.set_parameter_label(p1, "Foo label")
t.set_parameter_label("Bar", "Bar label")
self.assertEqual(
t.metadata,
{
"AWS::CloudFormation::Interface": {
"ParameterLabels": {
"Foo": {"default": "Foo label"},
"Bar": {"default": "Bar label"},
},
},
},
)
def test_parameter_label_replace(self):
t = Template()
p1 = t.add_parameter(Parameter("Foo"))
t.add_parameter(Parameter("Bar"))
t.set_parameter_label(p1, "Foo label")
t.set_parameter_label("Foo", "Bar label")
self.assertEqual(
t.metadata,
{
"AWS::CloudFormation::Interface": {
"ParameterLabels": {
"Foo": {"default": "Bar label"},
},
},
},
)
def test_parameter_group(self):
t = Template()
p1 = t.add_parameter(Parameter("Foo"))
t.add_parameter(Parameter("Bar"))
t.add_parameter_to_group(p1, "gr")
t.add_parameter_to_group("Bar", "gr")
self.assertEqual(
t.metadata,
{
"AWS::CloudFormation::Interface": {
"ParameterGroups": [
{
"Label": {"default": "gr"},
"Parameters": ["Foo", "Bar"],
},
],
},
},
)
class TestRules(unittest.TestCase):
def test_rules(self):
t = Template()
t.add_parameter("One")
t.add_parameter("Two")
rule = {
"Assertions": [
{
"Assert": {
"Fn::Equals": [
{"Ref": "One"},
{"Ref": "Two"},
],
},
},
],
}
t.add_rule("ValidateEqual", rule)
self.assertTrue("ValidateEqual" in t.rules)
rendered = t.to_dict()
self.assertEqual(rendered["Rules"]["ValidateEqual"], rule)
if __name__ == "__main__":
unittest.main()
|
|
"""Document fields."""
import abc
import re
from datetime import datetime
from decimal import Decimal
import bson.errors
from bson import ObjectId, Decimal128
from aiomongodel.errors import ValidationError, StopValidation
from aiomongodel.utils import _Empty, import_class
__all__ = ['AnyField', 'StrField', 'EmailField', 'IntField',
'FloatField', 'DecimalField', 'DateTimeField',
'EmbDocField', 'ListField', 'RefField', 'SynonymField',
'ObjectIdField']
class Field(abc.ABC):
"""Base class for all fields.
Attributes:
name (str): Name of the field.
mongo_name (str): Name of the field in mongodb.
required (bool): Is field required.
        allow_none (bool): Can the field be assigned ``None``.
        default: Default value for field.
        verbose_name (str): Verbose field name for meta information about the field.
        choices (dict, set): Dict or set of choices for a field. If it is a
            ``dict``, its keys are used as choices.
"""
def __init__(self, *, required=True, default=_Empty, mongo_name=None,
name=None, allow_none=False, choices=None, field_type=None,
verbose_name=None):
"""Create field.
Args:
required (bool): Is field required. Defaults to ``True``.
            default: Default value for a field. When a document has no value
                for the field in ``__init__`` it tries to use the default
                value (if it is not ``_Empty``). Defaults to ``_Empty``.
.. note::
Default value is ignored if field is not required.
.. note::
Default can be a value or a callable with no arguments.
mongo_name (str): Name of the field in MongoDB.
Defaults to ``None``.
.. note::
If ``mongo_name`` is None it is set to ``name`` of the
field.
name (str): Name of the field. Should not be used explicitly as
it is set by metaclass. Defaults to ``None``.
            allow_none (bool): Can the field be assigned ``None``. Defaults
                to ``False``.
            verbose_name (str): Verbose field name for meta information about
                the field. Defaults to ``None``.
choices (dict, set): Possible values for field. If it is a
``dict``, keys should be possible values. To preserve values
order use ``collections.OrderedDict``. Defaults to ``None``.
.. note::
If ``choices`` are given then other constraints are ignored.
"""
self.field_type = field_type
self.mongo_name = mongo_name
self.name = name
self.required = required
self.allow_none = allow_none
self._default = default
self.verbose_name = verbose_name
if choices is None or isinstance(choices, dict):
self.choices = choices
else:
self.choices = set(choices)
self.validators = [self._validate_none,
self._validate_type]
if self.choices is not None:
self.validators.append(self._validate_choices)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
if self.mongo_name is None:
self.mongo_name = value
@property
def default(self):
try:
return self._default()
except TypeError: # is not callable
return self._default
def __get__(self, instance, instance_type):
if instance is None:
return self
try:
return instance._data[self.name]
except KeyError:
# TODO: should we try to return default here?
return None
def __set__(self, instance, value):
instance._data[self.name] = self.from_data(value)
def to_mongo(self, value):
"""Convert value to mongo format."""
return value
def from_mongo(self, value):
"""Convert value from mongo format to python field format."""
return value
def from_data(self, value):
"""Convert value from user provided data to field type.
Args:
value: Value provided by user.
Returns:
            Converted value, or the value as is if an error occurred. If value
            is ``None``, return ``None``.
"""
try:
return None if value is None else self.field_type(value)
except (ValueError, TypeError):
return value
@property
def s(self):
"""Return mongodb name of the field.
This property can be used wherever mongodb field's name is required.
Example:
.. code-block:: python
User.q(db).find({User.name.s: 'Francesco', User.is_admin.s: True},
{User.posts.s: 1, User._id.s: 0})
.. note::
Field's ``name`` and ``mongo_name`` could be different so
``User.is_admin.s`` could be for example ``'isadm'``.
"""
return self.mongo_name
def _validate_none(self, value):
if value is None:
if self.allow_none:
raise StopValidation()
raise ValidationError('none value is not allowed')
def _validate_type(self, value):
if not isinstance(value, self.field_type):
raise ValidationError('invalid value type')
def _validate_choices(self, value):
if value in self.choices:
raise StopValidation()
raise ValidationError("value does not match any variant")
def validate(self, value):
try:
for func in self.validators:
func(value)
except StopValidation:
return
class AnyField(Field):
"""Any type field.
    Can store any type of value. Stores the value as is.
    It's up to the developer to ensure the value can be stored in mongodb.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators = [self._validate_none]
if self.choices is not None:
self.validators.append(self._validate_choices)
def from_data(self, value):
return value
class StrField(Field):
"""String field."""
def __init__(self, *, regex=None, allow_blank=False,
min_length=None, max_length=None, **kwargs):
"""Create string field.
Args:
regex (str): Regular expression for field's values.
Defaults to ``None``.
            allow_blank (bool): Can the field be assigned a blank string.
                Defaults to ``False``.
min_length (int): Minimum length of field's values.
Defaults to ``None``.
max_length (int): Maximum length of field's values.
Defaults to ``None``.
**kwargs: Other arguments from ``Field``.
"""
super().__init__(field_type=str, **kwargs)
self.regex = re.compile(regex) if isinstance(regex, str) else regex
self.allow_blank = allow_blank
self.min_length = min_length
self.max_length = max_length
if self.regex is not None:
self.validators.append(self._validate_regex)
self.validators.append(self._validate_blank)
        if self.min_length is not None:
self.validators.append(self._validate_min_length)
if self.max_length is not None:
self.validators.append(self._validate_max_length)
def _validate_max_length(self, value):
if len(value) > self.max_length:
raise ValidationError('length is greater than {constraint}',
constraint=self.max_length)
def _validate_min_length(self, value):
if len(value) < self.min_length:
raise ValidationError('length is less than {constraint}',
constraint=self.min_length)
def _validate_blank(self, value):
if value == '':
if self.allow_blank:
raise StopValidation()
raise ValidationError('blank value is not allowed')
def _validate_regex(self, value):
if not self.regex.match(value):
raise ValidationError(
'value does not match pattern {constraint}',
constraint=self.regex.pattern)
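# Illustrative use of StrField constraints (a sketch; the field and values are
# made up for this example):
#   name = StrField(min_length=2, max_length=50)
#   name.validate('Bob')  # passes
#   name.validate('')     # raises ValidationError('blank value is not allowed')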
class BoolField(Field):
"""Boolean field."""
def __init__(self, **kwargs):
super().__init__(field_type=bool, **kwargs)
class NumberField(Field, metaclass=abc.ABCMeta):
"""Base class for number fields."""
def __init__(self, *, gte=None, lte=None, gt=None, lt=None, **kwargs):
"""Create number field.
Args:
gte: Greater than or equal limit. Defaults to ``None``.
lte: Less than or equal limit. Defaults to ``None``.
gt: Greater than limit. Defaults to ``None``.
lt: Less than limit. Defaults to ``None``.
**kwargs: Other arguments from ``Field``.
"""
super().__init__(**kwargs)
self.gte = gte
self.lte = lte
self.gt = gt
self.lt = lt
if gte is not None:
self.validators.append(self._validate_gte)
if lte is not None:
self.validators.append(self._validate_lte)
if gt is not None:
self.validators.append(self._validate_gt)
if lt is not None:
self.validators.append(self._validate_lt)
def _validate_gte(self, value):
if value < self.gte:
raise ValidationError('value is less than {constraint}',
constraint=self.gte)
def _validate_lte(self, value):
if value > self.lte:
raise ValidationError('value is greater than {constraint}',
constraint=self.lte)
def _validate_gt(self, value):
if value <= self.gt:
raise ValidationError('value should be greater than {constraint}',
constraint=self.gt)
def _validate_lt(self, value):
if value >= self.lt:
raise ValidationError('value should be less than {constraint}',
constraint=self.lt)
class IntField(NumberField):
"""Integer field."""
def __init__(self, **kwargs):
"""Create int field."""
super().__init__(field_type=int, **kwargs)
class FloatField(NumberField):
"""Float field."""
def __init__(self, **kwargs):
"""Create float field."""
super().__init__(field_type=float, **kwargs)
class DateTimeField(Field):
"""Date and time field based on datetime.datetime."""
def __init__(self, **kwargs):
super().__init__(field_type=datetime, **kwargs)
def from_data(self, value):
return value
class ObjectIdField(Field):
"""ObjectId field."""
def __init__(self, **kwargs):
super().__init__(field_type=ObjectId, **kwargs)
def from_data(self, value):
"""Convert value to ObjectId.
Args:
value (ObjectId, str): ObjectId value or 24-character hex string.
Returns:
            None or an ObjectId value. If the value is not an ObjectId and
            can't be converted, it is returned as is.
"""
if value is None or isinstance(value, ObjectId):
return value
try:
return ObjectId(value)
except (bson.errors.InvalidId, TypeError):
return value
class CompoundFieldNameBuilder:
"""Helper class to encapsulate compound name join."""
__slots__ = ['_obj', '_prefix']
def __init__(self, obj, prefix):
self._obj = obj
self._prefix = prefix
def __getattr__(self, name):
document_class = getattr(self._obj, 'document_class', None)
if not document_class:
raise AttributeError(
"'{0}' has no attribute {1}".format(
self._obj.__class__.__name__, name))
return CompoundFieldNameBuilder(getattr(self._obj, name),
self._prefix)
@property
def s(self):
return self._prefix + '.' + self._obj.s
class CompoundField(Field):
"""Base class for complex fields.
    This class should be the base for embedded document fields or list fields
    which could contain embedded documents as their elements.
    This class makes it possible to build a complex field's name using
    attribute syntax and the `s` property, i.e.:
.. code-block:: python
assert Comment.author.name.s == 'author.name'
assert Article.tags._id.s == 'tags._id'
assert Hotel.rooms.category.s == 'rooms.category'
assert Hotel.rooms.category.name.s == 'rooms.category.name'
so you can use them to build queries:
.. code-block:: python
Hotel.q(db).find({Hotel.rooms.category.name.s: 'Lux'})
"""
def __init__(self, document_class, base_document_class, **kwargs):
if (isinstance(document_class, str) or
document_class is None
or issubclass(document_class, base_document_class)):
self._document_class = document_class
else:
raise TypeError(
("document_class should be a "
"subclass of '{0}' or str, not a '{1}'").format(
base_document_class, document_class))
self._base_document_class = base_document_class
super().__init__(**kwargs)
@property
def document_class(self):
if isinstance(self._document_class, str):
self._document_class = import_class(self._document_class)
if not issubclass(self._document_class, self._base_document_class):
raise TypeError(
("document_class should be a "
"subclass of '{0}', not a '{1}'").format(
self._base_document_class, self._document_class))
return self._document_class
def __getattr__(self, name):
if self.document_class is None:
raise AttributeError(
"'{0}' has no attribute '{1}'".format(
self.__class__.__name__, name))
return CompoundFieldNameBuilder(
getattr(self.document_class, name), self.mongo_name)
class EmbDocField(CompoundField):
"""Embedded Document Field."""
def __init__(self, document_class, **kwargs):
"""Create Embedded Document field.
Args:
document_class: A subclass of the
``aiomongodel.EmbeddedDocument`` class or string with
absolute path to such class.
**kwargs: Other arguments from ``Field``.
"""
EmbeddedDocument = import_class('aiomongodel.EmbeddedDocument')
super().__init__(document_class, EmbeddedDocument, **kwargs)
self.validators.append(lambda value: value.validate())
def validate(self, value):
self.field_type = self.document_class
super().validate(value)
def to_mongo(self, value):
if value is None:
return None
return value.to_mongo()
def from_mongo(self, value):
if value is None:
return None
return self.document_class.from_mongo(value)
def from_data(self, value):
if value is None or isinstance(value, self.document_class):
return value
try:
return self.document_class.from_data(value)
except (TypeError, ValueError):
return value
class ListField(CompoundField):
"""List field."""
def __init__(self, item_field, *,
min_length=None, max_length=None, **kwargs):
"""Create List field.
Args:
item_field (Field): Instance of the field to reflect list
items' type.
min_length (int): Minimum length of the list. Defaults to ``None``.
max_length (int): Maximum length of the list. Defaults to ``None``.
**kwargs: Other arguments from ``Field``.
Raises:
TypeError: If item_field is not instance of the ``Field`` subclass.
"""
if not isinstance(item_field, Field):
raise TypeError(
('item_field should be an instance of the `Field` '
'subclass, not of the `{0}`').format(type(item_field)))
EmbeddedDocument = import_class('aiomongodel.EmbeddedDocument')
document_class, base_document_class = (
(item_field._document_class, EmbeddedDocument)
if isinstance(item_field, EmbDocField)
else (None, None))
super().__init__(document_class, base_document_class,
field_type=list, **kwargs)
self.item_field = item_field
self.min_length = min_length
self.max_length = max_length
if min_length is not None:
self.validators.append(self._validate_min_length)
if max_length is not None:
self.validators.append(self._validate_max_length)
self.validators.append(self._validate_items)
def _validate_min_length(self, value):
if len(value) < self.min_length:
raise ValidationError('list length is less than {constraint}',
constraint=self.min_length)
def _validate_max_length(self, value):
if len(value) > self.max_length:
raise ValidationError('list length is greater than {constraint}',
constraint=self.max_length)
def _validate_items(self, value):
errors = {}
for index, item in enumerate(value):
try:
self.item_field.validate(item)
except ValidationError as e:
errors[index] = e
if errors:
raise ValidationError(errors)
def to_mongo(self, value):
if value is None:
return None
return [self.item_field.to_mongo(item) for item in value]
def from_mongo(self, value):
if value is None:
return None
return [self.item_field.from_mongo(item) for item in value]
def from_data(self, value):
        # if the value is None or not a list, return it as is
if value is None or not isinstance(value, list):
return value
return [self.item_field.from_data(item) for item in value]
class RefField(CompoundField):
"""Reference field."""
def __init__(self, document_class, **kwargs):
"""Create Reference field.
Args:
document_class: A subclass of the ``aiomongodel.Document`` class
or string with absolute path to such class.
**kwargs: Other arguments from ``Field``.
"""
Document = import_class('aiomongodel.Document')
super().__init__(document_class, Document, **kwargs)
self.validators = [self._validate_none, self._validate_ref]
def _validate_ref(self, value):
        # the ref value may be a document instance rather than a raw _id
_id = value._id if isinstance(value, self.document_class) else value
self.document_class._id.validate(_id)
def to_mongo(self, value):
if isinstance(value, self.document_class):
return self.document_class._id.to_mongo(value._id)
return self.document_class._id.to_mongo(value)
def from_mongo(self, value):
return self.document_class._id.from_mongo(value)
def from_data(self, value):
if isinstance(value, self.document_class):
return value
return self.document_class._id.from_data(value)
class EmailField(StrField):
"""Email field."""
EMAIL_REGEX = re.compile(
r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$')
def __init__(self, *, regex=EMAIL_REGEX, **kwargs):
"""Create Email field.
Args:
regex (str, re.regex): Pattern for email address.
**kwargs: Other arguments from ``Field`` and ``StrField``.
"""
super().__init__(regex=regex, **kwargs)
def _validate_regex(self, value):
try:
super()._validate_regex(value)
except ValidationError:
raise ValidationError('value is not a valid email address')
class DecimalField(NumberField):
"""Decimal number field.
This field can be used only with MongoDB 3.4+.
"""
def __init__(self, **kwargs):
"""Create Decimal field."""
super().__init__(field_type=Decimal, **kwargs)
def to_mongo(self, value):
if value is None:
return None
return Decimal128(value)
def from_mongo(self, value):
if value is None:
return None
if not isinstance(value, Decimal128):
value = Decimal128(str(value))
return value.to_decimal()
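# Round-trip example for DecimalField (illustrative): to_mongo(Decimal('9.99'))
# returns bson Decimal128('9.99'); from_mongo converts it back to Decimal('9.99'),
# first wrapping plain floats or strings coming from the driver in Decimal128.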
class SynonymField(object):
"""Create synonym name for real field."""
def __init__(self, original_field):
"""Create synonym for real document's field.
Args:
original_field: Field instance or string name of field.
Example:
.. code-block:: python
class Doc(Document):
_id = StrField()
name = SynonymField(_id)
class OtherDoc(Document):
                # _id field will be added automatically.
obj_id = SynonymField('_id')
"""
self._original_field = original_field
def __get__(self, instance, instance_type):
if not instance:
return instance_type.meta.fields[self.original_field_name]
return getattr(instance, self.original_field_name)
def __set__(self, instance, value):
setattr(instance, self.original_field_name, value)
@property
def original_field_name(self):
try:
return self._original_field.name
except AttributeError: # original field is a string name of the field
return self._original_field
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix interactive shell
Provides a console interface for the Pelix shell, based on readline when
available.
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import argparse
import logging
import os
import shlex
import sys
import threading
# Initialization file handler
from pelix.misc.init_handler import InitFileHandler, remove_duplicates
# Shell constants
from pelix.constants import BundleActivator
from pelix.shell import SERVICE_SHELL
from pelix.shell.beans import IOHandler, ShellSession, safe_input
import pelix.framework as pelix
# Shell completion
from pelix.shell.completion.core import completion_hints
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
PROP_INIT_FILE = "pelix.shell.console.init_file"
""" Shell script to execute before starting the console """
PROP_RUN_FILE = "pelix.shell.console.script_file"
""" Script to run as shell input """
# ------------------------------------------------------------------------------
try:
# Set up readline if available
import readline
readline.parse_and_bind("tab: complete")
readline.set_completer(None)
except ImportError:
# Readline is missing, not critical
readline = None
# ------------------------------------------------------------------------------
class InteractiveShell(object):
"""
The interactive shell handler
"""
def __init__(self, context):
"""
Sets up the members
:param context: The bundle context
"""
self._context = context # type: pelix.BundleContext
self._shell_ref = None # type: pelix.ServiceReference
self._shell = None
# Single session
self.__session = ShellSession(IOHandler(sys.stdin, sys.stdout), {})
# Read line cache
self._readline_matches = []
# Rendez-vous events
self._lock = threading.RLock()
self._stop_event = threading.Event()
self._shell_event = threading.Event()
# Try to find a shell service
self.search_shell()
# Register as a service listener
self._context.add_service_listener(self, None, SERVICE_SHELL)
def __get_ps1(self):
"""
Gets the prompt string from the session of the shell service
:return: The prompt string
"""
try:
return self.__session.get("PS1")
except KeyError:
return self._shell.get_ps1()
def _readline_prompt(self):
"""
Prompt using the readline module (no pre-flush)
:return: The command line
"""
sys.stdout.flush()
return safe_input(self.__get_ps1())
def _normal_prompt(self):
"""
Flushes the prompt before requesting the input
:return: The command line
"""
sys.stdout.write(self.__get_ps1())
sys.stdout.flush()
return safe_input()
def loop_input(self, on_quit=None):
"""
Reads the standard input until the shell session is stopped
:param on_quit: A call back method, called without argument when the
shell session has ended
"""
# Start the init script
self._run_script(
self.__session, self._context.get_property(PROP_INIT_FILE)
)
# Run the script
script_file = self._context.get_property(PROP_RUN_FILE)
if script_file:
self._run_script(self.__session, script_file)
else:
# No script: run the main loop (blocking)
self._run_loop(self.__session)
# Nothing more to do
self._stop_event.set()
sys.stdout.write("Bye !\n")
sys.stdout.flush()
if on_quit is not None:
# Call a handler if needed
on_quit()
def _run_script(self, session, file_path):
"""
Runs the given script file
:param session: Current shell session
:param file_path: Path to the file to execute
:return: True if a file has been executed
"""
if file_path:
# The 'run' command returns False in case of error
# The 'execute' method returns False if the run command fails
return self._shell.execute('run "{0}"'.format(file_path), session)
return None
def _run_loop(self, session):
"""
Runs the main input loop
:param session: Current shell session
"""
try:
first_prompt = True
# Set up the prompt
prompt = (
self._readline_prompt
if readline is not None
else self._normal_prompt
)
while not self._stop_event.is_set():
# Wait for the shell to be there
# Before Python 2.7, wait() doesn't return a result
if self._shell_event.wait(.2) or self._shell_event.is_set():
# Shell present
if first_prompt:
# Show the banner on first prompt
sys.stdout.write(self._shell.get_banner())
first_prompt = False
# Read the next line
line = prompt()
with self._lock:
if self._shell_event.is_set():
# Execute it
self._shell.execute(line, session)
elif not self._stop_event.is_set():
# Shell service lost while not stopping
sys.stdout.write("Shell service lost.")
sys.stdout.flush()
except (EOFError, KeyboardInterrupt, SystemExit):
# Input closed or keyboard interruption
pass
def readline_completer(self, text, state):
"""
A completer for the readline library
"""
if state == 0:
# New completion, reset the list of matches and the display hook
self._readline_matches = []
try:
readline.set_completion_display_matches_hook(None)
except AttributeError:
pass
# Get the full line
full_line = readline.get_line_buffer()
begin_idx = readline.get_begidx()
# Parse arguments as best as we can
try:
arguments = shlex.split(full_line)
except ValueError:
arguments = full_line.split()
# Extract the command (maybe with its namespace)
command = arguments.pop(0)
if begin_idx > 0:
# We're completing after the command (and maybe some args)
try:
# Find the command
ns, command = self._shell.get_ns_command(command)
except ValueError:
# Ambiguous command: ignore
return None
# Use the completer associated to the command, if any
try:
configuration = self._shell.get_command_completers(
ns, command
)
if configuration is not None:
self._readline_matches = completion_hints(
configuration,
self.__get_ps1(),
self.__session,
self._context,
text,
arguments,
)
except KeyError:
# Unknown command
pass
elif "." in command:
# Completing the command, and a name space is given
namespace, prefix = text.split(".", 1)
commands = self._shell.get_commands(namespace)
# Filter methods according to the prefix
self._readline_matches = [
"{0}.{1}".format(namespace, command)
for command in commands
if command.startswith(prefix)
]
else:
# Completing a command or namespace
prefix = command
# Default commands go first...
possibilities = [
"{0} ".format(command)
for command in self._shell.get_commands(None)
if command.startswith(prefix)
]
# ... then name spaces
namespaces = self._shell.get_namespaces()
possibilities.extend(
"{0}.".format(namespace)
for namespace in namespaces
if namespace.startswith(prefix)
)
# ... then commands in those name spaces
possibilities.extend(
"{0} ".format(command)
for namespace in namespaces
if namespace is not None
for command in self._shell.get_commands(namespace)
if command.startswith(prefix)
)
# Filter methods according to the prefix
self._readline_matches = possibilities
if not self._readline_matches:
return None
# Return the first possibility
return self._readline_matches[0]
elif state < len(self._readline_matches):
# Next try
return self._readline_matches[state]
return None
def search_shell(self):
"""
Looks for a shell service
"""
with self._lock:
if self._shell is not None:
# A shell is already there
return
reference = self._context.get_service_reference(SERVICE_SHELL)
if reference is not None:
self.set_shell(reference)
def service_changed(self, event):
"""
Called by Pelix when a shell service event occurs
"""
kind = event.get_kind()
reference = event.get_service_reference()
if kind in (pelix.ServiceEvent.REGISTERED, pelix.ServiceEvent.MODIFIED):
# A service matches our filter
self.set_shell(reference)
else:
with self._lock:
# Service is not matching our filter anymore
self.clear_shell()
# Request for a new binding
self.search_shell()
def set_shell(self, svc_ref):
"""
Binds the given shell service.
:param svc_ref: A service reference
"""
if svc_ref is None:
return
with self._lock:
# Get the service
self._shell_ref = svc_ref
self._shell = self._context.get_service(self._shell_ref)
# Set the readline completer
if readline is not None:
readline.set_completer(self.readline_completer)
# Set the flag
self._shell_event.set()
def clear_shell(self):
"""
Unbinds the active shell service
"""
with self._lock:
# Clear the flag
self._shell_event.clear()
# Clear the readline completer
if readline is not None:
readline.set_completer(None)
del self._readline_matches[:]
if self._shell_ref is not None:
# Release the service
self._context.unget_service(self._shell_ref)
self._shell_ref = None
self._shell = None
def stop(self):
"""
Clears all members
"""
# Exit the loop
with self._lock:
self._stop_event.set()
self._shell_event.clear()
if self._context is not None:
# Unregister from events
self._context.remove_service_listener(self)
# Release the shell
self.clear_shell()
self._context = None
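# A standalone sketch of the readline completer protocol used by
# InteractiveShell.readline_completer above: readline calls the completer
# repeatedly with an increasing ``state`` index until it returns None.
def _completer_protocol_demo():
    matches = []

    def completer(text, state):
        if state == 0:
            matches[:] = [w for w in ("start", "status", "stop") if w.startswith(text)]
        return matches[state] if state < len(matches) else None

    assert completer("st", 0) == "start"
    assert completer("st", 1) == "status"
    assert completer("st", 3) is None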
# ------------------------------------------------------------------------------
@BundleActivator
class Activator(object):
"""
The bundle activator
"""
def __init__(self):
"""
Sets up the members
"""
self._context = None
self._shell = None
self._thread = None
def start(self, context):
"""
Bundle started
"""
self._context = context
self._shell = InteractiveShell(context)
# Run the loop thread
self._thread = threading.Thread(
target=self._shell.loop_input,
args=[self._quit],
name="Pelix-Shell-TextConsole",
)
# Set the thread as a daemon, to let it be killed by the interpreter
# once all other threads stopped.
self._thread.daemon = True
self._thread.start()
def stop(self, _):
"""
Bundle stopped
"""
self._cleanup()
self._context = None
def _quit(self):
"""
Called when the shell session has ended
"""
# Clean up members
self._cleanup()
# Stop the framework
if self._context is not None:
self._context.get_framework().stop()
def _cleanup(self):
"""
Cleans up the members
"""
if self._shell is not None:
# Stop the shell
self._shell.stop()
self._thread = None
self._shell = None
# ------------------------------------------------------------------------------
def _resolve_file(file_name):
"""
Checks if the file exists.
If the file exists, the method returns its absolute path.
Else, it returns None
:param file_name: The name of the file to check
:return: An absolute path, or None
"""
if not file_name:
return None
path = os.path.realpath(file_name)
if os.path.isfile(path):
return path
return None
def make_common_parser():
"""
Creates an argument parser (argparse module) with the options that should
be common to all shells.
The result can be used as a parent parser (``parents`` argument in
``argparse.ArgumentParser``)
:return: An ArgumentParser object
"""
parser = argparse.ArgumentParser(add_help=False)
# Version number
parser.add_argument(
"--version",
action="version",
version="Pelix {0} from {1}".format(pelix.__version__, pelix.__file__),
)
# Framework options
group = parser.add_argument_group("Framework options")
group.add_argument(
"-D",
nargs="+",
dest="properties",
metavar="KEY=VALUE",
help="Sets framework properties",
)
group.add_argument(
"-v",
"--verbose",
action="store_true",
help="Set loggers to DEBUG level",
)
# Initial configuration
group = parser.add_argument_group("Initial configuration")
group.add_argument(
"-c",
"--conf",
dest="init_conf",
metavar="FILE",
help="Name of an initial configuration file to use "
"(default configuration is also loaded)",
)
group.add_argument(
"-C",
"--exclusive-conf",
dest="init_conf_exclusive",
metavar="FILE",
help="Name of an initial configuration file to use "
"(without the default configuration)",
)
group.add_argument(
"-e",
"--empty-conf",
dest="init_empty",
action="store_true",
help="Don't load any initial configuration",
)
# Initial script
group = parser.add_argument_group("Script execution arguments")
group.add_argument(
"--init",
action="store",
dest="init_script",
metavar="SCRIPT",
help="Runs the given shell script before starting the console",
)
group.add_argument(
"--run",
action="store",
dest="run_script",
metavar="SCRIPT",
help="Runs the given shell script then stops the framework",
)
return parser
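# A minimal sketch of the "parent parser" usage described in the docstring
# above: a custom console script reuses the common options and adds its own
# (the "--banner" option here is purely hypothetical).
def _parent_parser_demo(argv=None):
    parser = argparse.ArgumentParser(
        prog="my.custom.shell", parents=[make_common_parser()]
    )
    parser.add_argument("--banner", default="Welcome", help="Hypothetical extra option")
    return parser.parse_args(argv)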
def handle_common_arguments(parsed_args):
"""
Handles the arguments defined by :meth:`~make_common_parser`
:param parsed_args: Argument parsed with ``argparse`` (``Namespace``)
:return: An :class:`~InitFileHandler` object
:raise IOError: Initial or run script not found
"""
# Setup the logger
logging.basicConfig(
level=logging.DEBUG if parsed_args.verbose else logging.WARNING
)
# Framework properties dictionary
props = {}
# Read the initial configuration script
init = InitFileHandler()
if not parsed_args.init_empty:
if not parsed_args.init_conf_exclusive:
# Load default configuration
init.load()
# Load the given configuration file
conf_file = parsed_args.init_conf_exclusive or parsed_args.init_conf
if conf_file:
init.load(conf_file)
# Normalize configuration
init.normalize()
# Set initial framework properties
props.update(init.properties)
# Compute framework properties
for prop_def in parsed_args.properties or []:
key, value = prop_def.split("=", 1)
props[key] = value
# Check initial run script(s)
if parsed_args.init_script:
path = props[PROP_INIT_FILE] = _resolve_file(parsed_args.init_script)
if not path:
raise IOError(
"Initial script file not found: {0}".format(
parsed_args.init_script
)
)
if parsed_args.run_script:
# Find the file
path = props[PROP_RUN_FILE] = _resolve_file(parsed_args.run_script)
if not path:
raise IOError(
"Script file not found: {0}".format(parsed_args.run_script)
)
# Update the stored configuration
init.properties.update(props)
return init
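# A small sketch of how the "-D KEY=VALUE" properties above are parsed: only
# the first "=" separates the key from the value, so values may themselves
# contain "=" (the property names used here are made up).
def _property_parsing_demo():
    props = {}
    for prop_def in ["osgi.shell.port=9000", "greeting=a=b"]:
        key, value = prop_def.split("=", 1)
        props[key] = value
    assert props == {"osgi.shell.port": "9000", "greeting": "a=b"}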
def main(argv=None):
"""
Entry point
:param argv: Script arguments (None for sys.argv)
:return: An exit code or None
"""
# Parse arguments
parser = argparse.ArgumentParser(
prog="pelix.shell.console",
parents=[make_common_parser()],
description="Pelix Shell Console",
)
# Parse arguments
args = parser.parse_args(argv)
# Handle arguments
init = handle_common_arguments(args)
# Set the initial bundles
bundles = [
"pelix.ipopo.core",
"pelix.shell.core",
"pelix.shell.ipopo",
"pelix.shell.completion.pelix",
"pelix.shell.completion.ipopo",
"pelix.shell.console",
]
bundles.extend(init.bundles)
# Use the utility method to create, run and delete the framework
framework = pelix.create_framework(
remove_duplicates(bundles), init.properties
)
framework.start()
# Instantiate components
init.instantiate_components(framework.get_bundle_context())
try:
framework.wait_for_stop()
except KeyboardInterrupt:
framework.stop()
if __name__ == "__main__":
# Run the entry point
sys.exit(main() or 0)
|
|
import collections
import struct
from ..util import (slicebyn, boolean_bitarray_tuple, lazy_property,
CachedSequence)
from ..exceptions import MalformedDataError, NotRecordedError
class SongData(CachedSequence):
"""
Container for all the useful data in a song section of a bulk dump.
Element access provides the usersongs (0-based indexing).
"""
__slots__ = ('data', '_mystery', '_block_system')
SONGS_OFFSET = 0x00
MYSTERY_SLICE = slice(0x01, 0x15D)
TRACKS_SLICE = slice(0x15D, 0x167)
SONG_DURATION_SLICE = slice(0x167, 0x17B)
TRACK_DURATION_SLICE = slice(0x17B, 0x1F3)
PRESETSTYLES_SLICE = slice(0x1F3, 0x22F)
BEGINNING_BLOCKS_SLICE = slice(0x22F, 0x24D)
NEXT_BLOCKS_SLICE = slice(0x24D, 0x2CF)
START_MARKER_SLICE = slice(0x2CF, 0x2D5)
BLOCK_DATA_SLICE = slice(0x2D5, 0x106D5)
END_MARKER_SLICE = slice(0x106D5, None)
EXPECTED_SIZE = 0x106DB
PRESETSTYLE = b'PresetStyle\0'
MARKER = b'PK0001'
def _message_format_checks(self):
if len(self.data) != self.EXPECTED_SIZE:
raise MalformedDataError("Data wrong length!")
startmarker = self.data[self.START_MARKER_SLICE]
endmarker = self.data[self.END_MARKER_SLICE]
if not (startmarker == endmarker == self.MARKER):
raise MalformedDataError("Invalid format: markers not present")
# PresetStyle checks. Not present when not recorded, so it should
# probably be moved elsewhere instead of the message format checks.
# presetstyles = self.data[self.PRESETSTYLES_SLICE]
# for presetstyle in slicebyn(presetstyles, len(self.PRESETSTYLE)):
# if presetstyle != self.PRESETSTYLE:
# raise MalformedDataError("Invalid format: presetstyles")
if (self.data[self.SONGS_OFFSET] >= (1 << 5)
or any(x >= (1 << 6) for x in self.data[self.TRACKS_SLICE])):
raise MalformedDataError("Unexpected high bits in the fields")
def __init__(self, data):
"""
data = the concatenated payload data.
songs are available through the songs attribute.
"""
self.data = data
self._message_format_checks()
self._mystery = self.data[self.MYSTERY_SLICE]
self._block_system = SongDataBlockSystem(
data[self.NEXT_BLOCKS_SLICE], data[self.BLOCK_DATA_SLICE])
def make_song(idx,
block_system=self._block_system,
song_field=boolean_bitarray_tuple(
data[self.SONGS_OFFSET], 5),
song_durations=struct.unpack(
'>5I', data[self.SONG_DURATION_SLICE]),
track_fields=[boolean_bitarray_tuple(x, 6)
for x in data[self.TRACKS_SLICE]],
track_durations=list(slicebyn(struct.unpack(
'>30I', data[self.TRACK_DURATION_SLICE]), 6)),
track_beginning_blocks=list(slicebyn(
data[self.BEGINNING_BLOCKS_SLICE], 6))):
"""
Create UserSong object. Note that we use zero based indexing, so
UserSong1 corresponds to [0] and so on.
"""
return UserSong(
block_system, idx+1,
song_field[idx], song_durations[idx],
track_fields[idx], track_durations[idx],
track_beginning_blocks[idx])
super().__init__(5, make_song)
def get_song(self, number):
"""
Get a song by number, using 1-based indexing (User Song 1 is number 1).
"""
if not (1 <= number <= 5):
raise ValueError("song number out of range")
return self[number-1]
# cereal!
def _cereal(self):
return [song._cereal() for song in self]
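# A quick consistency check of the layout constants above: the block-data
# region holds exactly 0x82 blocks of 0x200 bytes each (the BLOCK_COUNT and
# BLOCK_SIZE of SongDataBlockSystem below), and the end marker sits right
# after it.
def _songdata_layout_demo():
    region = SongData.BLOCK_DATA_SLICE
    assert region.stop - region.start == 0x82 * 0x200
    assert SongData.EXPECTED_SIZE == region.stop + len(SongData.MARKER)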
class SongDataBlockSystem(object):
"""
Helper object for getting tracks out of the 'blocksystem'.
"""
BLOCK_COUNT = 0x82
BLOCK_SIZE = 0x200
def __init__(self, next_blocks_table, block_data):
self._next_blocks_table = next_blocks_table
self._block_data = block_data
def get_block_data(self, n):
"""
Returns the specified block data of block n
"""
if 1 <= n <= self.BLOCK_COUNT:
end = self.BLOCK_SIZE * n
start = end - self.BLOCK_SIZE
return self._block_data[start:end]
else:
raise IndexError("Invalid index: {}".format(n))
def get_next_block_number(self, n):
"""
Returns the number of the block following block n
"""
if n < 1:
raise IndexError("Invalid index: {}".format(n))
return self._next_blocks_table[n-1]
def _block_data_iter(self, start_block, length):
"""
Yields successive data blocks starting at start_block, truncating the last
block so that exactly `length` bytes are yielded in total
"""
num = start_block
rem = length
while rem > 0:
if num == 0xFF:
raise MalformedDataError("ran out too early")
elif num == 0x00:
raise MalformedDataError("referenced empty block")
block = self.get_block_data(num)
if rem < self.BLOCK_SIZE:
block = block[:rem]
# We don't want to read too much, so chop off the end
rem -= len(block)
num = self.get_next_block_number(num)
yield block
def get_track_blocks(self, start_block):
"""
Gets a track chunk's size and blocks from its starting block number.
Raises MalformedDataError if the chunk is invalid (bad starting block,
missing 'MTrk' header, or a broken block chain).
returns (size, blocks), where:
size is the total number of bytes in the chunk (including header)
blocks is a list of the blocks (as memoryviews, with the last one
truncated appropriately for the chunk size)
"""
# First, we need to check if this is actually a block.
try:
block = self.get_block_data(start_block)
except IndexError:
raise MalformedDataError("Invalid starting block")
# Then, we see if the block indeed contains the start of a track:
# b'MTrk' followed by 32 bit integer, the length remaining
tag, dlength = struct.unpack_from('>4sL', block, 0)
if tag != b'MTrk':
raise MalformedDataError("Chunk start not found")
# The start of the track chunk tells us its remaining length
# i.e. not including the 8-byte header.
# so, we need to add 8 to get the total size of the chunk
size = dlength + 8
blocks = list(self._block_data_iter(start_block, size))
return size, blocks
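# A hedged illustration of the chunk-size arithmetic in get_track_blocks: the
# 'MTrk' header stores the number of bytes that follow the 8-byte header, so
# the whole chunk occupies that length plus 8 (the track body size used here
# is made up).
def _mtrk_header_demo():
    header = struct.pack('>4sL', b'MTrk', 500)
    tag, dlength = struct.unpack_from('>4sL', header, 0)
    assert tag == b'MTrk'
    assert dlength + 8 == 508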
class UserSong(object):
"""
Represents one UserSong and associated data and metadata
"""
UserSongTrack = collections.namedtuple(
"UserSongTrack", "track name active duration size blocks")
def __init__(self, block_system, number, active, duration,
tracks_active, tracks_duration, start_blocks):
self.number = number
self.active = active
self.duration = duration
self.name = "User Song {}".format(number)
# create the tracks.
self._tracks = []
# transpose the last track to first so that
# index 0 = time/chord track A, index 1 = track 1 etc
# transposing the tracks like that makes the indices match up
# and also puts the time track (A) first, which is helpful for
# reconstructing MIDI files.
TRACK_NAMES = ('Track 1', 'Track 2', 'Track 3',
'Track 4', 'Track 5', 'Track A')
for i in range(-1, 5):
start_block = start_blocks[i]
if start_block == 0xFF:
size = 0
blocks = None
else:
size, blocks = block_system.get_track_blocks(start_block)
track = self.UserSongTrack(i+1, TRACK_NAMES[i],
tracks_active[i], tracks_duration[i],
size, blocks)
self._tracks.append(track)
# create a list of the tracks that are actually recorded
self._datatracks = [track for track in self._tracks
if track.blocks is not None]
# Calculate the size of the resulting smf file
# i.e. total track sizes plus 14 byte MThd chunk
if self._datatracks:
self.size = 14 + sum(track.size for track in self._datatracks)
else:
self.size = 0
def print_info(self):
"""
Prints the recorded (active) status, duration (in measures),
and size (in bytes) for the song overall and each track within, in a
table.
Note that Track A can still have data even if not recorded,
as the track is also used as the time track for the whole song.
"""
columns = "{:>12} {!s:>10} {:>10} {:>10}".format
print(columns("", "Recorded", "Duration", "Size"))
for item in (self, *self._tracks):
print(columns(item.name, item.active, item.duration, item.size))
def _midi_blocks_iter(self):
# yield the blocks for a Type 1 smf MIDI file.
if not self._datatracks:
raise NotRecordedError("Song not recorded")
# Construct the header:
# chunk length 6, MIDI file format 1, 96 ticks per quarter note.
header = struct.pack('>4sL3H',
b'MThd', 6, 1, len(self._datatracks), 96)
# yield header then all the blocks for all the tracks.
# This is why Track A went first.
yield header
for track in self._datatracks:
yield from track.blocks
@lazy_property
def midi(self):
"""
The MIDI file, as bytes.
"""
return b''.join(self._midi_blocks_iter())
def _cereal(self):
return collections.OrderedDict([
('number', self.number),
('name', self.name),
('active', self.active),
('duration', self.duration),
('size', self.size),
('tracks', [collections.OrderedDict([
('track', track.track),
('name', track.name),
('active', track.active),
('duration', track.duration),
('size', track.size)
]) for track in self._tracks])
])
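# A sketch of the Type 1 SMF header built by UserSong._midi_blocks_iter above:
# 'MThd', chunk length 6, format 1, track count, 96 ticks per quarter note.
# Its 14-byte length is exactly the constant added to UserSong.size.
def _mthd_header_demo(n_tracks=3):
    header = struct.pack('>4sL3H', b'MThd', 6, 1, n_tracks, 96)
    assert len(header) == 14
    return header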
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry.primitives import Primitive
from compas.geometry.primitives import Point
class Line(Primitive):
"""A line is defined by two points.
Parameters
----------
p1 : [float, float, float] | :class:`~compas.geometry.Point`
The first point.
p2 : [float, float, float] | :class:`~compas.geometry.Point`
The second point.
Attributes
----------
start : :class:`~compas.geometry.Point`
The start point of the line.
end : :class:`~compas.geometry.Point`
The end point of the line.
vector : :class:`~compas.geometry.Vector`, read-only
A vector pointing from start to end.
length : float, read-only
The length of the vector from start to end.
direction : :class:`~compas.geometry.Vector`, read-only
A unit vector pointing from start to end.
midpoint : :class:`~compas.geometry.Point`, read-only
The midpoint between start and end.
Examples
--------
>>> line = Line([0, 0, 0], [1, 1, 1])
>>> line
Line(Point(0.000, 0.000, 0.000), Point(1.000, 1.000, 1.000))
>>> line.start
Point(0.000, 0.000, 0.000)
>>> line.midpoint
Point(0.500, 0.500, 0.500)
>>> line.length == math.sqrt(1 + 1 + 1)
True
>>> line.direction
Vector(0.577, 0.577, 0.577)
"""
__slots__ = ['_start', '_end']
def __init__(self, p1, p2, **kwargs):
super(Line, self).__init__(**kwargs)
self._start = None
self._end = None
self.start = p1
self.end = p2
# ==========================================================================
# data
# ==========================================================================
@property
def DATASCHEMA(self):
""":class:`schema.Schema` : Schema of the data representation."""
from schema import Schema
return Schema({
'start': Point.DATASCHEMA.fget(None),
'end': Point.DATASCHEMA.fget(None)
})
@property
def JSONSCHEMANAME(self):
"""str : Name of the schema of the data representation in JSON format."""
return 'line'
@property
def data(self):
"""dict : The data dictionary that represents the line."""
return {'start': self.start.data, 'end': self.end.data}
@data.setter
def data(self, data):
self.start = Point.from_data(data['start'])
self.end = Point.from_data(data['end'])
@classmethod
def from_data(cls, data):
"""Construct a frame from a data dict.
Parameters
----------
data : dict
The data dictionary.
Examples
--------
>>> line = Line.from_data({'start': [0.0, 0.0, 0.0], 'end': [1.0, 0.0, 0.0]})
>>> line.end
Point(1.000, 0.000, 0.000)
"""
return cls(Point.from_data(data['start']), Point.from_data(data['end']))
# ==========================================================================
# properties
# ==========================================================================
@property
def start(self):
return self._start
@start.setter
def start(self, point):
self._start = Point(*point)
@property
def end(self):
return self._end
@end.setter
def end(self, point):
self._end = Point(*point)
@property
def vector(self):
return self.end - self.start
@property
def length(self):
return self.vector.length
@property
def direction(self):
return self.vector * (1 / self.length)
@property
def midpoint(self):
v = self.direction * (0.5 * self.length)
return self.start + v
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Line({0!r}, {1!r})'.format(self.start, self.end)
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.start
if key == 1:
return self.end
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.start = value
return
if key == 1:
self.end = value
return
raise KeyError
def __iter__(self):
return iter([self.start, self.end])
def __eq__(self, other):
try:
other_start = other[0]
other_end = other[1]
except: # noqa: E722
return False
return self.start == other_start and self.end == other_end
# ==========================================================================
# constructors
# ==========================================================================
# ==========================================================================
# static
# ==========================================================================
@staticmethod
def transform_collection(collection, X):
"""Transform a collection of Line objects.
Parameters
----------
collection : list[[point, point] | :class:`~compas.geometry.Line`]
    The collection of lines.
X : :class:`~compas.geometry.Transformation` | list[list[float]]
    The transformation.
Returns
-------
None
The lines are modified in-place.
Examples
--------
>>> from math import radians
>>> from compas.geometry import Point
>>> from compas.geometry import Vector
>>> from compas.geometry import Rotation
>>> R = Rotation.from_axis_and_angle(Vector.Zaxis(), radians(90))
>>> a = Line(Point(0.0, 0.0, 0.0), Point(1.0, 0.0, 0.0))
>>> lines = [a]
>>> Line.transform_collection(lines, R)
>>> b = lines[0]
>>> b.end
Point(0.000, 1.000, 0.000)
>>> a is b
True
"""
points = [line.start for line in collection] + [line.end for line in collection]
Point.transform_collection(points, X)
@staticmethod
def transformed_collection(collection, X):
"""Create a collection of transformed Line objects.
Parameters
----------
collection : list[[point, point] | :class:`~compas.geometry.Line`]
    The collection of lines.
X : :class:`~compas.geometry.Transformation` | list[list[float]]
    The transformation.
Returns
-------
list[:class:`~compas.geometry.Line`]
The transformed lines.
Examples
--------
>>> from math import radians
>>> from compas.geometry import Vector
>>> from compas.geometry import Point
>>> from compas.geometry import Rotation
>>> R = Rotation.from_axis_and_angle(Vector.Zaxis(), radians(90))
>>> a = Line(Point(0.0, 0.0, 0.0), Point(1.0, 0.0, 0.0))
>>> lines = [a]
>>> lines = Line.transformed_collection(lines, R)
>>> b = lines[0]
>>> b.end
Point(0.000, 1.000, 0.000)
>>> a is b
False
"""
lines = [line.copy() for line in collection]
Line.transform_collection(lines, X)
return lines
# ==========================================================================
# methods
# ==========================================================================
def point(self, t):
"""A point between start and end at a specific normalized parameter.
Parameters
----------
t : float
The line parameter.
Returns
-------
:class:`~compas.geometry.Point`
A point on the line.
Examples
--------
>>> line = Line([0.0, 0.0, 0.0], [1.0, 0.0, 0.0])
>>> line.point(0.5)
Point(0.500, 0.000, 0.000)
"""
if t == 0:
return self.start
if t == 1:
return self.end
v = self.direction * (t * self.length)
return self.start + v
def transform(self, T):
"""Transform this line.
Parameters
----------
T : :class:`~compas.geometry.Transformation` | list[list[float]]
The transformation.
Returns
-------
None
Examples
--------
>>> from math import radians
>>> from compas.geometry import Rotation
>>> line = Line([0.0, 0.0, 0.0], [1.0, 0.0, 0.0])
>>> R = Rotation.from_axis_and_angle([0.0, 0.0, 1.0], radians(90))
>>> line.transform(R)
>>> line.end
Point(0.000, 1.000, 0.000)
"""
self.start.transform(T)
self.end.transform(T)
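# A small usage sketch based on the methods above: ``point(t)`` is a plain
# linear parameterization along ``direction``, so values of t outside [0, 1]
# extrapolate past the endpoints.
def _line_parameter_demo():
    line = Line([0.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    assert line.point(0.5) == Point(0.5, 0.0, 0.0)
    assert line.point(2.0) == Point(2.0, 0.0, 0.0)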
|